hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c5cf24e47e050b5dda249a8edab2d8988fc41928 | 954 | py | Python | setup.py | walwe/autolabel | 712400d38f80ea496bab5a8a963ffef6ebf8daa9 | [
"MIT"
] | 1 | 2020-04-04T17:20:38.000Z | 2020-04-04T17:20:38.000Z | setup.py | walwe/autolabel | 712400d38f80ea496bab5a8a963ffef6ebf8daa9 | [
"MIT"
] | null | null | null | setup.py | walwe/autolabel | 712400d38f80ea496bab5a8a963ffef6ebf8daa9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from pkg_resources import get_distribution
from setuptools import setup, find_packages
# PyPI long description is taken verbatim from the README.
with open("README.md", "r") as f:
    long_description = f.read()
# NOTE(review): get_distribution() reads *installed* package metadata, so this
# only works when "autolabel" is already installed (e.g. an editable install);
# a fresh `pip install .` from a clean checkout may fail here — confirm.
version = get_distribution("autolabel").version
setup(
    packages=find_packages(),
    install_requires=[
        'click',
        'more-itertools',
        'torchvision',
        'torch',
        'pillow',
        'numpy'
    ],
    # console entry point: `autolabel` runs autolabel.cli:main
    entry_points='''
        [console_scripts]
        autolabel=autolabel.cli:main
    ''',
    url='https://github.com/walwe/autolabel',
    version=version,
    author='walwe',
    python_requires='>=3.6',
    description='Autolabel is an image labeling tool using Neural Network',
    long_description_content_type="text/markdown",
    long_description=long_description,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ]
)
| 25.783784 | 75 | 0.638365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 414 | 0.433962 |
c5cf38192c894cd50c8e95291a53bb4a08d2bc9f | 334 | py | Python | desafio_005_antecessor_e_sucessor.py | VagnerGit/PythonCursoEmVideo | 3e80e12fbf21f5be08c554d77fa9073dc0a3145f | [
"MIT"
] | null | null | null | desafio_005_antecessor_e_sucessor.py | VagnerGit/PythonCursoEmVideo | 3e80e12fbf21f5be08c554d77fa9073dc0a3145f | [
"MIT"
] | null | null | null | desafio_005_antecessor_e_sucessor.py | VagnerGit/PythonCursoEmVideo | 3e80e12fbf21f5be08c554d77fa9073dc0a3145f | [
"MIT"
] | null | null | null | """
Exercício Python 5:
Faça um programa que leia um número Inteiro e
mostre na tela o seu sucessor e seu antecessor.
"""
# Read an integer and print its predecessor and successor.
n = int(input('digite um numero inteiro '))
# Earlier draft using intermediate variables, kept commented out:
#ant = n-1
#post = n+1
#print('O antecessor de {} é {} e posterior é {}' .format(n, ant, post))
print('{} o antercessor é {} o sucessor é {}'.format(n, (n-1), (n+1)))
| 27.833333 | 72 | 0.643713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.841642 |
c5d26f124d7803a5d0483030310d5f3218904ee3 | 20 | py | Python | checkov/version.py | jmeredith16/checkov | 91dc9e970609c7ce53e325b8b70fec788dc12c96 | [
"Apache-2.0"
] | null | null | null | checkov/version.py | jmeredith16/checkov | 91dc9e970609c7ce53e325b8b70fec788dc12c96 | [
"Apache-2.0"
] | null | null | null | checkov/version.py | jmeredith16/checkov | 91dc9e970609c7ce53e325b8b70fec788dc12c96 | [
"Apache-2.0"
] | null | null | null | version = '2.0.706'
| 10 | 19 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.45 |
c5d2baa0bf3dd4ddad9c92f38934e077ced0d319 | 4,506 | py | Python | test/meshes.py | jtpils/optimesh | 24a8276235b1f4e86f2fb92cf814bf81e7fdbc48 | [
"MIT"
] | 1 | 2019-11-20T16:50:34.000Z | 2019-11-20T16:50:34.000Z | test/meshes.py | jtpils/optimesh | 24a8276235b1f4e86f2fb92cf814bf81e7fdbc48 | [
"MIT"
] | null | null | null | test/meshes.py | jtpils/optimesh | 24a8276235b1f4e86f2fb92cf814bf81e7fdbc48 | [
"MIT"
] | null | null | null | import os.path
import numpy
from scipy.spatial import Delaunay
import meshio
from meshplex import MeshTri
def simple0():
    """Unit square in 3D (z = 0) with a center node, split into four triangles."""
    #
    # 3___________2
    # |\_   2   _/|
    # |  \_   _/  |
    # | 3   \4/ 1 |
    # |   _/   \_ |
    # | _/       \_|
    # |/     0     |
    # 0-----------1
    #
    corner_and_center = [
        [0.0, 0.0, 0.0],
        [1.0, 0.0, 0.0],
        [1.0, 1.0, 0.0],
        [0.0, 1.0, 0.0],
        [0.5, 0.5, 0.0],
    ]
    X = numpy.array(corner_and_center)
    # Each triangle connects one edge of the square with the center node 4.
    cells = numpy.array([[k, (k + 1) % 4, 4] for k in range(4)])
    return X, cells
def simple1():
    """Unit square in 2D with an off-center interior node at (0.4, 0.5)."""
    corner_and_center = [
        [0.0, 0.0],
        [1.0, 0.0],
        [1.0, 1.0],
        [0.0, 1.0],
        [0.4, 0.5],
    ]
    X = numpy.array(corner_and_center)
    # Fan of four triangles around the interior node 4.
    cells = numpy.array([[k, (k + 1) % 4, 4] for k in range(4)])
    return X, cells
def simple2():
    """Unit square with an interior node plus one node outside the right edge."""
    #
    # 3___________2
    # |\_   3   _/ \_
    # |  \_   _/  2  \_
    # | 4   \4/________\5
    # |   _/   \_     _/
    # | _/       \_ 1_/
    # |/     0     \/
    # 0-----------1
    #
    coords = [
        (0.0, 0.0),
        (1.0, 0.0),
        (1.0, 1.0),
        (0.0, 1.0),
        (0.7, 0.5),
        (1.7, 0.5),
    ]
    connectivity = [
        (0, 1, 4),
        (1, 5, 4),
        (2, 4, 5),
        (2, 3, 4),
        (3, 0, 4),
    ]
    return numpy.array(coords), numpy.array(connectivity)
def simple3():
    """Two unit squares side by side, each with an interior node; 8 triangles."""
    #
    # 5___________4___________3
    # |\_   6   _/ \_   4   _/|
    # |  \_   _/  5  \_   _/  |
    # | 7   \6/________\7/  3 |
    # |   _/   \_     _/ \_   |
    # | _/       \_ 1_/ 2  \_ |
    # |/     0     \/        \|
    # 0-----------1-----------2
    #
    coords = [
        (0.0, 0.0),
        (1.0, 0.0),
        (2.0, 0.0),
        (2.0, 1.0),
        (1.0, 1.0),
        (0.0, 1.0),
        (0.7, 0.5),
        (1.7, 0.5),
    ]
    connectivity = [
        (0, 1, 6),
        (1, 7, 6),
        (1, 2, 7),
        (2, 3, 7),
        (3, 4, 7),
        (4, 6, 7),
        (4, 5, 6),
        (5, 0, 6),
    ]
    return numpy.array(coords), numpy.array(connectivity)
def pacman():
    """Load the 2D 'pacman' test mesh stored next to this module (VTK via meshio)."""
    this_dir = os.path.dirname(os.path.realpath(__file__))
    mesh = meshio.read(os.path.join(this_dir, "meshes", "pacman.vtk"))
    # Only the first two coordinates are used; the third is dropped (2D mesh).
    return mesh.points[:, :2], mesh.cells["triangle"]
def circle_gmsh():
    """Load the gmsh-generated circle test mesh stored next to this module.

    Returns:
        tuple: (points, cells) — 2D node coordinates and the integer
        triangle connectivity array.
    """
    this_dir = os.path.dirname(os.path.realpath(__file__))
    mesh = meshio.read(os.path.join(this_dir, "meshes", "circle-gmsh.vtk"))
    # numpy.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int yields the same platform-default integer dtype.
    c = mesh.cells["triangle"].astype(int)
    return mesh.points[:, :2], c
def circle_random():
    """Random Delaunay triangulation of the unit disk with 40 boundary nodes.

    Returns 3D points (z = 0) and the Delaunay simplices; seeded, so the
    mesh is reproducible across runs.
    """
    n = 40
    radius = 1.0
    k = numpy.arange(n)
    # n equally spaced nodes on the circle.
    boundary_pts = radius * numpy.column_stack(
        [numpy.cos(2 * numpy.pi * k / n), numpy.sin(2 * numpy.pi * k / n)]
    )
    # Compute the number of interior nodes such that all triangles can be somewhat
    # equilateral.
    edge_length = 2 * numpy.pi * radius / n
    # With radius == 1, numpy.pi is the disk area; subtract the n circular
    # segments cut off by the boundary chords to get the polygon area.
    domain_area = numpy.pi - n * (
        radius ** 2 / 2 * (edge_length - numpy.sin(edge_length))
    )
    cell_area = numpy.sqrt(3) / 4 * edge_length ** 2
    target_num_cells = domain_area / cell_area
    # Euler:
    # 2 * num_points - num_boundary_edges - 2 = num_cells
    # <=>
    # num_interior_points ~= 0.5 * (num_cells + num_boundary_edges) + 1 - num_boundary_points
    m = int(0.5 * (target_num_cells + n) + 1 - n)
    # generate random points in circle; <http://mathworld.wolfram.com/DiskPointPicking.html>
    numpy.random.seed(0)  # fixed seed: deterministic mesh for the tests
    r = numpy.random.rand(m)
    alpha = 2 * numpy.pi * numpy.random.rand(m)
    interior_pts = numpy.column_stack(
        [numpy.sqrt(r) * numpy.cos(alpha), numpy.sqrt(r) * numpy.sin(alpha)]
    )
    pts = numpy.concatenate([boundary_pts, interior_pts])
    tri = Delaunay(pts)
    # Embed in 3D (z = 0) as expected by MeshTri below.
    pts = numpy.column_stack([pts[:, 0], pts[:, 1], numpy.zeros(pts.shape[0])])
    # Make sure there are exactly `n` boundary points
    mesh = MeshTri(pts, tri.simplices)
    assert numpy.sum(mesh.is_boundary_node) == n
    return pts, tri.simplices
def circle_rotated():
    """Rotate the random circle mesh by pi/4 about the x-axis.

    Side effect: writes the rotated mesh to "out.vtk" in the current
    working directory.
    """
    pts, cells = circle_random()
    # <https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula>
    theta = numpy.pi / 4
    k = numpy.array([1.0, 0.0, 0.0])  # rotation axis
    pts = (
        pts * numpy.cos(theta)
        + numpy.cross(k, pts) * numpy.sin(theta)
        + numpy.outer(numpy.einsum("ij,j->i", pts, k), k) * (1.0 - numpy.cos(theta))
    )
    meshio.write_points_cells("out.vtk", pts, {"triangle": cells})
    return pts, cells
| 25.896552 | 93 | 0.461607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,177 | 0.261207 |
c5d42cfeef185ddf6696d8600cebabde18dc035e | 25,439 | py | Python | odym/modules/test/DSM_test_known_results.py | DominikWiedenhofer/ODYM | 89aca3706b34df02d745f5d76cffc9f50dc2c3e7 | [
"MIT"
] | 3 | 2019-04-01T09:35:29.000Z | 2021-01-03T18:51:55.000Z | odym/modules/test/DSM_test_known_results.py | DominikWiedenhofer/ODYM | 89aca3706b34df02d745f5d76cffc9f50dc2c3e7 | [
"MIT"
] | null | null | null | odym/modules/test/DSM_test_known_results.py | DominikWiedenhofer/ODYM | 89aca3706b34df02d745f5d76cffc9f50dc2c3e7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 11 16:19:39 2014
"""
import os
import sys
import imp
# Put location of
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + '\\modules') # add ODYM module directory to system path
#NOTE: Hidden variable __file__ must be know to script for the directory structure to work.
# Therefore: When first using the model, run the entire script with F5 so that the __file__ variable can be created.
import dynamic_stock_model as dsm # remove and import the class manually if this unit test is run as standalone script
imp.reload(dsm)
import numpy as np
import unittest
###############################################################################
"""My Input for fixed lifetime"""
Time_T_FixedLT = np.arange(0,10)
Inflow_T_FixedLT = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
lifetime_FixedLT = {'Type': 'Fixed', 'Mean': np.array([5])}
lifetime_FixedLT0 = {'Type': 'Fixed', 'Mean': np.array([0])}
#lifetime_FixedLT = {'Type': 'Fixed', 'Mean': np.array([5,5,5,5,5,5,5,5,5,5])}
lifetime_NormLT = {'Type': 'Normal', 'Mean': np.array([5]), 'StdDev': np.array([1.5])}
lifetime_NormLT0 = {'Type': 'Normal', 'Mean': np.array([0]), 'StdDev': np.array([1.5])}
###############################################################################
"""My Output for fixed lifetime"""
Outflow_T_FixedLT = np.array([0, 0, 0, 0, 0, 1, 2, 3, 4, 5])
Outflow_TC_FixedLT = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 2, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 3, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 5, 0, 0, 0, 0, 0]])
Stock_T_FixedLT = np.array([1, 3, 6, 10, 15, 20, 25, 30, 35, 40])
StockChange_T_FixedLT = np.array([1, 2, 3, 4, 5, 5, 5, 5, 5, 5])
Stock_TC_FixedLT = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2, 3, 0, 0, 0, 0, 0, 0, 0],
[1, 2, 3, 4, 0, 0, 0, 0, 0, 0],
[1, 2, 3, 4, 5, 0, 0, 0, 0, 0],
[0, 2, 3, 4, 5, 6, 0, 0, 0, 0],
[0, 0, 3, 4, 5, 6, 7, 0, 0, 0],
[0, 0, 0, 4, 5, 6, 7, 8, 0, 0],
[0, 0, 0, 0, 5, 6, 7, 8, 9, 0],
[0, 0, 0, 0, 0, 6, 7, 8, 9, 10]])
Bal = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
"""My Output for normally distributed lifetime"""
Stock_TC_NormLT = np.array([[ 9.99570940e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 9.96169619e-01, 1.99914188e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 9.77249868e-01, 1.99233924e+00, 2.99871282e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 9.08788780e-01, 1.95449974e+00, 2.98850886e+00,
3.99828376e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 7.47507462e-01, 1.81757756e+00, 2.93174960e+00,
3.98467848e+00, 4.99785470e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 5.00000000e-01, 1.49501492e+00, 2.72636634e+00,
3.90899947e+00, 4.98084810e+00, 5.99742564e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 2.52492538e-01, 1.00000000e+00, 2.24252239e+00,
3.63515512e+00, 4.88624934e+00, 5.97701772e+00,
6.99699658e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 9.12112197e-02, 5.04985075e-01, 1.50000000e+00,
2.99002985e+00, 4.54394390e+00, 5.86349921e+00,
6.97318734e+00, 7.99656752e+00, 0.00000000e+00,
0.00000000e+00],
[ 2.27501319e-02, 1.82422439e-01, 7.57477613e-01,
2.00000000e+00, 3.73753731e+00, 5.45273268e+00,
6.84074908e+00, 7.96935696e+00, 8.99613846e+00,
0.00000000e+00],
[ 3.83038057e-03, 4.55002639e-02, 2.73633659e-01,
1.00997015e+00, 2.50000000e+00, 4.48504477e+00,
6.36152146e+00, 7.81799894e+00, 8.96552657e+00,
9.99570940e+00]])
Stock_T_NormLT = np.array([ 0.99957094, 2.9953115 , 5.96830193, 9.85008113,
14.4793678 , 19.60865447, 24.99043368, 30.46342411,
35.95916467, 41.45873561])
Outflow_T_NormLT = np.array([ 4.29060333e-04, 4.25944090e-03, 2.70095728e-02,
1.18220793e-01, 3.70713330e-01, 8.70713330e-01,
1.61822079e+00, 2.52700957e+00, 3.50425944e+00,
4.50042906e+00])
Outflow_TC_NormLT = np.array([[ 4.29060333e-04, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 3.40132023e-03, 8.58120666e-04, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 1.89197514e-02, 6.80264047e-03, 1.28718100e-03,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 6.84610878e-02, 3.78395028e-02, 1.02039607e-02,
1.71624133e-03, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 1.61281318e-01, 1.36922176e-01, 5.67592541e-02,
1.36052809e-02, 2.14530167e-03, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 2.47507462e-01, 3.22562636e-01, 2.05383263e-01,
7.56790055e-02, 1.70066012e-02, 2.57436200e-03,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 2.47507462e-01, 4.95014925e-01, 4.83843953e-01,
2.73844351e-01, 9.45987569e-02, 2.04079214e-02,
3.00342233e-03, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00],
[ 1.61281318e-01, 4.95014925e-01, 7.42522387e-01,
6.45125271e-01, 3.42305439e-01, 1.13518508e-01,
2.38092416e-02, 3.43248267e-03, -0.00000000e+00,
-0.00000000e+00],
[ 6.84610878e-02, 3.22562636e-01, 7.42522387e-01,
9.90029850e-01, 8.06406589e-01, 4.10766527e-01,
1.32438260e-01, 2.72105619e-02, 3.86154300e-03,
-0.00000000e+00],
[ 1.89197514e-02, 1.36922176e-01, 4.83843953e-01,
9.90029850e-01, 1.23753731e+00, 9.67687907e-01,
4.79227614e-01, 1.51358011e-01, 3.06118821e-02,
4.29060333e-03]])
StockChange_T_NormLT = np.array([ 0.99957094, 1.99574056, 2.97299043, 3.88177921, 4.62928667,
5.12928667, 5.38177921, 5.47299043, 5.49574056, 5.49957094])
"""My Output for Weibull-distributed lifetime"""
Stock_TC_WeibullLT = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], # computed with Excel and taken from there
[0.367879441, 2, 0, 0, 0, 0, 0, 0, 0, 0],
[0.100520187, 0.735758882, 3, 0, 0, 0, 0, 0, 0, 0],
[0.023820879, 0.201040373, 1.103638324, 4, 0, 0, 0, 0, 0, 0],
[0.005102464, 0.047641758, 0.30156056, 1.471517765,5, 0, 0, 0, 0, 0],
[0.001009149, 0.010204929, 0.071462637, 0.402080746,1.839397206, 6, 0, 0, 0, 0],
[0.000186736, 0.002018297, 0.015307393, 0.095283516, 0.502600933, 2.207276647, 7, 0, 0, 0],
[3.26256E-05, 0.000373472, 0.003027446, 0.020409858, 0.119104394, 0.60312112, 2.575156088, 8, 0, 0],
[5.41828E-06, 6.52513E-05, 0.000560208, 0.004036594, 0.025512322, 0.142925273, 0.703641306, 2.943035529, 9, 0],
[8.59762E-07, 1.08366E-05, 9.78769E-05, 0.000746944, 0.005045743, 0.030614786, 0.166746152, 0.804161493, 3.310914971, 10]])
Stock_T_WeibullLT = np.array([1,2.367879441,3.836279069,5.328499576,6.825822547,8.324154666,9.822673522,11.321225,12.8197819,14.31833966])
Outflow_T_WeibullLT = np.array([0,0.632120559,1.531600372,2.507779493,3.502677029,4.50166788,5.501481144,6.501448519,7.5014431,8.501442241])
Outflow_TC_WeibullLT = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0.632120559, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0.267359255, 1.264241118, 0, 0, 0, 0, 0, 0, 0, 0],
[0.076699308, 0.534718509, 1.896361676, 0, 0, 0, 0, 0, 0, 0],
[0.018718414, 0.153398615, 0.802077764, 2.528482235, 0, 0, 0, 0, 0, 0],
[0.004093316, 0.037436829, 0.230097923, 1.069437018, 3.160602794, 0, 0, 0, 0, 0],
[0.000822413, 0.008186632, 0.056155243, 0.306797231, 1.336796273, 3.792723353, 0, 0, 0, 0],
[0.00015411, 0.001644825, 0.012279947, 0.074873658, 0.383496539, 1.604155527, 4.424843912, 0, 0, 0],
[2.72074E-05, 0.000308221, 0.002467238, 0.016373263, 0.093592072, 0.460195846, 1.871514782, 5.056964471, 0, 0],
[4.55852E-06, 5.44147E-05 , 0.000462331 , 0.00328965, 0.020466579, 0.112310487, 0.536895154, 2.138874037, 5.689085029, 0]])
StockChange_T_WeibullLT = np.array([1,1.367879441,1.468399628,1.492220507,1.497322971,1.49833212,1.498518856,1.498551481,1.4985569,1.498557759])
lifetime_WeibullLT = {'Type': 'Weibull', 'Shape': np.array([1.2]), 'Scale': np.array([1])}
InitialStock_WB = np.array([0.01, 0.01, 0.08, 0.2, 0.2, 2, 2, 3, 4, 7.50])
Inflow_WB = np.array([11631.1250671964, 1845.6048709861, 2452.0593141014, 1071.0305279511, 198.1868742385, 391.9674590243, 83.9599583940, 29.8447516023, 10.8731273138, 7.5000000000])
# We need 10 digits AFTER the . to get a 9 digits after the . overlap with np.testing.
# The total number of counting digits is higher, because there are up to 5 digits before the .
# For the stock-driven model with initial stock, colculated with Excel
Sc_InitialStock_2_Ref = np.array([[ 3.29968072, 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 3.28845263, 5.1142035 , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 3.2259967 , 5.09680099, 2.0068288 , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 3. , 5. , 2. , 4. , 0. ,
0. , 0. , 0. , 0. ],
[ 2.46759471, 4.64972578, 1.962015 , 3.98638888, 4.93427563,
0. , 0. , 0. , 0. ],
[ 1.65054855, 3.82454624, 1.82456634, 3.91067739, 4.91748538,
3.8721761 , 0. , 0. , 0. ],
[ 0.83350238, 2.55819937, 1.50076342, 3.63671549, 4.82409004,
3.85899993, 2.78772936, 0. , 0. ],
[ 0.30109709, 1.2918525 , 1.00384511, 2.9913133 , 4.48613916,
3.78570788, 2.77824333, 3.36180162, 0. ],
[ 0.07510039, 0.46667297, 0.5069268 , 2.00085849, 3.68999109,
3.5205007 , 2.72547754, 3.35036215, 3.66410986]])
Sc_InitialStock_2_Ref_Sum = np.array([ 3.29968072, 8.40265614, 10.32962649, 14. ,
18. , 20. , 20. , 20. , 20. ])
Oc_InitialStock_2_Ref = np.array([[ 1.41636982e-03, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[ 1.12280883e-02, 2.19524375e-03, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 6.24559363e-02, 1.74025106e-02, 8.61420234e-04,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 2.25996698e-01, 9.68009922e-02, 6.82879736e-03,
1.71697802e-03, -0.00000000e+00, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 5.32405289e-01, 3.50274224e-01, 3.79849998e-02,
1.36111209e-02, 2.11801070e-03, -0.00000000e+00,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 8.17046165e-01, 8.25179532e-01, 1.37448656e-01,
7.57114903e-02, 1.67902556e-02, 1.66211031e-03,
-0.00000000e+00, -0.00000000e+00, -0.00000000e+00],
[ 8.17046165e-01, 1.26634687e+00, 3.23802924e-01,
2.73961897e-01, 9.33953405e-02, 1.31761643e-02,
1.19661751e-03, -0.00000000e+00, -0.00000000e+00],
[ 5.32405289e-01, 1.26634687e+00, 4.96918311e-01,
6.45402188e-01, 3.37950879e-01, 7.32920558e-02,
9.48603036e-03, 1.44303487e-03, -0.00000000e+00],
[ 2.25996698e-01, 8.25179532e-01, 4.96918311e-01,
9.90454815e-01, 7.96148072e-01, 2.65207178e-01,
5.27657861e-02, 1.14394721e-02, 1.57279902e-03]])
I_InitialStock_2_Ref = np.array([ 3.30109709, 5.11639875, 2.00769022, 4.00171698, 4.93639364, 3.87383821, 2.78892598, 3.36324466, 3.66568266])
""" Test case with fixed lifetime for initial stock"""
Time_T_FixedLT_X = np.arange(1, 9, 1)
lifetime_FixedLT_X = {'Type': 'Fixed', 'Mean': np.array([5])}
InitialStock_X = np.array([0, 0, 0, 7, 5, 4, 3, 2])
Inflow_X = np.array([0, 0, 0, 7, 5, 4, 3, 2])
Time_T_FixedLT_XX = np.arange(1, 11, 1)
lifetime_NormLT_X = {'Type': 'Normal', 'Mean': np.array([5]), 'StdDev': np.array([1.5])}
InitialStock_XX = np.array([0.01, 0.01, 0.08, 0.2, 0.2, 2, 2, 3, 4, 7.50])
Inflow_XX = np.array([ 2.61070664, 0.43955789, 0.87708508, 0.79210262, 0.4,
2.67555857, 2.20073139, 3.06983925, 4.01538044, 7.50321933])
""" Test case with normally distributed lifetime for initial stock and stock-driven model"""
Time_T_FixedLT_2 = np.arange(1, 10, 1)
lifetime_NormLT_2 = {'Type': 'Normal', 'Mean': np.array([5]), 'StdDev': np.array([1.5])}
InitialStock_2 = np.array([3,5,2,4])
FutureStock_2 = np.array([0,0,0,0,18,20,20,20,20])
ThisSwitchTime = 5 # First year with future stock curve, start counting from 1.
Inflow_2 = np.array([3.541625588, 5.227890554,2.01531097,4])
###############################################################################
"""Create Dynamic Stock Models and hand over the pre-defined values."""
# The model instances below are shared by every test in KnownResultsTestCase.
# For zero lifetime: border case
myDSM0 = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_FixedLT0)
# For fixed LT
myDSM = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_FixedLT)
myDSM2 = dsm.DynamicStockModel(t=Time_T_FixedLT, s=Stock_T_FixedLT, lt=lifetime_FixedLT)
myDSMx = dsm.DynamicStockModel(t=Time_T_FixedLT_X, lt=lifetime_FixedLT_X)
TestInflow_X = myDSMx.compute_i_from_s(InitialStock=InitialStock_X)
myDSMxy = dsm.DynamicStockModel(t=Time_T_FixedLT_X, i=TestInflow_X, lt=lifetime_FixedLT_X)
# For zero normally distributed lifetime: border case
myDSM0n = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_NormLT0)
# For normally distributed Lt
myDSM3 = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_NormLT)
myDSM4 = dsm.DynamicStockModel(t=Time_T_FixedLT, s=Stock_T_NormLT, lt=lifetime_NormLT)
myDSMX = dsm.DynamicStockModel(t=Time_T_FixedLT_XX, lt=lifetime_NormLT_X)
TestInflow_XX = myDSMX.compute_i_from_s(InitialStock=InitialStock_XX)
myDSMXY = dsm.DynamicStockModel(t=Time_T_FixedLT_XX, i=TestInflow_XX, lt=lifetime_NormLT_X)
# Test compute_stock_driven_model_initialstock:
TestDSM_IntitialStock = dsm.DynamicStockModel(t=Time_T_FixedLT_2, s=FutureStock_2, lt=lifetime_NormLT_2)
Sc_InitialStock_2,Oc_InitialStock_2,I_InitialStock_2 = TestDSM_IntitialStock.compute_stock_driven_model_initialstock(InitialStock = InitialStock_2, SwitchTime = ThisSwitchTime)
# Compute stock back from inflow
TestDSM_IntitialStock_Verify = dsm.DynamicStockModel(t=Time_T_FixedLT_2, i=I_InitialStock_2, lt=lifetime_NormLT_2)
Sc_Stock_2 = TestDSM_IntitialStock_Verify.compute_s_c_inflow_driven()
Sc_Stock_2_Sum = Sc_Stock_2.sum(axis =1)
Sc_Stock_Sum = TestDSM_IntitialStock_Verify.compute_stock_total()
Sc_Outflow_t_c = TestDSM_IntitialStock_Verify.compute_o_c_from_s_c()
# For Weibull-distributed Lt
myDSMWB1 = dsm.DynamicStockModel(t=Time_T_FixedLT, i=Inflow_T_FixedLT, lt=lifetime_WeibullLT)
myDSMWB2 = dsm.DynamicStockModel(t=Time_T_FixedLT, s=Stock_T_WeibullLT, lt=lifetime_WeibullLT)
myDSMWB3 = dsm.DynamicStockModel(t=Time_T_FixedLT_XX, lt=lifetime_WeibullLT)
TestInflow_WB = myDSMWB3.compute_i_from_s(InitialStock=InitialStock_XX)
myDSMWB4 = dsm.DynamicStockModel(t=Time_T_FixedLT_XX, i=TestInflow_WB, lt=lifetime_WeibullLT)
# Compute full stock model in correct order
###############################################################################
"""Unit Test Class"""
class KnownResultsTestCase(unittest.TestCase):
    """Compare dynamic stock model results against precomputed reference arrays.

    All model instances and reference arrays are module-level globals defined
    above; comparisons use numpy.testing with decimal precision 8-12.
    """

    def test_inflow_driven_model_fixedLifetime_0(self):
        """Test Inflow Driven Model with Fixed product lifetime of 0."""
        np.testing.assert_array_equal(myDSM0.compute_s_c_inflow_driven(), np.zeros(Stock_TC_FixedLT.shape))
        np.testing.assert_array_equal(myDSM0.compute_stock_total(), np.zeros((Stock_TC_FixedLT.shape[0])))
        np.testing.assert_array_equal(myDSM0.compute_stock_change(), np.zeros((Stock_TC_FixedLT.shape[0])))
        np.testing.assert_array_equal(myDSM0.compute_outflow_mb(), Inflow_T_FixedLT)
        np.testing.assert_array_equal(myDSM0.check_stock_balance(), Bal.transpose())

    def test_inflow_driven_model_fixedLifetime(self):
        """Test Inflow Driven Model with Fixed product lifetime."""
        np.testing.assert_array_equal(myDSM.compute_s_c_inflow_driven(), Stock_TC_FixedLT)
        np.testing.assert_array_equal(myDSM.compute_stock_total(),Stock_T_FixedLT)
        np.testing.assert_array_equal(myDSM.compute_o_c_from_s_c(), Outflow_TC_FixedLT)
        np.testing.assert_array_equal(myDSM.compute_outflow_total(), Outflow_T_FixedLT)
        np.testing.assert_array_equal(myDSM.compute_stock_change(), StockChange_T_FixedLT)
        np.testing.assert_array_equal(myDSM.check_stock_balance(), Bal.transpose())

    def test_stock_driven_model_fixedLifetime(self):
        """Test Stock Driven Model with Fixed product lifetime."""
        np.testing.assert_array_equal(myDSM2.compute_stock_driven_model()[0], Stock_TC_FixedLT)
        np.testing.assert_array_equal(myDSM2.compute_stock_driven_model()[1], Outflow_TC_FixedLT)
        np.testing.assert_array_equal(myDSM2.compute_stock_driven_model()[2], Inflow_T_FixedLT)
        np.testing.assert_array_equal(myDSM2.compute_outflow_total(), Outflow_T_FixedLT)
        np.testing.assert_array_equal(myDSM2.compute_stock_change(), StockChange_T_FixedLT)
        np.testing.assert_array_equal(myDSM2.check_stock_balance(), Bal.transpose())

    def test_inflow_driven_model_normallyDistrLifetime_0(self):
        """Test Inflow Driven Model with normally distributed product lifetime of 0."""
        np.testing.assert_array_equal(myDSM0n.compute_s_c_inflow_driven(), np.zeros(Stock_TC_FixedLT.shape))
        np.testing.assert_array_equal(myDSM0n.compute_stock_total(), np.zeros((Stock_TC_FixedLT.shape[0])))
        np.testing.assert_array_equal(myDSM0n.compute_stock_change(), np.zeros((Stock_TC_FixedLT.shape[0])))
        np.testing.assert_array_equal(myDSM0n.compute_outflow_mb(), Inflow_T_FixedLT)
        np.testing.assert_array_equal(myDSM0n.check_stock_balance(), Bal.transpose())

    def test_inflow_driven_model_normallyDistLifetime(self):
        """Test Inflow Driven Model with normally distributed product lifetime."""
        np.testing.assert_array_almost_equal(myDSM3.compute_s_c_inflow_driven(), Stock_TC_NormLT, 8)
        np.testing.assert_array_almost_equal(myDSM3.compute_stock_total(), Stock_T_NormLT, 8)
        np.testing.assert_array_almost_equal(myDSM3.compute_o_c_from_s_c(), Outflow_TC_NormLT, 8)
        np.testing.assert_array_almost_equal(myDSM3.compute_outflow_total(), Outflow_T_NormLT, 8)
        np.testing.assert_array_almost_equal(myDSM3.compute_stock_change(), StockChange_T_NormLT, 8)
        np.testing.assert_array_almost_equal(myDSM3.check_stock_balance(), Bal.transpose(), 12)

    def test_stock_driven_model_normallyDistLifetime(self):
        """Test Stock Driven Model with normally distributed product lifetime."""
        np.testing.assert_array_almost_equal(
            myDSM4.compute_stock_driven_model()[0], Stock_TC_NormLT, 8)
        np.testing.assert_array_almost_equal(
            myDSM4.compute_stock_driven_model()[1], Outflow_TC_NormLT, 8)
        np.testing.assert_array_almost_equal(
            myDSM4.compute_stock_driven_model()[2], Inflow_T_FixedLT, 8)
        np.testing.assert_array_almost_equal(myDSM4.compute_outflow_total(), Outflow_T_NormLT, 8)
        np.testing.assert_array_almost_equal(
            myDSM4.compute_stock_change(), StockChange_T_NormLT, 8)
        np.testing.assert_array_almost_equal(myDSM4.check_stock_balance(), Bal.transpose(), 12)

    def test_inflow_driven_model_WeibullDistLifetime(self):
        """Test Inflow Driven Model with Weibull-distributed product lifetime."""
        np.testing.assert_array_almost_equal(
            myDSMWB1.compute_s_c_inflow_driven(), Stock_TC_WeibullLT, 9)
        np.testing.assert_array_almost_equal(myDSMWB1.compute_stock_total(), Stock_T_WeibullLT, 8)
        np.testing.assert_array_almost_equal(myDSMWB1.compute_o_c_from_s_c(), Outflow_TC_WeibullLT, 9)
        np.testing.assert_array_almost_equal(myDSMWB1.compute_outflow_total(), Outflow_T_WeibullLT, 9)
        np.testing.assert_array_almost_equal(
            myDSMWB1.compute_stock_change(), StockChange_T_WeibullLT, 9)
        np.testing.assert_array_almost_equal(myDSMWB1.check_stock_balance(), Bal.transpose(), 12)

    def test_stock_driven_model_WeibullDistLifetime(self):
        """Test Stock Driven Model with Weibull-distributed product lifetime."""
        np.testing.assert_array_almost_equal(
            myDSMWB1.compute_stock_driven_model()[0], Stock_TC_WeibullLT, 8)
        np.testing.assert_array_almost_equal(
            myDSMWB1.compute_stock_driven_model()[1], Outflow_TC_WeibullLT, 8)
        np.testing.assert_array_almost_equal(
            myDSMWB1.compute_stock_driven_model()[2], Inflow_T_FixedLT, 8)
        np.testing.assert_array_almost_equal(myDSMWB1.compute_outflow_total(), Outflow_T_WeibullLT, 9)
        np.testing.assert_array_almost_equal(
            myDSMWB1.compute_stock_change(), StockChange_T_WeibullLT, 8)
        np.testing.assert_array_almost_equal(myDSMWB1.check_stock_balance(), Bal.transpose(), 12)

    def test_inflow_from_stock_fixedLifetime(self):
        """Test computation of inflow from stock with Fixed product lifetime."""
        np.testing.assert_array_equal(TestInflow_X, Inflow_X)
        np.testing.assert_array_equal(myDSMxy.compute_s_c_inflow_driven()[-1, :], InitialStock_X)

    def test_inflow_from_stock_normallyDistLifetime(self):
        """Test computation of inflow from stock with normally distributed product lifetime."""
        np.testing.assert_array_almost_equal(TestInflow_XX, Inflow_XX, 8)
        np.testing.assert_array_almost_equal(myDSMXY.compute_s_c_inflow_driven()[-1, :], InitialStock_XX, 9)

    def test_inflow_from_stock_WeibullDistLifetime(self):
        """Test computation of inflow from stock with Weibull-distributed product lifetime."""
        np.testing.assert_array_almost_equal(TestInflow_WB, Inflow_WB, 9)
        np.testing.assert_array_almost_equal(myDSMWB4.compute_s_c_inflow_driven()[-1, :], InitialStock_WB, 9)

    def test_compute_stock_driven_model_initialstock(self):
        """Test stock-driven model with initial stock given."""
        np.testing.assert_array_almost_equal(I_InitialStock_2, I_InitialStock_2_Ref, 8)
        np.testing.assert_array_almost_equal(Sc_InitialStock_2, Sc_InitialStock_2_Ref, 8)
        np.testing.assert_array_almost_equal(Sc_InitialStock_2.sum(axis =1), Sc_InitialStock_2_Ref_Sum, 8)
        np.testing.assert_array_almost_equal(Oc_InitialStock_2, Oc_InitialStock_2_Ref, 8)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 59.576112 | 182 | 0.626243 | 7,370 | 0.289713 | 0 | 0 | 0 | 0 | 0 | 0 | 2,908 | 0.114313 |
c5d4fff93384a7b4f91ff88b98ec480412a4e7d8 | 753 | py | Python | tox_docker/tests/util.py | tkdchen/tox-docker | 8d450ecf28a50f39d1a573c876756bddb9d4ae99 | [
"BSD-3-Clause"
] | null | null | null | tox_docker/tests/util.py | tkdchen/tox-docker | 8d450ecf28a50f39d1a573c876756bddb9d4ae99 | [
"BSD-3-Clause"
] | null | null | null | tox_docker/tests/util.py | tkdchen/tox-docker | 8d450ecf28a50f39d1a573c876756bddb9d4ae99 | [
"BSD-3-Clause"
] | null | null | null | import os
from docker.models.containers import Container
import docker
import pytest
from tox_docker.config import runas_name
def find_container(instance_name: str) -> Container:
# TODO: refactor this as a pytest fixture
# this is running in a child-process of the tox instance which
# spawned the container; so we need to pass the parent pid to
# get the right runas_name()
running_name = runas_name(instance_name, pid=os.getppid())
client = docker.from_env(version="auto")
for container in client.containers.list():
container.attrs["Config"].get("Labels", {})
if container.name == running_name:
return container
pytest.fail(f"No running container with instance name {running_name!r}")
| 31.375 | 76 | 0.718459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.36255 |
c5d59ef45ad87f614428ee780a00c2a59f6106c2 | 4,024 | py | Python | equipment/viewsets.py | aschrist/WebServerAndClient | 3aa0af2c444acac88a1b51b4cfd4bb8d0c36e640 | [
"BSD-3-Clause"
] | null | null | null | equipment/viewsets.py | aschrist/WebServerAndClient | 3aa0af2c444acac88a1b51b4cfd4bb8d0c36e640 | [
"BSD-3-Clause"
] | null | null | null | equipment/viewsets.py | aschrist/WebServerAndClient | 3aa0af2c444acac88a1b51b4cfd4bb8d0c36e640 | [
"BSD-3-Clause"
] | null | null | null | from django.core.exceptions import PermissionDenied
from rest_framework import viewsets, mixins
from rest_framework.decorators import action
from rest_framework.response import Response
from emstrack.mixins import UpdateModelUpdateByMixin, BasePermissionMixin
from equipment.models import EquipmentItem, EquipmentHolder, Equipment
from equipment.serializers import EquipmentItemSerializer, EquipmentSerializer
from hospital.viewsets import logger
from login.permissions import get_permissions
class EquipmentItemViewSet(mixins.ListModelMixin,
                           mixins.RetrieveModelMixin,
                           UpdateModelUpdateByMixin,
                           viewsets.GenericViewSet):
    """
    API endpoint for manipulating hospital equipment.

    list:
    Retrieve list of hospital equipment.

    retrieve:
    Retrieve an existing hospital equipment instance.

    update:
    Update existing hospital equipment instance.

    partial_update:
    Partially update existing hospital equipment instance.
    """
    queryset = EquipmentItem.objects.all()
    serializer_class = EquipmentItemSerializer
    lookup_field = 'equipment_id'

    # make sure both fields are looked up
    def get_queryset(self):
        """
        Return the equipment items of the equipmentholder named in the URL,
        after enforcing the requesting user's read/write permissions.

        Raises:
            PermissionDenied: for anonymous users, insufficient permissions,
                or an unknown equipmentholder id.
        """
        # retrieve user
        user = self.request.user

        # return nothing if anonymous
        if user.is_anonymous:
            raise PermissionDenied()

        # retrieve id
        equipmentholder_id = int(self.kwargs['equipmentholder_id'])

        logger.debug('kwargs = {}'.format(self.kwargs))

        try:
            # retrieve equipmentholder
            equipmentholder = EquipmentHolder.objects.get(id=equipmentholder_id)

            # read or write?
            # BUGFIX: methods other than GET/PUT/PATCH/DELETE (e.g. HEAD,
            # OPTIONS) previously left 'is_write' undefined, crashing with
            # UnboundLocalError.  Only the mutating methods count as writes;
            # everything else is treated as a read.
            is_write = self.request.method in ('PUT', 'PATCH', 'DELETE')

            # is hospital?
            if equipmentholder.is_hospital():

                # check permission (and also existence)
                if is_write:
                    if not get_permissions(user).check_can_write(hospital=equipmentholder.hospital.id):
                        raise PermissionDenied()
                else:
                    if not get_permissions(user).check_can_read(hospital=equipmentholder.hospital.id):
                        raise PermissionDenied()

            # is ambulance?
            elif equipmentholder.is_ambulance():

                # check permission (and also existence)
                if is_write:
                    if not get_permissions(user).check_can_write(ambulance=equipmentholder.ambulance.id):
                        raise PermissionDenied()
                else:
                    if not get_permissions(user).check_can_read(ambulance=equipmentholder.ambulance.id):
                        raise PermissionDenied()

            else:
                raise PermissionDenied()

        except EquipmentHolder.DoesNotExist:
            # Unknown equipmentholder looks the same as "no permission".
            raise PermissionDenied()

        # build queryset (avoid shadowing the builtin 'filter')
        return self.queryset.filter(equipmentholder_id=equipmentholder_id)
class EquipmentViewSet(BasePermissionMixin,
                       viewsets.GenericViewSet):
    """
    API endpoint for retrieving equipment metadata.

    metadata:
    Retrieve the equipment associated with an equipmentholder.
    """
    # Configuration consumed by BasePermissionMixin for permission filtering.
    profile_field = 'equipments'
    filter_field = 'id'
    queryset = EquipmentHolder.objects.all()

    @action(detail=True)
    def metadata(self, request, pk=None, **kwargs):
        """
        Retrieve hospital equipment metadata.
        """
        equipmentholder = self.get_object()
        # Ids of the Equipment referenced by this holder's equipment items.
        equipment_ids = equipmentholder.equipmentitem_set.values('equipment')
        equipment = Equipment.objects.filter(id__in=equipment_ids)
        serializer = EquipmentSerializer(equipment, many=True)
        return Response(serializer.data)
| 32.715447 | 105 | 0.635189 | 3,523 | 0.875497 | 0 | 0 | 437 | 0.108598 | 0 | 0 | 885 | 0.21993 |
c5d6d1a64a423018703822d97798dfe358235126 | 1,081 | py | Python | HLTrigger/Configuration/python/HLT_75e33/paths/HLT_DoublePFPuppiJets128_DoublePFPuppiBTagDeepCSV_2p4_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:24:46.000Z | 2021-11-30T16:24:46.000Z | HLTrigger/Configuration/python/HLT_75e33/paths/HLT_DoublePFPuppiJets128_DoublePFPuppiBTagDeepCSV_2p4_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 4 | 2021-11-29T13:57:56.000Z | 2022-03-29T06:28:36.000Z | HLTrigger/Configuration/python/HLT_75e33/paths/HLT_DoublePFPuppiJets128_DoublePFPuppiBTagDeepCSV_2p4_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:16:05.000Z | 2021-11-30T16:16:05.000Z | import FWCore.ParameterSet.Config as cms
from ..modules.hltBTagPFPuppiDeepCSV0p865DoubleEta2p4_cfi import *
from ..modules.hltDoublePFPuppiJets128Eta2p4MaxDeta1p6_cfi import *
from ..modules.hltDoublePFPuppiJets128MaxEta2p4_cfi import *
from ..modules.l1tDoublePFPuppiJet112offMaxEta2p4_cfi import *
from ..modules.l1tDoublePFPuppiJets112offMaxDeta1p6_cfi import *
from ..sequences.HLTAK4PFPuppiJetsReconstruction_cfi import *
from ..sequences.HLTBeginSequence_cfi import *
from ..sequences.HLTBtagDeepCSVSequencePFPuppiModEta2p4_cfi import *
from ..sequences.HLTEndSequence_cfi import *
from ..sequences.HLTParticleFlowSequence_cfi import *
# HLT path built from: begin sequence, two L1T double-jet filters,
# particle-flow reconstruction, AK4 PF-Puppi jet reconstruction, two HLT
# double-jet filters, the DeepCSV b-tagging sequence/filter, and the end
# sequence.  Module names suggest pT > 128 GeV, |eta| < 2.4, |Deta| < 1.6
# and a DeepCSV threshold of 0.865 -- confirm against the module configs.
HLT_DoublePFPuppiJets128_DoublePFPuppiBTagDeepCSV_2p4 = cms.Path(
    HLTBeginSequence +
    l1tDoublePFPuppiJet112offMaxEta2p4 +
    l1tDoublePFPuppiJets112offMaxDeta1p6 +
    HLTParticleFlowSequence +
    HLTAK4PFPuppiJetsReconstruction +
    hltDoublePFPuppiJets128MaxEta2p4 +
    hltDoublePFPuppiJets128Eta2p4MaxDeta1p6 +
    HLTBtagDeepCSVSequencePFPuppiModEta2p4 +
    hltBTagPFPuppiDeepCSV0p865DoubleEta2p4 +
    HLTEndSequence
)
| 41.576923 | 68 | 0.848289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c5d80087a7524e830b02b5b4d7549aa8f65625d8 | 891 | py | Python | convert_m2.py | Alex92rus/ErrorDetectionProject | e611ceaaaa1e5e18bc9a6ae4a02ed421db058da8 | [
"Apache-2.0"
] | 1 | 2018-11-30T23:50:27.000Z | 2018-11-30T23:50:27.000Z | convert_m2.py | Alex92rus/ErrorDetectionProject | e611ceaaaa1e5e18bc9a6ae4a02ed421db058da8 | [
"Apache-2.0"
] | null | null | null | convert_m2.py | Alex92rus/ErrorDetectionProject | e611ceaaaa1e5e18bc9a6ae4a02ed421db058da8 | [
"Apache-2.0"
] | null | null | null |
def extract_to_m2(filename, annot_triples):
    """
    Extracts error detection annotations in m2 file format.

    Each sentence produces an 'S <sentence>' line, one 'A ...' line per
    selected index, and a trailing blank line.

    Args:
        filename: the output m2 file
        annot_triples: the annotations of form (sentence, indexes, selections),
            where ``selections[i] == 1`` marks ``indexes[i]`` as an error.
    """
    # 'w' (not 'w+') is sufficient: the file is only written, never read back.
    with open(filename, 'w') as m2_file:
        for triple in annot_triples:
            sentence, indexes, selections = triple[0], triple[1], triple[2]
            m2_file.write('S ' + sentence + '\n')
            for index, selected in zip(indexes, selections):
                if selected == 1:
                    a_line = 'A '
                    if isinstance(index, int):
                        # Integer token index: annotate the one-token span
                        # [index, index + 1).
                        a_line += str(index) + ' ' + str(index + 1)
                    else:
                        # Pre-formatted (string) index is emitted twice,
                        # matching the original behavior -- presumably a span
                        # string like "3 4"; TODO confirm with callers.
                        a_line += index + ' ' + index
                    a_line += '|||IG|||IG|||REQUIRED|||-NONE-|||1\n'
                    m2_file.write(a_line)
            m2_file.write('\n')
c5d90016a90a331eb727c3cf478e5adf99c5cfde | 8,986 | py | Python | Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py | VincentWei/mdolphin-core | 48ffdcf587a48a7bb4345ae469a45c5b64ffad0e | [
"Apache-2.0"
] | 6 | 2017-05-31T01:46:45.000Z | 2018-06-12T10:53:30.000Z | Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py | FMSoftCN/mdolphin-core | 48ffdcf587a48a7bb4345ae469a45c5b64ffad0e | [
"Apache-2.0"
] | null | null | null | Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py | FMSoftCN/mdolphin-core | 48ffdcf587a48a7bb4345ae469a45c5b64ffad0e | [
"Apache-2.0"
] | 2 | 2017-07-17T06:02:42.000Z | 2018-09-19T10:08:38.000Z | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.net.layouttestresults import LayoutTestResults
class CommitQueueTaskDelegate(object):
    """Abstract delegate interface used by CommitQueueTask.

    Concrete subclasses supply command execution, pass/fail reporting,
    patch refetching and results archiving; every method must be overridden.
    """

    def run_command(self, command):
        # Execute one command given as a list of arguments.
        raise NotImplementedError("subclasses must implement")

    def command_passed(self, message, patch):
        # Report a successful step for the given patch.
        raise NotImplementedError("subclasses must implement")

    def command_failed(self, message, script_error, patch):
        # Report a failed step; CommitQueueTask._run_command stores the
        # return value as failure_status_id.
        raise NotImplementedError("subclasses must implement")

    def refetch_patch(self, patch):
        # Return a fresh copy of the patch (its state may have changed).
        raise NotImplementedError("subclasses must implement")

    def layout_test_results(self):
        # Return the results of the most recent layout-test run (may be None).
        raise NotImplementedError("subclasses must implement")

    def archive_last_layout_test_results(self, patch):
        # Archive the last test results; return value is treated as truthy
        # on success by CommitQueueTask._test_patch.
        raise NotImplementedError("subclasses must implement")

    # We could make results_archive optional, but for now it's required.
    def report_flaky_tests(self, patch, flaky_tests, results_archive):
        raise NotImplementedError("subclasses must implement")
class CommitQueueTask(object):
    """Drives a single patch through the commit-queue pipeline.

    The pipeline (see run()) is: validate, clean, update, apply, build,
    test, re-validate, land.  All interaction with the outside world goes
    through the CommitQueueTaskDelegate passed to the constructor.
    """

    def __init__(self, delegate, patch):
        self._delegate = delegate
        self._patch = patch
        self._script_error = None  # last ScriptError seen by _run_command

    def _validate(self):
        """Re-fetch the patch and check it is still eligible for landing."""
        # Bugs might get closed, or patches might be obsoleted or r-'d while the
        # commit-queue is processing.
        self._patch = self._delegate.refetch_patch(self._patch)
        if self._patch.is_obsolete():
            return False
        if self._patch.bug().is_closed():
            return False
        if not self._patch.committer():
            return False
        # NOTE: "not x != '-'" is equivalent to "x == '-'", i.e. this rejects
        # patches that have been marked review-minus.
        if not self._patch.review() != "-":
            return False
        # Reviewer is not required. Missing reviewers will be caught during
        # the ChangeLog check during landing.
        return True

    def _run_command(self, command, success_message, failure_message):
        """Run one delegate command; report pass/fail and return a bool."""
        try:
            self._delegate.run_command(command)
            self._delegate.command_passed(success_message, patch=self._patch)
            return True
        # Python 2 except syntax; the error is stashed for report_failure().
        except ScriptError, e:
            self._script_error = e
            self.failure_status_id = self._delegate.command_failed(failure_message, script_error=self._script_error, patch=self._patch)
            return False

    def _clean(self):
        return self._run_command([
            "clean",
        ],
        "Cleaned working directory",
        "Unable to clean working directory")

    def _update(self):
        # FIXME: Ideally the status server log message should include which revision we updated to.
        return self._run_command([
            "update",
        ],
        "Updated working directory",
        "Unable to update working directory")

    def _apply(self):
        return self._run_command([
            "apply-attachment",
            "--no-update",
            "--non-interactive",
            self._patch.id(),
        ],
        "Applied patch",
        "Patch does not apply")

    def _build(self):
        return self._run_command([
            "build",
            "--no-clean",
            "--no-update",
            "--build-style=both",
        ],
        "Built patch",
        "Patch does not build")

    def _build_without_patch(self):
        # Used to decide whether a build failure was caused by the patch or
        # by a broken tree.
        return self._run_command([
            "build",
            "--force-clean",
            "--no-update",
            "--build-style=both",
        ],
        "Able to build without patch",
        "Unable to build without patch")

    def _test(self):
        return self._run_command([
            "build-and-test",
            "--no-clean",
            "--no-update",
            # Notice that we don't pass --build, which means we won't build!
            "--test",
            "--non-interactive",
        ],
        "Passed tests",
        "Patch does not pass tests")

    def _build_and_test_without_patch(self):
        # Used to decide whether test failures were caused by the patch or
        # by a red tree.
        return self._run_command([
            "build-and-test",
            "--force-clean",
            "--no-update",
            "--build",
            "--test",
            "--non-interactive",
        ],
        "Able to pass tests without patch",
        "Unable to pass tests without patch (tree is red?)")

    def _failing_results_from_last_run(self):
        results = self._delegate.layout_test_results()
        if not results:
            return [] # Makes callers slighty cleaner to not have to deal with None
        return results.failing_test_results()

    def _land(self):
        # Unclear if this should pass --quiet or not. If --parent-command always does the reporting, then it should.
        return self._run_command([
            "land-attachment",
            "--force-clean",
            "--ignore-builders",
            "--non-interactive",
            "--parent-command=commit-queue",
            self._patch.id(),
        ],
        "Landed patch",
        "Unable to land patch")

    def _report_flaky_tests(self, flaky_test_results, results_archive):
        self._delegate.report_flaky_tests(self._patch, flaky_test_results, results_archive)

    def _test_patch(self):
        """Run tests up to twice to separate real failures from flakiness.

        Returns True if the patch passes (possibly reporting flaky tests),
        False to retry later, and may raise via report_failure() when the
        failure is attributable to the patch.
        """
        if self._test():
            return True

        first_results = self._failing_results_from_last_run()
        first_failing_tests = [result.filename for result in first_results]
        first_results_archive = self._delegate.archive_last_layout_test_results(self._patch)
        if self._test():
            # Only report flaky tests if we were successful at archiving results.
            if first_results_archive:
                self._report_flaky_tests(first_results, first_results_archive)
            return True

        second_results = self._failing_results_from_last_run()
        second_failing_tests = [result.filename for result in second_results]
        if first_failing_tests != second_failing_tests:
            # We could report flaky tests here, but since run-webkit-tests
            # is run with --exit-after-N-failures=1, we would need to
            # be careful not to report constant failures as flaky due to earlier
            # flaky test making them not fail (no results) in one of the runs.
            # See https://bugs.webkit.org/show_bug.cgi?id=51272
            return False

        if self._build_and_test_without_patch():
            return self.report_failure() # The error from the previous ._test() run is real, report it.

        return False # Tree must be red, just retry later.

    def report_failure(self):
        """Raise the stored ScriptError unless the patch is no longer valid."""
        if not self._validate():
            return False
        raise self._script_error

    def run(self):
        """Process the patch end-to-end.

        Returns True on a successful landing, False when processing should
        be retried or abandoned, and raises the stored ScriptError when the
        failure is the patch's fault.
        """
        if not self._validate():
            return False
        if not self._clean():
            return False
        if not self._update():
            return False
        if not self._apply():
            return self.report_failure()
        if not self._patch.is_rollout():
            if not self._build():
                if not self._build_without_patch():
                    return False
                return self.report_failure()
            if not self._test_patch():
                return False
        # Make sure the patch is still valid before landing (e.g., make sure
        # no one has set commit-queue- since we started working on the patch.)
        if not self._validate():
            return False
        # FIXME: We should understand why the land failure occured and retry if possible.
        if not self._land():
            return self.report_failure()
        return True
c5d9c1458570637eeb6f3871c9d53fd91009b27f | 8,710 | py | Python | tark/transcript/models.py | Ensembl/tark | 8e08a00a6f53f7fbe5954ba70e337d0d1709df52 | [
"Apache-2.0"
] | 5 | 2019-06-21T22:11:46.000Z | 2022-01-17T06:58:56.000Z | tark/transcript/models.py | Ensembl/tark | 8e08a00a6f53f7fbe5954ba70e337d0d1709df52 | [
"Apache-2.0"
] | 12 | 2020-07-28T09:26:09.000Z | 2022-03-25T18:08:11.000Z | tark/transcript/models.py | Ensembl/tark | 8e08a00a6f53f7fbe5954ba70e337d0d1709df52 | [
"Apache-2.0"
] | 6 | 2020-01-08T11:09:59.000Z | 2021-07-27T04:14:32.000Z | """
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
from assembly.models import Assembly
from tark.fields import ChecksumField
from sequence.models import Sequence
from gene.models import Gene
from session.models import Session
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
class Transcript(models.Model):
MANY2ONE_RELATED = {'SEQUENCE': 'sequence', 'SESSION': 'session', 'ASSEMBLY': 'assembly'}
ONE2MANY_RELATED = {'RELEASE_SET': 'transcript_release_set', 'GENE': 'genes',
'TRANSLATION': "translations", "EXONTRANSCRIPT": "exons"
}
# You'll normally want to ensure that you've set an appropriate related_name argument on the relationship,
# that you can use as the field name.
transcript_id = models.AutoField(primary_key=True)
stable_id = models.CharField(max_length=64)
stable_id_version = models.PositiveIntegerField()
assembly = models.ForeignKey(Assembly, models.DO_NOTHING, blank=True, null=True)
loc_start = models.PositiveIntegerField(blank=True, null=True)
loc_end = models.PositiveIntegerField(blank=True, null=True)
loc_strand = models.IntegerField(blank=True, null=True)
loc_region = models.CharField(max_length=42, blank=True, null=True)
loc_checksum = ChecksumField(unique=True, max_length=20, blank=True, null=True)
exon_set_checksum = ChecksumField(unique=True, max_length=20, blank=True, null=True)
transcript_checksum = ChecksumField(unique=True, max_length=20, blank=True, null=True)
sequence = models.ForeignKey(Sequence, models.DO_NOTHING, db_column='seq_checksum', blank=True, null=True)
session = models.ForeignKey(Session, models.DO_NOTHING, blank=True, null=True)
transcript_release_set = models.ManyToManyField('release.ReleaseSet', through='release.TranscriptReleaseTag',
related_name='transcript_release_set')
biotype = models.CharField(max_length=40, blank=True, null=True)
genes = models.ManyToManyField('gene.Gene', through='transcript.TranscriptGene')
exons = models.ManyToManyField('exon.Exon', through='exon.ExonTranscript')
translations = models.ManyToManyField('translation.Translation', through='translation.TranslationTranscript')
class Meta:
managed = False
db_table = 'transcript'
@classmethod
def fetch_mane_transcript_and_type(cls, transcript_id=None):
transcript = None
source = "Ensembl"
if transcript_id is not None:
transcript = Transcript.objects.get(pk=transcript_id)
if transcript is not None:
try:
source = transcript.transcript_release_set.all()[:1].get().source.shortname
except Exception as e:
logger.error("Exception from get_mane_transcript " + str(e))
if "Ensembl" in source:
raw_sql = "SELECT DISTINCT\
t1.transcript_id, t1.stable_id as ens_stable_id, t1.stable_id_version as ens_stable_id_version,\
relationship_type.shortname as mane_type,\
t2.stable_id as refseq_stable_id, t2.stable_id_version as refseq_stable_id_version \
FROM \
transcript t1 \
JOIN transcript_release_tag trt1 ON t1.transcript_id=trt1.feature_id \
JOIN transcript_release_tag_relationship ON \
trt1.transcript_release_id=transcript_release_tag_relationship.transcript_release_object_id \
JOIN transcript_release_tag trt2 ON \
transcript_release_tag_relationship.transcript_release_subject_id=trt2.transcript_release_id \
JOIN transcript t2 ON trt2.feature_id=t2.transcript_id \
JOIN relationship_type ON \
transcript_release_tag_relationship.relationship_type_id=relationship_type.relationship_type_id"
else:
raw_sql = "SELECT DISTINCT\
t1.transcript_id, t1.stable_id as ens_stable_id, t1.stable_id_version as ens_stable_id_version,\
relationship_type.shortname as mane_type,\
t2.stable_id as refseq_stable_id, t2.stable_id_version as refseq_stable_id_version \
FROM \
transcript t1 \
JOIN transcript_release_tag trt1 ON t1.transcript_id=trt1.feature_id \
JOIN transcript_release_tag_relationship ON \
trt1.transcript_release_id=transcript_release_tag_relationship.transcript_release_subject_id \
JOIN transcript_release_tag trt2 ON \
transcript_release_tag_relationship.transcript_release_object_id=trt2.transcript_release_id \
JOIN transcript t2 ON trt2.feature_id=t2.transcript_id \
JOIN relationship_type ON \
transcript_release_tag_relationship.relationship_type_id=relationship_type.relationship_type_id"
if transcript_id is not None:
raw_sql = raw_sql + " WHERE t1.transcript_id=%s limit 1"
mane_transcripts = Transcript.objects.raw(raw_sql, [transcript_id])
mane_transcript_dict = {}
if mane_transcripts is not None and len(list(mane_transcripts)) > 0:
mane_transcript = mane_transcripts[0]
mane_transcript_dict = {"mane_transcript_stableid":
"{}.{}".format(mane_transcript.refseq_stable_id,
mane_transcript.refseq_stable_id_version),
"mane_transcript_type": mane_transcript.mane_type}
return mane_transcript_dict
else:
raw_sql = "SELECT DISTINCT\
t1.transcript_id, t1.stable_id as ens_stable_id, t1.stable_id_version as ens_stable_id_version,\
relationship_type.shortname as mane_type,\
t2.stable_id as refseq_stable_id, t2.stable_id_version as refseq_stable_id_version,\
gn1.name as ens_gene_name \
FROM \
transcript t1 \
JOIN transcript_release_tag trt1 ON t1.transcript_id=trt1.feature_id \
JOIN transcript_release_tag_relationship ON \
trt1.transcript_release_id=transcript_release_tag_relationship.transcript_release_object_id \
JOIN transcript_release_tag trt2 ON \
transcript_release_tag_relationship.transcript_release_subject_id=trt2.transcript_release_id \
JOIN transcript t2 ON trt2.feature_id=t2.transcript_id \
JOIN relationship_type ON \
transcript_release_tag_relationship.relationship_type_id=relationship_type.relationship_type_id\
JOIN transcript_gene tg1 ON \
t1.transcript_id=tg1.transcript_id \
JOIN gene gene1 ON \
tg1.gene_id=gene1.gene_id \
JOIN gene_names gn1 ON \
gene1.name_id=gn1.external_id \
where gn1.primary_id=1"
mane_transcripts = Transcript.objects.raw(raw_sql)
return mane_transcripts
class TranscriptGene(models.Model):
gene_transcript_id = models.AutoField(primary_key=True)
gene = models.ForeignKey(Gene, models.DO_NOTHING, blank=True, null=True)
transcript = models.ForeignKey(Transcript, models.DO_NOTHING, blank=True, null=True)
session = models.ForeignKey(Session, models.DO_NOTHING, blank=True, null=True)
class Meta:
managed = False
db_table = 'transcript_gene'
unique_together = (('gene', 'transcript'),)
| 54.099379 | 120 | 0.651091 | 7,741 | 0.888749 | 0 | 0 | 5,201 | 0.59713 | 0 | 0 | 5,139 | 0.590011 |
c5da0613cb69180ef2e512685b8a5cf90b29b513 | 2,086 | py | Python | scripts/facebook_account_scraping.py | nvanderperren/social-media-archiving | 54522e08298b062af8cbe1156127cdebfeede30f | [
"MIT"
] | null | null | null | scripts/facebook_account_scraping.py | nvanderperren/social-media-archiving | 54522e08298b062af8cbe1156127cdebfeede30f | [
"MIT"
] | null | null | null | scripts/facebook_account_scraping.py | nvanderperren/social-media-archiving | 54522e08298b062af8cbe1156127cdebfeede30f | [
"MIT"
] | null | null | null | #!usr/bin/env python3
# -*- coding: utf-8 -*-
#
# @author Nastasia Vanderperren
#
# get posts of a fb page, group or account
# returns a json lines files with a line for each post
#
from argparse import ArgumentParser
from datetime import date, datetime
from facebook_scraper import get_posts
from json import dumps, JSONEncoder
class DateTimeEncoder(JSONEncoder):
    """JSON encoder that serializes date/datetime objects via str()."""

    def default(self, o):
        if isinstance(o, (date, datetime)):
            # e.g. "2021-01-02" or "2021-01-02 03:04:05"
            return str(o)
        # Base class raises TypeError for anything else.
        return super().default(o)


def write_posts(account, posts):
    """Write posts to <YYYYMMDD>_<account>_facebook.jsonl, one JSON object
    per line.

    The bulky 'text' field is dropped from each post before serialization.
    Note: the post dicts are mutated in place.
    """
    today = date.today().strftime("%Y%m%d")
    with open("{}_{}_facebook.jsonl".format(today, account), 'w') as output_file:
        for post in posts:
            # pop() instead of del: don't crash on posts without a 'text' key
            post.pop('text', None)
            output_file.write(dumps(post, cls=DateTimeEncoder, ensure_ascii=False))
            output_file.write("\n")
    # no explicit close(): the 'with' block closes the file
def get_fb_posts(args):
    """Scrape posts for the account/group described by *args* and dump them
    to a jsonl file via write_posts()."""
    account = args.account
    # An empty/None --cookies value means "no cookie file".
    cookies = args.cookies if args.cookies else None
    scrape_options = {"comments": args.comments, "reactors": args.reactions}
    common_kwargs = dict(cookies=cookies, pages=args.pages, extra_info=True,
                         options=scrape_options)
    if args.group:
        posts = get_posts(group=account, **common_kwargs)
    else:
        posts = get_posts(account=account, **common_kwargs)
    write_posts(account, posts)
if __name__ == '__main__':
    # Command-line interface: positional account name plus scraping options.
    parser = ArgumentParser()
    parser.add_argument('account', help="name of the account", type=str)
    parser.add_argument('--cookies', help="cookie file for getting data of a private account", required=False)
    parser.add_argument('--reactions', help="extract likes and so from posts", action='store_true')
    parser.add_argument('--comments', help="scrape comments too", action='store_true')
    parser.add_argument('--group', help="account is a group", action='store_true')
    parser.add_argument('--pages', help="number of pages to scrape", type=int, default=10)
    args = parser.parse_args()
    get_fb_posts(args)
c5da227c6c2f1600816a6c1c38324a392ed24d2b | 548 | py | Python | ked/gui/ked.py | idealtitude/ked | fb4135bffb065cf176a2450af53ab8e3408fdaa9 | [
"MIT"
] | null | null | null | ked/gui/ked.py | idealtitude/ked | fb4135bffb065cf176a2450af53ab8e3408fdaa9 | [
"MIT"
] | null | null | null | ked/gui/ked.py | idealtitude/ked | fb4135bffb065cf176a2450af53ab8e3408fdaa9 | [
"MIT"
] | null | null | null | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk as gtk
class Ked:
    """Main application window, loaded from the bundled Glade layout."""

    def __init__(self, app_path, ufile=None):
        # Build the UI from the Glade file shipped under <app_path>/data/.
        layout_path = f'{app_path}/data/ked_layout.glade'
        self.builder = gtk.Builder()
        self.builder.add_from_file(layout_path)
        window = self.builder.get_object("KedMain")
        # Quit the GTK main loop when the window is closed.
        window.connect("delete-event", gtk.main_quit)
        window.show()

    def echo(self, msg):
        # Simple debug helper: print the message to stdout.
        print(f'Message: {msg}')
def start_ked(app_path, user_file=None):
    """Create the main Ked window and enter the GTK main loop."""
    Ked(app_path)
    gtk.main()
| 21.92 | 58 | 0.644161 | 381 | 0.695255 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.155109 |
c5dac43a188b9363ed3359b80117310c93a48c15 | 375 | py | Python | patchy/__init__.py | peternara/graph-based-image-classification-gcn | 60e93b47691e960b7f06f7a5dc11191efe881178 | [
"MIT"
] | 44 | 2017-02-26T16:52:48.000Z | 2022-02-17T18:50:02.000Z | patchy/__init__.py | hungerzs/graph-based-image-classification | d44182c6a28b4ab9a691a9cb1ecd4c3b851875a8 | [
"MIT"
] | 2 | 2018-11-14T05:11:25.000Z | 2020-06-23T16:24:41.000Z | patchy/__init__.py | hungerzs/graph-based-image-classification | d44182c6a28b4ab9a691a9cb1ecd4c3b851875a8 | [
"MIT"
] | 13 | 2018-04-26T07:46:35.000Z | 2022-02-28T15:38:53.000Z | from .patchy import PatchySan
from .helper.labeling import labelings,\
scanline,\
betweenness_centrality
from .helper.neighborhood_assembly import neighborhood_assemblies,\
neighborhoods_weights_to_root,\
neighborhoods_grid_spiral
| 37.5 | 73 | 0.538667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c5db8db5817eef82da2ecde3434b6691281fcd17 | 14,862 | py | Python | analysis/202106--uncertainty_vs_flux/compare_images.py | rsiverd/ultracool | cbeb2e0e4aee0acc9f8ed2bde7ecdf8be5fa85a1 | [
"BSD-2-Clause"
] | null | null | null | analysis/202106--uncertainty_vs_flux/compare_images.py | rsiverd/ultracool | cbeb2e0e4aee0acc9f8ed2bde7ecdf8be5fa85a1 | [
"BSD-2-Clause"
] | null | null | null | analysis/202106--uncertainty_vs_flux/compare_images.py | rsiverd/ultracool | cbeb2e0e4aee0acc9f8ed2bde7ecdf8be5fa85a1 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 :
#
# Compare an image file and its associated uncertainty image.
#
# Rob Siverd
# Created: 2021-06-03
# Last modified: 2021-06-03
#--------------------------------------------------------------------------
#**************************************************************************
#--------------------------------------------------------------------------
## Logging setup:
import logging
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
# Module-level logger; default verbosity is INFO (flip the commented DEBUG
# lines above for more output).
logger.setLevel(logging.INFO)
## Current version:
__version__ = "0.0.1"
## Python version-agnostic module reloading:
# Make a working reload() available on Python 2.7 and every Python 3 variant.
try:
    reload                              # Python 2.7
except NameError:
    try:
        from importlib import reload    # Python 3.4+
    except ImportError:
        from imp import reload          # Python 3.0 - 3.3
## Modules:
#import argparse
#import shutil
import resource
import signal
#import glob
import gc
import os
import sys
import time
#import vaex
#import calendar
#import ephem
import numpy as np
#from numpy.lib.recfunctions import append_fields
#import datetime as dt
#from dateutil import parser as dtp
#import scipy.linalg as sla
#import scipy.signal as ssig
#import scipy.ndimage as ndi
#import scipy.optimize as opti
#import scipy.interpolate as stp
#import scipy.spatial.distance as ssd
import matplotlib.pyplot as plt
#import matplotlib.cm as cm
#import matplotlib.ticker as mt
#import matplotlib._pylab_helpers as hlp
#from matplotlib.colors import LogNorm
#import matplotlib.colors as mplcolors
#import matplotlib.collections as mcoll
#import matplotlib.gridspec as gridspec
#from functools import partial
#from collections import OrderedDict
#from collections.abc import Iterable
#import multiprocessing as mp
#np.set_printoptions(suppress=True, linewidth=160)
#import pandas as pd
#import statsmodels.api as sm
#import statsmodels.formula.api as smf
#from statsmodels.regression.quantile_regression import QuantReg
#import PIL.Image as pli
#import seaborn as sns
#import cmocean
import theil_sen as ts
#import window_filter as wf
#import itertools as itt
# Numpy major.minor version as a float (e.g. 1.19) for later feature checks.
_have_np_vers = float('.'.join(np.__version__.split('.')[:2]))
##--------------------------------------------------------------------------##
## Disable buffering on stdout/stderr:
class Unbuffered(object):
    """Wrap a stream so that every write() is flushed immediately."""

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        # Write-through and flush right away -- no buffering.
        raw = self.stream
        raw.write(data)
        raw.flush()

    def __getattr__(self, attr):
        # Any attribute we don't define ourselves comes from the raw stream.
        return getattr(self.stream, attr)

sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
## Home-brew robust statistics:
# robust_stats is a required local helper module; fail fast with a clear
# message if it is missing.
try:
    import robust_stats
    reload(robust_stats)
    rs = robust_stats
except ImportError:
    logger.error("module robust_stats not found! Install and retry.")
    sys.stderr.write("\nError! robust_stats module not found!\n"
           "Please install and try again ...\n\n")
    sys.exit(1)
## Home-brew KDE:
#try:
# import my_kde
# reload(my_kde)
# mk = my_kde
#except ImportError:
# logger.error("module my_kde not found! Install and retry.")
# sys.stderr.write("\nError! my_kde module not found!\n"
# "Please install and try again ...\n\n")
# sys.exit(1)
## Fast FITS I/O:
#try:
# import fitsio
#except ImportError:
# logger.error("fitsio module not found! Install and retry.")
# sys.stderr.write("\nError: fitsio module not found!\n")
# sys.exit(1)
## Various from astropy:
# Only astropy.io.fits is required here; the remaining astropy imports are
# kept commented for easy re-enabling.
try:
    # import astropy.io.ascii as aia
    import astropy.io.fits as pf
    # import astropy.io.votable as av
    # import astropy.table as apt
    # import astropy.time as astt
    # import astropy.wcs as awcs
    # from astropy import constants as aconst
    # from astropy import coordinates as coord
    # from astropy import units as uu
except ImportError:
    # logger.error("astropy module not found!  Install and retry.")
    sys.stderr.write("\nError: astropy module not found!\n")
    sys.exit(1)
## Star extraction:
#try:
# import easy_sep
# reload(easy_sep)
#except ImportError:
# logger.error("easy_sep module not found! Install and retry.")
# sys.stderr.write("Error: easy_sep module not found!\n\n")
# sys.exit(1)
#pse = easy_sep.EasySEP()
##--------------------------------------------------------------------------##
## Colors for fancy terminal output:
# ANSI SGR escape codes: N* = normal weight, B* = bold face.
NRED    = '\033[0;31m'   ;  BRED    = '\033[1;31m'
NGREEN  = '\033[0;32m'   ;  BGREEN  = '\033[1;32m'
NYELLOW = '\033[0;33m'   ;  BYELLOW = '\033[1;33m'
NBLUE   = '\033[0;34m'   ;  BBLUE   = '\033[1;34m'
NMAG    = '\033[0;35m'   ;  BMAG    = '\033[1;35m'
NCYAN   = '\033[0;36m'   ;  BCYAN   = '\033[1;36m'
NWHITE  = '\033[0;37m'   ;  BWHITE  = '\033[1;37m'
ENDC    = '\033[0m'
## Suppress colors in cron jobs:
# The FUNCDEF=--nocolors environment marker blanks every escape sequence.
if (os.getenv('FUNCDEF') == '--nocolors'):
    NRED    = ''   ;  BRED    = ''
    NGREEN  = ''   ;  BGREEN  = ''
    NYELLOW = ''   ;  BYELLOW = ''
    NBLUE   = ''   ;  BBLUE   = ''
    NMAG    = ''   ;  BMAG    = ''
    NCYAN   = ''   ;  BCYAN   = ''
    NWHITE  = ''   ;  BWHITE  = ''
    ENDC    = ''
## Fancy text:
degree_sign = u'\N{DEGREE SIGN}'
## Dividers:
halfdiv = '-' * 40
fulldiv = '-' * 80
##--------------------------------------------------------------------------##
## Save FITS image with clobber (astropy / pyfits):
#def qsave(iname, idata, header=None, padkeys=1000, **kwargs):
# this_func = sys._getframe().f_code.co_name
# parent_func = sys._getframe(1).f_code.co_name
# sys.stderr.write("Writing to '%s' ... " % iname)
# if header:
# while (len(header) < padkeys):
# header.append() # pad header
# if os.path.isfile(iname):
# os.remove(iname)
# pf.writeto(iname, idata, header=header, **kwargs)
# sys.stderr.write("done.\n")
##--------------------------------------------------------------------------##
## Save FITS image with clobber (fitsio):
#def qsave(iname, idata, header=None, **kwargs):
# this_func = sys._getframe().f_code.co_name
# parent_func = sys._getframe(1).f_code.co_name
# sys.stderr.write("Writing to '%s' ... " % iname)
# #if os.path.isfile(iname):
# # os.remove(iname)
# fitsio.write(iname, idata, clobber=True, header=header, **kwargs)
# sys.stderr.write("done.\n")
##--------------------------------------------------------------------------##
def ldmap(things):
return dict(zip(things, range(len(things))))
def argnear(vec, val):
return (np.abs(vec - val)).argmin()
##--------------------------------------------------------------------------##
## New-style string formatting (more at https://pyformat.info/):
#oldway = '%s %s' % ('one', 'two')
#newway = '{} {}'.format('one', 'two')
#oldway = '%d %d' % (1, 2)
#newway = '{} {}'.format(1, 2)
# With padding:
#oldway = '%10s' % ('test',) # right-justified
#newway = '{:>10}'.format('test') # right-justified
#oldway = '%-10s' % ('test',) # left-justified
#newway = '{:10}'.format('test') # left-justified
# Ordinally:
#newway = '{1} {0}'.format('one', 'two') # prints "two one"
# Dictionarily:
#newway = '{lastname}, {firstname}'.format(firstname='Rob', lastname='Siverd')
# Centered (new-only):
#newctr = '{:^10}'.format('test') # prints " test "
# Numbers:
#oldway = '%06.2f' % (3.141592653589793,)
#newway = '{:06.2f}'.format(3.141592653589793)
##--------------------------------------------------------------------------##
## Quick ASCII I/O:
#data_file = 'data.txt'
#gftkw = {'encoding':None} if (_have_np_vers >= 1.14) else {}
#gftkw.update({'names':True, 'autostrip':True})
#gftkw.update({'delimiter':'|', 'comments':'%0%0%0%0'})
#gftkw.update({'loose':True, 'invalid_raise':False})
#all_data = np.genfromtxt(data_file, dtype=None, **gftkw)
#all_data = aia.read(data_file)
#all_data = pd.read_csv(data_file)
#all_data = pd.read_table(data_file, delim_whitespace=True)
#all_data = pd.read_table(data_file, skipinitialspace=True)
#all_data = pd.read_table(data_file, sep='|')
#fields = all_data.dtype.names
#if not fields:
# x = all_data[:, 0]
# y = all_data[:, 1]
#else:
# x = all_data[fields[0]]
# y = all_data[fields[1]]
#vot_file = 'neato.xml'
#vot_data = av.parse_single_table(vot_file)
#vot_data = av.parse_single_table(vot_file).to_table()
##--------------------------------------------------------------------------##
## Quick FITS I/O:
ifile = 'SPITZER_I2_44772864_0004_0000_2_cbcd.fits'
ufile = 'SPITZER_I2_44772864_0004_0000_2_cbunc.fits'
idata, ihdrs = pf.getdata(ifile, header=True)
udata, uhdrs = pf.getdata(ufile, header=True)
gain = ihdrs['GAIN']
exptime = ihdrs['EXPTIME']
fluxconv = ihdrs['FLUXCONV']
ignore = np.isnan(idata) | np.isnan(udata)
isafe = idata[~ignore]
usafe = udata[~ignore]
ignore = (isafe <= 0.0)
iclean = isafe[~ignore]
uclean = usafe[~ignore]
ui_ratio = uclean / iclean
## Try to reproduce the idata:udata relationship ...
icounts = iclean / fluxconv * exptime * gain # in electrons
ucounts = uclean / fluxconv * exptime * gain # in electrons
#icounts -= np.median(icounts)
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
## Estimate icounts:ucounts relationship from bright pixels:
cutoff = 1e3
bright = (icounts >= cutoff)
ic_fit = icounts[bright]
uc_fit = ucounts[bright]
vc_fit = uc_fit**2
sys.stderr.write("Fitting variance(counts) for bright pixels ... ")
model = ts.linefit(ic_fit, vc_fit)
sys.stderr.write("done.\n")
#model = np.array([375., 1.05])
## A line for plotting:
pcounts = np.linspace(0.1, 3e4, 1000)
pcounts = np.logspace(-1.0, 4.5, 1000)
pvarian = model[0] + model[1] * pcounts
##--------------------------------------------------------------------------##
## Theil-Sen line-fitting (linear):
#model = ts.linefit(xvals, yvals)
#icept, slope = ts.linefit(xvals, yvals)
## Theil-Sen line-fitting (loglog):
#xvals, yvals = np.log10(original_xvals), np.log10(original_yvals)
#xvals, yvals = np.log10(df['x'].values), np.log10(df['y'].values)
#llmodel = ts.linefit(np.log10(xvals), np.log10(yvals))
#icept, slope = ts.linefit(xvals, yvals)
#fit_exponent = slope
#fit_multiplier = 10**icept
#bestfit_x = np.arange(5000)
#bestfit_y = fit_multiplier * bestfit_x**fit_exponent
## Log-log evaluator:
#def loglog_eval(xvals, model):
# icept, slope = model
# return 10**icept * xvals**slope
#def loglog_eval(xvals, icept, slope):
# return 10**icept * xvals**slope
##--------------------------------------------------------------------------##
## Plot config:
# gridspec examples:
# https://matplotlib.org/users/gridspec.html
#gs1 = gridspec.GridSpec(4, 4)
#gs1.update(wspace=0.025, hspace=0.05) # set axis spacing
#ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=3) # top-left + center + right
#ax2 = plt.subplot2grid((3, 3), (1, 0), colspan=2) # mid-left + mid-center
#ax3 = plt.subplot2grid((3, 3), (1, 2), rowspan=2) # mid-right + bot-right
#ax4 = plt.subplot2grid((3, 3), (2, 0)) # bot-left
#ax5 = plt.subplot2grid((3, 3), (2, 1)) # bot-center
##--------------------------------------------------------------------------##
#plt.style.use('bmh') # Bayesian Methods for Hackers style
fig_dims = (12, 10)
fig = plt.figure(1, figsize=fig_dims)
plt.gcf().clf()
#fig, axs = plt.subplots(2, 2, sharex=True, figsize=fig_dims, num=1)
# sharex='col' | sharex='row'
#fig.frameon = False # disable figure frame drawing
#fig.subplots_adjust(left=0.07, right=0.95)
#ax1 = plt.subplot(gs[0, 0])
ax1 = fig.add_subplot(111)
#ax1 = fig.add_axes([0, 0, 1, 1])
#ax1.patch.set_facecolor((0.8, 0.8, 0.8))
#ax1.grid(True)
#ax1.axis('off')
ax1.grid(True)
#ax1.scatter(iclean, uclean, lw=0, s=5)
ax1.scatter(icounts, ucounts**2, lw=0, s=5)
ax1.plot(pcounts, pvarian, c='r')
ax1.set_yscale('log')
ax1.set_xscale('log')
plot_name = 'gain_log.png'
fig.tight_layout() # adjust boundaries sensibly, matplotlib v1.1+
plt.draw()
fig.savefig(plot_name, bbox_inches='tight')
ax1.set_xscale('linear')
ax1.set_yscale('linear')
plot_name = 'gain_lin.png'
fig.tight_layout() # adjust boundaries sensibly, matplotlib v1.1+
plt.draw()
fig.savefig(plot_name, bbox_inches='tight')
## Disable axis offsets:
#ax1.xaxis.get_major_formatter().set_useOffset(False)
#ax1.yaxis.get_major_formatter().set_useOffset(False)
#ax1.plot(kde_pnts, kde_vals)
#blurb = "some text"
#ax1.text(0.5, 0.5, blurb, transform=ax1.transAxes)
#ax1.text(0.5, 0.5, blurb, transform=ax1.transAxes,
# va='top', ha='left', bbox=dict(facecolor='white', pad=10.0))
# fontdict={'family':'monospace'}) # fixed-width
#colors = cm.rainbow(np.linspace(0, 1, len(plot_list)))
#for camid, c in zip(plot_list, colors):
# cam_data = subsets[camid]
# xvalue = cam_data['CCDATEMP']
# yvalue = cam_data['PIX_MED']
# yvalue = cam_data['IMEAN']
# ax1.scatter(xvalue, yvalue, color=c, lw=0, label=camid)
#mtickpos = [2,5,7]
#ndecades = 1.0 # for symlog, set width of linear portion in units of dex
#nonposx='mask' | nonposx='clip' | nonposy='mask' | nonposy='clip'
#ax1.set_xscale('log', basex=10, nonposx='mask', subsx=mtickpos)
#ax1.set_xscale('log', nonposx='clip', subsx=[3])
#ax1.set_yscale('symlog', basey=10, linthreshy=0.1, linscaley=ndecades)
#ax1.xaxis.set_major_formatter(formatter) # re-format x ticks
#ax1.set_ylim(ax1.get_ylim()[::-1])
#ax1.set_xlabel('whatever', labelpad=30) # push X label down
#ax1.set_xticks([1.0, 3.0, 10.0, 30.0, 100.0])
#ax1.set_xticks([1, 2, 3], ['Jan', 'Feb', 'Mar'])
#for label in ax1.get_xticklabels():
# label.set_rotation(30)
# label.set_fontsize(14)
#ax1.xaxis.label.set_fontsize(18)
#ax1.yaxis.label.set_fontsize(18)
#ax1.set_xlim(nice_limits(xvec, pctiles=[1,99], pad=1.2))
#ax1.set_ylim(nice_limits(yvec, pctiles=[1,99], pad=1.2))
#spts = ax1.scatter(x, y, lw=0, s=5)
##cbar = fig.colorbar(spts, orientation='vertical') # old way
#cbnorm = mplcolors.Normalize(*spts.get_clim())
#scm = plt.cm.ScalarMappable(norm=cbnorm, cmap=spts.cmap)
#scm.set_array([])
#cbar = fig.colorbar(scm, orientation='vertical')
#cbar = fig.colorbar(scm, ticks=cs.levels, orientation='vertical') # contours
#cbar.formatter.set_useOffset(False)
#cbar.update_ticks()
fig.tight_layout() # adjust boundaries sensibly, matplotlib v1.1+
plt.draw()
#fig.savefig(plot_name, bbox_inches='tight')
######################################################################
# CHANGELOG (compare_images.py):
#---------------------------------------------------------------------
#
# 2021-06-03:
# -- Increased __version__ to 0.0.1.
# -- First created compare_images.py.
#
| 32.379085 | 78 | 0.603485 | 241 | 0.016216 | 0 | 0 | 0 | 0 | 0 | 0 | 11,592 | 0.779976 |
c5dbec713bc194a6916e548695dd8b86bc98a04d | 3,317 | py | Python | tests/utils/mock_server.py | ant-lastline/cb-lastline-connector | ccdf2c7e33dc94acb382fb4694b9a65b44142b26 | [
"MIT"
] | 2 | 2017-01-23T22:09:23.000Z | 2018-07-16T04:37:41.000Z | tests/utils/mock_server.py | ant-lastline/cb-lastline-connector | ccdf2c7e33dc94acb382fb4694b9a65b44142b26 | [
"MIT"
] | null | null | null | tests/utils/mock_server.py | ant-lastline/cb-lastline-connector | ccdf2c7e33dc94acb382fb4694b9a65b44142b26 | [
"MIT"
] | 1 | 2016-08-16T23:45:29.000Z | 2016-08-16T23:45:29.000Z | import logging
import os
try:
import simplejson as json
except ImportError:
import json
from flask import Flask, request, make_response, Response
from cStringIO import StringIO
import zipfile
def get_mocked_server(binary_directory):
mocked_cb_server = Flask('cb')
files = os.listdir(binary_directory)
@mocked_cb_server.route('/api/v1/binary', methods=['GET', 'POST'])
def binary_search_endpoint():
if request.method == 'GET':
query_string = request.args.get('q', '')
rows = int(request.args.get('rows', 10))
start = int(request.args.get('start', 0))
elif request.method == 'POST':
parsed_data = json.loads(request.data)
if 'q' in parsed_data:
query_string = parsed_data['q']
else:
query_string = ''
if 'rows' in parsed_data:
rows = int(parsed_data['rows'])
else:
rows = 10
if 'start' in parsed_data:
start = int(parsed_data['start'])
else:
start = 0
else:
return make_response('Invalid Request', 500)
return Response(response=json.dumps(binary_search(query_string, rows, start)),
mimetype='application/json')
def binary_search(q, rows, start):
return {
'results':
[json.load(open(os.path.join(binary_directory, fn), 'r')) for fn in files[start:start+rows]],
'terms': '',
'total_results': len(files),
'start': start,
'elapsed': 0.1,
'highlights': [],
'facets': {}
}
@mocked_cb_server.route('/api/v1/binary/<md5sum>/summary')
def get_binary_summary(md5sum):
filepath = os.path.join(binary_directory, '%s.json' % md5sum.lower())
if not os.path.exists(filepath):
return Response("File not found", 404)
binary_data = open(filepath, 'r').read()
return Response(response=binary_data, mimetype='application/json')
@mocked_cb_server.route('/api/v1/binary/<md5sum>')
def get_binary(md5sum):
metadata_filepath = os.path.join(binary_directory, '%s.json' % md5sum.lower())
content_filepath = os.path.join(binary_directory, '%s' % md5sum.lower())
for filepath in [metadata_filepath, content_filepath]:
if not os.path.exists(filepath):
return Response("File not found", 404)
zipfile_contents = StringIO()
zf = zipfile.ZipFile(zipfile_contents, 'w', zipfile.ZIP_DEFLATED, False)
zf.writestr('filedata', open(content_filepath, 'r').read())
zf.writestr('metadata', open(metadata_filepath, 'r').read())
zf.close()
return Response(response=zipfile_contents.getvalue(), mimetype='application/zip')
@mocked_cb_server.route('/api/info')
def info():
return Response(response=json.dumps({"version": "5.1.0"}), mimetype='application/json')
return mocked_cb_server
if __name__ == '__main__':
mydir = os.path.dirname(os.path.abspath(__file__))
binaries_dir = os.path.join(mydir, '..', 'data', 'binary_data')
mock_server = get_mocked_server(binaries_dir)
mock_server.run('127.0.0.1', 7982, debug=True) | 34.195876 | 109 | 0.601447 | 0 | 0 | 0 | 0 | 2,312 | 0.697015 | 0 | 0 | 469 | 0.141393 |
c5dde82f6e5df42892863767c657cfd603744b98 | 1,048 | py | Python | neural_networks/softmax_loss.py | Yao-Shao/Maching-Learning-only-with-Numpy | 8e86f96d8be278b97b16d6f0235cd86dca97db29 | [
"MIT"
] | 1 | 2019-09-25T06:36:26.000Z | 2019-09-25T06:36:26.000Z | hw3/hw3_code/neural_networks/softmax_loss.py | Junlin-Yin/Data-Mining-Homework | 043f836e3dd30f32b5b06f40af61ae55b9287fbc | [
"MIT"
] | null | null | null | hw3/hw3_code/neural_networks/softmax_loss.py | Junlin-Yin/Data-Mining-Homework | 043f836e3dd30f32b5b06f40af61ae55b9287fbc | [
"MIT"
] | null | null | null | import numpy as np
def softmax_loss(in_, label):
'''
The softmax loss computing process
inputs:
in_ : the output of previous layer, shape: [number of images, number of kinds of labels]
label : the ground true of these images, shape: [1, number of images]
outputs
loss : the average loss, scale variable
accuracy: the accuracy of the classification
sentivity : the sentivity for in, shape: [number of images, number of kinds of labels]
'''
n, k = in_.shape
in_ = in_ - np.tile(np.max(in_, axis=1, keepdims=True), (1, k))
h = np.exp(in_)
total = np.sum(h, axis=1, keepdims=True)
probs = h / np.tile(total, k)
idx = (np.arange(n), label.flatten() - 1)
loss = -np.sum(np.log(probs[idx])) / n
max_idx = np.argmax(probs, axis=1)
accuracy = np.sum(max_idx == (label - 1).flatten()) / n
sensitivity = np.zeros((n, k))
sensitivity[idx] = -1
sensitivity = sensitivity + probs
return loss, accuracy, sensitivity
| 34.933333 | 102 | 0.60687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 473 | 0.451336 |
c5de930741f4d8551e11905355564ca78fd72a62 | 320 | py | Python | alertmanager_telegram/config.py | medeirosjrm/alertmanager-telegram | ff9701936ec766b7992399fd741d8a8a4dab3957 | [
"Apache-2.0"
] | 5 | 2020-05-20T11:37:37.000Z | 2021-11-23T09:04:14.000Z | alertmanager_telegram/config.py | medeirosjrm/alertmanager-telegram | ff9701936ec766b7992399fd741d8a8a4dab3957 | [
"Apache-2.0"
] | null | null | null | alertmanager_telegram/config.py | medeirosjrm/alertmanager-telegram | ff9701936ec766b7992399fd741d8a8a4dab3957 | [
"Apache-2.0"
] | 3 | 2021-01-31T17:57:08.000Z | 2021-11-24T13:33:31.000Z | import os
TELEGRAM_CHAT_ID = os.environ.get("TELEGRAM_CHAT_ID")
if not TELEGRAM_CHAT_ID:
raise ValueError("No TELEGRAM_CHAT_ID set for application")
TELEGRAM_TOKEN = os.environ.get("TELEGRAM_TOKEN")
if not TELEGRAM_TOKEN:
raise ValueError("No TELEGRAM_TOKEN set for application")
TEMPLATES_AUTO_RELOAD = True
| 26.666667 | 63 | 0.796875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.35625 |
c5df387f75013ac455f39f6b741fd2c666ef73e0 | 9,935 | py | Python | data-processing/tests/test_parse_tex.py | rishamsidhu/scholar-reader | fa97cd5fb57305ff96ac060e225d1c331da31654 | [
"Apache-2.0"
] | null | null | null | data-processing/tests/test_parse_tex.py | rishamsidhu/scholar-reader | fa97cd5fb57305ff96ac060e225d1c331da31654 | [
"Apache-2.0"
] | null | null | null | data-processing/tests/test_parse_tex.py | rishamsidhu/scholar-reader | fa97cd5fb57305ff96ac060e225d1c331da31654 | [
"Apache-2.0"
] | null | null | null | from common.parse_tex import (
BeginDocumentExtractor,
BibitemExtractor,
DocumentclassExtractor,
EquationExtractor,
MacroExtractor,
PlaintextExtractor,
)
from common.types import MacroDefinition
from entities.sentences.extractor import SentenceExtractor
def test_extract_plaintext_with_newlines():
extractor = PlaintextExtractor()
plaintext_segments = list(
extractor.parse(
"main.tex",
"This sentence is followed by a newline.\nThis is the second sentence.",
)
)
# Earlier versions of the plaintext extractor inadvertently removed newlines, which are needed
# to accurately perform downstream tasks like sentence boundary detection. This test makes sure
# that the newlines are preserved.
plaintext = "".join([segment.text for segment in plaintext_segments])
assert (
plaintext
== "This sentence is followed by a newline.\nThis is the second sentence."
)
def test_extract_sentences():
extractor = SentenceExtractor()
sentences = list(
extractor.parse(
"main.tex",
"This is the first \\macro[arg]{sentence}. This is the second sentence.",
)
)
assert len(sentences) == 2
sentence1 = sentences[0]
assert sentence1.start == 0
assert sentence1.end == 40
assert sentences[0].text == "This is the first argsentence."
sentence2 = sentences[1]
assert sentence2.start == 41
assert sentence2.end == 69
assert sentences[1].text == "This is the second sentence."
def test_ignore_periods_in_equations():
extractor = SentenceExtractor()
sentences = list(
extractor.parse("main.tex", "This sentence has an $ equation. In $ the middle.")
)
assert len(sentences) == 1
assert sentences[0].text == "This sentence has an [[math]] the middle."
def test_extract_equation_from_dollar_sign():
extractor = EquationExtractor()
equations = list(extractor.parse("main.tex", "$x + y$"))
assert len(equations) == 1
equation = equations[0]
assert equation.start == 0
assert equation.content_start == 1
assert equation.end == 7
assert equation.content_tex == "x + y"
assert equation.tex == "$x + y$"
def test_extract_equation_from_equation_environment():
extractor = EquationExtractor()
equations = list(extractor.parse("main.tex", "\\begin{equation}x\\end{equation}"))
assert len(equations) == 1
equation = equations[0]
assert equation.start == 0
assert equation.content_start == 16
assert equation.end == 31
assert equation.content_tex == "x"
assert equation.tex == "\\begin{equation}x\\end{equation}"
def test_extract_equation_from_star_environment():
extractor = EquationExtractor()
equations = list(extractor.parse("main.tex", "\\begin{equation*}x\\end{equation*}"))
assert len(equations) == 1
equation = equations[0]
assert equation.start == 0
assert equation.end == 33
def test_extract_equation_environment_with_argument():
extractor = EquationExtractor()
equations = list(extractor.parse("main.tex", "\\begin{array}{c}x\\end{array}"))
assert len(equations) == 1
equation = equations[0]
assert equation.content_start == 16
def test_extract_equation_from_double_dollar_signs():
extractor = EquationExtractor()
equations = list(extractor.parse("main.tex", "$$x$$"))
assert len(equations) == 1
equation = equations[0]
assert equation.start == 0
assert equation.end == 5
def test_dont_extract_equation_from_command_argument_brackets():
extractor = EquationExtractor()
equations = list(extractor.parse("main.tex", "\\documentclass[11pt]{article}"))
assert len(equations) == 0
def test_extract_equation_from_brackets():
extractor = EquationExtractor()
equations = list(extractor.parse("main.tex", "\\[x + y\\]"))
assert len(equations) == 1
equation = equations[0]
assert equation.start == 0
assert equation.content_start == 2
assert equation.end == 9
def test_extract_nested_equations():
extractor = EquationExtractor()
equations = list(
extractor.parse("main.tex", "$x + \\hbox{\\begin{equation}y\\end{equation}}$")
)
assert len(equations) == 2
outer = next(filter(lambda e: e.start == 0, equations))
assert outer.end == 44
inner = next(filter(lambda e: e.start == 11, equations))
assert inner.end == 42
def test_handle_unclosed_environments():
extractor = EquationExtractor()
equations = list(extractor.parse("main.tex", "$x + \\hbox{\\begin{equation}y}$"))
assert len(equations) == 1
equation = equations[0]
assert equation.start == 0
assert equation.end == 30
def test_ignore_escaped_dollar_sign():
extractor = EquationExtractor()
equations = list(extractor.parse("main.tex", "\\$\\$"))
assert len(equations) == 0
def test_extract_begindocument():
extractor = BeginDocumentExtractor()
tex = "\\RequirePackage[hyperindex]{hyperref}\n\\begin{document}"
begindocument = extractor.parse(tex)
assert begindocument.start == 38
assert begindocument.end == 54
def test_extract_documentclass_after_comment_ending_with_whitespace():
extractor = DocumentclassExtractor()
tex = "\n\n%\\documentclass{IEEEtran} \n\\documentclass{article}"
documentclass = extractor.parse(tex)
assert documentclass is not None
def test_documentclass_after_macro():
# In some TeX files, the documentclass isn't declared until after some initial macros.
# We still want to detect the documentclass in these documents.
extractor = DocumentclassExtractor()
tex = "\\def\year{2020}\n\\documentclass{article}"
documentclass = extractor.parse(tex)
assert documentclass is not None
def test_extract_bibitems():
tex = "\n".join(
[
"\\bibitem[label]{key1}",
"token1",
"\\newblock \\emph{token2}",
"\\newblock token3",
"\\bibitem[label]{key2}",
"token4",
"\\newblock \\emph{token5}",
]
)
extractor = BibitemExtractor()
bibitems = list(extractor.parse(tex))
assert len(bibitems) == 2
assert bibitems[0].key == "key1"
assert bibitems[0].text == "token1 token2 token3"
assert bibitems[1].key == "key2"
assert bibitems[1].text == "token4 token5"
def test_extract_bibitem_tokens_from_curly_braces():
tex = "\n".join(["\\bibitem[label]{key1}", "token1 {token2} {token3}",])
extractor = BibitemExtractor()
bibitems = list(extractor.parse(tex))
assert len(bibitems) == 1
assert bibitems[0].key == "key1"
assert bibitems[0].text == "token1 token2 token3"
def test_extract_bibitems_from_environment():
tex = "\n".join(
[
"\\begin{thebibliography}",
"\\bibitem[label]{key1}",
"token1",
"\\end{thebibliography}",
]
)
extractor = BibitemExtractor()
bibitems = list(extractor.parse(tex))
assert len(bibitems) == 1
assert bibitems[0].key == "key1"
assert bibitems[0].text == "token1"
def test_extract_bibitem_stop_at_newline():
tex = "\n".join(
["\\bibitem[label]{key1}", "token1", "", "text after bibliography (to ignore)"]
)
extractor = BibitemExtractor()
bibitems = list(extractor.parse(tex))
assert len(bibitems) == 1
assert bibitems[0].key == "key1"
assert bibitems[0].text == "token1"
def test_extract_macro():
tex = "\\macro"
extractor = MacroExtractor()
macros = list(extractor.parse(tex, MacroDefinition("macro", "")))
assert len(macros) == 1
assert macros[0].start == 0
assert macros[0].end == 6
def test_extract_macro_with_delimited_parameter():
tex = "\\macro arg."
extractor = MacroExtractor()
macros = list(extractor.parse(tex, MacroDefinition("macro", "#1.")))
assert len(macros) == 1
assert macros[0].start == 0
assert macros[0].end == 11
assert macros[0].tex == "\\macro arg."
def test_extract_macro_with_undelimited_parameter():
# the scanner for undelimited parameter '#1' should match the first non-blank token 'a'.
tex = "\\macro a"
extractor = MacroExtractor()
macros = list(extractor.parse(tex, MacroDefinition("macro", "#1")))
assert len(macros) == 1
assert macros[0].start == 0
assert macros[0].end == 9
assert macros[0].tex == "\\macro a"
def test_extract_macro_balance_nested_braces_for_argument():
tex = "\\macro{{nested}}"
extractor = MacroExtractor()
macros = list(extractor.parse(tex, MacroDefinition("macro", "#1")))
assert len(macros) == 1
assert macros[0].start == 0
assert macros[0].end == 16
assert macros[0].tex == "\\macro{{nested}}"
def test_sentence_splitting_end_points():
extractor = SentenceExtractor()
sentences = list(
extractor.parse(
"main.tex",
"This is a sentence. Next we describe two items. 1) The first item. 2) The second item.",
)
)
assert len(sentences) == 4
sentence_end_points = [[0, 19], [20, 47], [48, 66], [67, 86]]
for i, [start, end] in enumerate(sentence_end_points):
assert sentences[i].start == start
assert sentences[i].end == end
def test_sentence_splitting_end_points_and_more_text():
extractor = SentenceExtractor()
sentences = list(
extractor.parse(
"main.tex",
"This sentence. has extra. text. 1. first 2. second 3. third. And some extra. stuff.",
)
)
assert len(sentences) == 8
sentence_end_points = [
[0, 14],
[15, 25],
[26, 31],
[32, 40],
[41, 50],
[51, 60],
[61, 76],
[77, 83],
]
for i, [start, end] in enumerate(sentence_end_points):
assert sentences[i].start == start
assert sentences[i].end == end
| 31.046875 | 101 | 0.650025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,233 | 0.224761 |
c5e00eaee5bf1dc4f3ef2430a8612b17e78c2729 | 282 | py | Python | appexemple/__main__.py | yoannmos/Inupdater-AppExemple | ac62430a8ca02505eb73afaff698500b7f46ea31 | [
"MIT"
] | null | null | null | appexemple/__main__.py | yoannmos/Inupdater-AppExemple | ac62430a8ca02505eb73afaff698500b7f46ea31 | [
"MIT"
] | null | null | null | appexemple/__main__.py | yoannmos/Inupdater-AppExemple | ac62430a8ca02505eb73afaff698500b7f46ea31 | [
"MIT"
] | null | null | null | import sys
from pathlib import Path
from appexemple import __version__
print(
f"""
Hello you are in App Exemple version {__version__}\n
sys.argv[-1] : {sys.argv[-1]}\n
Path().cwd() : {Path().cwd()}\n
Path(__file__) : {Path(__file__)},\n
"""
)
input("Press [Enter] to quit.")
| 17.625 | 52 | 0.663121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.659574 |
c5e3c6d2f8a1234b127fb72e14337b67358d7125 | 1,793 | py | Python | {{cookiecutter.repo_slug}}/tests/unit/user/test_managers.py | ikhomutov/cookiecutter-shmango | ec5a6ff94256650552e11992662a617b503418e4 | [
"MIT"
] | null | null | null | {{cookiecutter.repo_slug}}/tests/unit/user/test_managers.py | ikhomutov/cookiecutter-shmango | ec5a6ff94256650552e11992662a617b503418e4 | [
"MIT"
] | 83 | 2020-04-21T04:12:47.000Z | 2021-09-03T01:47:27.000Z | {{cookiecutter.repo_slug}}/tests/unit/user/test_managers.py | ikhomutov/cookiecutter-shmango | ec5a6ff94256650552e11992662a617b503418e4 | [
"MIT"
] | null | null | null | import pytest
pytestmark = pytest.mark.django_db
class TestUserManagers:
def test_create_user(self, django_user_model, faker):
email = faker.email()
password = faker.password()
user = django_user_model.objects.create_user(email, password)
assert user.email == email
assert user.check_password(password)
def test_create_user_empty_password(self, django_user_model, faker):
email = faker.email()
user = django_user_model.objects.create_user(email)
assert user.email == email
assert not user.has_usable_password()
def test_create_user_raises_error_on_empty_email(self, django_user_model):
with pytest.raises(ValueError):
django_user_model.objects.create_user(email='')
def test_create_superuser(self, django_user_model, faker):
email = faker.email()
password = faker.password()
user = django_user_model.objects.create_superuser(email, password)
assert user.email == email
assert user.check_password(password)
assert user.is_superuser
assert user.is_staff
def test_create_superuser_raises_error_on_false_is_superuser(
self, django_user_model, faker
):
with pytest.raises(ValueError):
django_user_model.objects.create_superuser(
email=faker.email(),
password=faker.password(),
is_superuser=False,
)
def test_create_superuser_raises_error_on_false_is_staff(
self, django_user_model, faker
):
with pytest.raises(ValueError):
django_user_model.objects.create_superuser(
email=faker.email(),
password=faker.password(),
is_staff=False,
)
| 33.830189 | 78 | 0.662019 | 1,740 | 0.970441 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0.001115 |
c5e54fc9fa5630a02a90b1f99c8ec0d552707d95 | 6,021 | py | Python | sqlalchemy/sqlalchemy-0.3.6+codebay/test/base/dependency.py | nakedible/vpnease-l2tp | 0fcda6a757f2bc5c37f4753b3cd8b1c6d282db5c | [
"WTFPL"
] | 5 | 2015-04-16T08:36:17.000Z | 2017-05-12T17:20:12.000Z | sqlalchemy/sqlalchemy-0.3.6+codebay/test/base/dependency.py | nakedible/vpnease-l2tp | 0fcda6a757f2bc5c37f4753b3cd8b1c6d282db5c | [
"WTFPL"
] | null | null | null | sqlalchemy/sqlalchemy-0.3.6+codebay/test/base/dependency.py | nakedible/vpnease-l2tp | 0fcda6a757f2bc5c37f4753b3cd8b1c6d282db5c | [
"WTFPL"
] | 4 | 2015-03-19T14:39:51.000Z | 2019-01-23T08:22:55.000Z | from testbase import PersistTest
import sqlalchemy.topological as topological
import unittest, sys, os
from sqlalchemy import util
# TODO: need assertion conditions in this suite
class DependencySorter(topological.QueueDependencySorter):pass
class DependencySortTest(PersistTest):
def assert_sort(self, tuples, node, collection=None):
print str(node)
def assert_tuple(tuple, node):
if node.cycles:
cycles = [i.item for i in node.cycles]
else:
cycles = []
if tuple[0] is node.item or tuple[0] in cycles:
tuple.pop()
if tuple[0] is node.item or tuple[0] in cycles:
return
elif len(tuple) > 1 and tuple[1] is node.item:
assert False, "Tuple not in dependency tree: " + str(tuple)
for c in node.children:
assert_tuple(tuple, c)
for tuple in tuples:
assert_tuple(list(tuple), node)
if collection is None:
collection = []
items = util.Set()
def assert_unique(node):
for item in [n.item for n in node.cycles or [node,]]:
assert item not in items
items.add(item)
if item in collection:
collection.remove(item)
for c in node.children:
assert_unique(c)
assert_unique(node)
assert len(collection) == 0
def testsort(self):
rootnode = 'root'
node2 = 'node2'
node3 = 'node3'
node4 = 'node4'
subnode1 = 'subnode1'
subnode2 = 'subnode2'
subnode3 = 'subnode3'
subnode4 = 'subnode4'
subsubnode1 = 'subsubnode1'
tuples = [
(subnode3, subsubnode1),
(node2, subnode1),
(node2, subnode2),
(rootnode, node2),
(rootnode, node3),
(rootnode, node4),
(node4, subnode3),
(node4, subnode4)
]
head = DependencySorter(tuples, []).sort()
self.assert_sort(tuples, head)
def testsort2(self):
node1 = 'node1'
node2 = 'node2'
node3 = 'node3'
node4 = 'node4'
node5 = 'node5'
node6 = 'node6'
node7 = 'node7'
tuples = [
(node1, node2),
(node3, node4),
(node4, node5),
(node5, node6),
(node6, node2)
]
head = DependencySorter(tuples, [node7]).sort()
self.assert_sort(tuples, head, [node7])
def testsort3(self):
['Mapper|Keyword|keywords,Mapper|IKAssociation|itemkeywords', 'Mapper|Item|items,Mapper|IKAssociation|itemkeywords']
node1 = 'keywords'
node2 = 'itemkeyowrds'
node3 = 'items'
tuples = [
(node1, node2),
(node3, node2),
(node1,node3)
]
head1 = DependencySorter(tuples, [node1, node2, node3]).sort()
head2 = DependencySorter(tuples, [node3, node1, node2]).sort()
head3 = DependencySorter(tuples, [node3, node2, node1]).sort()
# TODO: figure out a "node == node2" function
#self.assert_(str(head1) == str(head2) == str(head3))
print "\n" + str(head1)
print "\n" + str(head2)
print "\n" + str(head3)
def testsort4(self):
node1 = 'keywords'
node2 = 'itemkeyowrds'
node3 = 'items'
node4 = 'hoho'
tuples = [
(node1, node2),
(node4, node1),
(node1, node3),
(node3, node2)
]
head = DependencySorter(tuples, []).sort()
self.assert_sort(tuples, head)
def testsort5(self):
# this one, depenending on the weather,
node1 = 'node1' #'00B94190'
node2 = 'node2' #'00B94990'
node3 = 'node3' #'00B9A9B0'
node4 = 'node4' #'00B4F210'
tuples = [
(node4, node1),
(node1, node2),
(node4, node3),
(node2, node3),
(node4, node2),
(node3, node3)
]
allitems = [
node1,
node2,
node3,
node4
]
head = DependencySorter(tuples, allitems).sort()
self.assert_sort(tuples, head)
def testcircular(self):
node1 = 'node1'
node2 = 'node2'
node3 = 'node3'
node4 = 'node4'
node5 = 'node5'
tuples = [
(node4, node5),
(node5, node4),
(node1, node2),
(node2, node3),
(node3, node1),
(node4, node1)
]
head = DependencySorter(tuples, []).sort(allow_all_cycles=True)
self.assert_sort(tuples, head)
def testcircular2(self):
# this condition was arising from ticket:362
# and was not treated properly by topological sort
node1 = 'node1'
node2 = 'node2'
node3 = 'node3'
node4 = 'node4'
tuples = [
(node1, node2),
(node3, node1),
(node2, node4),
(node3, node2),
(node2, node3)
]
head = DependencySorter(tuples, []).sort(allow_all_cycles=True)
self.assert_sort(tuples, head)
def testcircular3(self):
nodes = {}
tuples = [('Question', 'Issue'), ('ProviderService', 'Issue'), ('Provider', 'Question'), ('Question', 'Provider'), ('ProviderService', 'Question'), ('Provider', 'ProviderService'), ('Question', 'Answer'), ('Issue', 'Question')]
head = DependencySorter(tuples, []).sort(allow_all_cycles=True)
self.assert_sort(tuples, head)
    def testbigsort(self):
        """Smoke test: 750 disjoint edges (i -> i+1 for even i < 1500).

        NOTE(review): unlike the other tests the result is never checked with
        assert_sort, so this only guards against exceptions / pathological
        runtime on larger inputs.
        """
        tuples = []
        for i in range(0,1500, 2):
            tuples.append((i, i+1))
        head = DependencySorter(tuples, []).sort()
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 31.036082 | 235 | 0.51038 | 5,736 | 0.952666 | 0 | 0 | 0 | 0 | 0 | 0 | 948 | 0.157449 |
c5e5cecdf9201e00eb336ee9b2d39e6ed8ce5136 | 10,854 | py | Python | tensorcircuit/backends.py | refraction-ray/tensorcircuit | 666154f4dbdb25164c0e778a96ee56ac22323a6c | [
"MIT"
] | 21 | 2020-04-19T23:29:11.000Z | 2022-03-12T12:12:57.000Z | tensorcircuit/backends.py | refraction-ray/tensorcircuit | 666154f4dbdb25164c0e778a96ee56ac22323a6c | [
"MIT"
] | 3 | 2020-10-19T12:18:44.000Z | 2022-02-10T01:24:46.000Z | tensorcircuit/backends.py | refraction-ray/tensorcircuit | 666154f4dbdb25164c0e778a96ee56ac22323a6c | [
"MIT"
] | 7 | 2020-07-15T18:08:00.000Z | 2021-12-30T08:17:13.000Z | """
backend magic inherited from tensornetwork
"""
from typing import Union, Text, Any, Optional, Callable, Sequence
from functools import partial
from scipy.linalg import expm
import numpy as np
import warnings
from tensornetwork.backends.tensorflow import tensorflow_backend
from tensornetwork.backends.numpy import numpy_backend
from tensornetwork.backends.jax import jax_backend
from tensornetwork.backends.shell import shell_backend
from tensornetwork.backends.pytorch import pytorch_backend
from tensornetwork.backends import base_backend
# Generic alias used in backend method signatures: any backend's tensor type.
Tensor = Any
# Module handles populated lazily by the corresponding backend __init__
# (so that jax / torch / tensorflow are only imported when actually used).
libjax: Any
jnp: Any
jsp: Any
torchlib: Any
tf: Any
class NumpyBackend(numpy_backend.NumPyBackend): # type: ignore
    """Numpy backend extending tensornetwork's NumPyBackend.

    Adds the extra operations tensorcircuit needs (matrix exponential,
    trigonometry, casting, and AD/jit/vmap stubs) on top of the inherited
    tensornetwork backend.
    """
    def expm(self, a: Tensor) -> Tensor:
        """Matrix exponential via scipy.linalg.expm."""
        return expm(a)
    def abs(self, a: Tensor) -> Tensor:
        """Elementwise absolute value."""
        return np.abs(a)
    def sin(self, a: Tensor) -> Tensor:
        """Elementwise sine."""
        return np.sin(a)
    def cos(self, a: Tensor) -> Tensor:
        """Elementwise cosine."""
        return np.cos(a)
    def i(self, dtype: Any = None) -> Tensor:
        """Return the imaginary unit 1j as a 0-d array of ``dtype``.

        Defaults to the module-level ``npdtype`` (defined elsewhere in the
        package); string dtypes are resolved against numpy.
        NOTE(review): presumably expects a complex dtype -- confirm callers.
        """
        if not dtype:
            dtype = npdtype # type: ignore
        if isinstance(dtype, str):
            dtype = getattr(np, dtype)
        return np.array(1j, dtype=dtype)
    def is_tensor(self, a: Any) -> bool:
        """True iff ``a`` is a numpy ndarray."""
        if isinstance(a, np.ndarray):
            return True
        return False
    def real(self, a: Tensor) -> Tensor:
        """Elementwise real part."""
        return np.real(a)
    def cast(self, a: Tensor, dtype: str) -> Tensor:
        """Cast ``a`` to the numpy dtype named by the string ``dtype``."""
        return a.astype(getattr(np, dtype))
    def grad(self, f: Callable[..., Any]) -> Callable[..., Any]:
        """Automatic differentiation is unavailable on the numpy backend."""
        raise NotImplementedError("numpy backend doesn't support AD")
    def jit(self, f: Callable[..., Any]) -> Callable[..., Any]:
        """No-op stand-in for jit: warns and returns ``f`` unchanged."""
        warnings.warn("numpy backend has no parallel as jit, just do nothing")
        return f
    # raise NotImplementedError("numpy backend doesn't support jit compiling")
    def vmap(self, f: Callable[..., Any]) -> Any:
        """Approximate vmap with np.vectorize (a plain python-level loop)."""
        warnings.warn(
            "numpy backend has no intrinsic vmap like interface"
            ", use vectorize instead (plain for loop)"
        )
        return np.vectorize(f)
class JaxBackend(jax_backend.JaxBackend): # type: ignore
    """Jax backend extending tensornetwork's JaxBackend, importing jax lazily.

    Jax doesn't support 64bit dtype unless claimed via
    ``from jax.config import config; config.update("jax_enable_x64", True)``
    at the very beginning, i.e. before importing tensorcircuit.
    """
    def __init__(self) -> None:
        """Import jax lazily and publish its submodules as module globals."""
        global libjax  # jax module
        global jnp  # jax.numpy module
        global jsp  # jax.scipy module
        super(JaxBackend, self).__init__()
        try:
            import jax
        except ImportError:
            raise ImportError(
                "Jax not installed, please switch to a different "
                "backend or install Jax."
            )
        libjax = jax
        jnp = libjax.numpy
        jsp = libjax.scipy
        self.name = "jax"
    # it is already child of numpy backend, and self.np = self.jax.np
    def convert_to_tensor(self, tensor: Tensor) -> Tensor:
        """Coerce ``tensor`` to a jax ndarray."""
        result = jnp.asarray(tensor)
        return result
    def abs(self, a: Tensor) -> Tensor:
        """Elementwise absolute value."""
        return jnp.abs(a)
    def sin(self, a: Tensor) -> Tensor:
        """Elementwise sine."""
        return jnp.sin(a)
    def cos(self, a: Tensor) -> Tensor:
        """Elementwise cosine."""
        return jnp.cos(a)
    def i(self, dtype: Any = None) -> Tensor:
        """Return the imaginary unit 1j as a jax array of ``dtype``.

        Defaults to the module-level ``npdtype`` (defined elsewhere in the
        package); string dtypes are resolved against jax.numpy.
        """
        if not dtype:
            dtype = npdtype  # type: ignore
        if isinstance(dtype, str):
            dtype = getattr(jnp, dtype)
        # FIX: build the constant with jnp so a jax array is returned,
        # consistent with convert_to_tensor (original used np.array, which
        # returned a plain numpy array from a jax backend method).
        return jnp.array(1j, dtype=dtype)
    def real(self, a: Tensor) -> Tensor:
        """Elementwise real part."""
        return jnp.real(a)
    def cast(self, a: Tensor, dtype: str) -> Tensor:
        """Cast ``a`` to the jax dtype named by the string ``dtype``."""
        return a.astype(getattr(jnp, dtype))
    def expm(self, a: Tensor) -> Tensor:
        """Matrix exponential via jax.scipy.linalg.expm."""
        return jsp.linalg.expm(a)
    # currently expm in jax doesn't support AD, it will raise an AssertError, see https://github.com/google/jax/issues/2645
    def is_tensor(self, a: Any) -> bool:
        """True iff ``a`` is a concrete jax ndarray (not a tracer or numpy)."""
        if not isinstance(a, jnp.ndarray):
            return False
        # isinstance(np.eye(1), jax.numpy.ndarray) = True!
        if getattr(a, "_value", None) is not None:
            return True
        return False
    def grad(
        self, f: Callable[..., Any], argnums: Union[int, Sequence[int]] = 0
    ) -> Any:
        """Derivative of ``f`` w.r.t. the positional args in ``argnums``."""
        # TODO
        return libjax.grad(f, argnums=argnums)
    def jit(self, f: Callable[..., Any]) -> Any:
        """XLA-compile ``f`` with jax.jit."""
        return libjax.jit(f)
    def vmap(self, f: Callable[..., Any]) -> Any:
        """Vectorize ``f`` over the leading axis with jax.vmap."""
        return libjax.vmap(f)
    # since tf doesn't support in&out axes options, we don't support them in universal backend
# since tf doesn't support in&out axes options, we don't support them in universal backend
class TensorFlowBackend(tensorflow_backend.TensorFlowBackend): # type: ignore
    """TensorFlow backend extending tensornetwork's TensorFlowBackend.

    TensorFlow itself is imported lazily in ``__init__`` and published as the
    module global ``tf``.
    """
    def __init__(self) -> None:
        """Import tensorflow lazily and publish it as a module global."""
        global tf
        super(TensorFlowBackend, self).__init__()
        try:
            import tensorflow
        except ImportError:
            raise ImportError(
                "Tensorflow not installed, please switch to a "
                "different backend or install Tensorflow."
            )
        tf = tensorflow
        self.name = "tensorflow"
    def expm(self, a: Tensor) -> Tensor:
        """Matrix exponential via tf.linalg.expm."""
        return tf.linalg.expm(a)
    def sin(self, a: Tensor) -> Tensor:
        """Elementwise sine."""
        return tf.math.sin(a)
    def cos(self, a: Tensor) -> Tensor:
        """Elementwise cosine."""
        return tf.math.cos(a)
    def i(self, dtype: Any = None) -> Tensor:
        """Return the imaginary unit 1j as a tf constant of ``dtype``.

        Defaults to the module-level ``dtypestr`` (defined elsewhere in the
        package) resolved against tensorflow.
        """
        if not dtype:
            dtype = getattr(tf, dtypestr) # type: ignore
        if isinstance(dtype, str):
            dtype = getattr(tf, dtype)
        return tf.constant(1j, dtype=dtype)
    def is_tensor(self, a: Any) -> bool:
        """True iff ``a`` is a tf.Tensor or tf.Variable."""
        if isinstance(a, tf.Tensor) or isinstance(a, tf.Variable):
            return True
        return False
    def abs(self, a: Tensor) -> Tensor:
        """Elementwise absolute value."""
        return tf.math.abs(a)
    def real(self, a: Tensor) -> Tensor:
        """Elementwise real part."""
        return tf.math.real(a)
    def cast(self, a: Tensor, dtype: str) -> Tensor:
        """Cast ``a`` to the tensorflow dtype named by the string ``dtype``."""
        return tf.cast(a, dtype=getattr(tf, dtype))
    def grad(
        self, f: Callable[..., Any], argnums: Union[int, Sequence[int]] = 0
    ) -> Callable[..., Any]:
        """Return a function computing the gradient of ``f`` via GradientTape."""
        # experimental attempt
        # Note: tensorflow grad is gradient while jax grad is derivative, they are different with a conjugate!
        def wrapper(*args: Any, **kws: Any) -> Any:
            # Record the forward pass; watch all positional args so gradients
            # can be taken w.r.t. plain tensors (not only tf.Variables).
            with tf.GradientTape() as t:
                t.watch(args)
                y = f(*args, **kws)
                if isinstance(argnums, int):
                    x = args[argnums]
                else:
                    x = [args[i] for i in argnums]
                g = t.gradient(y, x)
            return g
        return wrapper
    def jit(self, f: Callable[..., Any]) -> Any:
        """Graph-compile ``f`` with tf.function."""
        return tf.function(f)
    def vmap(self, f: Callable[..., Any]) -> Any:
        """Vectorize ``f`` over the leading axis via tf.vectorized_map.

        The inner wrappers adapt tf.vectorized_map's single-argument calling
        convention (a tuple of stacked args) to ``f``'s positional signature.
        """
        def wrapper(f: Callable[..., Any], args: Sequence[Any]) -> Any:
            return f(*args)
        wrapper = partial(wrapper, f)
        def own_vectorized_map(f: Callable[..., Any], *args: Any) -> Any:
            return tf.vectorized_map(f, args)
        return partial(own_vectorized_map, wrapper)
class PyTorchBackend(pytorch_backend.PyTorchBackend): # type: ignore
    """PyTorch backend extending tensornetwork's PyTorchBackend.

    Torch is imported lazily in ``__init__`` and published as the module
    global ``torchlib``. Several operations are unsupported or emulated
    (see the individual methods); complex numbers are not supported at all
    by the torch version this was written against.
    """
    def __init__(self) -> None:
        """Import torch lazily and publish it as a module global."""
        super(PyTorchBackend, self).__init__()
        global torchlib
        try:
            import torch
        except ImportError:
            raise ImportError(
                "PyTorch not installed, please switch to a different "
                "backend or install PyTorch."
            )
        torchlib = torch
        self.name = "pytorch"
    def expm(self, a: Tensor) -> Tensor:
        """Matrix exponential: unsupported on the pytorch backend."""
        raise NotImplementedError("pytorch backend doesn't support expm")
    # in 2020, torch has no expm, hmmm. but that's ok, it doesn't support complex numbers which is more severe issue.
    # see https://github.com/pytorch/pytorch/issues/9983
    def sin(self, a: Tensor) -> Tensor:
        """Elementwise sine."""
        return torchlib.sin(a)
    def cos(self, a: Tensor) -> Tensor:
        """Elementwise cosine."""
        return torchlib.cos(a)
    def i(self, dtype: Any = None) -> Tensor:
        """Imaginary unit: unsupported (no complex numbers in torch here)."""
        raise NotImplementedError(
            "pytorch backend doesn't support imaginary numbers at all!"
        )
    def real(self, a: Tensor) -> Tensor:
        """Identity: every torch tensor here is real-valued."""
        return a
    # hmm, in torch, everyone is real.
    def is_tensor(self, a: Any) -> bool:
        """True iff ``a`` is a torch.Tensor."""
        if isinstance(a, torchlib.Tensor):
            return True
        return False
    def cast(self, a: Tensor, dtype: str) -> Tensor:
        """Cast ``a`` to the torch dtype named by the string ``dtype``."""
        return a.type(getattr(torchlib, dtype))
    def grad(
        self, f: Callable[..., Any], argnums: Union[int, Sequence[int]] = 0
    ) -> Callable[..., Any]:
        """Return a function computing grads of ``f`` via torch autograd.

        The wrapper marks the selected positional args with
        ``requires_grad_`` and reads ``.grad`` after ``y.backward()``.
        """
        def wrapper(*args: Any, **kws: Any) -> Any:
            x = []
            if isinstance(argnums, int):
                argnumsl = [argnums]
                # if you also call lhs as argnums, something weird may happen
                # the reason is that python then take it as local vars
            else:
                argnumsl = argnums # type: ignore
            for i, arg in enumerate(args):
                if i in argnumsl:
                    x.append(arg.requires_grad_(True))
                else:
                    x.append(arg)
            y = f(*x, **kws)
            y.backward()
            gs = [x[i].grad for i in argnumsl]
            if len(gs) == 1:
                gs = gs[0]
            return gs
        return wrapper
    def vmap(self, f: Callable[..., Any]) -> Any:
        """Emulate vmap with a plain python loop over the leading axis."""
        warnings.warn(
            "pytorch backend has no intrinsic vmap like interface"
            ", use plain for loop for compatibility"
        )
        # the vmap support is very limited, f must return one tensor
        # nested list of tensor as return is not supported
        def vmapf(*args: Tensor, **kws: Any) -> Tensor:
            r = []
            for i in range(args[0].shape[0]):
                nargs = [arg[i] for arg in args]
                r.append(f(*nargs, **kws))
            return torchlib.stack(r)
        return vmapf
        # raise NotImplementedError("pytorch backend doesn't support vmap")
        # There seems to be no map like architecture in pytorch for now
        # see https://discuss.pytorch.org/t/fast-way-to-use-map-in-pytorch/70814
    def jit(self, f: Callable[..., Any]) -> Any:
        """No-op stand-in for jit on the pytorch backend."""
        return f  # do nothing here until I figure out what torch.jit is for and how does it work
        # see https://github.com/pytorch/pytorch/issues/36910
# Registry mapping backend names to their factory classes; consumed by
# get_backend() below.
_BACKENDS = {
    "tensorflow": TensorFlowBackend,
    "numpy": NumpyBackend,
    "jax": JaxBackend,
    "shell": shell_backend.ShellBackend,  # no intention to maintain this one
    "pytorch": PyTorchBackend,  # no intention to fully maintain this one
}
def get_backend(
    backend: Union[Text, base_backend.BaseBackend]
) -> base_backend.BaseBackend:
    """Resolve ``backend`` to a backend instance.

    An already-constructed backend object is returned unchanged; a string is
    looked up in the ``_BACKENDS`` registry and instantiated. Raises
    ``ValueError`` for an unknown backend name.
    """
    if isinstance(backend, base_backend.BaseBackend):
        return backend
    try:
        factory = _BACKENDS[backend]
    except KeyError:
        raise ValueError("Backend '{}' does not exist".format(backend))
    return factory()
| 32.4 | 127 | 0.586788 | 9,656 | 0.889626 | 0 | 0 | 0 | 0 | 0 | 0 | 2,465 | 0.227105 |
c5e73f85a5d535b9725cdc5daf85b03ea7c65ebb | 904 | py | Python | venv/lib/python3.7/site-packages/webdriver_manager/microsoft.py | wayshon/pylogin | 12ecfddc3ceaf552a42f62608027924541c63254 | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.7/site-packages/webdriver_manager/microsoft.py | wayshon/pylogin | 12ecfddc3ceaf552a42f62608027924541c63254 | [
"Apache-2.0"
] | 7 | 2019-12-04T23:08:08.000Z | 2022-02-10T12:47:38.000Z | venv/lib/python3.7/site-packages/webdriver_manager/microsoft.py | wayshon/pylogin | 12ecfddc3ceaf552a42f62608027924541c63254 | [
"Apache-2.0"
] | null | null | null | from webdriver_manager.driver import EdgeDriver, IEDriver
from webdriver_manager.manager import DriverManager
from webdriver_manager import utils
class EdgeDriverManager(DriverManager):
    """Manager that downloads/installs the Microsoft Edge webdriver.

    NOTE(review): the default ``os_type=utils.os_name()`` is evaluated once
    at import time, not per instantiation; the sibling IEDriverManager uses
    ``utils.os_type()`` instead -- confirm which helper is intended here.
    """
    def __init__(self, version=None,
                 os_type=utils.os_name()):
        super(EdgeDriverManager, self).__init__()
        # Driver descriptor holding version/platform for the download step.
        self.driver = EdgeDriver(version=version,
                                 os_type=os_type)
    def install(self, path=None):
        # type: () -> str
        # Download (or reuse a cached copy of) the driver; return local path.
        # NOTE(review): uses download_binary while IEDriverManager.install
        # uses download_driver -- verify both exist on _file_manager.
        return self._file_manager.download_binary(self.driver, path).path
class IEDriverManager(DriverManager):
    """Manager that downloads/installs the Internet Explorer webdriver.

    NOTE(review): the default ``os_type=utils.os_type()`` is evaluated once
    at import time, not per instantiation.
    """
    def __init__(self, version=None, os_type=utils.os_type()):
        super(IEDriverManager, self).__init__()
        # Driver descriptor holding version/platform for the download step.
        self.driver = IEDriver(version=version, os_type=os_type)
    def install(self, path=None):
        # type: () -> str
        # Download (or reuse a cached copy of) the driver; return local path.
        return self._file_manager.download_driver(self.driver, path).path
| 34.769231 | 73 | 0.683628 | 752 | 0.831858 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.037611 |
c5e808c5b0a9a0e6ef94c188d135b5046b4111b6 | 4,459 | py | Python | examples/plot_replay_experiment.py | dataiku-research/cardinal | 052ea2273e1e6a389e257e3775873620378fe908 | [
"Apache-2.0"
] | 17 | 2021-02-13T16:29:16.000Z | 2022-03-08T03:16:12.000Z | examples/plot_replay_experiment.py | SoftwareImpacts/SIMPAC-2021-174 | 052ea2273e1e6a389e257e3775873620378fe908 | [
"Apache-2.0"
] | 5 | 2021-02-15T14:09:41.000Z | 2021-03-23T23:31:25.000Z | examples/plot_replay_experiment.py | SoftwareImpacts/SIMPAC-2021-174 | 052ea2273e1e6a389e257e3775873620378fe908 | [
"Apache-2.0"
] | 1 | 2021-12-24T17:41:09.000Z | 2021-12-24T17:41:09.000Z | """
Replay and experiment
=====================
In a previous example, we have shown how experiments can be resumed.
Cardinal also allows for experiments to be replayed, meaning that
one can save intermediate data to be able to run analysis on the
experiment without having to retrain all the models. Let us now
see how the ReplayCache allows it.
"""
import shutil
import os
import numpy as np
import dataset
from sklearn.datasets import load_iris
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from cardinal.random import RandomSampler
from cardinal.uncertainty import MarginSampler
from cardinal.cache import ReplayCache, ShelveStore, SqliteStore
from cardinal.utils import SampleSelector
##############################################################################
# Since we will be looking at the cache, we need a utility function to display
# a tree folder.
def print_folder_tree(startpath):
    """Print an indented tree view of the folder rooted at ``startpath``.

    Each directory is printed as ``name/`` indented four spaces per level,
    with its files listed one level deeper.
    """
    for root, _dirs, filenames in os.walk(startpath):
        # Depth is inferred from how many separators remain once the root
        # prefix is stripped (same replace/count trick as the original).
        depth = root.replace(startpath, '').count(os.sep)
        pad = ' ' * 4 * depth
        print(pad + os.path.basename(root) + '/')
        child_pad = ' ' * 4 * (depth + 1)
        for filename in filenames:
            print(child_pad + filename)
#############################################################################
# We load the data and define the parameters of this experiment:
#
# * ``batch_size`` is the number of samples that will be annotated and added to
# the training set at each iteration,
# * ``n_iter`` is the number of iterations in our simulation
# Iris dataset, split 50/50 into an unlabeled candidate pool and a test set.
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=1)
# batch_size: samples "annotated" per iteration; n_iter: simulated AL steps.
batch_size = 5
n_iter = 10
# probability=True so MarginSampler can use predicted class probabilities.
model = SVC(probability=True)
sampler = MarginSampler(model, batch_size)
# Keys identifying this experiment configuration inside the cache.
experiment_config = dict(sampler='margin')
CACHE_PATH = './cache'
DATABASE_PATH = './cache.db'
value_store = ShelveStore(DATABASE_PATH)
#############################################################################
# We define our experiment in a dedicated function since we want to run it
# several times. We also create a dedicated exception that we will rise to
# simulate an interruption in the experiment.
#
# Note the use of the SampleSelector utils that facilitate the handing of
# indices in an active learning experiment.
#
# In the end, all values for all iterations are kept. The cache structure
# is human readable and can be shared for better reproducibility.
with ReplayCache(CACHE_PATH, value_store, keys=experiment_config) as cache:
    # Create a selector with one sample from each class and persist it
    init_selector = SampleSelector(X_train.shape[0])
    init_selector.add_to_selected([np.where(y_train == i)[0][0] for i in np.unique(y)])
    # persisted_value records one value per iteration under the given key.
    selector = cache.persisted_value('selector', init_selector)
    predictions = cache.persisted_value('prediction', None)
    # cache.iter yields the previous iteration's persisted values, allowing
    # the experiment to be resumed or replayed later without retraining.
    for j, prev_selector, prev_predictions in cache.iter(range(n_iter), selector.previous(), predictions.previous()):
        print('Computing iteration {}'.format(j))
        # Train on everything selected so far, pick the next batch, persist.
        model.fit(X_train[prev_selector.selected], y_train[prev_selector.selected])
        sampler.fit(X_train[prev_selector.selected], y_train[prev_selector.selected])
        prev_selector.add_to_selected(sampler.select_samples(X_train[prev_selector.non_selected]))
        selector.set(prev_selector)
        predictions.set(model.predict(X_test))
    # All the values for the experiment are kept
    print_folder_tree('./cache')
    # This code could have been added to the script afterwards to compute any metric.
    def compute_contradictions(previous_prediction, current_prediction):
        """Count test samples whose predicted label changed between iterations."""
        if previous_prediction is None:
            return 0
        return (previous_prediction != current_prediction).sum()
    # Replays the cached predictions through the metric without retraining.
    cache.compute_metric('contradictions', compute_contradictions, predictions.previous(), predictions.current())
# Post-hoc analysis: read the replayed metric back from the value store.
from matplotlib import pyplot as plt
contradictions = value_store.get('contradictions')
plt.plot(contradictions['iteration'], contradictions['value'])
plt.xlabel('Iteration')
plt.ylabel('Contradictions')
plt.title('Evolution of Contradictions during active learning experiment on Iris dataset')
plt.show()
value_store.close()
#############################################################################
# We clean all the cache folder.
shutil.rmtree(CACHE_PATH)
os.remove(DATABASE_PATH)
c5e914518d0f8750fb4b83382f0d2bacb4dbb4ed | 17,585 | py | Python | cryspy/B_parent_classes/cl_3_data.py | eandklahn/cryspy | a664cee1e1ffd5f23e54295a11e479d7d4cda7e5 | [
"MIT"
] | null | null | null | cryspy/B_parent_classes/cl_3_data.py | eandklahn/cryspy | a664cee1e1ffd5f23e54295a11e479d7d4cda7e5 | [
"MIT"
] | null | null | null | cryspy/B_parent_classes/cl_3_data.py | eandklahn/cryspy | a664cee1e1ffd5f23e54295a11e479d7d4cda7e5 | [
"MIT"
] | null | null | null | """Parent class DataN."""
import os
import os.path
from warnings import warn
from typing import Union, NoReturn
from pycifstar import Data, to_data
from cryspy.A_functions_base.function_1_markdown import md_to_html
from cryspy.A_functions_base.function_1_objects import \
get_functions_of_objet, get_table_html_for_variables
from cryspy.B_parent_classes.cl_1_item import ItemN
from cryspy.B_parent_classes.cl_2_loop import LoopN
class DataN(object):
    """Data container of loops and items.

    Holds a list of ``ItemN``/``LoopN`` objects in ``self.items`` and exposes
    them as attributes keyed by each item's ``get_name()``. Subclasses declare
    ``CLASSES_MANDATORY``/``CLASSES_OPTIONAL``/``PREFIX``; a generic container
    can also be built with :meth:`make_container`.
    """

    def __repr__(self):
        """Return a plain-text summary: object name, item names, methods."""
        ls_out = [f"# Object '{self.get_name():}'"]
        for item in self.items:
            if isinstance(item, ItemN):
                ls_out.append(f"{4*' ':}.{item.get_name():}")
            else:
                ls_out.append(f"{4*' ':}.{item.get_name():} (loop)")
        method = self.methods_html()
        if method != "":
            ls_out.append(f"\n# Methods:\n{method:}\n")
        return "\n".join(ls_out)

    def _repr_html_(self):
        """HTML representation for Jupyter display."""
        ls_html = [f"<h2>Object '{self.get_name():}'</h2>"]
        ls_html.append(self.attributes_to_html())
        ls_html.append(get_table_html_for_variables(self))
        report = self.report_html()
        if report != "":
            ls_html.append(f"<h2>Description </h2> {report:}")
        ls_html.append(f"<h2>Classes and methods</h2>")
        # CLASSES_MANDATORY/OPTIONAL may be absent on a bare DataN instance.
        try:
            names = sorted([obj.__name__ for obj in self.CLASSES_MANDATORY])
            if len(names) != 0:
                ls_html.append("<b>Mandatory classes: </b>")
                ls_html.append(f"{', '.join(names):}.<br>")
        except AttributeError:
            pass
        try:
            names = sorted([obj.__name__ for obj in self.CLASSES_OPTIONAL])
            if len(names) != 0:
                ls_html.append("<b>Optional classes: </b>")
                ls_html.append(f"{', '.join(names):}.<br>")
        except AttributeError:
            pass
        method = self.methods_html()
        if method != "":
            ls_html.append(f"<b>Methods: </b> {method:}")
        return " ".join(ls_html)

    def methods_html(self):
        """List the object's public methods as HTML text.

        NOTE: returns at least "." even with no methods, so callers comparing
        the result against "" always see a non-empty string.
        """
        ls_html = [f".{func_name}" for func_name in
                   get_functions_of_objet(self)]
        return ", ".join(ls_html)+"."

    def attributes_to_html(self) -> str:
        """Representation of defined parameters in HTML format."""
        ls_html = ["<table>"]
        ls_html.append("<tr><th>Attribute</th><th> Note </th></tr>")
        items_sorted = sorted(self.items, key=lambda item: item.get_name())
        for item in items_sorted:
            # First docstring line of the item serves as its short note.
            item_type = item.__doc__.strip().split("\n")[0]
            ls_html.append(
                f"<tr><td>.{item.get_name():}</td><td>{item_type:}</td></tr>")
        ls_html.append("</table>")
        return " ".join(ls_html)

    def __str__(self):
        """Return the CIF representation (same as :meth:`to_cif`)."""
        return self.to_cif()

    def __getattr__(self, name):
        """Resolve undefined attributes to contained items by name.

        Only called when normal attribute lookup fails; matches ``name``
        (lower-cased) against each item's ``get_name()``.
        """
        for item in self.items:
            if name.lower() == item.get_name():
                return item
        raise AttributeError(f"Attribute '{name:}' is not defined")

    def is_attribute(self, name):
        """Temporary construction.

        Better to use:
        try:
            obj = self.attribute_name
        except AttributeError as e:
            obj = ...
        """
        for item in self.items:
            if name.lower() == item.get_name():
                return True
        return False

    def __setattr__(self, name, value) -> None:
        """Rules to set an attribute.

        - ``data_name`` is stored stripped, as a string;
        - ``items`` delegates to :meth:`add_items`;
        - a value whose type belongs to ``CLASSES`` replaces any stored item
          with the same name and is appended to ``items`` (a warning is
          emitted when the attribute name differs from the item's name);
        - anything else goes straight into the instance ``__dict__``.
        """
        flag_items, flag_direct = False, True
        if name == "data_name":
            flag_direct = False
            val_new = str(value).strip()
        elif name == "items":
            flag_items = True
            self.add_items(value)
        else:
            cls_value = type(value)
            if cls_value in self.CLASSES:
                l_name = [item.get_name() for item in self.items]
                name_new = value.get_name()
                if name_new in l_name:
                    # FIX: look up the stored duplicate by the item's own
                    # name; the original used ``name`` (the attribute name),
                    # which raised ValueError whenever an item was replaced
                    # via a differently-named attribute.
                    self.items.pop(l_name.index(name_new))
                self.items.append(value)
                flag_items, flag_direct = True, False
                if name_new != name:
                    warn(f"Access to variable by '{name_new:}'.", UserWarning)
        if flag_items:
            pass
        elif flag_direct:
            self.__dict__[name] = value
        else:
            self.__dict__[name] = val_new

    def add_items(self, items: list):
        """Add items, replacing stored items with the same name.

        Duplicated names inside ``items`` trigger a warning and only one of
        each is kept; objects whose type is not in ``CLASSES`` are ignored.
        """
        l_name = [item.get_name() for item in items]
        s_name = set(l_name)
        if len(s_name) != len(l_name):
            warn("Double items were given.", UserWarning)
            items_unique = [items[l_name.index(name)] for name in s_name]
        else:
            items_unique = items
        # Drop stored items shadowed by the incoming ones (reverse order so
        # earlier indices stay valid while popping).
        l_ind_del = []
        for ind_item, item in enumerate(self.items):
            if item.get_name() in s_name:
                l_ind_del.append(ind_item)
        l_ind_del.reverse()
        for ind in l_ind_del:
            self.items.pop(ind)
        for item in items_unique:
            if isinstance(item, self.CLASSES):
                self.items.append(item)

    @classmethod
    def make_container(cls, cls_mandatory, cls_optional, prefix):
        """Create DataN object as a container for items.

        Only valid on DataN itself (subclasses declare the class attributes
        statically); configuration is written straight into ``__dict__`` to
        bypass :meth:`__setattr__`.
        """
        if cls is not DataN:
            warn("The method 'make_container' is used only for DataN class.")
            return
        obj = cls()
        obj.__dict__["CLASSES_MANDATORY"] = cls_mandatory
        obj.__dict__["CLASSES_OPTIONAL"] = cls_optional
        obj.__dict__["CLASSES"] = cls_mandatory+cls_optional
        obj.__dict__["PREFIX"] = prefix
        obj.__dict__["D_DEFAULT"] = {}
        obj.__dict__["items"] = []
        obj.__dict__["data_name"] = ""
        return obj

    @classmethod
    def get_mandatory_attributes(cls, separator: str = "_"):
        """Get a list of mandatory attributes from mandatory classes."""
        l_res = []
        for cls_obj in cls.CLASSES_MANDATORY:
            if issubclass(cls_obj, ItemN):
                cls_item = cls_obj
            else:  # LoopN
                cls_item = cls_obj.ITEM_CLASS
            l_res.extend([f"{cls_item.PREFIX:}{separator:}{name_cif:}"
                          for name_cif in cls_item.ATTR_MANDATORY_CIF])
        return l_res

    def __getitem__(self, name: Union[int, str]):
        """Get item by position (int) or by name (str); None when absent."""
        if isinstance(name, int):
            return self.items[name]
        elif isinstance(name, str):
            for item in self.items:
                if name.lower() == item.get_name():
                    return item
        return None

    def get_name(self) -> str:
        """Name of object: ``prefix_dataname`` lower-cased."""
        name = self.PREFIX
        data_name = self.data_name
        if data_name is not None:
            name = f"{name:}_{data_name:}"
        return name.lower()

    def get_variable_names(self) -> list:
        """Get names of variables as a list.

        Each entry is the item's variable name tuple prefixed with this
        object's ``(prefix, data_name)`` pair:
        ((#prefix, #data_name), (#prefix, #loop_name), (#attribute, #index)).
        """
        prefix = self.PREFIX
        data_name = self.data_name
        l_var = []
        for item in self.items:
            l_var.extend(item.get_variable_names())
        l_var_out = [((prefix, data_name), ) + var for var in l_var]
        return l_var_out

    def is_variables(self) -> bool:
        """Define is there variables or not."""
        flag = False
        for item in self.items:
            if item.is_variables():
                flag = True
                break
        return flag

    def get_variable_by_name(self, name: tuple) -> Union[float, int, str]:
        """Get variable given by name.

        Parameters
        ----------
        name : tuple
            ((#prefix, #data_name), (#prefix, #loop_name),
             (#attribute, #index_item))

        Returns
        -------
        The variable's value, or None when the name does not match.
        """
        prefix = self.PREFIX
        data_name = self.data_name
        prefix_d, prefix_n = name[0], name[1]
        if prefix_d != (prefix, data_name):
            return None
        name_sh = tuple(name[1:])
        for item in self.items:
            if isinstance(item, ItemN):
                prefix = item.PREFIX
            elif isinstance(item, LoopN):
                item_cls = item.ITEM_CLASS
                if item_cls is ItemN:
                    prefix = item[0].PREFIX
                else:
                    prefix = item_cls.PREFIX
            else:
                raise AttributeError(
                    f"Unknown type object '{type(item).__name__:}'")
            if prefix == prefix_n[0]:
                res = item.get_variable_by_name(name_sh)
                if res is not None:
                    return res
        return None

    def set_variable_by_name(self, name: tuple, value) -> None:
        """Set ``value`` to the variable given by ``name``.

        ``name`` has the same layout as in :meth:`get_variable_by_name`;
        a non-matching prefix is silently ignored.
        """
        prefix = self.PREFIX
        data_name = self.data_name
        prefix_d, prefix_n = name[0], name[1]
        if prefix_d != (prefix, data_name):
            return
        name_sh = tuple(name[1:])
        for item in self.items:
            if isinstance(item, ItemN):
                prefix = item.PREFIX
            elif isinstance(item, LoopN):
                item_cls = item.ITEM_CLASS
                if item_cls is ItemN:
                    prefix = item[0].PREFIX
                else:
                    prefix = item_cls.PREFIX
            else:
                raise AttributeError(
                    f"Unknown type object '{type(item).__name__:}'")
            if prefix == prefix_n[0]:
                item.set_variable_by_name(name_sh, value)

    def is_defined(self) -> bool:
        """True when every item is defined and every mandatory class present.

        Emits a UserWarning naming the first incomplete item or the first
        missing mandatory class.
        """
        flag = True
        for item in self.items:
            if not(item.is_defined()):
                flag = False
                if isinstance(item, ItemN):
                    warn(f"{item.PREFIX:} is not fully described.",
                         UserWarning)
                    break
                elif isinstance(item, LoopN):
                    warn(f"{item.ITEM_CLASS.PREFIX:} is not fully described.",
                         UserWarning)
                    break
        if flag:
            cls_items = [type(item) for item in self.items]
            for cls_mand in self.CLASSES_MANDATORY:
                if not(cls_mand in cls_items):
                    flag = False
                    warn(f"The object of {cls_mand.__name__:} is not defined.",
                         UserWarning)
                    break
        return flag

    def form_object(self):
        """Form object. Hook for subclasses; no-op by default."""
        pass

    def to_cif(self, separator="_") -> str:
        """Print information about object in string in STAR format.

        Arguments
        ---------
        separator: separator between prefix and attribute ("_" or ".")

        Returns
        -------
        A string in STAR/CIF format. Short items are printed before loops,
        very long items after them (threshold depends on the longest loop).
        """
        ls_out = []
        if self.data_name is None:
            ls_out.append("data_\n")
        else:
            ls_out.append(f"data_{self.data_name:}\n")
        l_item = self.items
        l_s_itemn = [item.to_cif(separator=separator)+"\n"
                     for item in l_item if isinstance(item, ItemN)]
        l_s_loopn = [item.to_cif(separator=separator)+"\n"
                     for item in l_item if isinstance(item, LoopN)]
        if l_s_loopn != []:
            n_max_loop = max([len(_) for _ in l_s_loopn])
            if n_max_loop < 1000:
                n_max_loop = 1000
        else:
            n_max_loop = 10000
        l_n_max_item = [len(_) for _ in l_s_itemn]
        ls_out.extend([_1 for _1, _2 in zip(l_s_itemn, l_n_max_item)
                       if _2 <= n_max_loop])
        ls_out.extend([_ for _ in l_s_loopn])
        ls_out.extend([_1 for _1, _2 in zip(l_s_itemn, l_n_max_item)
                       if _2 > n_max_loop])
        return "\n".join(ls_out)

    @classmethod
    def from_cif(cls, string: str):
        """Generate object from string of CIF format.

        Returns None (after a warning) when a mandatory class cannot be
        built from the CIF content.
        """
        cif_data = Data()
        flag = cif_data.take_from_string(string)
        cif_items = cif_data.items
        cif_loops = cif_data.loops
        items = []
        flag = True
        n_mandatory = len(cls.CLASSES_MANDATORY)
        for i_cls, cls_ in enumerate(cls.CLASSES):
            # Optional classes (index >= n_mandatory) may be absent.
            flag = i_cls >= n_mandatory
            if issubclass(cls_, ItemN):
                prefix_cls = cls_.PREFIX
                if cif_items.is_prefix(prefix_cls):
                    cif_items_prefix = cif_items[prefix_cls]
                    cif_string = str(cif_items_prefix)
                    obj_prefix = cls_.from_cif(cif_string)
                    if obj_prefix is not None:
                        items.append(obj_prefix)
                        flag = True
            elif issubclass(cls_, LoopN):
                prefix_cls = cls_.ITEM_CLASS.PREFIX
                for cif_loop in cif_loops:
                    if cif_loop.is_prefix("_"+prefix_cls):
                        cif_string = str(cif_loop)
                        obj_prefix = cls_.from_cif(cif_string)
                        if obj_prefix is not None:
                            items.append(obj_prefix)
                            flag = True
            if (not(flag)):
                warn(f"Mandatory class: '{cls_.__name__:}' is not given.",
                     UserWarning)
                break
        if not(flag):
            return None
        data_name = cif_data.name
        # NOTE(review): assumes the (sub)class constructor accepts
        # data_name/items keywords; bare DataN does not define such __init__.
        obj = cls(data_name=data_name, items=items)
        obj.form_object()
        return obj

    @classmethod
    def from_cif_file(cls, f_name: str):
        """Read from cif file. Raises UserWarning when the file is absent."""
        if not(os.path.isfile(f_name)):
            raise UserWarning(f"File {f_name:} is not found.")
        str_from_cif = str(to_data(f_name))
        obj = cls.from_cif(str_from_cif)
        obj.file_input = f_name
        return obj

    def copy(self, data_name: str = ""):
        """Deep copy of object (via CIF round-trip) with new data name."""
        s_cif = self.to_cif()
        obj_new = type(self).from_cif(s_cif)
        obj_new.data_name = data_name
        return obj_new

    def report(self):
        """Markdown report; hook for subclasses, empty by default."""
        return ""

    def report_html(self):
        """HTML rendering of :meth:`report`."""
        return md_to_html(self.report())

    def plots(self):
        """Collect the non-None plots of all contained items."""
        l_res = []
        for item in self.items:
            for plot in item.plots():
                if plot is not None:
                    l_res.append(plot)
        return l_res

    def fix_variables(self):
        """Fix variables of all contained items."""
        for item in self.items:
            item.fix_variables()

    def set_variable(self, name: str, index=None):
        """Set refinement for variable given by name.

        Index parameter is used only for objects given as a matrix.
        A bare item name refines all of the item's refinable attributes;
        a dotted name ("item.attr...") is forwarded to the item.
        """
        name_sh = name.strip(".").lower()
        l_name = name_sh.split(".")
        name_1 = l_name[0]
        for item in self.items:
            if name_1 == item.get_name():
                if len(l_name) == 1:
                    attr_refs = []
                    if isinstance(item, ItemN):
                        attr_refs = item.ATTR_REF
                    elif isinstance(item, LoopN):
                        item_class = item.ITEM_CLASS
                        if item_class is ItemN:
                            if len(self.items) != 0:
                                attr_refs = item.items[0].ATTR_REF
                        else:
                            attr_refs = item_class.ATTR_REF
                    for attr_ref in attr_refs:
                        item.set_variable(attr_ref, index=index)
                else:
                    item.set_variable(".".join(l_name[1:]), index=index)
| 31.068905 | 79 | 0.511459 | 17,147 | 0.975092 | 0 | 0 | 3,173 | 0.180438 | 0 | 0 | 4,523 | 0.257208 |
c5e955f6a5fb02a343d52d2f8212c94c8ad218bb | 204 | py | Python | vivisect/tests/vivbins.py | mubix/vivisect | 2900c0bf59838cb9fc398a8668f76f887b7f54e7 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-11-12T03:50:06.000Z | 2017-11-12T03:50:06.000Z | vivisect/tests/vivbins.py | mubix/vivisect | 2900c0bf59838cb9fc398a8668f76f887b7f54e7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vivisect/tests/vivbins.py | mubix/vivisect | 2900c0bf59838cb9fc398a8668f76f887b7f54e7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import os
import unittest
def require(f):
    """Decorator: run ``f`` only when the VIVBINS env var is set.

    When VIVBINS is unset, ``f`` is replaced by a stub that raises
    ``unittest.SkipTest``, so the wrapped test is reported as skipped.
    The environment check happens at decoration time, not call time.
    """
    def skipit(*args, **kwargs):
        raise unittest.SkipTest('VIVBINS env var...')
    # FIX: compare to None with ``is`` (PEP 8), not ``==``.
    if os.getenv('VIVBINS') is None:
        return skipit
    return f
| 17 | 53 | 0.617647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.142157 |
c5e956eebd2928391f5d2ba00f6fd36130e8bbd1 | 894 | py | Python | hackerearth/Algorithms/Restoring trees/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | 4 | 2020-07-24T01:59:50.000Z | 2021-07-24T15:14:08.000Z | hackerearth/Algorithms/Restoring trees/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | hackerearth/Algorithms/Restoring trees/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
# Restore a rooted tree from two arrays: ``start`` (a node per traversal
# step) and ``finish`` (a time per node), printing 1-based parent indices.
# NOTE(review): the exact input semantics are inferred from the code only
# (problem statement not visible here); comments below are mechanical.
n = int(input())
start = list(map(int, input().strip().split()))
finish = list(map(int, input().strip().split()))
# vertexes[v] = index i at which value v occurs in ``start``.
vertexes = [0] * n
for i, v in enumerate(start):
    vertexes[v] = i
# parents[v] = parent of node v (-1 until assigned); vertexes[0] seeds the
# initial parent candidate (treated as the root).
parents = [-1] * n
parent = vertexes[0]
for i in range(1, n):
    cur = vertexes[i]
    if finish[cur] - i > 1:
        # cur's finish time extends beyond the next step: descend into cur.
        parents[cur] = parent
        parent = cur
    else:
        # Attach cur, then climb while ancestor finish times coincide,
        # i.e. subtrees that close at the same moment; stop at the root.
        parents[cur] = parent
        while finish[cur] == finish[parents[cur]]:
            cur = parents[cur]
            parent = parents[cur]
            if parent == vertexes[0]:
                break
# Shift to 1-based output (the root's -1 becomes 0).
for i in range(n):
    parents[i] += 1
print(*parents)
| 26.294118 | 94 | 0.569351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 274 | 0.306488 |
c5ea213be6b9121c63848b236674311dd3d54793 | 32 | py | Python | src/machine_learning/collaborative_filtering/__init__.py | przemek1990/machine-learning | d278b867757cf9223e079ff77cdd8e1b4a9b3f36 | [
"Apache-2.0"
] | null | null | null | src/machine_learning/collaborative_filtering/__init__.py | przemek1990/machine-learning | d278b867757cf9223e079ff77cdd8e1b4a9b3f36 | [
"Apache-2.0"
] | null | null | null | src/machine_learning/collaborative_filtering/__init__.py | przemek1990/machine-learning | d278b867757cf9223e079ff77cdd8e1b4a9b3f36 | [
"Apache-2.0"
] | null | null | null | __author__ = 'przemyslaw.pioro'
| 16 | 31 | 0.78125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.5625 |
c5eb3429d56063aad6eca63d3f25a6fb4eaf84b5 | 1,674 | py | Python | Sorting/Sorts.py | niranjan09/DataStructures_Algorithms | df2801f7ea48a39a55a6d79fd66ad200a2de0145 | [
"MIT"
] | null | null | null | Sorting/Sorts.py | niranjan09/DataStructures_Algorithms | df2801f7ea48a39a55a6d79fd66ad200a2de0145 | [
"MIT"
] | null | null | null | Sorting/Sorts.py | niranjan09/DataStructures_Algorithms | df2801f7ea48a39a55a6d79fd66ad200a2de0145 | [
"MIT"
] | null | null | null | import time
def swap(arr, i, j):
    """Exchange the elements of arr at positions i and j in place."""
    arr[i], arr[j] = arr[j], arr[i]
def selection_sort(arr):
    """Sort arr in place in ascending order and return it.

    True selection sort: each pass finds the minimum of the unsorted tail
    and performs at most a single swap. The original was an exchange-sort
    variant that swapped on every out-of-order pair (O(n^2) swaps) and
    depended on the sibling swap() helper.
    """
    for i in range(len(arr)):
        # Locate the smallest remaining element.
        min_idx = i
        for j in range(i + 1, len(arr)):
            if arr[j] < arr[min_idx]:
                min_idx = j
        if min_idx != i:
            arr[i], arr[min_idx] = arr[min_idx], arr[i]
    return arr
def bubble_sort(arr):
    """Sort arr in place in ascending order and return it (bubble sort)."""
    swapped = True
    while swapped:
        swapped = False
        # One sweep: each pass bubbles the largest remaining value right.
        for idx in range(len(arr) - 1):
            if arr[idx] > arr[idx + 1]:
                swap(arr, idx, idx + 1)
                swapped = True
    return arr
def insertion_sort(arr):
    """Sort arr in place in ascending order and return it (insertion sort).

    Fixes the original's short-circuit bug: ``arr[i] < arr[temp]`` was
    evaluated before ``temp >= 0``, so when temp reached -1 the key was
    compared against the *last* element via negative indexing. Also drops
    the debug prints, which used Python 2 print-statement syntax.
    """
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        # Shift larger elements one slot right, then drop the key in.
        while j >= 0 and arr[j] > key:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = key
    return arr
def merge_sort(arr):
    """Return a new ascending-sorted list of arr's elements (merge sort).

    Fixes two defects in the original: integer division now uses ``//``
    (the old ``/`` produced a float slice index on Python 3), and an empty
    input no longer recurses forever (the base case only handled length 1).
    """
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])
    # Merge the two sorted halves (strict '<' preserved from the original:
    # ties are taken from the right half first).
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def quick_sort(arr):
    """Return a new ascending-sorted list of arr's elements (quicksort).

    Base case is now ``len(arr) <= 1`` -- the original only stopped below
    length 1, so every single-element slice triggered one more needless
    partition-and-recurse round before terminating.
    """
    if len(arr) <= 1:
        return arr
    # Middle element as pivot; three-way partition handles duplicates.
    pivot = arr[len(arr) // 2]
    left = [x for x in arr if x < pivot]
    middle = [x for x in arr if x == pivot]
    right = [x for x in arr if x > pivot]
    return quick_sort(left) + middle + quick_sort(right)
def direct_addressing_sort(arr):
    """Counting sort for a non-empty list of integers; returns the sorted list.

    The original incremented ``a[i]`` (the loop index) instead of the count
    bucket for ``arr[i]``, so the histogram was wrong; it also used the
    Python 2 print statement and returned nothing. The histogram is still
    printed for parity with the original's output, and the sorted list is
    now returned as well (backward compatible: old callers ignored the
    None return).
    """
    minimum = min(arr)
    maximum = max(arr)
    counts = [0] * (maximum - minimum + 1)
    for value in arr:
        counts[value - minimum] += 1
    print(counts)
    # Expand the histogram back into a sorted sequence.
    result = []
    for offset, count in enumerate(counts):
        result.extend([minimum + offset] * count)
    return result
# Quick manual timing check: sort a small list with duplicate negative
# floats and print the wall-clock time before and after.
print(time.time())
print(quick_sort([-2.2, -2.2, -2.2]*5+[1.1]*5))
print(time.time())
| 19.465116 | 53 | 0.599164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.007766 |
c5ed957119e3f69933dbfb4c1879651fe6bb40ff | 65 | py | Python | __init__.py | Moviesbazar/Pdiskuploader_bot | fcefd0e674843c12e797d464dd4f520f98a36632 | [
"Apache-2.0"
] | 1 | 2021-10-30T19:24:19.000Z | 2021-10-30T19:24:19.000Z | __init__.py | Moviesbazar/Pdiskuploader_bot | fcefd0e674843c12e797d464dd4f520f98a36632 | [
"Apache-2.0"
] | null | null | null | __init__.py | Moviesbazar/Pdiskuploader_bot | fcefd0e674843c12e797d464dd4f520f98a36632 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""
Source Code of Pdiskuploaderbot
"""
| 9.285714 | 31 | 0.676923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.938462 |
c5ef4f6813348dc5856623519c73597cbe031935 | 329 | py | Python | Beam/TextAnalysis/setup.py | Balaviknesh/YelpHelp | 4e928b0f13a8f5a3baf916c293302266c3d54dda | [
"Apache-2.0"
] | null | null | null | Beam/TextAnalysis/setup.py | Balaviknesh/YelpHelp | 4e928b0f13a8f5a3baf916c293302266c3d54dda | [
"Apache-2.0"
] | null | null | null | Beam/TextAnalysis/setup.py | Balaviknesh/YelpHelp | 4e928b0f13a8f5a3baf916c293302266c3d54dda | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
from setuptools.command.install import install as _install
class Install(_install):
    """Install command that also downloads the NLTK data this package needs."""
    def run(self):
        # Perform the standard egg install first, then fetch NLTK's
        # "popular" data bundle so the package works out of the box.
        _install.do_egg_install(self)
        import nltk
        nltk.download("popular")
# Register the custom command; nltk must be available both at setup time
# (for the download above) and at runtime.
setup(
    cmdclass={'install': Install},
    install_requires=['nltk'],
    setup_requires=['nltk'])
| 20.5625 | 58 | 0.68693 | 134 | 0.407295 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.091185 |
c5ef76f9a1c232b338813b5ca11e5c92d815d45d | 947 | py | Python | code/test3/1.py | Bc-Gg/Algorithms | 0c35fd4e002ff4b0ad6ebb243df3df278e366595 | [
"MIT"
] | 8 | 2022-03-13T10:25:33.000Z | 2022-03-30T08:26:00.000Z | code/test3/1.py | Bc-Gg/Algorithms | 0c35fd4e002ff4b0ad6ebb243df3df278e366595 | [
"MIT"
] | null | null | null | code/test3/1.py | Bc-Gg/Algorithms | 0c35fd4e002ff4b0ad6ebb243df3df278e366595 | [
"MIT"
] | 2 | 2022-03-20T12:09:52.000Z | 2022-03-21T03:43:01.000Z | '''
author : bcgg
Unfortunately this still exceeded the time limit.
The implementation itself is sound;
there is plenty of room for improvement in the middle section.
'''
ans = 0  # running total of inversions counted across all merge steps


def merge(arr, l, m, r):
    """Merge the sorted slices arr[l..m] and arr[m+1..r] in place, adding
    the number of inversions between the two halves to the global ``ans``.

    Fixes the original inversion count: it added ``m - i + 1``, which
    equals the correct ``n1 - i`` only when l == 0, so every merge of a
    right-hand subarray overcounted by l per taken element.
    """
    global ans
    n1 = m - l + 1
    n2 = r - m
    L = arr[l:m + 1]
    R = arr[m + 1:r + 1]
    i = 0
    j = 0
    k = l
    while i < n1 and j < n2:
        if L[i] <= R[j]:
            arr[k] = L[i]
            i += 1
        else:
            arr[k] = R[j]
            j += 1
            # Every element still waiting in L is greater than the value
            # just taken from R, so each forms one inversion with it.
            ans += n1 - i
        k += 1
    while i < n1:
        arr[k] = L[i]
        i += 1
        k += 1
    while j < n2:
        arr[k] = R[j]
        j += 1
        k += 1
def mergeSort(arr, l, r):
    """Recursively sort arr[l..r] in place via merge(), counting inversions."""
    if l >= r:
        return
    mid = (l + r - 1) // 2
    mergeSort(arr, l, mid)
    mergeSort(arr, mid + 1, r)
    merge(arr, l, mid, r)
if __name__ == '__main__':
    # Read a comma-separated list of integers from stdin, count its
    # inversions with merge sort, and print the total.
    values = [int(tok) for tok in input().split(',')]
    mergeSort(values, 0, len(values) - 1)
    print(ans)
| 18.568627 | 44 | 0.36642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.098278 |
c5ef8d7c1493136be617512f99564772f9d404af | 1,354 | py | Python | Python/sir_cost.py | Wasim5620/SIRmodel | bbca1431673dc5450f290db1235eb73e92e74979 | [
"MIT"
] | 26 | 2018-08-08T20:40:21.000Z | 2022-01-13T19:46:40.000Z | Python/sir_cost.py | Wasim5620/SIRmodel | bbca1431673dc5450f290db1235eb73e92e74979 | [
"MIT"
] | 24 | 2020-03-25T19:35:43.000Z | 2022-02-10T11:46:50.000Z | Python/sir_cost.py | Wasim5620/SIRmodel | bbca1431673dc5450f290db1235eb73e92e74979 | [
"MIT"
] | 9 | 2017-07-22T04:23:15.000Z | 2021-03-19T09:42:35.000Z | # cost function for the SIR model for python 2.7
# Marisa Eisenberg (marisae@umich.edu)
# Yu-Han Kao (kaoyh@umich.edu) -7-9-17
import numpy as np
import sir_ode
from scipy.stats import poisson
from scipy.stats import norm
from scipy.integrate import odeint as ode
def NLL(params, data, times): #negative log likelihood
	"""Poisson negative log likelihood of the SIR model against case data.

	params -- model parameter vector (elementwise abs is taken, so signs
	          are ignored by the optimizer)
	data   -- observed case counts, one per time point
	times  -- time points at which to integrate the ODE system
	Returns the NLL up to an additive constant (see note below).
	"""
	params = np.abs(params)
	data = np.array(data)
	# Integrate the SIR ODEs from the params-dependent initial condition.
	res = ode(sir_ode.model, sir_ode.x0fcn(params,data), times, args=(params,))
	y = sir_ode.yfcn(res, params)
	# Poisson NLL with the data-only log-factorial term dropped.
	nll = sum(y) - sum(data*np.log(y))
	# note this is a slightly shortened version--there's an additive constant term missing but it
	# makes calculation faster and won't alter the threshold. Alternatively, can do:
	# nll = -sum(np.log(poisson.pmf(np.round(data),np.round(y)))) # the round is b/c Poisson is for (integer) count data
	# this can also barf if data and y are too far apart because the dpois will be ~0, which makes the log angry
	# ML using normally distributed measurement error (least squares)
	# nll = -sum(np.log(norm.pdf(data,y,0.1*np.mean(data)))) # example WLS assuming sigma = 0.1*mean(data)
	# nll = sum((y - data)**2) # alternatively can do OLS but note this will mess with the thresholds
	#   for the profile! This version of OLS is off by a scaling factor from
	#   actual LL units.
	return nll
| 46.689655 | 117 | 0.697194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 956 | 0.706056 |
c5efdfd7527a9d610cd0e6cd62680c8957d0b58f | 14,512 | py | Python | restler/unit_tests/test_basic_functionality_end_to_end.py | Ayudjj/mvp | a0ba706a2156e31cf6053b639b57aa1b9acad442 | [
"MIT"
] | 1 | 2020-12-05T14:23:08.000Z | 2020-12-05T14:23:08.000Z | restler/unit_tests/test_basic_functionality_end_to_end.py | Ayudjj/mvp | a0ba706a2156e31cf6053b639b57aa1b9acad442 | [
"MIT"
] | null | null | null | restler/unit_tests/test_basic_functionality_end_to_end.py | Ayudjj/mvp | a0ba706a2156e31cf6053b639b57aa1b9acad442 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
""" Runs functional tests, which invoke the RESTler engine and check the RESTler output logs
for correctness.
When new baseline logs are necessary due to known breaking changes in the logic, a run that
matches the test should be run manually and the appropriate logs should be replaced in the
unit_tests/log_baseline_test_files directory. Each log is named <test-type_log-type.txt>
"""
import unittest
import os
import glob
import sys
import shutil
import subprocess
import utils.logger as logger
from collections import namedtuple
from test_servers.log_parser import *
# Directory holding the checked-in baseline logs the tests diff against.
Test_File_Directory = os.path.join(
    os.path.dirname(__file__), 'log_baseline_test_files'
)
# Path to the RESTler entry point, one directory above this test file.
Restler_Path = os.path.join(os.path.dirname(__file__), '..', 'restler.py')
# Command-line prefix shared by every test's RESTler invocation.
Common_Settings = [
    "python", "-B", Restler_Path, "--use_test_socket",
    '--custom_mutations', f'{os.path.join(Test_File_Directory, "test_dict.json")}',
    "--garbage_collection_interval", "30", "--host", "unittest"
]
class FunctionalityTests(unittest.TestCase):
    """ End-to-end functional tests for the RESTler engine.
    Each test launches restler.py as a subprocess against the unit-test
    socket and diffs the logs it produces against the checked-in baseline
    logs in log_baseline_test_files. When a known breaking change
    invalidates a baseline, re-run the matching scenario manually and
    replace the corresponding log file.

    Fixes over the previous version: test_payload_body_checker and
    test_examples_checker now actually assert the fuzzing-log diff (the
    parsers were built but never compared), test_examples_checker's
    failure messages no longer say "Payload body", and the duplicated
    run/diff boilerplate is factored into private helpers.
    """
    def get_experiments_dir(self):
        """ Returns the most recent experiments directory that contains the restler logs
        @return: The experiments dir
        @rtype : Str
        """
        results_dir = os.path.join(os.getcwd(), 'RestlerResults')
        # Return the newest experiments directory in RestlerResults
        return max(glob.glob(os.path.join(results_dir, 'experiment*/')), key=os.path.getmtime)

    def get_network_log_path(self, dir, log_type):
        """ Returns the path to the network log of the specified type
        @param dir: The directory that contains the log
        @type  dir: Str
        @param log_type: The type of network log to get
        @type  log_type: Str
        @return: The path to the network log
        @rtype : Str
        """
        return glob.glob(os.path.join(dir, 'logs', f'network.{log_type}.*.1.txt'))[0]

    def tearDown(self):
        # Remove this run's results so the next test cannot pick up stale logs.
        try:
            shutil.rmtree(self.get_experiments_dir())
        except Exception as err:
            print(f"tearDown function failed: {err!s}.\n"
                  "Experiments directory was not deleted.")

    def _run_restler(self, extra_args):
        """ Runs RESTler with Common_Settings plus extra_args; fails the
        calling test on any stderr output or a non-zero exit code.
        @param extra_args: Scenario-specific command-line arguments
        @type  extra_args: List[str]
        @return: The experiments directory produced by the run
        @rtype : Str
        """
        result = subprocess.run(Common_Settings + extra_args, capture_output=True)
        if result.stderr:
            self.fail(result.stderr)
        try:
            result.check_returncode()
        except subprocess.CalledProcessError:
            self.fail(f"Restler returned non-zero exit code: {result.returncode}")
        return self.get_experiments_dir()

    def _diff_log(self, parser_cls, baseline_name, test_log_path, fail_msg, **parser_kwargs):
        """ Diffs a baseline log against the matching log of the current run.
        @param parser_cls: Log parser class (e.g. FuzzingLogParser)
        @param baseline_name: Baseline file name inside Test_File_Directory
        @param test_log_path: Path of the log produced by the current run
        @param fail_msg: Message used to fail the test on a mismatch
        @param parser_kwargs: Extra parser arguments (e.g. max_seq)
        """
        try:
            baseline = parser_cls(os.path.join(Test_File_Directory, baseline_name), **parser_kwargs)
            actual = parser_cls(test_log_path, **parser_kwargs)
            self.assertTrue(baseline.diff_log(actual))
        except TestFailedException:
            self.fail(fail_msg)

    def test_smoke_test(self):
        """ This checks that the directed smoke test executes all
        of the expected requests in the correct order with correct
        arguments from the dictionary.
        """
        experiments_dir = self._run_restler([
            '--fuzzing_mode', 'directed-smoke-test',
            '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}'
        ])
        self._diff_log(FuzzingLogParser, "smoke_test_testing_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING),
                       "Smoke test failed: Fuzzing")
        self._diff_log(GarbageCollectorLogParser, "smoke_test_gc_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC),
                       "Smoke test failed: Garbage Collector")

    def test_create_once(self):
        """ This checks that a directed smoke test, using create once endpoints,
        executes all of the expected requests in the correct order with correct
        arguments from the dictionary.
        """
        experiments_dir = self._run_restler([
            '--fuzzing_mode', 'directed-smoke-test',
            '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}',
            '--settings', f'{os.path.join(Test_File_Directory, "test_settings_createonce.json")}'
        ])
        self._diff_log(FuzzingLogParser, "create_once_testing_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING),
                       "Create-once failed: Fuzzing")
        self._diff_log(FuzzingLogParser, "create_once_pre_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_PREPROCESSING),
                       "Create-once failed: Preprocessing")
        self._diff_log(GarbageCollectorLogParser, "create_once_gc_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC),
                       "Create-once failed: Garbage Collector")

    def test_checkers(self):
        """ This checks that a directed smoke test, with checkers enabled (sans namespacerule,
        payloadbody, examples), bugs planted for each checker, and a main driver bug, will
        produce the appropriate bug buckets and the requests will be sent in the correct order.
        """
        experiments_dir = self._run_restler([
            '--fuzzing_mode', 'directed-smoke-test',
            '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar_bugs.py")}',
            '--enable_checkers', '*'
        ])
        self._diff_log(FuzzingLogParser, "checkers_testing_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING),
                       "Checkers failed: Fuzzing")
        self._diff_log(BugLogParser, "checkers_bug_buckets.txt",
                       os.path.join(experiments_dir, 'bug_buckets', 'bug_buckets.txt'),
                       "Checkers failed: Bug Buckets")
        self._diff_log(GarbageCollectorLogParser, "checkers_gc_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC),
                       "Checkers failed: Garbage Collector")

    def test_multi_dict(self):
        """ This checks that the directed smoke test executes all of the expected
        requests in the correct order when a second dictionary is specified in the
        settings file to be used for one of the endpoints.
        """
        experiments_dir = self._run_restler([
            '--fuzzing_mode', 'directed-smoke-test',
            '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}',
            '--settings', f'{os.path.join(Test_File_Directory, "test_settings_multidict.json")}'
        ])
        self._diff_log(FuzzingLogParser, "multidict_testing_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING),
                       "Multi-dict failed: Fuzzing")
        self._diff_log(GarbageCollectorLogParser, "multidict_gc_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC),
                       "Multi-dict failed: Garbage Collector")

    def test_fuzz(self):
        """ This checks that a bfs-cheap fuzzing run executes all of the expected
        requests in the correct order with correct arguments from the dictionary.
        The run uses a 0.1 time budget (6 minutes) and the first 300 sequences
        are compared against the baseline.
        """
        Fuzz_Time = 0.1 # 6 minutes
        Num_Sequences = 300
        experiments_dir = self._run_restler([
            '--fuzzing_mode', 'bfs-cheap',
            '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}',
            '--time_budget', f'{Fuzz_Time}', '--enable_checkers', '*',
            '--disable_checkers', 'namespacerule'
        ])
        self._diff_log(FuzzingLogParser, "fuzz_testing_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING),
                       "Fuzz failed: Fuzzing", max_seq=Num_Sequences)

    def test_payload_body_checker(self):
        """ This checks that the payload body checker sends all of the correct
        requests in the correct order and an expected 500 bug is logged.
        """
        experiments_dir = self._run_restler([
            '--fuzzing_mode', 'directed-smoke-test',
            '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}',
            '--enable_checkers', 'payloadbody'
        ])
        # The fuzzing log is now actually diffed (the original built the
        # parsers here but never compared them).
        self._diff_log(FuzzingLogParser, "payloadbody_testing_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING),
                       "Payload body failed: Fuzzing")
        self._diff_log(BugLogParser, "payloadbody_bug_buckets.txt",
                       os.path.join(experiments_dir, 'bug_buckets', 'bug_buckets.txt'),
                       "Payload body failed: Bug Buckets")
        self._diff_log(GarbageCollectorLogParser, "payloadbody_gc_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC),
                       "Payload body failed: Garbage Collector")

    def test_examples_checker(self):
        """ This checks that the examples checker sends the correct requests
        in the correct order when query or body examples are present
        """
        experiments_dir = self._run_restler([
            '--fuzzing_mode', 'directed-smoke-test',
            '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}',
            '--enable_checkers', 'examples'
        ])
        # The fuzzing log is now diffed, and the failure messages correctly
        # say "Examples" instead of "Payload body".
        self._diff_log(FuzzingLogParser, "examples_testing_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING),
                       "Examples failed: Fuzzing")
        self._diff_log(GarbageCollectorLogParser, "examples_gc_log.txt",
                       self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC),
                       "Examples failed: Garbage Collector")
| 44.790123 | 134 | 0.673512 | 13,465 | 0.927853 | 0 | 0 | 0 | 0 | 0 | 0 | 5,272 | 0.363286 |
c5f002e8316318f1277879acd723602d8667b472 | 623 | py | Python | conf_site/api/tests/test_conference.py | pydata/conf_site | 9a14459c36a0f2f0f552161c18de734ee87855da | [
"MIT"
] | 13 | 2015-05-22T17:10:22.000Z | 2021-07-15T16:45:19.000Z | conf_site/api/tests/test_conference.py | pydata/conf_site | 9a14459c36a0f2f0f552161c18de734ee87855da | [
"MIT"
] | 758 | 2015-03-18T13:39:25.000Z | 2022-03-31T13:14:09.000Z | conf_site/api/tests/test_conference.py | pydata/conf_site | 9a14459c36a0f2f0f552161c18de734ee87855da | [
"MIT"
] | 16 | 2015-03-24T18:53:17.000Z | 2020-10-22T21:30:02.000Z | from django.urls import reverse
from rest_framework import status
from conf_site.api.tests import ConferenceSiteAPITestCase
class ConferenceSiteAPIConferenceTestCase(ConferenceSiteAPITestCase):
    """Tests for the conference-detail API endpoint."""
    def test_conference_api_anonymous_user(self):
        # The conference detail endpoint is public: an anonymous GET
        # succeeds and returns the conference's title and date range.
        response = self.client.get(reverse("conference-detail"))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, {
            "title": self.conference.title,
            # Dates are serialized as YYYY-MM-DD strings.
            "start_date": self.conference.start_date.strftime("%Y-%m-%d"),
            "end_date": self.conference.end_date.strftime("%Y-%m-%d"),
        })
| 36.647059 | 74 | 0.717496 | 494 | 0.792937 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.109149 |
c5f1690fb5bdf9e0c170536780d8205e10259b5d | 94 | py | Python | backend/app/app/crud/__init__.py | luovkle/FastAPI-Note-Taking | 317d92e75cbba3a6e633d6cf3d0bed0021412967 | [
"MIT"
] | null | null | null | backend/app/app/crud/__init__.py | luovkle/FastAPI-Note-Taking | 317d92e75cbba3a6e633d6cf3d0bed0021412967 | [
"MIT"
] | null | null | null | backend/app/app/crud/__init__.py | luovkle/FastAPI-Note-Taking | 317d92e75cbba3a6e633d6cf3d0bed0021412967 | [
"MIT"
] | null | null | null | from .crud_user import crud_user # noqa: F401
from .crud_note import crud_note # noqa: F401
| 31.333333 | 46 | 0.765957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.255319 |
c5f1d3547439c81f976d6e6a8b962ce796bdbe68 | 125 | py | Python | fhirbug/constants.py | VerdantAI/fhirbug | 8a8e2555c0edfeee0a7edbc8d67f2fcb2edd3c2d | [
"MIT"
] | 8 | 2019-01-06T18:11:20.000Z | 2022-02-24T02:06:55.000Z | fhirbug/constants.py | VerdantAI/fhirbug | 8a8e2555c0edfeee0a7edbc8d67f2fcb2edd3c2d | [
"MIT"
] | 5 | 2019-01-25T14:15:35.000Z | 2021-06-01T23:22:41.000Z | fhirbug/constants.py | VerdantAI/fhirbug | 8a8e2555c0edfeee0a7edbc8d67f2fcb2edd3c2d | [
"MIT"
] | 3 | 2020-10-14T23:09:29.000Z | 2021-08-09T19:27:31.000Z |
# Audit Event Outcomes
AUDIT_SUCCESS = "0"
AUDIT_MINOR_FAILURE = "4"
AUDIT_SERIOUS_FAILURE = "8"
AUDIT_MAJOR_FAILURE = "12"
| 17.857143 | 27 | 0.76 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.28 |
c5f2f4c298f27619335b8d96d680e00a6c2bd3c9 | 2,653 | py | Python | python_materials/web-server-s3.py | C-Lizzo14/CS488S21 | 7b8e1ab2dfe692aff9c9dd6c283a4ba378cfc1e6 | [
"MIT"
] | 2 | 2022-01-25T13:45:30.000Z | 2022-02-07T18:21:20.000Z | python_materials/web-server-s3.py | jyuan2pace/CS488OER | 38ab9f1d10bf1e36af6eb79574893c83b2e6456f | [
"CC0-1.0"
] | null | null | null | python_materials/web-server-s3.py | jyuan2pace/CS488OER | 38ab9f1d10bf1e36af6eb79574893c83b2e6456f | [
"CC0-1.0"
] | 2 | 2022-02-03T22:33:45.000Z | 2022-03-03T05:18:50.000Z | # Python3.7+
import socket
import json
HOST, PORT = '', 1600
def parse_request(text):
    """Parse a raw HTTP request (bytes) into its components.

    Returns ``[method, path, version, params]`` where params is a dict of
    form fields (bytes keys and values) parsed from the last line of a
    POST body, and empty for every other method.

    The per-pair debug ``print(pair)`` of the original is removed -- it
    wrote every submitted form field (including the password) to stdout
    on each request.
    """
    request_line = text.splitlines()[0].rstrip(b'\r\n')
    requests = request_line.split()
    params_dict = {}
    if requests[0] == b'POST':
        # The form-encoded body is assumed to be the request's last line.
        request_body = text.splitlines()[-1].rstrip(b'\r\n')
        for pair in request_body.split(b'&'):
            (key, value) = pair.split(b'=')
            params_dict[key] = value
    # Break down the request line into components
    return requests + [params_dict]
def handle_login(params_dict):
    """Check the submitted credentials against the userdb.json store.

    params_dict maps form-field names (bytes) to values (bytes), as
    produced by parse_request(). Returns True when the stored password
    for the user matches, False otherwise -- including an unknown user,
    which previously raised KeyError and killed the server loop.
    """
    with open("userdb.json", 'r') as result_f:
        creds = json.load(result_f)
    username = params_dict[b'Uname'].decode("utf-8")
    userpasswd = params_dict[b'Pass'].decode("utf-8")
    # .get() keeps a wrong username from crashing the request handler.
    return creds.get(username) == userpasswd
# --- Minimal single-threaded HTTP server loop ---
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_socket.bind((HOST, PORT))
listen_socket.listen(1)
print(f'Serving HTTP on port {PORT} ...')
while True:
    # One connection at a time; a single recv() is assumed to hold the
    # whole request (fine for this demo, not for real traffic).
    client_connection, client_address = listen_socket.accept()
    request_data = client_connection.recv(1024)
    # Print formatted request data a la 'curl -v'
    print(''.join(
        f'< {line}\n' for line in request_data.splitlines()
    ))
    requests = parse_request(request_data)
    print(requests)
    if requests[0] == b'GET':
        # Any GET serves the static login form below.
        http_response = b"""\
HTTP/1.1 200 OK
<!DOCTYPE html>
<html>
<head>
<title>Login Form</title>
<link rel="icon" href="data:,">
</head>
<body>
<h2>Login Page</h2><br>
<div class="login">
<form id="login" method="post" action="?">
<label><b>User Name
</b>
</label>
<input type="text" name="Uname" id="Uname" placeholder="Username">
<br><br>
<label><b>Password
</b>
</label>
<input type="Password" name="Pass" id="Pass" placeholder="Password">
<br><br>
<button type="submit" name="log" id="log" value="submitted"> Submit </button>
<br><br>
</form>
</div>
</body>
</html>
"""
    else:
        # Any non-GET (the form POST): requests[3] is the parsed form dict.
        if handle_login(requests[3]):
            http_response = b"""\
HTTP/1.1 200 OK
SUCCESS
"""
        else:
            http_response = b"""\
HTTP/1.1 200 OK
FAILED TO LOG IN
"""
    # NOTE(review): the responses carry no Content-Length header and no
    # blank line between the status line and the body -- some clients may
    # mis-parse them; confirm against RFC 7230 framing rules.
    client_connection.sendall(http_response)
    client_connection.close()
| 27.635417 | 92 | 0.583867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,102 | 0.415379 |
c5f32783266b0ae2f58b621ca0211597718a479a | 323 | py | Python | task_queue/management/commands/run_scheduler.py | 2600box/harvest | 57264c15a3fba693b4b58d0b6d4fbf4bd5453bbd | [
"Apache-2.0"
] | 9 | 2019-03-26T14:50:00.000Z | 2020-11-10T16:44:08.000Z | task_queue/management/commands/run_scheduler.py | 2600box/harvest | 57264c15a3fba693b4b58d0b6d4fbf4bd5453bbd | [
"Apache-2.0"
] | 22 | 2019-03-02T23:16:13.000Z | 2022-02-27T10:36:36.000Z | task_queue/management/commands/run_scheduler.py | 2600box/harvest | 57264c15a3fba693b4b58d0b6d4fbf4bd5453bbd | [
"Apache-2.0"
] | 5 | 2019-04-24T00:51:30.000Z | 2020-11-06T18:31:49.000Z | import asyncio
from django.core.management.base import BaseCommand
from Harvest.utils import get_logger
from task_queue.scheduler import QueueScheduler
logger = get_logger(__name__)
class Command(BaseCommand):
    """Management command that starts the task-queue scheduler."""
    # NOTE(review): the help text says "consumer" but handle() runs the
    # QueueScheduler -- possibly copied from the consumer command; confirm.
    help = "Run the queue consumer"
    def handle(self, *args, **options):
        # Presumably blocks for as long as the scheduler runs.
        QueueScheduler().run()
| 20.1875 | 51 | 0.755418 | 135 | 0.417957 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.074303 |
c5f3bfd369153aa479d911c36ea48a47b9a4d56f | 485 | py | Python | waateax/users/migrations/0004_auto_20200910_1516.py | hendu25/waatea | 668707cd14d3336cd74d7043473f7094f5d0db6e | [
"MIT"
] | 4 | 2020-09-10T11:51:48.000Z | 2021-08-18T21:25:22.000Z | waateax/users/migrations/0004_auto_20200910_1516.py | hendu25/waatea | 668707cd14d3336cd74d7043473f7094f5d0db6e | [
"MIT"
] | 22 | 2020-09-10T11:14:22.000Z | 2021-08-24T14:58:58.000Z | waateax/users/migrations/0004_auto_20200910_1516.py | hendu25/waatea | 668707cd14d3336cd74d7043473f7094f5d0db6e | [
"MIT"
] | 3 | 2020-09-13T06:37:14.000Z | 2021-08-17T13:51:23.000Z | # Generated by Django 3.0.10 on 2020-09-10 13:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: alters users.User.mobile_phone to a CharField
    # (blank allowed, max_length 255). Do not edit by hand.
    dependencies = [
        ('users', '0003_user_mobile_phone'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='mobile_phone',
            # preserve_default=False: the '' default only back-fills
            # existing rows during the migration; it is not kept on the model.
            field=models.CharField(blank=True, default='', max_length=255, verbose_name='Mobile phone number'),
            preserve_default=False,
        ),
    ]
| 24.25 | 111 | 0.618557 | 391 | 0.806186 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.251546 |
c5f43a8aa085560b1c95a6c088b9ec19d6141f65 | 2,005 | py | Python | examples/example2.py | alenaizan/resp | 41c58f465f9f3c00225dba9dfa54e5269a970240 | [
"BSD-3-Clause"
] | 10 | 2019-03-15T20:33:02.000Z | 2021-12-15T02:05:28.000Z | examples/example2.py | alenaizan/resp | 41c58f465f9f3c00225dba9dfa54e5269a970240 | [
"BSD-3-Clause"
] | 18 | 2018-06-13T04:10:39.000Z | 2022-03-18T08:17:33.000Z | examples/example2.py | alenaizan/resp | 41c58f465f9f3c00225dba9dfa54e5269a970240 | [
"BSD-3-Clause"
] | 5 | 2018-06-13T02:57:28.000Z | 2021-06-08T15:43:15.000Z | import psi4
import resp
# Initialize two different conformations of ethanol
geometry = """C 0.00000000 0.00000000 0.00000000
C 1.48805540 -0.00728176 0.39653260
O 2.04971655 1.37648153 0.25604810
H 3.06429978 1.37151670 0.52641124
H 1.58679428 -0.33618761 1.43102358
H 2.03441010 -0.68906454 -0.25521028
H -0.40814044 -1.00553466 0.10208540
H -0.54635470 0.68178278 0.65174288
H -0.09873888 0.32890585 -1.03449097
"""
mol1 = psi4.geometry(geometry)
mol1.update_geometry()
mol1.set_name('conformer1')
geometry = """C 0.00000000 0.00000000 0.00000000
C 1.48013500 -0.00724300 0.39442200
O 2.00696300 1.29224100 0.26232800
H 2.91547900 1.25572900 0.50972300
H 1.61500700 -0.32678000 1.45587700
H 2.07197500 -0.68695100 -0.26493400
H -0.32500012 1.02293415 -0.30034094
H -0.18892141 -0.68463906 -0.85893815
H -0.64257065 -0.32709111 0.84987482
"""
mol2 = psi4.geometry(geometry)
mol2.update_geometry()
mol2.set_name('conformer2')
molecules = [mol1, mol2]
# Specify options
options = {'VDW_SCALE_FACTORS' : [1.4, 1.6, 1.8, 2.0],
'VDW_POINT_DENSITY' : 1.0,
'RESP_A' : 0.0005,
'RESP_B' : 0.1,
'RESTRAINT' : True,
'IHFREE' : False,
'WEIGHT' : [1, 1],
}
# Call for first stage fit
charges1 = resp.resp(molecules, options)
print("Restrained Electrostatic Potential Charges")
print(charges1[1])
options['RESP_A'] = 0.001
resp.set_stage2_constraint(molecules[0], charges1[1], options)
# Add constraint for atoms fixed in second stage fit
options['grid'] = []
options['esp'] = []
for mol in range(len(molecules)):
options['grid'].append('%i_%s_grid.dat' %(mol+1, molecules[mol].name()))
options['esp'].append('%i_%s_grid_esp.dat' %(mol+1, molecules[mol].name()))
# Call for second stage fit
charges2 = resp.resp(molecules, options)
print("\nStage Two\n")
print("RESP Charges")
print(charges2[1])
| 30.378788 | 79 | 0.653367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,149 | 0.573067 |
c5f4f68fa80ed3db7ce10e3d2a40e3db67dcd198 | 627 | py | Python | data_processing/test.py | FMsunyh/keras-retinanet | cb86a987237d3f6bd504004e2b186cf65606c890 | [
"Apache-2.0"
] | null | null | null | data_processing/test.py | FMsunyh/keras-retinanet | cb86a987237d3f6bd504004e2b186cf65606c890 | [
"Apache-2.0"
] | null | null | null | data_processing/test.py | FMsunyh/keras-retinanet | cb86a987237d3f6bd504004e2b186cf65606c890 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 5/31/2018 9:20 PM
# @Author : sunyonghai
# @File : test.py
# @Software: ZJ_AI
from multiprocessing import Pool, Lock, Value
import os
tests_count = 80
lock = Lock()
counter = Value('i', 0)  # shared int -- the analogue of Java's atomic variable


def run(fn):
    """Worker body: atomically bump the shared counter, then announce the
    task number and worker PID."""
    global tests_count, lock, counter
    with lock:
        counter.value += 1
    message = 'NO. (%d/%d) test start. PID: %d ' % (counter.value, tests_count, os.getpid())
    print(message)
    # do something below ...
if __name__ == "__main__":
pool = Pool(4)
# 80个任务,会运行run()80次,每次传入xrange数组一个元素
pool.map(run, range(80))
pool.close()
pool.join() | 20.225806 | 90 | 0.61244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 306 | 0.445415 |
c5f68e1534f103f39e915aa377cb7ad3e18fa265 | 2,280 | py | Python | neurofire/models/hed/fusionhed.py | nasimrahaman/neurofire | 4d645be149165da4c8202fe13b3c2360c2832383 | [
"MIT"
] | 9 | 2018-01-29T07:30:14.000Z | 2022-03-09T04:23:14.000Z | neurofire/models/hed/fusionhed.py | nasimrahaman/neurofire | 4d645be149165da4c8202fe13b3c2360c2832383 | [
"MIT"
] | 1 | 2017-12-19T14:27:18.000Z | 2017-12-19T15:35:59.000Z | neurofire/models/hed/fusionhed.py | nasimrahaman/neurofire | 4d645be149165da4c8202fe13b3c2360c2832383 | [
"MIT"
] | 5 | 2018-01-27T12:16:37.000Z | 2020-01-20T13:14:26.000Z | import torch.nn as nn
import torch
import torch.nn.functional as F
from .hed import HED
class FusionHED(nn.Module):
    """Three-scale fusion of HED edge detectors.

    Runs three independent HED branches (no weight sharing) on an
    upsampled (2x), original, and downsampled (0.5x) copy of the input,
    rescales every side output back to the input resolution, and fuses
    the final side output of each branch with a 1x1 convolution.

    The only behavioral change from the original is replacing the
    deprecated ``F.sigmoid`` with ``torch.sigmoid``.
    """
    def __init__(self, in_channels=3,
                 out_channels=1, dilation=1,
                 conv_type_key='default',
                 block_type_key='default',
                 output_type_key='default',
                 upsampling_type_key='default'):
        super(FusionHED, self).__init__()
        self.out_channels = out_channels
        # One HED branch per scale; all three share the same configuration.
        self.hed1 = HED(in_channels=in_channels, out_channels=out_channels, dilation=dilation,
                        conv_type_key=conv_type_key, block_type_key=block_type_key,
                        output_type_key=output_type_key, upsampling_type_key=upsampling_type_key)
        self.hed2 = HED(in_channels=in_channels, out_channels=out_channels, dilation=dilation,
                        conv_type_key=conv_type_key, block_type_key=block_type_key,
                        output_type_key=output_type_key, upsampling_type_key=upsampling_type_key)
        self.hed3 = HED(in_channels=in_channels, out_channels=out_channels, dilation=dilation,
                        conv_type_key=conv_type_key, block_type_key=block_type_key,
                        output_type_key=output_type_key, upsampling_type_key=upsampling_type_key)
        self.upscale = nn.UpsamplingBilinear2d(scale_factor=2)
        self.downscale = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # 1x1 conv that fuses the final side outputs of the three branches.
        self.fusion = nn.Conv2d(3*out_channels, out_channels, 1)

    def forward(self, x):
        """Return the 18 rescaled side outputs of the three branches
        followed by the fused, sigmoid-activated prediction."""
        ds = self.downscale
        us = self.upscale
        # upscaled branch: run HED at 2x resolution, pool outputs back down
        d11, d12, d13, d14, d15, d16 = self.hed1(self.upscale(x))
        d11, d12, d13, d14, d15, d16 = ds(d11), ds(d12), ds(d13), ds(d14), ds(d15), ds(d16)
        # normal branch
        d21, d22, d23, d24, d25, d26 = self.hed2(x)
        # downscaled branch: run HED at 0.5x resolution, upsample outputs
        d31, d32, d33, d34, d35, d36 = self.hed3(ds(x))
        d31, d32, d33, d34, d35, d36 = us(d31), us(d32), us(d33), us(d34), us(d35), us(d36)
        d_final = self.fusion(torch.cat((d16, d26, d36), 1))
        # torch.sigmoid replaces the deprecated F.sigmoid; the result is
        # also kept on self.output for callers that read it after forward().
        self.output = torch.sigmoid(d_final)
        return (d11, d12, d13, d14, d15, d16,
                d21, d22, d23, d24, d25, d26,
                d31, d32, d33, d34, d35, d36,
                self.output)
| 43.846154 | 97 | 0.617982 | 2,189 | 0.960088 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.038158 |
c5f77d1a3cca856b474207ec3945b97a6cf71f6d | 303 | py | Python | examples/filter_simple.py | uvm-plaid/dduo-python | 368e1aa47a913f7adcf1e5db1e4a329a26d8d7ba | [
"MIT"
] | 4 | 2021-06-13T20:00:34.000Z | 2022-01-03T18:17:49.000Z | examples/filter_simple.py | uvm-plaid/dduo-python | 368e1aa47a913f7adcf1e5db1e4a329a26d8d7ba | [
"MIT"
] | null | null | null | examples/filter_simple.py | uvm-plaid/dduo-python | 368e1aa47a913f7adcf1e5db1e4a329a26d8d7ba | [
"MIT"
] | null | null | null | import sys
# Make the local `duet` package importable when this example is run from the
# examples/ directory.
sys.path.append("../")

import duet
from duet import pandas as pd  # duet's privacy-tracking wrapper around pandas

# Privacy parameters for the Renyi-DP gaussian mechanism below.
epsilon = 1.0
alpha = 10

df = pd.read_csv("test.csv")

# NOTE(review): the filter budget appears to be (alpha=9, epsilon=1.0) — the
# arguments are positional, so confirm the expected order against RenyiFilter.
with duet.RenyiFilter(9,1.0):
    # Release a noisy row count of the dataframe under (alpha, epsilon) Renyi-DP.
    noisy_count = duet.renyi_gauss(df.shape[0], α = alpha, ε = epsilon)
    print(f'NoisyCount : {noisy_count}')

# Report the total privacy budget consumed so far.
duet.print_privacy_cost()
c5fad6b1fa96e688327b68e8eed1828289790bab | 473 | py | Python | tartaruga espiral.py | talitadeoa/head-first-code | 3f89e5227f0996ed83832c6b848a77a4908e6168 | [
"MIT"
] | null | null | null | tartaruga espiral.py | talitadeoa/head-first-code | 3f89e5227f0996ed83832c6b848a77a4908e6168 | [
"MIT"
] | null | null | null | tartaruga espiral.py | talitadeoa/head-first-code | 3f89e5227f0996ed83832c6b848a77a4908e6168 | [
"MIT"
] | null | null | null | import turtle
def faz_quadradin(the_turtle):
    """Draw a single square with 100-unit sides using the given turtle."""
    for _ in range(4):
        the_turtle.forward(100)
        the_turtle.right(90)


def faz_espiral(the_turtle):
    """Draw 36 squares, turning 10 degrees after each one (a full 360)."""
    for _ in range(36):
        faz_quadradin(the_turtle)
        the_turtle.right(10)


# Two turtles: a red one and a blue one, drawn as turtle shapes.
tortuguinha = turtle.Turtle()
tortuguinha.shape('turtle')
tortuguinha.color('red')

tortugo = turtle.Turtle()
tortugo.shape('turtle')
tortugo.color('blue')

# Draw the red spiral, then the blue one offset by 5 degrees.
faz_espiral(tortuguinha)
tortugo.right(5)
faz_espiral(tortugo)
| 19.708333 | 33 | 0.710359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.057082 |
c5fbc71da36428911d857cb7dc5361b3f8ead4b6 | 3,375 | py | Python | rechun/dl/multimodelcontext.py | alainjungo/reliability-challenges-uncertainty | 21e86f6e2a5d2520b5767dce48bbcf2b11773788 | [
"MIT"
] | 56 | 2019-07-10T06:02:11.000Z | 2021-12-21T08:11:22.000Z | rechun/dl/multimodelcontext.py | alainjungo/reliability-challenges-uncertainty | 21e86f6e2a5d2520b5767dce48bbcf2b11773788 | [
"MIT"
] | 4 | 2019-09-26T08:51:58.000Z | 2021-06-08T20:27:53.000Z | rechun/dl/multimodelcontext.py | alainjungo/reliability-challenges-uncertainty | 21e86f6e2a5d2520b5767dce48bbcf2b11773788 | [
"MIT"
] | 8 | 2019-10-21T12:43:08.000Z | 2021-12-02T08:14:38.000Z | import torch
import common.trainloop.context as ctx
import common.trainloop.factory as factory
import common.model.management as mgt
import common.utils.torchhelper as th
class MultiModelTorchTrainContext(ctx.TorchTrainContext):
    """Train context that manages extra named models/optimizers alongside the
    standard model handled by TorchTrainContext.

    The extra models are declared in the configuration's `others` section via
    `model_names`, `additional_models` and `additional_optimizers` (parallel
    lists indexed together).
    """

    def __init__(self, device_str) -> None:
        super().__init__(device_str)
        # name -> nn.Module and name -> optimizer for the extra models
        self.additional_models = {}
        self.additional_optimizers = {}

    def load_from_new(self):
        """Build the standard model plus all additional models from config.

        :raises ValueError: if any of the required `others` entries is missing.
        """
        super().load_from_new()  # retrieval of the first/standard model

        if not hasattr(self.config.others, 'model_names'):
            raise ValueError('model_names entry missing in others section of configuration')
        if not hasattr(self.config.others, 'additional_models'):
            raise ValueError('additional_models entry missing in others section of configuration')
        if not hasattr(self.config.others, 'additional_optimizers'):
            raise ValueError('additional_optimizers entry missing in others section of configuration')

        for i, name in enumerate(self.config.others.model_names):
            # instantiate, optionally wrap for multi-GPU, and move to device
            model = factory.get_model(self.config.others.additional_models[i])
            model = self._multi_gpu_if_available(model)
            model.to(self.device)
            self.additional_models[name] = model

            optimizer = factory.get_optimizer(model.parameters(), self.config.others.additional_optimizers[i])
            self.additional_optimizers[name] = optimizer

            # NOTE(review): this backs up the *main* model's/optimizer's
            # parameters under each additional model's path — presumably it
            # should use additional_models[i] / additional_optimizers[i];
            # confirm before relying on the backups.
            mgt.model_service.backup_model_parameters(self.model_files.model_path(postfix=name),
                                                      self.config.model.to_dictable_parameter(),
                                                      self.config.optimizer.to_dictable_parameter())

    def save_to_checkpoint(self, epoch: int, is_best=False):
        """Checkpoint the standard model, then each additional model under a
        name-postfixed checkpoint path."""
        super().save_to_checkpoint(epoch, is_best)
        for name in self.additional_models:
            checkpoint_path = self.model_files.build_checkpoint_path(epoch, is_best=is_best, postfix=name)
            mgt.model_service.save_checkpoint(checkpoint_path, epoch, self.additional_models[name],
                                              self.additional_optimizers[name])

    def load_from_checkpoint(self, epoch):
        """Restore the standard model, then rebuild and restore each
        additional model/optimizer from its name-postfixed checkpoint."""
        super().load_from_checkpoint(epoch)
        for name in self.additional_models:
            # build, since we know it is a int epoch
            checkpoint_path = self.model_files.build_checkpoint_path(epoch, postfix=name)
            model, optimizer = mgt.model_service.load_model_from_parameters(self.model_files.model_path(postfix=name),
                                                                            with_optimizer=True)
            mgt.model_service.load_checkpoint(checkpoint_path, model, optimizer)
            model = self._multi_gpu_if_available(model)
            self.additional_models[name] = model.to(self.device)
            self.additional_optimizers[name] = th.optimizer_to_device(optimizer, self.device)

    def set_mode(self, is_train: bool) -> None:
        """Switch the standard and all additional models between train and
        eval mode, and toggle autograd accordingly."""
        self.is_train = is_train
        if self.is_train:
            self.model.train()
            for model in self.additional_models.values():
                model.train()
        else:
            self.model.eval()
            for model in self.additional_models.values():
                model.eval()
        torch.set_grad_enabled(self.is_train)
| 43.831169 | 118 | 0.651259 | 3,200 | 0.948148 | 0 | 0 | 0 | 0 | 0 | 0 | 336 | 0.099556 |
c5fc1fd40bd0f609dfec322f4db82f5ebd97d46e | 704 | py | Python | ALGOs/Perceptron/logic_gates.py | iamharshit/ML_works | b0bb53c5a60312719d15e25b727c54cbab65b4af | [
"MIT"
] | 1 | 2016-11-29T04:28:09.000Z | 2016-11-29T04:28:09.000Z | ALGOs/Perceptron/logic_gates.py | iamharshit/ML_works | b0bb53c5a60312719d15e25b727c54cbab65b4af | [
"MIT"
] | null | null | null | ALGOs/Perceptron/logic_gates.py | iamharshit/ML_works | b0bb53c5a60312719d15e25b727c54cbab65b4af | [
"MIT"
] | null | null | null | #The input to the gate can only be 0 or 1
# Single-layer perceptrons. Inputs are 0 or 1; each gate fires (returns True)
# when the weighted sum of its inputs reaches the threshold t.

def AND_perceptron(x1, x2):
    """Logical AND: fires only when both inputs are 1."""
    w1, w2, t = 1, 1, 2
    return w1*x1 + w2*x2 >= t

def OR_perceptron(x1, x2):
    """Logical OR: fires when at least one input is 1."""
    w1, w2, t = 1, 1, 1
    return w1*x1 + w2*x2 >= t

def NOT_perceptron(x1):
    """Logical NOT: fires only when the input is 0.

    Bug fix: this was misnamed AND_perceptron, which redefined (and thereby
    destroyed) the AND gate above; the weights (-1, threshold 0) implement NOT.
    """
    w1, t = -1, 0
    return w1*x1 >= t

# Multi-layer perceptron: XOR is not linearly separable, so it needs a
# hidden layer.

def XOR_perceptron(x1, x2):
    """Logical XOR built from a two-layer perceptron network."""
    w1, w2, t = 1, 1, 0.5
    h_1_1 = (w1*x1 + w2*x2 >= t)     # layer 1, node 1 (OR-like)
    w1, w2, t = -1, -1, -1.5
    h_1_2 = (w1*x1 + w2*x2 >= t)     # layer 1, node 2 (NAND-like)
    w1, w2, t = 1, 1, 1.5
    return w1*h_1_1 + w2*h_1_2 >= t  # layer 2 / output (AND of the two)
# Print the XOR truth table. Using print() with parentheses is valid in both
# Python 2 and Python 3; the original bare `print expr` statements were
# Python-2-only syntax and a SyntaxError under Python 3.
print(XOR_perceptron(0,0))
print(XOR_perceptron(0,1))
print(XOR_perceptron(1,0))
print(XOR_perceptron(1,1))
c5fce436240d3d6bd829247715a353298fbb9868 | 19,916 | py | Python | Analysis/Metric_Impact_Hijacking/give_metric_ases_from_clusters.py | cgeorgitsis/ai4netmon | 36c4c1695fd980705d3e3f76385cda14baf7f397 | [
"MIT"
] | null | null | null | Analysis/Metric_Impact_Hijacking/give_metric_ases_from_clusters.py | cgeorgitsis/ai4netmon | 36c4c1695fd980705d3e3f76385cda14baf7f397 | [
"MIT"
] | null | null | null | Analysis/Metric_Impact_Hijacking/give_metric_ases_from_clusters.py | cgeorgitsis/ai4netmon | 36c4c1695fd980705d3e3f76385cda14baf7f397 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import random, os, json
from collections import defaultdict
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans, SpectralClustering
from scipy.spatial.distance import pdist, squareform
from scipy.sparse import csgraph
from numpy import linalg as LA
from sklearn.metrics import silhouette_score
# Output file for the selected RIPE RIS monitors — presumably consumed by the
# commented-out driver code at the bottom of this file; confirm before reuse.
PROXIMITY_FNAME = 'selected_ripe_ris__monitors_from_pathlens_100k.json'
def get_argmax_total_similarity(similarity_matrix, from_items=None, rank_normalization=False):
    '''
    Finds the item of a matrix (similarity_matrix) that has the maximum aggregate similarity to all other items.
    If the "from_items" is not None, then only the rows/columns of the matrix in the from_items list are taken into account.
    :param similarity_matrix:   (pandas.DataFrame) an NxN dataframe; should be (a) symmetric and (b) values {i,j} to
                                represent the similarity between item of row i and column j
    :param from_items:  (list/set) a subset of the items (rows/columns) from which the item with the max similiarity will be selected
    :param rank_normalization:  (boolean) whether to modify the similarity matrix giving more emphasis to most similar values per row
                                by dividing each element with the rank it appears in the sorted list of values of the row
                                e.g., a_row = [0.5, 0.3, 0.4] --> modified_row = [0.5/1, 0.3/3, 0.4/2] = [0.5, 0.1, 0.2]
                                e.g., a_row = [0.1, 0.1, 0.4] --> modified_row = [0.1/2, 0.1/3, 0.4/1] = [0.05, 0.033, 0.4]
    :return: (scalar, e.g., str or int) the index of the item in the dataframe that has the max total similarity
    '''
    if from_items is None:
        df = similarity_matrix.copy()
    else:
        df = similarity_matrix.loc[from_items, from_items].copy()

    np.fill_diagonal(df.values, np.nan)  # set self-similarity to nan so that it is not taken into account

    if rank_normalization:
        for p1 in df.index:
            sorted_indexes = list(df.loc[p1, :].sort_values(ascending=False).index)
            df.loc[p1, sorted_indexes] = df.loc[p1, sorted_indexes] * [1.0 / i for i in range(1, 1 + df.shape[0])]

    sum_similarities = np.nansum(df, axis=1)
    if np.max(sum_similarities) == 0:  # all similarities are nan or zero -> pick uniformly at random
        # Bug fix: the original sampled from `from_items`, which crashes with a
        # TypeError when from_items is None, and random.sample() no longer
        # accepts sets (Python >= 3.11). df.index equals from_items when it was
        # given, and the full index otherwise.
        next_item = random.sample(list(df.index), 1)[0]
    else:
        next_item = df.index[np.argmax(sum_similarities)]

    return next_item
def greedy_most_similar_elimination(similarity_matrix, rank_normalization=False):
    '''
    Repeatedly removes the item with the largest aggregate similarity among the
    items that have not been removed yet, considering only similarities between
    the remaining items. Each removed item is prepended to the result, so the
    returned ordering starts with the item that survived longest (the least
    similar one) and ends with the first one removed.
    :param similarity_matrix:   (pandas.DataFrame) an NxN dataframe; should be (a) symmetric and (b) values {i,j} to
                                represent the similarity between item of row i and column j
    :param rank_normalization:  (boolean) whether to modify the similarity matrix giving more emphasis to most similar values per row
    :return: (list) a list of ordered items (from the input's index); the first item is the least similar
    '''
    ordered = []
    remaining = set(similarity_matrix.index)
    while remaining:
        picked = get_argmax_total_similarity(similarity_matrix,
                                             from_items=list(remaining),
                                             rank_normalization=rank_normalization)
        ordered.insert(0, picked)
        remaining.discard(picked)
    return ordered
def get_argmin_total_similarity(similarity_matrix, from_items=None):
    '''
    Finds the item of a matrix (similarity_matrix) that has the minimum aggregate similarity to all other items.
    If the "from_items" is not None, then only the (a) rows of the matrix in the from_items list and (b) the columns
    of the matrix NOT in the from_items list are taken into account.
    :param similarity_matrix:   (pandas.DataFrame) an NxN dataframe; should be (a) symmetric and (b) values {i,j} to
                                represent the similarity between item of row i and column j
    :param from_items:  (list/set) a subset of the items (rows/columns) from which the item with the min similiarity will be selected
    :return: (scalar, e.g., str or int) the index of the item in the dataframe that has the min total similarity
    '''
    df = similarity_matrix.copy()
    np.fill_diagonal(df.values, np.nan)  # set self-similarity to nan so that it is not taken into account
    if from_items is not None:
        other_items = list(set(df.index) - set(from_items))
        df = df.loc[from_items, other_items]

    sum_similarities = np.nansum(df, axis=1)
    if np.max(sum_similarities) == 0:  # all similarities are nan or zero -> pick uniformly at random
        # Bug fix: the original sampled from `from_items`, which crashes with a
        # TypeError when from_items is None, and random.sample() no longer
        # accepts sets (Python >= 3.11). df.index equals from_items when it was
        # given, and the full index otherwise.
        next_item = random.sample(list(df.index), 1)[0]
    else:
        next_item = df.index[np.argmin(sum_similarities)]

    return next_item
def greedy_least_similar_selection(similarity_matrix, nb_items=None):
    '''
    Builds a list by repeatedly appending the item with the smallest aggregate
    similarity. The first pick considers all items; every subsequent pick
    considers only the similarities between the not-yet-selected items and the
    already selected ones. The first element of the returned list is the least
    similar item overall.
    :param similarity_matrix:   (pandas.DataFrame) an NxN dataframe; should be (a) symmetric and (b) values {i,j} to
                                represent the similarity between item of row i and column j
    :param nb_items:    (int) number of items to be selected; if None all items are selected in the returned list
    :return: (list) a list of ordered items (from the input's index); the first item is the least similar
    '''
    total = similarity_matrix.shape[0]
    count = total if (nb_items is None or nb_items > total) else nb_items

    chosen = []
    while len(chosen) < count:
        if not chosen:
            pool = None  # first pick: consider every item
        else:
            pool = list(set(similarity_matrix.index) - set(chosen))
        chosen.append(get_argmin_total_similarity(similarity_matrix, from_items=pool))
    return chosen
def sample_from_clusters(cluster_members_dict, nb_items=None):
    '''
    Samples items from the clusters, starting from a random item in the largest cluster, then a random item in the
    second largest cluster, and so on. When elements of all clusters are visited, it starts again from the largest
    cluster, until nb_items are selected (or all items, when nb_items is None).
    Bug fix: the original ran a fixed number of round-robin iterations and simply
    skipped empty clusters, so with unevenly-sized clusters it could return fewer
    than nb_items even though items were still available; now empty clusters are
    skipped without consuming an iteration.
    Note: the member lists in cluster_members_dict are consumed (popped) in place.
    :param cluster_members_dict:    (dict of lists) dict of the form {cluster label: list of members of the cluster}
    :param nb_items:    (int) number of items to be selected; if None all items are selected in the returned list
    :return: (list) a list of ordered items that are the samples from clusters
    '''
    nb_all_items = sum(len(v) for v in cluster_members_dict.values())
    if (nb_items is None) or (nb_items > nb_all_items):
        nb_items = nb_all_items

    # largest cluster first
    sorted_clusters = sorted(cluster_members_dict, key=lambda k: len(cluster_members_dict.get(k)), reverse=True)
    nb_clusters = len(sorted_clusters)

    selected_items = []
    ind = 0  # round-robin position over sorted_clusters
    while len(selected_items) < nb_items:
        current_cluster = sorted_clusters[ind % nb_clusters]
        ind += 1
        members = cluster_members_dict[current_cluster]
        if members:  # exhausted clusters are skipped; the loop keeps going until nb_items are collected
            next_item_ind = random.randrange(len(members))
            selected_items.append(members.pop(next_item_ind))
    return selected_items
def random_selection(similarity_matrix, nb_items=None):
    """
    Selects nb_items items uniformly at random from the given similarity_matrix.
    Bug fix: the original drew one item per iteration *with replacement*, so the
    returned list could contain duplicates (and, with nb_items=None, miss some
    items entirely); now the items are sampled without replacement. It also
    iterated the dataframe directly (i.e., its columns); the index is used here,
    consistently with the other selection functions in this module.
    :param similarity_matrix:   (pandas.DataFrame) an NxN dataframe; should be (a) symmetric and (b) values {i,j} to
                                represent the similarity between item of row i and column j
    :param nb_items:    (int) number of items to be selected; if None all items are selected in the returned list
    :return: (list) a list of distinct random items
    """
    nb_total_items = similarity_matrix.shape[0]
    if (nb_items is None) or (nb_items > nb_total_items):
        nb_items = nb_total_items
    return random.sample(list(similarity_matrix.index), nb_items)
def getAffinityMatrix(coordinates, k=7):
    """
    Build a locally-scaled affinity matrix from a coordinate matrix.

    How close two points are is measured by their squared euclidean distance,
    scaled per pair by the product of each point's distance to its k-th
    nearest neighbour (self-tuning spectral clustering).

    References:
        https://papers.nips.cc/paper/2619-self-tuning-spectral-clustering.pdf
    """
    # pairwise euclidean distances (dense NxN matrix)
    distance_matrix = squareform(pdist(coordinates))

    # per point: distance to its k-th nearest neighbour, as a column vector
    kth_neighbour_dist = np.sort(distance_matrix, axis=0)[k][np.newaxis].T

    # local scaling term sigma_i * sigma_j for every pair (i, j)
    local_scale = kth_neighbour_dist.dot(kth_neighbour_dist.T)

    # -d(i,j)^2 / (sigma_i * sigma_j), with nan (e.g. zero scale) mapped to 0
    scaled = -(distance_matrix ** 2) / local_scale
    scaled[np.isnan(scaled)] = 0.0

    affinity = np.exp(scaled)
    np.fill_diagonal(affinity, 0)
    return affinity
def eigenDecomposition(A, topK=5):
    """
    :param A: Affinity matrix
    :param topK: Top k
    :return A tuple containing:
    - the optimal number of clusters by eigengap heuristic
    - all eigen values
    - all eigen vectors

    This method performs the eigen decomposition on a given affinity matrix,
    following the steps recommended in the paper:
    1. Construct the normalized affinity matrix: L = D−1/2ADˆ −1/2.
    2. Find the eigenvalues and their associated eigen vectors
    3. Identify the maximum gap which corresponds to the number of clusters
    by eigengap heuristic

    Side effect: shows a scatter plot of the eigenvalues (blocks until closed).

    References:
    https://papers.nips.cc/paper/2619-self-tuning-spectral-clustering.pdf
    http://www.kyb.mpg.de/fileadmin/user_upload/files/publications/attachments/Luxburg07_tutorial_4488%5b0%5d.pdf
    """
    # normalized graph Laplacian of the affinity matrix
    L = csgraph.laplacian(A, normed=True)
    n_components = A.shape[0]  # NOTE(review): unused — kept for the commented-out eigsh call below

    # LM parameter : Eigenvalues with largest magnitude (eigs, eigsh), that is, largest eigenvalues in
    # the euclidean norm of complex numbers.
    # eigenvalues, eigenvectors = eigsh(L, k=n_components, which="LM", sigma=1.0, maxiter=5000)
    eigenvalues, eigenvectors = LA.eig(L)

    plt.title('Largest eigen values of input matrix')
    plt.scatter(np.arange(len(eigenvalues)), eigenvalues)
    plt.grid()
    plt.show()

    # Identify the optimal number of clusters as the index corresponding
    # to the larger gap between eigen values
    index_largest_gap = np.argsort(np.diff(eigenvalues))[::-1][:topK]
    nb_clusters = index_largest_gap + 1

    return nb_clusters, eigenvalues, eigenvectors
def get_optimal_number_of_clusters(similarity):
    '''
    A function that help us identify which is the optimal number of cluster for Kmeans,
    by plotting the K-means inertia (sum of squared distances) for k = 1..19 so the
    "elbow" can be read off the chart.
    Side effect: shows a matplotlib figure (blocks until closed); nothing is returned.
    :param similarity: The similarity matrix from graph embeddings
    '''
    distortions = []
    # fit K-means for every candidate k and record its inertia
    for i in range(1, 20):
        clustering = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=1e-04, random_state=0).fit(
            similarity)
        distortions.append(clustering.inertia_)
    plt.plot(range(1, 20), distortions, marker='o')
    plt.xlabel('Number of clusters (k)')
    plt.ylabel('Sum of squared distance')
    plt.title("Elbow Method for Optimal k")
    plt.show()
def get_plot_for_different_k_values(similarity, model_name):
    """
    This function plots points after applying a cluster method for k=3,4,5,6. Furthermore prints silhouette score for each k
    :param similarity: Contains our dataset (The similarity of RIPE monitors)
    :param model_name: 'Spectral' selects SpectralClustering; any other value selects KMeans
    :return: A list containing silhouette score
    """
    silhouette_scores = []
    f = plt.figure()
    f.add_subplot(2, 2, 1)  # NOTE(review): immediately recreated by the first loop iteration (i=3 -> subplot 1)
    for i in range(3, 7):
        if model_name == 'Spectral':
            sc = SpectralClustering(n_clusters=i, affinity='precomputed').fit(similarity)
        else:
            sc = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=1e-04, random_state=0).fit(similarity)
        silhouette_scores.append(silhouette_score(similarity, sc.labels_))
        # one subplot per k, colouring points by the assigned cluster label
        f.add_subplot(2, 2, i - 2)
        plt.scatter(similarity[:, 0], similarity[:, 1], s=5, c=sc.labels_, label="n_cluster-" + str(i))
    plt.legend()
    plt.show()
    return silhouette_scores
def plot_silhouette_score_for_various_k(similarity, model_name):
    """
    In this function we plot the silhouette score for various number of K (number of clusters, K = 2..20).
    Side effect: shows a matplotlib figure (blocks until closed); nothing is returned.
    :param similarity: Contains our dataset (The similarity of RIPE monitors)
    :param model_name: The clustering algorithm we use ('Spectral' for SpectralClustering, anything else for K-means)
    """
    sil = []
    for i in range(2, 21):
        if model_name == 'Spectral':
            sc = SpectralClustering(n_clusters=i, affinity='precomputed').fit(similarity)
        else:
            sc = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=1e-04, random_state=0).fit(similarity)
        sil.append(silhouette_score(similarity, sc.labels_))

    plt.plot(range(2, 21), sil[:], '--bo')
    plt.title('Silhouette score for different cluster sizes for ' + str(model_name))
    # Bug fix: the axis labels were swapped — the x axis carries the number of
    # clusters and the y axis carries the silhouette score.
    plt.xlabel('Number of clusters (K)')
    plt.ylabel('Silhouette Score')
    plt.show()
def clustering_based_selection(similarity_matrix, clustering_method, nb_clusters, nb_items=None, **kwargs):
    '''
    Applies a clustering algorithm to the similarity matrix to cluster items, and then selects samples from the classes.
    Side effects: shows several diagnostic matplotlib figures (cluster scatter plots,
    silhouette plots), each of which blocks until closed.
    :param similarity_matrix:   (pandas.DataFrame) an NxN dataframe; should be (a) symmetric and (b) values {i,j} to
                                represent the similarity between item of row i and column j
    :param clustering_method:   (str) 'SpectralClustering' or 'Kmeans'
    :param nb_clusters: (int) number of clusters
    :param nb_items:    (int) number of items to be selected; if None all items are selected in the returned list
    :param **kwargs:    (dict) optional kwargs for the clustering algorithms
    :raises ValueError: for any other clustering_method value
    :return: (list) a list of ordered items that are the samples from clusters
    '''
    # dense numpy view of the similarities, with nan treated as zero similarity
    sim = similarity_matrix.to_numpy()
    sim = np.nan_to_num(sim, nan=0)

    if clustering_method == 'SpectralClustering':
        # NOTE(review): both results below are discarded — `clustering` is
        # overwritten two lines later and `k` is only used by the commented-out
        # print; they appear to be kept for the diagnostics/plots they produce.
        clustering = getAffinityMatrix(sim, k=7)
        k, eigenvalues, eigenvectors = eigenDecomposition(sim)
        clustering = SpectralClustering(n_clusters=nb_clusters, affinity='precomputed', **kwargs).fit(sim)
        labels = clustering.labels_
        plt.scatter(sim[:, 0], sim[:, 1], c=labels)
        plt.title('Number of Clusters: ' + str(nb_clusters))
        plt.show()
        model = 'Spectral'
        silhouette_scores = get_plot_for_different_k_values(sim, model)
        # print(silhouette_scores)
        # print(f'Optimal number of clusters {k}')
        plot_silhouette_score_for_various_k(sim, model)
    elif clustering_method == 'Kmeans':
        get_optimal_number_of_clusters(sim)  # elbow-method diagnostic plot
        clustering = KMeans(n_clusters=nb_clusters, **kwargs).fit(sim)
        labels = clustering.labels_
        plt.scatter(sim[:, 0], sim[:, 1], c=labels)
        plt.title('Number of Clusters: ' + str(nb_clusters))
        plt.show()
        model = 'Kmeans'
        silhouette_scores = get_plot_for_different_k_values(sim, model)
        # print(silhouette_scores)
        plot_silhouette_score_for_various_k(sim, model)
    else:
        raise ValueError

    # group the original dataframe index by the assigned cluster label
    cluster_members_dict = defaultdict(list)
    for i, label in enumerate(clustering.labels_):
        cluster_members_dict[label].append(similarity_matrix.index[i])

    return sample_from_clusters(cluster_members_dict, nb_items=nb_items)
def select_from_similarity_matrix(similarity_matrix, method, **kwargs):
    """Dispatch a monitor-selection strategy by name.

    :param similarity_matrix: (pandas.DataFrame) NxN symmetric similarity matrix
    :param method: one of 'Greedy min', 'Greedy max', 'Clustering', 'Random'
    :param kwargs: forwarded to the chosen selection function
    :raises ValueError: for any other method name
    :return: (list) the ordered items chosen by the selected strategy
    """
    if method == 'Greedy min':
        return greedy_most_similar_elimination(similarity_matrix, **kwargs)
    if method == 'Greedy max':
        return greedy_least_similar_selection(similarity_matrix, **kwargs)
    if method == 'Clustering':
        return clustering_based_selection(similarity_matrix, **kwargs)
    if method == 'Random':
        return random_selection(similarity_matrix, **kwargs)
    raise ValueError
def return_the_selected_monitors_from_methods():
    """Run every selection strategy on the pre-computed RIPE RIS similarity matrix.

    Loads the BGP2VEC-embedding similarity matrix from the CSV in the working
    directory, then applies the Random, Greedy min, Greedy max and the two
    clustering-based (K-means and SpectralClustering, 10 clusters each)
    selection strategies.

    Returns a 5-tuple of ordered monitor lists:
    (random, greedy_min, greedy_max, kmeans, spectral).
    """
    similarity_matrix = pd.read_csv('ALL_RIPE_RIS_withASns_similarity_embeddings_BGP2VEC_20210107.csv',
                                    header=0, index_col=0)
    # column labels are ASNs; align their dtype with the (float) index
    similarity_matrix.columns = similarity_matrix.columns.astype(float)

    selected_items_greedy_random = select_from_similarity_matrix(similarity_matrix, 'Random')
    selected_items_greedy_min = select_from_similarity_matrix(similarity_matrix, 'Greedy min')
    selected_items_greedy_max = select_from_similarity_matrix(similarity_matrix, 'Greedy max')
    kwargs = {'clustering_method': 'Kmeans', 'nb_clusters': 10}
    selected_items_Kmeans = select_from_similarity_matrix(similarity_matrix, 'Clustering', **kwargs)
    kwargs = {'clustering_method': 'SpectralClustering', 'nb_clusters': 10}
    selected_items_Spectral = select_from_similarity_matrix(similarity_matrix, 'Clustering', **kwargs)

    return selected_items_greedy_random, selected_items_greedy_min, selected_items_greedy_max, selected_items_Kmeans, selected_items_Spectral
# method_param_dict = {
# 'Greedy min': {'method': 'Greedy min', 'sim_matrix': similarity_matrix, 'args': {}},
# 'Greedy max': {'method': 'Greedy max', 'sim_matrix': similarity_matrix, 'args': {}},
# 'Clustering kmeans k10 full': {'method': 'Clustering', 'sim_matrix': similarity_matrix, 'args': {'clustering_method': 'Kmeans', 'nb_clusters': 7}},
# 'Clustering spectral k10': {'method': 'Clustering', 'sim_matrix': similarity_matrix, 'args': {'clustering_method': 'SpectralClustering', 'nb_clusters': 7}}}
#
# for m, params in method_param_dict.items():
# selected_items = select_from_similarity_matrix(params['sim_matrix'], params['method'], **params['args'])
# print('\t{} [DONE]'.format(m))
# with open('dataset_selected_monitors_ripe_ris_pathlens_100k_{}.json'.format('_'.join(m.lower().translate('()').split(' '))), 'w') as f:
# json.dump(selected_items, f)
#
# asns_per_method = dict()
# for m, params in method_param_dict.items():
# with open('dataset_selected_monitors_ripe_ris_pathlens_100k_{}.json'.format('_'.join(m.lower().split(' '))), 'r') as f:
# selected_items = json.load(f)
# asns_per_method[m] = selected_items
# print('\t{} [DONE]'.format(m))
# with open(PROXIMITY_FNAME, 'w') as f:
# json.dump(asns_per_method, f)
| 49.29703 | 165 | 0.693814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10,475 | 0.525827 |
c5fda76eda5749a6451e2494fedda422ecadeadf | 2,184 | py | Python | kingbird/tests/unit/objects/test_base.py | starlingx-staging/stx-kingbird | 9869ad4640e76384fa14f031a59134cd439929a8 | [
"Apache-2.0"
] | null | null | null | kingbird/tests/unit/objects/test_base.py | starlingx-staging/stx-kingbird | 9869ad4640e76384fa14f031a59134cd439929a8 | [
"Apache-2.0"
] | null | null | null | kingbird/tests/unit/objects/test_base.py | starlingx-staging/stx-kingbird | 9869ad4640e76384fa14f031a59134cd439929a8 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Ericsson AB.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from kingbird.objects import base as obj_base
from kingbird.tests import base
from oslo_versionedobjects import fields as obj_fields
class TestBaseObject(base.KingbirdTestCase):
    """Unit tests for the KingbirdObject versioned-object base class."""

    def test_base_class(self):
        kb_obj = obj_base.KingbirdObject()
        # class-level attributes must be visible, unchanged, on instances
        self.assertEqual(obj_base.KingbirdObject.OBJ_PROJECT_NAMESPACE,
                         kb_obj.OBJ_PROJECT_NAMESPACE)
        self.assertEqual(obj_base.KingbirdObject.VERSION,
                         kb_obj.VERSION)

    @mock.patch.object(obj_base.KingbirdObject, "obj_reset_changes")
    def test_from_db_object(self, mock_reset):
        # minimal concrete subclass with two string fields
        class TestKingbirdObject(obj_base.KingbirdObject,
                                 obj_base.VersionedObjectDictCompat):
            fields = {name: obj_fields.StringField()
                      for name in ("key1", "key2")}

        target = TestKingbirdObject()
        ctx = mock.Mock()
        row = {
            "key1": "value1",
            "key2": "value2",
        }
        result = obj_base.KingbirdObject._from_db_object(ctx, target, row)
        self.assertIsNotNone(result)
        # every db column must have been copied onto the object
        for name in ("key1", "key2"):
            self.assertEqual(row[name], target[name])
        self.assertEqual(target._context, ctx)
        mock_reset.assert_called_once_with()

    def test_from_db_object_none(self):
        kb_obj = obj_base.KingbirdObject()
        result = obj_base.KingbirdObject._from_db_object(mock.Mock(), kb_obj, None)
        self.assertIsNone(result)
| 35.803279 | 75 | 0.668498 | 1,426 | 0.65293 | 0 | 0 | 844 | 0.386447 | 0 | 0 | 680 | 0.311355 |
c5fe482fddeda64ee08226bdb2b25edc133236ff | 380 | py | Python | src/tests/tests.py | veleritas/mychem.info | bb22357d4cbbc3c4865da224bf998f2cbc59f8f2 | [
"Apache-2.0"
] | 1 | 2021-05-09T04:51:28.000Z | 2021-05-09T04:51:28.000Z | src/tests/tests.py | veleritas/mychem.info | bb22357d4cbbc3c4865da224bf998f2cbc59f8f2 | [
"Apache-2.0"
] | null | null | null | src/tests/tests.py | veleritas/mychem.info | bb22357d4cbbc3c4865da224bf998f2cbc59f8f2 | [
"Apache-2.0"
] | null | null | null | import sys
import os
# Add this directory to python path (contains nosetest_config)
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from biothings.tests import BiothingTest
from biothings.tests.settings import NosetestSettings
ns = NosetestSettings()

# NOTE: this file is a code-generation template, not runnable Python — the
# `{% nosetest_settings_class %}` placeholder is substituted with the concrete
# settings class name before the generated file is executed.
class {% nosetest_settings_class %}(BiothingTest):
    __test__ = True
    # Add extra nosetests here
    pass
| 22.352941 | 62 | 0.776316 | 111 | 0.292105 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.231579 |
c5fe65c9539705123af8748c4e0aa1720b1d7fad | 824 | py | Python | hardware/camera/drivers.py | smartenv/smartcan | dcd214acd26fdc0af940460d42cb0f4f75a40829 | [
"Apache-2.0"
] | null | null | null | hardware/camera/drivers.py | smartenv/smartcan | dcd214acd26fdc0af940460d42cb0f4f75a40829 | [
"Apache-2.0"
] | 1 | 2019-09-02T20:21:27.000Z | 2019-09-02T22:58:38.000Z | hardware/camera/drivers.py | smartenv/smartcan | dcd214acd26fdc0af940460d42cb0f4f75a40829 | [
"Apache-2.0"
] | null | null | null | import tempfile
from abc import ABC, abstractmethod
from time import sleep, time
from hardware.camera import Photo, Resolution
class CameraDriver(ABC):
    """Abstract interface for camera hardware back-ends."""

    @abstractmethod
    def capture(self) -> Photo:
        """Take a single photo and return it as a Photo object."""
        pass
class PiCameraDriver(CameraDriver):
    """CameraDriver backed by the Raspberry Pi camera (picamera library)."""

    # NOTE(review): the default Resolution(1024, 768) is evaluated once at
    # class-definition time and shared by all calls — confirm Resolution is
    # immutable, otherwise this is the mutable-default-argument pitfall.
    def __init__(self, resolution=Resolution(1024, 768), iso=300):
        # imported lazily so the module loads on machines without picamera
        from picamera import PiCamera
        self.resolution = resolution
        self.iso = iso
        self._camera = PiCamera(resolution=resolution)
        self._camera.iso = iso
        # give the sensor time to settle before the first capture
        sleep(2)
        print('Camera ready!')

    def capture(self) -> Photo:
        # NOTE(review): the NamedTemporaryFile object is not kept anywhere, so
        # the underlying file is deleted as soon as `origin` is garbage
        # collected — the returned Photo's path may dangle; also mode="w+t"
        # opens a *text* file for a JPEG. Confirm how Photo consumes the path.
        origin = tempfile.NamedTemporaryFile(mode="w+t", suffix='.jpg')
        self._camera.capture(origin.name)
        photo = Photo(origin.name, resolution=self.resolution, iso=self.iso)
        return photo.resize(ratio=1)
| 23.542857 | 76 | 0.668689 | 690 | 0.837379 | 0 | 0 | 60 | 0.072816 | 0 | 0 | 26 | 0.031553 |
c5ffc77661b72f4eb6b28eeafca66a6e20e1570d | 6,897 | py | Python | src/sms_verifier/settings.py | ArieLevs/sms-verifier-backend | b687f0426e7fc871a7ba50d95499187e72089dce | [
"MIT"
] | null | null | null | src/sms_verifier/settings.py | ArieLevs/sms-verifier-backend | b687f0426e7fc871a7ba50d95499187e72089dce | [
"MIT"
] | 10 | 2020-02-12T02:51:31.000Z | 2022-02-10T13:33:43.000Z | src/sms_verifier/settings.py | ArieLevs/sms-verifier-backend | b687f0426e7fc871a7ba50d95499187e72089dce | [
"MIT"
] | 1 | 2022-02-22T18:56:22.000Z | 2022-02-22T18:56:22.000Z | """
Django settings for sms_verifier project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import socket
ENVIRONMENT = os.environ.get('environment', 'dev')
DOMAIN_NAME = os.environ.get('domain_name', 'http://127.0.0.1:8000')
try:
HOSTNAME = socket.gethostname()
except ImportError as e:
HOSTNAME = 'localhost'
PROJECT_NAME = 'SMS Verifier'
VERSION = os.environ.get('version', 'null')
EXTRA_ALLOWED_HOSTS = os.environ.get('allowed_hosts', '').split(',')
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('django_secret_key', 'djangoSecretKey')
# SECURITY WARNING: don't run with debug turned on in production!
if ENVIRONMENT == 'dev' or ENVIRONMENT == 'ci':
DEBUG = True
EXTRA_ALLOWED_HOSTS.append('*')
else:
DEBUG = False
INTERNAL_IPS = [
'127.0.0.1',
]
ALLOWED_HOSTS = [
'alpha.sms-verifier.nalkins.cloud',
'sms-verifier.nalkins.cloud',
'127.0.0.1',
'10.0.2.2', # Android AVD IP for localhost
] + EXTRA_ALLOWED_HOSTS
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'oauth2_provider',
'django_user_email_extension',
'sms_verifier_app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sms_verifier.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sms_verifier.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
######################
# DATABASE SETTINGS
######################
if ENVIRONMENT == 'dev':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ.get('db_name', 'sms_verifier'),
'USER': os.environ.get('db_user', 'sms_verifier'),
'PASSWORD': os.environ.get('db_pass', 'django'),
'HOST': os.environ.get('db_host', 'localhost'),
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = os.environ.get('static_url', "/static/")
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
######################
# Custom User Model
######################
AUTH_USER_MODEL = 'django_user_email_extension.User'
##################
# REST Framework
##################
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.contrib.rest_framework.OAuth2Authentication',
# 'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
}
######################
# Social Auth
######################
SOCIAL_AUTH_USER_MODEL = 'django_user_email_extension.User'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = 'index'
SOCIAL_AUTH_LOGOUT_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/'
SOCIAL_AUTH_LOGIN_URL = 'index'
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.social_auth.associate_by_email',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
)
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get('google_oauth_client_id', 'None')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get('google_oauth_client_secret', 'None')
SOCIAL_AUTH_GITHUB_KEY = os.environ.get('github_oauth_client_id', 'None')
SOCIAL_AUTH_GITHUB_SECRET = os.environ.get('github_oauth_client_secret', 'None')
SOCIAL_AUTH_GITHUB_SCOPE = [
'read:user',
'user:email',
'read:org',
]
AUTHENTICATION_BACKENDS = (
'social_core.backends.github.GithubOAuth2',
'social_core.backends.google.GoogleOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
######################
# LOGGING SETTINGS
######################
LOG_LEVEL = os.environ.get('LOG_LEVEL', 'INFO')
HANDLERS = ['console']
LOGGING = {
'version': 1,
'handlers': {
'console': {
'level': LOG_LEVEL,
'class': 'logging.StreamHandler',
},
},
'loggers': {
PROJECT_NAME: {
'handlers': HANDLERS,
'level': LOG_LEVEL,
},
'django.request': {
'handlers': HANDLERS,
'level': 'WARNING',
'propagate': False,
},
},
}
| 27.810484 | 91 | 0.660577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,239 | 0.614615 |
6801dc75931ad303482faa362dd4a4100e4ec4dd | 5,149 | py | Python | test/test_termCombinationLib.py | ozanozisik/orsum | 9d6b36c83d6cd7297fe68214fe89faa9281a7792 | [
"MIT"
] | 1 | 2021-04-02T15:07:35.000Z | 2021-04-02T15:07:35.000Z | test/test_termCombinationLib.py | ozanozisik/orsum | 9d6b36c83d6cd7297fe68214fe89faa9281a7792 | [
"MIT"
] | null | null | null | test/test_termCombinationLib.py | ozanozisik/orsum | 9d6b36c83d6cd7297fe68214fe89faa9281a7792 | [
"MIT"
] | null | null | null | from termCombinationLib import initializeTermSummary, applyRule, recurringTermsUnified, supertermRepresentsLessSignificantSubterm, subtermRepresentsLessSignificantSimilarSuperterm, subtermRepresentsSupertermWithLessSignificanceAndLessRepresentativePower, commonSupertermInListRepresentsSubtermsWithLessRepresentativePower, supertermRepresentsSubtermLargerThanMaxRep
def test_initializeTermSummary_singleInput():
tbsGsIDsList=[['term1', 'term2', 'term3']]
termSummary=initializeTermSummary(tbsGsIDsList)
assert termSummary==[
['term1', ['term1'], 1],
['term2', ['term2'], 2],
['term3', ['term3'], 3]
]
def test_initializeTermSummary_multiInput():
tbsGsIDsList=[['term11', 'termCommon', 'term13', 'term14'], ['term21', 'term22', 'term23', 'termCommon']]
termSummary=initializeTermSummary(tbsGsIDsList)
assert termSummary==[
['term11', ['term11'], 1],
['term21', ['term21'], 1],
['termCommon', ['termCommon'], 2],
['term22', ['term22'], 2],
['term13', ['term13'], 3],
['term23', ['term23'], 3],
['term14', ['term14'], 4],
['termCommon', ['termCommon'], 4]
]
def test_rule_recurringTermsUnified():
tbsGsIDsList=[['term11', 'termCommon', 'term13', 'term14'], ['term21', 'term22', 'term23', 'termCommon']]
termSummary=initializeTermSummary(tbsGsIDsList)
geneSetsDict=dict()#Not important, not used in this rule
termSummary=applyRule(termSummary, geneSetsDict, 2000, recurringTermsUnified)
assert termSummary==[
['term11', ['term11'], 1],
['term21', ['term21'], 1],
['termCommon', ['termCommon'], 2],
['term22', ['term22'], 2],
['term13', ['term13'], 3],
['term23', ['term23'], 3],
['term14', ['term14'], 4],
]
def test_rule_supertermRepresentsLessSignificantSubterm():
tbsGsIDsList=[['term1', 'term2', 'term3', 'term4', 'term5', 'term6']]
termSummary=initializeTermSummary(tbsGsIDsList)
geneSetsDict={
'term1':{'A','B','C'},
'term2':{'A','B','C','D','E','F'},
'term3':{'A','B','C','D'},
'term4':{'A','B','G','H'},
'term5':{'A','B'},
'term6':{'G','H'}
}
termSummary=applyRule(termSummary, geneSetsDict, 2000, supertermRepresentsLessSignificantSubterm)
assert termSummary==[
['term1', ['term1', 'term5'], 1],
['term2', ['term2', 'term3'], 2],
['term4', ['term4', 'term6'], 4],
]
def test_rule_subtermRepresentsLessSignificantSimilarSuperterm():
tbsGsIDsList=[['term1', 'term2', 'term3', 'term4', 'term5', 'term6']]
termSummary=initializeTermSummary(tbsGsIDsList)
geneSetsDict={
'term1':{'A','B','C'},
'term2':{'A','B','C','D','E','F'},
'term3':{'A','B','C','D'},
'term4':{'A','B','C','D','E','F','G'},
'term5':{'A','B'},
'term6':{'G','H'}
}
termSummary=applyRule(termSummary, geneSetsDict, 2000, subtermRepresentsLessSignificantSimilarSuperterm)
assert termSummary==[
['term1', ['term1', 'term3'], 1],
['term2', ['term2', 'term4'], 2],
['term5', ['term5'], 5],
['term6', ['term6'], 6]
]
def test_rule_subtermRepresentsSupertermWithLessSignificanceAndLessRepresentativePower():
tbsGsIDsList=[['term1', 'term2', 'term3', 'term4', 'term5']]
termSummary=initializeTermSummary(tbsGsIDsList)
geneSetsDict={
'term1':{'A','B','C'},
'term2':{'G','H'},
'term3':{'A','B','C','D'},
'term4':{'A','B'},
'term5':{'A','B','C','D','E','F'}
}
termSummary=applyRule(termSummary, geneSetsDict, 2000, subtermRepresentsLessSignificantSimilarSuperterm)
assert termSummary==[
['term1', ['term1', 'term3'], 1],
['term2', ['term2'], 2],
['term4', ['term4'], 4],
['term5', ['term5'], 5]
]
termSummary=applyRule(termSummary, geneSetsDict, 2000, subtermRepresentsSupertermWithLessSignificanceAndLessRepresentativePower)
assert termSummary==[
['term1', ['term1', 'term3', 'term5'], 1],
['term2', ['term2'], 2],
['term4', ['term4'], 4]
]
def test_rule_commonSupertermInListRepresentsSubtermsWithLessRepresentativePower():
tbsGsIDsList=[['term1', 'term2', 'term3', 'term4']]
termSummary=initializeTermSummary(tbsGsIDsList)
geneSetsDict={
'term1':{'A','B','C'},
'term2':{'G','H'},
'term3':{'A','B','C','D','G','H'},
'term4':{'A','B','D'}
}
termSummary=applyRule(termSummary, geneSetsDict, 2000, supertermRepresentsLessSignificantSubterm)
assert termSummary==[
['term1', ['term1'], 1],
['term2', ['term2'], 2],
['term3', ['term3','term4'], 3],
]
termSummary=applyRule(termSummary, geneSetsDict, 2000, commonSupertermInListRepresentsSubtermsWithLessRepresentativePower)
assert termSummary==[
['term3', ['term3','term4', 'term1', 'term2'], 1]
]
def test_rule_supertermRepresentsSubtermLargerThanMaxRep():
tbsGsIDsList=[['term1', 'term2', 'term3', 'term4', 'term5', 'term6']]
termSummary=initializeTermSummary(tbsGsIDsList)
geneSetsDict={
'term1':{'A','B','C'},
'term2':{'A','B','C','D','E','F','G','H','I','J'},
'term3':{'A','B','C','D'},
'term4':{'A','B','C','D','G','H','I','J'},
'term5':{'A','B'},
'term6':{'G','H'}
}
termSummary=applyRule(termSummary, geneSetsDict, 6, supertermRepresentsSubtermLargerThanMaxRep)
assert termSummary==[
['term1', ['term1'], 1],
['term2', ['term2', 'term4'], 2],
['term3', ['term3'], 3],
['term5', ['term5'], 5],
['term6', ['term6'], 6],
]
| 35.510345 | 365 | 0.649446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,614 | 0.313459 |
6803550947b81cdf9de81ccf68c02ddd50ac556a | 48 | py | Python | bindings/pydeck/pydeck/exceptions/__init__.py | marsupialmarcos/deck.gl | c9867c1db87e492253865353f68c985019c7c613 | [
"MIT"
] | null | null | null | bindings/pydeck/pydeck/exceptions/__init__.py | marsupialmarcos/deck.gl | c9867c1db87e492253865353f68c985019c7c613 | [
"MIT"
] | null | null | null | bindings/pydeck/pydeck/exceptions/__init__.py | marsupialmarcos/deck.gl | c9867c1db87e492253865353f68c985019c7c613 | [
"MIT"
] | null | null | null | from .exceptions import PydeckException # noqa
| 24 | 47 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.125 |
680818c94a5d8918261d5f2d00ab826a279d384e | 1,096 | py | Python | notebooks/tpot_exported_pipeline.py | rsouza/FGV_Intro_DS | 5c4da0a9476654516199708cb729231a21d99686 | [
"MIT"
] | 38 | 2018-02-28T17:33:00.000Z | 2022-01-21T22:30:11.000Z | Notebooks/tpot_exported_pipeline.py | rsouza/DataScience_Course | dd8018d616cf24a7b23c7b04ea1e2120261da3c0 | [
"MIT"
] | 17 | 2018-08-16T12:11:57.000Z | 2019-08-04T17:55:27.000Z | Notebooks/tpot_exported_pipeline.py | rsouza/DataScience_Course | dd8018d616cf24a7b23c7b04ea1e2120261da3c0 | [
"MIT"
] | 62 | 2018-02-20T12:58:13.000Z | 2022-03-03T11:10:16.000Z | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from tpot.builtins import StackingEstimator
from xgboost import XGBClassifier
from sklearn.preprocessing import FunctionTransformer
from copy import copy
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1)
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['target'], random_state=None)
# Average CV score on the training set was: 0.8477898176814586
exported_pipeline = make_pipeline(
make_union(
FunctionTransformer(copy),
FunctionTransformer(copy)
),
XGBClassifier(learning_rate=0.5, max_depth=9, min_child_weight=9, n_estimators=100, n_jobs=1, subsample=0.8, verbosity=0)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| 40.592593 | 125 | 0.794708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.176095 |
680939a134bf2b1210290121f792004a5567a200 | 7,296 | py | Python | own/NER_torch/ner_dataset.py | felixdittrich92/DeepLearning-pyTorch | 5939012e960b79ddc183921dfeb4503126878068 | [
"MIT"
] | 1 | 2022-03-02T07:16:59.000Z | 2022-03-02T07:16:59.000Z | own/NER_torch/ner_dataset.py | felixdittrich92/DeepLearning-pyTorch | 5939012e960b79ddc183921dfeb4503126878068 | [
"MIT"
] | null | null | null | own/NER_torch/ner_dataset.py | felixdittrich92/DeepLearning-pyTorch | 5939012e960b79ddc183921dfeb4503126878068 | [
"MIT"
] | null | null | null | import os
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset
from transformers import BertTokenizerFast
os.environ["TOKENIZERS_PARALLELISM"] = "true"
FILE_PATH = 'data/own.txt'
def read_conll_from_txt_to_df(file_path):
df = pd.DataFrame(columns=['SENTENCE', 'TOKEN', 'LABEL'])
sent = 1
with open(file_path, 'r') as f:
for i, line in enumerate(f):
line = line.replace('\n', '')
splitted = line.split()
if not splitted:
sent += 1
else:
df.loc[i] = [sent, splitted[0], splitted[1]]
return df
data = read_conll_from_txt_to_df(FILE_PATH)
class SentenceGetter(object):
def __init__(self, data):
self.n_sent = 1
self.data = data
self.empty = False
agg_func = lambda s: [(token, tag) for token, tag in zip(s["TOKEN"].values.tolist(),
s["LABEL"].values.tolist())]
self.grouped = self.data.groupby("SENTENCE").apply(agg_func)
self.sentences = [s for s in self.grouped]
def get_next(self):
try:
s = self.grouped["SENTENCE: {}".format(self.n_sent)]
self.n_sent += 1
return s
except:
return None
getter = SentenceGetter(data)
tags_vals = list(set(data["LABEL"].values))
tag2index = {t: i for i, t in enumerate(tags_vals)}
index2tag = {i: t for i, t in enumerate(tags_vals)}
sentences = [' '.join([s[0] for s in sent]) for sent in getter.sentences]
labels = [[s[1] for s in sent] for sent in getter.sentences]
#labels = [[tag2idx.get(l) for l in lab] for lab in labels]
##### only overview #####
tags = ["[PAD]"]
tags.extend(list(set(data["LABEL"].values)))
tag2idx = {t: i for i, t in enumerate(tags)}
print('Length of Labels : ' + str(len(tags)))
words = ["[PAD]", "[UNK]"]
words.extend(list(set(data["TOKEN"].values)))
word2idx = {t: i for i, t in enumerate(words)}
print('Length of unique words : ' + str(len(words)))
# check dataset
def dataset_checker(sent_list, label_list):
sent_check = list()
for el in sent_list:
sent_check.append(len(el.split()))
label_check = list()
for el in label_list:
label_check.append(len(el))
for index, (first, second) in enumerate(zip(sent_check, label_check)):
if first != second:
print(index, second)
dataset_checker(sentences, labels)
##### only overview #####
unique_tags = list(set(tag for text in labels for tag in text))
train_sent, test_sent, train_label, test_label = train_test_split(sentences, labels, test_size=0.05)
train_sent, val_sent, train_label, val_label = train_test_split(train_sent, train_label, test_size=0.15)
print('FULL DATASET SENT: ' + str(len(sentences)))
print('FULL DATASET LABELS: ' + str(len(labels)))
print('Train sent size : ' + str(len(train_sent)))
print('Train label size : ' + str(len(train_label)))
print('Test sent size : ' + str(len(test_sent)))
print('Test label size : ' + str(len(test_label)))
print('Val sent size : ' + str(len(val_sent)))
print('Val label size : ' + str(len(val_label)))
print('Check Training dataset')
dataset_checker(train_sent, train_label)
print('Check Test dataset')
dataset_checker(test_sent, test_label)
print('Check Validation dataset')
dataset_checker(val_sent, val_label)
class ConllDataset(Dataset):
def __init__(self, tokenizer, sentences, labels, max_len):
self.len = len(sentences)
self.sentences = sentences
self.labels = labels
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return self.len
def __getitem__(self, index):
sentence = self.sentences[index].strip().split()
labels = self.labels[index]
inputs = self.tokenizer.encode_plus(
sentence,
None,
is_split_into_words=True,
add_special_tokens=True,
max_length=self.max_len,
truncation=True,
padding='max_length',
return_offsets_mapping=True,
return_token_type_ids=False,
return_attention_mask=True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
labels = [tag2index[label] for label in labels]
# code based on https://huggingface.co/transformers/custom_datasets.html#tok-ner
# create an empty array of -100 of length max_length
encoded_labels = np.ones(len(inputs["offset_mapping"]), dtype=int) * -100
# set only labels whose first offset position is 0 and the second is not 0
i = 0
for idx, mapping in enumerate(inputs["offset_mapping"]):
if mapping[0] == 0 and mapping[1] != 0:
# overwrite label
encoded_labels[idx] = labels[i]
i += 1
inputs.pop('offset_mapping')
return {
'ids': torch.tensor(ids).flatten(),
'mask': torch.tensor(mask).flatten(),
'tags': torch.tensor(encoded_labels)
}
class NERConllDataset(pl.LightningDataModule):
def __init__(self, tokenizer, train_sent, train_label, val_sent, val_label, test_sent, test_label, max_len, batch_size):
super().__init__()
self.tokenizer = tokenizer
self.train_sent = train_sent
self.train_label = train_label
self.val_sent = val_sent
self.val_label = val_label
self.test_sent = test_sent
self.test_label = test_label
self.max_len = max_len
self.batch_size = batch_size
def setup(self, stage=None):
self.train_dataset = ConllDataset(self.tokenizer, self.train_sent, self.train_label, self.max_len)
self.val_dataset = ConllDataset(self.tokenizer, self.val_sent, self.val_label, self.max_len)
self.test_dataset = ConllDataset(self.tokenizer, self.test_sent, self.test_label, self.max_len)
def eval_print(self, pos: int):
for token, label in zip(self.tokenizer.convert_ids_to_tokens(self.train_dataset[pos]["ids"].numpy()), self.train_dataset[pos]["tags"].numpy()):
print('{0:10} {1}'.format(token, label))
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=False, num_workers=8)
def val_dataloader(self):
return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False, num_workers=8)
def test_dataloader(self):
return DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=False, num_workers=8)
MAX_LEN = 256
BATCH_SIZE = 8
tokenizer = BertTokenizerFast.from_pretrained('bert-base-german-cased', do_lower_case=False)
# save tokenizer
tokenizer.save_pretrained("model/tokenizer/")
data_module = NERConllDataset(tokenizer, train_sent, train_label, val_sent, val_label, test_sent, test_label, max_len=MAX_LEN, batch_size=BATCH_SIZE)
data_module.setup()
data_module.eval_print(pos=5)
train = data_module.train_dataloader()
val = data_module.val_dataloader()
test = data_module.test_dataloader()
| 34.742857 | 151 | 0.649397 | 3,934 | 0.5392 | 0 | 0 | 0 | 0 | 0 | 0 | 972 | 0.133224 |
6809a2acfca0df46c95da57d06eac41e2dd27631 | 124 | py | Python | wikilabels/tests/test_stats_routes.py | notconfusing/wikilabels | 5956e20a5d46286bfb0b6327a2b5a12fa406118b | [
"MIT"
] | null | null | null | wikilabels/tests/test_stats_routes.py | notconfusing/wikilabels | 5956e20a5d46286bfb0b6327a2b5a12fa406118b | [
"MIT"
] | null | null | null | wikilabels/tests/test_stats_routes.py | notconfusing/wikilabels | 5956e20a5d46286bfb0b6327a2b5a12fa406118b | [
"MIT"
] | null | null | null | from .routes_test_fixture import app # noqa
def test_stats(client):
assert client.get("/stats/")._status_code == 200
| 20.666667 | 52 | 0.725806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.120968 |
680b099433a019c0405e29fa05a069e43cce35d3 | 2,017 | py | Python | show_history.py | FairyDevicesRD/statistical-quality-estimation | 1ce238b118be8ffb4abb902fa5ad1a7525a43b3a | [
"MIT"
] | null | null | null | show_history.py | FairyDevicesRD/statistical-quality-estimation | 1ce238b118be8ffb4abb902fa5ad1a7525a43b3a | [
"MIT"
] | null | null | null | show_history.py | FairyDevicesRD/statistical-quality-estimation | 1ce238b118be8ffb4abb902fa5ad1a7525a43b3a | [
"MIT"
] | null | null | null | import argparse
import logging
import pathlib
import re
import warnings
import numpy as np
import dirichlet
from sklearn.linear_model import LogisticRegression
from optimize import load_config, load_data, get_loglikelihood, get_mse
logger = logging.getLogger()
def main(data, src_dir):
files = []
for src_path in src_dir.iterdir():
m = re.search(r"^(\d+?)\.npz$", str(src_path.name))
if not m:
continue
idx = int(m.group(1))
files.append((idx, src_path))
files.sort()
q_prev = None
for idx, src_path in files:
logger.info("load {}".format(src_path))
d = np.load(str(src_path), allow_pickle=True)
q = d["q"].flat[0]
alpha = d["alpha"].flat[0]
beta = d["beta"].flat[0]
if q_prev:
mse = get_mse(q_prev, q)
else:
mse = float("nan")
q_prev = q
ll = get_loglikelihood(q, alpha, beta, data, config)
logger.info("{}-th iteration, mse={}, loglikelihood={}"
.format(idx + 1, mse, ll))
def parse_args(argv=None):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--config-path", dest="config_path",
default="config.toml",
help="configuration file in the TOML format")
parser.add_argument(
"-v", "--verbose", help="verbose mode",
action="store_true", default=False)
args = parser.parse_args(argv)
logger.info("argments: {}".format(args))
return args
if __name__ == "__main__":
warnings.filterwarnings(
"ignore", category=RuntimeWarning,
message="divide by zero encountered in log",
module="sklearn.linear_model")
logger.info("start")
args = parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
config_path = pathlib.Path(args.config_path)
config = load_config(config_path)
data = load_data(config)
main(data, config["data"]["exp_dir"])
logger.info("end")
| 24.901235 | 71 | 0.61527 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 326 | 0.161626 |
680c5f63df1dfe08789bfa98126f9d196d2c9212 | 620 | py | Python | src/backend/web/handlers/apidocs.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
] | null | null | null | src/backend/web/handlers/apidocs.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
] | null | null | null | src/backend/web/handlers/apidocs.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
] | null | null | null | from backend.common.decorators import cached_public
from backend.web.profiled_render import render_template
@cached_public(timeout=int(60 * 60 * 24 * 7))
def apidocs_trusted_v1() -> str:
template_values = {
"title": "Trusted APIv1",
"swagger_url": "/swagger/api_trusted_v1.json",
}
return render_template("apidocs_swagger.html", template_values)
@cached_public(timeout=int(60 * 60 * 24 * 7))
def apidocs_v3() -> str:
template_values = {
"title": "APIv3",
"swagger_url": "/swagger/api_v3.json",
}
return render_template("apidocs_swagger.html", template_values)
| 29.52381 | 67 | 0.68871 | 0 | 0 | 0 | 0 | 506 | 0.816129 | 0 | 0 | 158 | 0.254839 |
680d5f5213572fa53827c06c480230ee309727aa | 712 | py | Python | tests/common/devices/vmhost.py | emilmih/sonic-mgmt | e4e42ec8028bf51b39587e2b53e526d505fe7938 | [
"Apache-2.0"
] | 132 | 2016-10-19T12:34:44.000Z | 2022-03-16T09:00:39.000Z | tests/common/devices/vmhost.py | emilmih/sonic-mgmt | e4e42ec8028bf51b39587e2b53e526d505fe7938 | [
"Apache-2.0"
] | 3,152 | 2016-09-21T23:05:58.000Z | 2022-03-31T23:29:08.000Z | tests/common/devices/vmhost.py | emilmih/sonic-mgmt | e4e42ec8028bf51b39587e2b53e526d505fe7938 | [
"Apache-2.0"
] | 563 | 2016-09-20T01:00:15.000Z | 2022-03-31T22:43:54.000Z | from tests.common.devices.base import AnsibleHostBase
class VMHost(AnsibleHostBase):
"""
@summary: Class for VM server
For running ansible module on VM server
"""
def __init__(self, ansible_adhoc, hostname):
AnsibleHostBase.__init__(self, ansible_adhoc, hostname)
@property
def external_port(self):
if not hasattr(self, "_external_port"):
vm = self.host.options["variable_manager"]
im = self.host.options["inventory_manager"]
hostvars = vm.get_vars(host=im.get_host(self.hostname), include_delegate_to=False)
setattr(self, "_external_port", hostvars["external_port"])
return getattr(self, "_external_port")
| 32.363636 | 94 | 0.678371 | 655 | 0.919944 | 0 | 0 | 410 | 0.575843 | 0 | 0 | 190 | 0.266854 |
680e3a6742a96523df350e83def38272170019eb | 438 | py | Python | normatrix/normatrix/plugged/__init__.py | romainpanno/NorMatrix | 33bbf02b1881853088c8350f9a3c9d22ba3aa704 | [
"MIT"
] | null | null | null | normatrix/normatrix/plugged/__init__.py | romainpanno/NorMatrix | 33bbf02b1881853088c8350f9a3c9d22ba3aa704 | [
"MIT"
] | null | null | null | normatrix/normatrix/plugged/__init__.py | romainpanno/NorMatrix | 33bbf02b1881853088c8350f9a3c9d22ba3aa704 | [
"MIT"
] | null | null | null | """All plugging called to check norm for a C file."""
__all__ = [
"columns",
"comma",
"function_line",
"indent",
"libc_func",
"nested_branches",
"number_function",
"parenthesis",
"preprocessor",
"snake_case",
"solo_space",
"statements",
"trailing_newline",
"two_space",
"operators",
"newline_at_end_of_file",
"subscriptor",
"header",
]
PREVIEW = [
"nb_params"
]
| 16.846154 | 53 | 0.584475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 296 | 0.675799 |
680ed6affdcd3fb061dee185fd006fb9c5077b21 | 30,862 | py | Python | sc2monitor/controller.py | 2press/sc2monitor | a4c193fd946e54a03be2181d839875ddb956e621 | [
"MIT"
] | 1 | 2018-07-30T11:39:32.000Z | 2018-07-30T11:39:32.000Z | sc2monitor/controller.py | 2press/sc2-monitor | a4c193fd946e54a03be2181d839875ddb956e621 | [
"MIT"
] | 22 | 2019-01-03T11:34:30.000Z | 2021-05-03T19:50:10.000Z | sc2monitor/controller.py | 2press/sc2-monitor | a4c193fd946e54a03be2181d839875ddb956e621 | [
"MIT"
] | 1 | 2019-01-14T21:35:05.000Z | 2019-01-14T21:35:05.000Z | """Control the sc2monitor."""
import asyncio
import logging
import math
import time
from datetime import datetime, timedelta
from operator import itemgetter
import aiohttp
import sc2monitor.model as model
from sc2monitor.handlers import SQLAlchemyHandler
from sc2monitor.sc2api import SC2API
logger = logging.getLogger(__name__)
sql_logger = logging.getLogger()
class Controller:
"""Control the sc2monitor."""
def __init__(self, **kwargs):
"""Init the sc2monitor."""
self.kwargs = kwargs
self.sc2api = None
self.db_session = None
self.current_season = {}
async def __aenter__(self):
"""Create a aiohttp and db session that will later be closed."""
headers = {'Accept-Encoding': 'gzip, deflate'}
self.http_session = aiohttp.ClientSession(headers=headers)
self.create_db_session()
return self
def create_db_session(self):
"""Create sqlalchemy database session."""
self.db_session = model.create_db_session(
db=self.kwargs.pop('db', ''),
encoding=self.kwargs.pop('encoding', ''))
self.handler = SQLAlchemyHandler(self.db_session)
self.handler.setLevel(logging.INFO)
sql_logger.setLevel(logging.INFO)
sql_logger.addHandler(self.handler)
if len(self.kwargs) > 0:
self.setup(**self.kwargs)
self.sc2api = SC2API(self)
self.cache_matches = self.get_config(
'cache_matches',
default_value=1000)
self.cache_logs = self.get_config(
'cache_logs',
default_value=500)
self.cache_runs = self.get_config(
'cache_runs',
default_value=500)
self.analyze_matches = self.get_config(
'analyze_matches',
default_value=100)
async def __aexit__(self, exc_type, exc, tb):
"""Close all aiohtto and database session."""
await self.http_session.close()
self.db_session.commit()
self.db_session.close()
self.db_session = None
def get_config(self, key, default_value=None,
raise_key_error=True,
return_object=False):
"""Read a config value from database."""
if default_value is not None:
raise_key_error = False
entry = self.db_session.query(
model.Config).filter(model.Config.key == key).scalar()
if not entry:
if raise_key_error:
raise ValueError(f'Unknown config key "{key}"')
else:
if return_object:
return None
else:
return '' if default_value is None else default_value
else:
if return_object:
return entry
else:
return entry.value
def set_config(self, key, value, commit=True):
"""Save a config value to the database."""
entry = self.db_session.query(
model.Config).filter(model.Config.key == key).scalar()
if not entry:
self.db_session.add(model.Config(key=key, value=value))
else:
entry.value = value
if commit:
self.db_session.commit()
def setup(self, **kwargs):
"""Set up the sc2monitor with api-key and api-secret."""
valid_keys = ['api_key', 'api_secret',
'cache_matches', 'analyze_matches']
for key, value in kwargs.items():
if key not in valid_keys:
raise ValueError(
f"Invalid configuration key '{key}'"
f" (valid keys: {', '.join(valid_keys)})")
self.set_config(key, value, commit=False)
self.db_session.commit()
if self.sc2api:
self.sc2api.read_config()
def add_player(self, url, race=model.Race['Random']):
"""Add a player by url to the sc2monitor."""
close_db = False
if self.db_session is None:
self.create_db_session()
close_db = True
server, realm, player_id = self.sc2api.parse_profile_url(url)
count = self.db_session.query(model.Player).filter(
model.Player.realm == realm,
model.Player.player_id == player_id,
model.Player.server == server).count()
if count == 0:
new_player = model.Player(
realm=realm,
player_id=player_id,
server=server,
race=race)
self.db_session.add(new_player)
self.db_session.commit()
if close_db:
self.db_session.close()
self.db_session = None
def remove_player(self, url):
"""Remove a player by url to the sc2monitor."""
close_db = False
if self.db_session is None:
self.create_db_session()
close_db = True
server, realm, player_id = self.sc2api.parse_profile_url(url)
for player in self.db_session.query(model.Player).filter(
model.Player.realm == realm,
model.Player.player_id == player_id,
model.Player.server == server).all():
self.db_session.delete(player)
self.db_session.commit()
if close_db:
self.db_session.close()
self.db_session = None
async def update_season(self, server: model.Server):
"""Update info about the current season in the database."""
current_season = await self.sc2api.get_season(server)
season = self.db_session.query(model.Season).\
filter(model.Season.server == server).\
order_by(model.Season.season_id.desc()).\
limit(1).scalar()
if not season or current_season.season_id != season.season_id:
self.db_session.add(current_season)
self.db_session.commit()
self.db_session.refresh(current_season)
logger.info(f'Found a new ladder season: {current_season}')
return current_season
else:
season.start = current_season.start
season.end = current_season.end
season.year = current_season.year
season.number = current_season.number
self.db_session.commit()
return season
async def update_seasons(self):
"""Update seasons info for all servers."""
servers = [server[0] for server in self.db_session.query(
model.Player.server).distinct()]
tasks = []
for server in servers:
tasks.append(asyncio.create_task(self.update_season(server)))
for season in await asyncio.gather(*tasks, return_exceptions=True):
try:
if isinstance(season, model.Season):
self.current_season[season.server.id()] = season
else:
raise season
except Exception:
logger.exception(
('The following exception was'
' raised while updating seasons:'))
async def query_player(self, player: model.Player):
"""Collect api data of a player."""
complete_data = []
for ladder in await self.sc2api.get_ladders(player):
async for data in self.sc2api.get_ladder_data(player, ladder):
current_player = await self.get_player_with_race(player, data)
missing_games, new = self.count_missing_games(
current_player, data)
if missing_games['Total'] > 0:
complete_data.append({'player': current_player,
'new_data': data,
'missing': missing_games,
'Win': 0,
'Loss': 0})
if len(complete_data) > 0:
await self.process_player(complete_data, new)
elif (not player.name
or not isinstance(player.refreshed, datetime)
or player.refreshed <= datetime.now() - timedelta(days=1)):
await self.update_player_name(player)
async def update_player_name(self, player: model.Player, name=''):
"""Update the name of a player from api data."""
if not name:
metadata = await self.sc2api.get_metadata(player)
name = metadata['name']
for tmp_player in self.db_session.query(model.Player).filter(
model.Player.player_id == player.player_id,
model.Player.realm == player.realm,
model.Player.server == player.server,
model.Player.name != name).all():
logger.info(f"{tmp_player.id}: Updating name to '{name}'")
tmp_player.name = name
self.db_session.commit()
async def check_match_history(self, complete_data):
"""Check matches in match history and assign them to races."""
match_history = await self.sc2api.get_match_history(
complete_data[0]['player'])
for match in match_history:
positive = []
for data_key, data in enumerate(complete_data):
needed = data['missing'].get(match['result'].describe(), 0) > 0
try:
datetime_check = (match['datetime']
- data['player'].last_played
> timedelta(seconds=0))
except TypeError:
datetime_check = True
if (needed and datetime_check):
positive.append(data_key)
if len(positive) == 0:
continue
elif len(positive) >= 1:
# Choose the race with most missing results.
max_missing = 0
for key in positive:
tmp_missing = complete_data[key][
'missing'][match['result'].describe()]
if tmp_missing > max_missing:
data_key = key
max_missing = tmp_missing
complete_data[data_key][
'missing'][match['result'].describe()] -= 1
complete_data[data_key][match['result'].describe()] += 1
try:
complete_data[data_key]['games'].insert(0, match)
except KeyError:
complete_data[data_key]['games'] = [match]
try:
last_played = match['datetime']
except Exception:
last_played = datetime.now()
return last_played, len(match_history)
async def process_player(self, complete_data, new=False):
"""Process the api data of a player."""
last_played, len_history \
= await self.check_match_history(complete_data)
for race_player in complete_data:
race_player['missing']['Total'] = race_player['missing']['Win'] + \
race_player['missing']['Loss']
if race_player['missing']['Total'] > 0:
if new:
logger.info(
f"{race_player['player'].id}: Ignoring "
f"{race_player['missing']['Total']} games missing in"
f" match history ({len_history}) "
"of new player.")
else:
self.guess_games(race_player, last_played)
self.guess_mmr_changes(race_player)
await self.update_player(race_player)
self.calc_statistics(race_player['player'])
async def update_player(self, complete_data):
"""Update database with new data of a player."""
player = complete_data['player']
new_data = complete_data['new_data']
player.mmr = new_data['mmr']
player.ladder_id = new_data['ladder_id']
player.league = new_data['league']
player.ladder_joined = new_data['joined']
player.wins = new_data['wins']
player.losses = new_data['losses']
player.last_active_season = self.get_season_id(player.server)
if player.name != new_data['name']:
await self.update_player_name(
player,
new_data['name'])
if (not player.last_played
or player.ladder_joined
> player.last_played):
player.last_played = player.ladder_joined
self.db_session.commit()
def calc_statistics(self, player: model.Player):
"""Recalculate player statistics."""
self.db_session.refresh(player)
if not player.statistics:
stats = model.Statistics(player=player)
self.db_session.add(stats)
self.db_session.commit()
self.db_session.refresh(stats)
else:
stats = player.statistics
matches = self.db_session.query(model.Match).filter(
model.Match.player_id == player.id).order_by(
model.Match.datetime.desc()).limit(self.analyze_matches).all()
stats.games_available = len(matches)
wma_mmr_denominator = stats.games_available * \
(stats.games_available + 1.0) / 2.0
stats.max_mmr = player.mmr
stats.min_mmr = player.mmr
stats.current_mmr = player.mmr
wma_mmr = 0.0
expected_mmr_value = 0.0
expected_mmr_value2 = 0.0
current_wining_streak = 0
current_losing_streak = 0
for idx, match in enumerate(matches):
if match.result == model.Result.Win:
stats.wins += 1
current_wining_streak += 1
current_losing_streak = 0
if current_wining_streak > stats.longest_wining_streak:
stats.longest_wining_streak = current_wining_streak
elif match.result == model.Result.Loss:
stats.losses += 1
current_losing_streak += 1
current_wining_streak = 0
if current_losing_streak > stats.longest_losing_streak:
stats.longest_losing_streak = current_losing_streak
if match.max_length <= 120:
stats.instant_left_games += 1
if match.guess:
stats.guessed_games += 1
mmr = match.mmr
wma_mmr += mmr * \
(stats.games_available - idx) / wma_mmr_denominator
if stats.max_mmr < mmr:
stats.max_mmr = mmr
if stats.min_mmr > mmr:
stats.min_mmr = mmr
expected_mmr_value += mmr / stats.games_available
expected_mmr_value2 += mmr * (mmr / stats.games_available)
if stats.games_available <= 1:
stats.lr_mmr_slope = 0.0
stats.lr_mmr_intercept = expected_mmr_value
else:
ybar = expected_mmr_value
xbar = -0.5 * (stats.games_available - 1)
numerator = 0
denominator = 0
for x, match in enumerate(matches):
x = -x
y = match.mmr
numerator += (x - xbar) * (y - ybar)
denominator += (x - xbar) * (x - xbar)
stats.lr_mmr_slope = numerator / denominator
stats.lr_mmr_intercept = ybar - stats.lr_mmr_slope * xbar
stats.sd_mmr = round(
math.sqrt(expected_mmr_value2
- expected_mmr_value
* expected_mmr_value))
# critical_idx = min(self.controller.config['no_critical_games'],
# stats.games_available) - 1
# stats.critical_game_played = matches[critical_idx]["played"]
stats.avg_mmr = expected_mmr_value
stats.wma_mmr = wma_mmr
self.db_session.commit()
@classmethod
def guess_games(cls, complete_data, last_played):
"""Guess games of a player if missing in match history."""
# If a player isn't new in the database and has played more
# than 25 games since the last refresh or the match
# history is not available for this player, there are
# missing games in the match history. These are guessed to be very
# close to the last game of the match history and in alternating
# order.
player = complete_data['player']
if 'games' not in complete_data:
complete_data['games'] = []
logger.info((
"{}: {} missing games in match "
+ "history - more guessing!").format(
player.id, complete_data['missing']['Total']))
try:
delta = (last_played - player.last_played) / \
complete_data['missing']['Total']
except Exception:
delta = timedelta(minutes=3)
if delta > timedelta(minutes=3):
delta = timedelta(minutes=3)
if delta.total_seconds() <= 0:
last_played = datetime.now()
delta = timedelta(minutes=3)
while (complete_data['missing']['Win'] > 0
or complete_data['missing']['Loss'] > 0):
if complete_data['missing']['Win'] > 0:
last_played = last_played - delta
complete_data['games'].append(
{'datetime': last_played, 'result': model.Result.Win})
complete_data['missing']['Win'] -= 1
complete_data['Win'] += 1
if (complete_data['missing']['Win'] > 0
and complete_data['missing']['Win']
> complete_data['missing']['Loss']):
# If there are more wins than losses add
# a second win before the next loss.
last_played = last_played - delta
complete_data['games'].append(
{'datetime': last_played, 'result': model.Result.Win})
complete_data['missing']['Win'] -= 1
complete_data['Win'] += 1
if complete_data['missing']['Loss'] > 0:
last_played = last_played - delta
complete_data['games'].append(
{'datetime': last_played, 'result': model.Result.Loss})
complete_data['missing']['Loss'] -= 1
complete_data['Loss'] += 1
if (complete_data['missing']['Loss'] > 0
and complete_data['missing']['Win']
< complete_data['missing']['Loss']):
# If there are more losses than wins add second loss before
# the next win.
last_played = last_played - delta
complete_data['games'].append(
{'datetime': last_played, 'result': model.Result.Loss})
complete_data['missing']['Loss'] -= 1
complete_data['Loss'] += 1
def guess_mmr_changes(self, complete_data):
"""Guess MMR change of matches."""
MMR = complete_data['player'].mmr
if MMR is None:
MMR = 0
totalMMRchange = complete_data['new_data']['mmr'] - MMR
wins = complete_data['Win']
losses = complete_data['Loss']
complete_data['games'] = sorted(
complete_data.get('games', []), key=itemgetter('datetime'))
logger.info('{}: Adding {} wins and {} losses!'.format(
complete_data['player'].id, wins, losses))
if wins + losses <= 0:
# No games to guess
return
# Estimate MMR change to be +/-21 for a win and losse, each adjusted
# by the average deviation to achive the most recent MMR value.
# Is 21 accurate? Yes, as the empirical avrage MMR change is 20.9016
# according to data gathered by this tool.
if wins + losses == 1 and MMR != 0:
MMRchange = abs(totalMMRchange)
else:
MMRchange = 21
if MMR == 0:
totalMMRchange = MMRchange * (wins - losses)
MMR = complete_data['new_data']['mmr'] - totalMMRchange
while True:
avgMMRadjustment = (totalMMRchange - MMRchange
* (wins - losses)) / (wins + losses)
# Make sure that sign of MMR change is correct
if abs(avgMMRadjustment) >= MMRchange and MMRchange <= 50:
MMRchange += 1
logger.info(f"{complete_data['player'].id}:"
f" Adjusting avg. MMR change to {MMRchange}")
else:
break
last_played = complete_data['player'].last_played
previous_match = self.db_session.query(model.Match).\
filter(model.Match.player_id
== complete_data['player'].id).\
order_by(model.Match.datetime.desc()).limit(1).scalar()
# Warning breaks Travis CI
# if not previous_match:
# logger.warning('{}: No previous match found.'.format(
# complete_data['player'].id))
for idx, match in enumerate(complete_data['games']):
estMMRchange = round(
MMRchange * match['result'].change() + avgMMRadjustment)
MMR = MMR + estMMRchange
try:
delta = match['datetime'] - last_played
except Exception:
delta = timedelta(minutes=3)
last_played = match['datetime']
max_length = delta.total_seconds()
# Don't mark the most recent game as guess, as time and mmr value
# should be accurate (but not mmr change).
guess = not (idx + 1 == len(complete_data['games']))
alpha = 2.0 / (100.0 + 1.0)
if previous_match and previous_match.ema_mmr > 0.0:
delta = MMR - previous_match.ema_mmr
ema_mmr = previous_match.ema_mmr + alpha * delta
emvar_mmr = (1.0 - alpha) * \
(previous_match.emvar_mmr + alpha * delta * delta)
else:
ema_mmr = MMR
emvar_mmr = 0.0
new_match = model.Match(
player=complete_data['player'],
result=match['result'],
datetime=match['datetime'],
mmr=MMR,
mmr_change=estMMRchange,
guess=guess,
ema_mmr=ema_mmr,
emvar_mmr=emvar_mmr,
max_length=max_length)
complete_data['player'].last_played = match['datetime']
self.db_session.add(new_match)
previous_match = new_match
self.db_session.commit()
# Delete old matches:
deletions = 0
for match in self.db_session.query(model.Match).\
filter(model.Match.player_id == complete_data['player'].id).\
order_by(model.Match.datetime.desc()).\
offset(self.cache_matches).all():
self.db_session.delete(match)
deletions += 1
if deletions > 0:
self.db_session.commit()
logger.info(f"{complete_data['player'].id}: "
f"{deletions} matches deleted!")
def update_ema_mmr(self, player: model.Player):
"""Update the exponential moving avarage MMR of a player."""
matches = self.db_session.query(model.Match).\
filter(model.Match.player == player).\
order_by(model.Match.datetime.asc()).all()
previous_match = None
for match in matches:
alpha = 2.0 / (100.0 + 1.0)
if previous_match and previous_match.ema_mmr > 0.0:
delta = match.mmr - previous_match.ema_mmr
ema_mmr = previous_match.ema_mmr + alpha * delta
emvar_mmr = (1.0 - alpha) * \
(previous_match.emvar_mmr + alpha * delta * delta)
else:
ema_mmr = match.mmr
emvar_mmr = 0.0
match.ema_mmr = ema_mmr
match.emvar_mmr = emvar_mmr
previous_match = match
self.db_session.commit()
def get_season_id(self, server: model.Server):
"""Get the current season id on a server."""
return self.current_season[server.id()].season_id
def count_missing_games(self, player: model.Player, data):
"""Count games of the api data that are not yet in the database."""
missing = {}
missing['Win'] = data['wins']
missing['Loss'] = data['losses']
if player.last_active_season == 0 or player.mmr == 0:
new = True
elif (player.last_active_season < self.get_season_id(player.server)):
# New Season!
# TODO: Check if last season endpoint can be requested!
# Only the legacy endpoints give the option to query the
# previous season's data (given that the ladder ID is
# known), e.g.:
# https://eu.api.blizzard.com/sc2/legacy/ladder/2/209966
new = False
elif (player.ladder_id != data['ladder_id']
or not player.ladder_joined
or player.ladder_joined < data['joined']
or data['wins'] < player.wins
or data['losses'] < player.losses):
# Old season, but new ladder or same ladder, but rejoined
if (data['wins'] < player.wins
or data['losses'] < player.losses):
# Forced ladder reset!
logger.info('{}: Manual ladder reset to {}!'.format(
player.id, data['ladder_id']))
new = True
else:
# Promotion?!
missing['Win'] -= player.wins
missing['Loss'] -= player.losses
new = player.mmr == 0
if missing['Win'] + missing['Loss'] == 0:
# Player was promoted/demoted to/from GM!
promotion = data['league'] == model.League.Grandmaster
demotion = player.league == model.League.Grandmaster
if promotion == demotion:
logger.warning(
'Logical error in GM promotion/'
'demotion detection.')
player.ladder_joined = data['joined']
player.ladder_id = data['ladder_id']
player.league = data['league']
self.db_session.commit()
logger.info(f"{player.id}: GM promotion/demotion.")
else:
if data['league'] < player.league:
logger.warning('Logical error in promtion detection.')
else:
logger.info(f"{player.id}: Promotion "
f"to ladder {data['ladder_id']}!")
else:
missing['Win'] -= player.wins
missing['Loss'] -= player.losses
new = player.mmr == 0
missing['Total'] = missing['Win'] + missing['Loss']
if (missing['Total']) > 0:
logger.info(
'{player}: {Total} new matches found!'.format(
player=player.id, **missing))
return missing, new
async def get_player_with_race(self, player, ladder_data):
"""Get the player with the race present in the ladder data."""
if player.ladder_id == 0:
player.race = ladder_data['race']
correct_player = player
elif player.race != ladder_data['race']:
correct_player = self.db_session.query(model.Player).filter(
model.Player.player_id == player.player_id,
model.Player.realm == player.realm,
model.Player.server == player.server,
model.Player.race == ladder_data['race']).scalar()
if not correct_player:
correct_player = model.Player(
player_id=player.player_id,
realm=player.realm,
server=player.server,
race=ladder_data['race'],
ladder_id=0)
self.db_session.add(correct_player)
self.db_session.commit()
self.db_session.refresh(correct_player)
else:
correct_player = player
return correct_player
def delete_old_logs_and_runs(self):
""" Delete old logs and runs from database."""
deletions = 0
for log_entry in self.db_session.query(model.Log).\
order_by(model.Log.datetime.desc()).\
offset(self.cache_logs).all():
self.db_session.delete(log_entry)
deletions += 1
if deletions > 0:
self.db_session.commit()
logger.info(f"{deletions} old log entries were deleted!")
deletions = 0
for run in self.db_session.query(model.Run).\
order_by(model.Run.datetime.desc()).\
offset(self.cache_runs).all():
self.db_session.delete(run)
deletions += 1
if deletions > 0:
self.db_session.commit()
logger.info(f"{deletions} old run logs were deleted!")
async def run(self):
"""Run the sc2monitor."""
start_time = time.time()
logger.debug("Starting job...")
await self.update_seasons()
unique_group = (model.Player.player_id,
model.Player.realm, model.Player.server)
tasks = []
players = self.db_session.query(model.Player).distinct(
*unique_group).group_by(*unique_group).all()
for player in players:
tasks.append(asyncio.create_task(self.query_player(player)))
results = await asyncio.gather(*tasks, return_exceptions=True)
for key, result in enumerate(results):
try:
if result is not None:
raise result
except Exception:
logger.exception(
'The following exception was'
f' raised while quering player {players[key].id}:')
self.delete_old_logs_and_runs()
duration = time.time() - start_time
self.db_session.add(
model.Run(duration=duration,
api_requests=self.sc2api.request_count,
api_retries=self.sc2api.retry_count,
warnings=self.handler.warnings,
errors=self.handler.errors))
self.db_session.commit()
logger.debug(f"Finished job performing {self.sc2api.request_count}"
f" api requests ({self.sc2api.retry_count} retries)"
f" in {duration:.2f} seconds.")
| 40.342484 | 79 | 0.547307 | 30,494 | 0.988076 | 0 | 0 | 3,023 | 0.097952 | 10,303 | 0.333841 | 5,322 | 0.172445 |
680f6ab5ef1c42004a8c67eed4c23528a49a9b2c | 9,867 | py | Python | impermagit/repo.py | tomheon/impermagit | 1f508e8cedcd8ee54745489d2b7682a60dfa07cd | [
"MIT"
] | null | null | null | impermagit/repo.py | tomheon/impermagit | 1f508e8cedcd8ee54745489d2b7682a60dfa07cd | [
"MIT"
] | null | null | null | impermagit/repo.py | tomheon/impermagit | 1f508e8cedcd8ee54745489d2b7682a60dfa07cd | [
"MIT"
] | null | null | null | from contextlib import contextmanager
import errno
import os
import shutil
import subprocess
import tempfile
class GitExeException(Exception):
"""
Thrown when the external git exe doesn't return a 0.
"""
pass
class Repo(object):
"""
Interface to a git repo.
Generally you should create one with `fleeting_repo`, which
manages cleanup.
"""
def __init__(self, repo_root, git_exe=None):
"""
- `repo_root`: the (presumably temporary) dir in which to init
and manage a git repo, e.g. '/tmp/Fkjwpa'
- `git_exe`: the git exe to use. Should be a list, suitable
for passing to subprocess.call. If None, defaults to
["/usr/bin/env", "git"], which should work fine on most
systems.
Calls "git init" to create a new repo, and therefore may raise
a GitExeException.
"""
self.repo_root = _real_abs(repo_root)
self.git_exe = git_exe
if self.git_exe is None:
self.git_exe = ["/usr/bin/env", "git"]
self.do_git(["init"])
def commit(self, fnames_with_contents, commit_msg=None, author=None):
"""
Apply the sequential changes described in fnames_with_contents
in the repo directory, and then commit the results.
Like all methods in this class that invoke git, this can raise
a GitExeException.
- `fnames_with_contents`: a list of tuples of the
form
[(str(fname), str(contents)|None), ...]
E.g.
[('test.txt', 'this is a test\\n'),
('testdir_one/to_be_removed.txt', None)]
Each fname should be relative to the git repo root. If
fname contains directory paths, they will be created under
the repo root. If the contents is not None, the string will
be written to the file indicated by fname. If contents is
None, the file will be git rm'ed.
- `commmit_msg`: a utf-8 str() being the commit message to
pass to the commit.
If None, the commit message will be:
"Test commit."
- `author`: a utf-8 str() being the author for the commit,
formatted according to git requirements as:
"Author Name <author@email>"
If author is None, the author will be:
"Test Author <test@example.com>"
"""
self._write_add_rm(fnames_with_contents)
commit_cmd = ["commit", "--author"]
if author is None:
author = "Test Author <test@example.com>"
commit_cmd.append(author)
if commit_msg is None:
commit_msg = "Test commit."
# Use a temp file to hold the commit message, in case it's
# long or weird and wouldn't do well on the command line.
with _temp_fname() as commit_fname:
with open(commit_fname, 'wb') as fil:
fil.write(commit_msg.encode('utf-8'))
commit_cmd.append("-F")
commit_cmd.append(commit_fname)
self.do_git(commit_cmd)
def get_path(self, fname):
"""
Return a full path to fname under the repository root.
- `fname`: a file or directory name, relative to the repo
root.
>>> repo = Repo('/some/dir')
>>> repo.get_path('testing/test.txt')
'/some/dir/testing/test.txt'
"""
return os.path.join(self.repo_root, fname)
def _write_add_rm(self, fnames_with_contents):
"""
Write contents for each fname in fname with contents, then git
add it.
For each fname with None contents, git rm it.
"""
for (fname, contents) in fnames_with_contents:
fpath = os.path.join(self.repo_root, fname)
_ensure_dir_for_fpath(fpath)
if contents is None:
self.do_git(["rm", fname])
else:
with open(fpath, 'wb') as fil:
fil.write(contents.encode('utf-8'))
self.do_git(["add", fname])
def do_git(self, cmd):
"""
Run a git cmd in the repo root and don't worry about what it
writes to stdout / stderr.
If you need access to stdout / stderr, take a look at
`yield_git`.
Raises a GitExeException if git returns non-0.
- `cmd`: a list of strings, suitable for passing to
subprocess.call, e.g. ["add", "some_file.txt"]
"""
with self.yield_git(cmd):
pass
def yield_git(self, cmd):
"""
Run git_exe in the repo root, passing cmd to it.
Raises a GitExeException if git returns non-0.
- `cmd`: a list of strings, suitable for passing to
subprocess.call, e.g. ["add", "some_file.txt"]
Yields a tuple of open (file(), file()), being the stdout and
stderr written to by the git process, to support use like:
>>> repo = Repo('/some/dir')
>>> with repo.yield_git(["log", "--oneline"]) as git_output:
... git_out, git_err = git_output
... output = git_out.read()
Note that means this is *not* what you want to do:
>>> repo.yield_git(["log", "--oneline"])
as this will just yield the generator and run nothing.
Use do_git for that.
"""
# this is ugly, creating a local function and calling it, but
# otherwise @contextmanager ruins the args in interactive help
@contextmanager
def _yield_git():
whole_cmd = self.git_exe + cmd
with _temp_dir() as tmp_dir:
# don't bother using temp files for these, just a tmp dir,
# as it will be recursively cleaned up.
stdout_fname = os.path.join(tmp_dir, 'git_stdout')
stderr_fname = os.path.join(tmp_dir, 'git_stderr')
with open(stdout_fname, 'wb') as o_f:
with open(stderr_fname, 'wb') as e_f:
returncode = subprocess.call(whole_cmd,
stdout=o_f,
stderr=e_f,
cwd=self.repo_root)
if returncode != 0:
raise GitExeException(_fmt_err(whole_cmd,
returncode,
stderr_fname,
stdout_fname))
# if we get here, the git command returned 0. We re-open
# and yield the stdout and stderr in case the caller wants
# them. After the yield returns, the entire temp
# directory will be cleaned up.
with open(stdout_fname, 'rb') as git_out_f:
with open(stderr_fname, 'rb') as git_err_f:
yield (git_out_f, git_err_f)
return _yield_git()
def fleeting_repo(git_exe=None):
"""
Create a temp directory and yield a Repo built from it.
The temp dir will be cleaned up afterwards.
>>> with fleeting_repo() as repo:
... repo.do_git(["some", "git", "cmd"])
- `git_exe`: the git exe to use. Should be a list, suitable for
passing to subprocess.call. If None, defaults to
["/usr/bin/env", "git"], which should work fine on most systems.
"""
# this is ugly, creating a local function and returning it, but
# otherwise @contextmanager screws up the param names in the
# interactive help.
@contextmanager
def _fleeting_repo():
with _temp_dir() as temp_dir:
yield Repo(temp_dir, git_exe=git_exe)
return _fleeting_repo()
@contextmanager
def _temp_fname():
"""
Yield a temp file name, delete the file afterwards.
"""
ntf = tempfile.NamedTemporaryFile(delete=False)
fname = ntf.name
ntf.close()
try:
yield fname
finally:
try:
os.unlink(fname)
except OSError:
# this means it was already unlinked
pass
@contextmanager
def _temp_dir():
"""
Make a temp dir and yield it, removing it afterwards.
"""
temp_dir = tempfile.mkdtemp()
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir)
def _ensure_dir_for_fpath(fpath):
"""
Ensure that the directory in which fpath will reside exists,
recursively creating it otherwise.
Adapted from:
http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
"""
dir_name = os.path.dirname(fpath)
try:
os.makedirs(dir_name)
except OSError as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(dir_name):
raise
def _fmt_cmd_for_err(cmd):
"""
Join a git cmd, quoting individual segments first so that it's
relatively easy to see if there were whitespace issues or not.
"""
return ' '.join(['"%s"' % seg for seg in cmd])
def _fmt_err(git_cmd, returncode, stderr_fname, stdout_fname):
"""
Format an error string for a failed git command, which includes
the harvested stdout and stderr of the process.
"""
git_cmd_s = _fmt_cmd_for_err(git_cmd)
err_msg = '\n'.join([_read_fname(stderr_fname),
_read_fname(stdout_fname)])
return ("Git command %s returned %d with err log %s" %
(git_cmd_s,
returncode,
err_msg))
def _read_fname(fname):
"""
Open fname, read the contents, return them.
"""
with open(fname, 'rb') as fil:
return str(fil.read())
def _real_abs(path):
"""
Return a real, absolute path.
"""
return os.path.abspath(os.path.realpath(path))
| 31.32381 | 77 | 0.572616 | 6,963 | 0.705686 | 3,851 | 0.390291 | 2,258 | 0.228844 | 0 | 0 | 5,546 | 0.562076 |
681045cfaacc2418fa853c78810b9368726344ac | 717 | py | Python | chaospy/distributions/copulas/__init__.py | lblonk/chaospy | 1759a4307c6134b74ce63ff44973195f1e185f94 | [
"MIT"
] | null | null | null | chaospy/distributions/copulas/__init__.py | lblonk/chaospy | 1759a4307c6134b74ce63ff44973195f1e185f94 | [
"MIT"
] | null | null | null | chaospy/distributions/copulas/__init__.py | lblonk/chaospy | 1759a4307c6134b74ce63ff44973195f1e185f94 | [
"MIT"
] | null | null | null | r"""
Copulas are a type dependency structure imposed on independent variables to
achieve to more complex problems without adding too much complexity.
To construct a copula one needs a copula transformation and the
Copula wrapper::
>>> dist = chaospy.Iid(chaospy.Uniform(), 2)
>>> copula = chaospy.Gumbel(dist, theta=1.5)
The resulting copula is then ready for use::
>>> print(numpy.around(copula.sample(5), 4))
[[0.6536 0.115 0.9503 0.4822 0.8725]
[0.6286 0.0654 0.96 0.5073 0.9705]]
"""
from .baseclass import Copula
from .archimedean import Archimedean
from .gumbel import Gumbel
from .clayton import Clayton
from .joe import Joe
from .nataf import Nataf
from .t_copula import TCopula
| 27.576923 | 75 | 0.732218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 516 | 0.719665 |
6810793cac493b5c11bee04614c0af284e700eb6 | 2,112 | py | Python | documentation/examples/policy_aggregation.py | oscardavidtorres1994/cadCAD | 229e2dac585eb6c1644cf277e3a7807883f10d13 | [
"MIT"
] | 1 | 2021-11-22T23:11:15.000Z | 2021-11-22T23:11:15.000Z | documentation/examples/policy_aggregation.py | oscardavidtorres1994/cadCAD | 229e2dac585eb6c1644cf277e3a7807883f10d13 | [
"MIT"
] | null | null | null | documentation/examples/policy_aggregation.py | oscardavidtorres1994/cadCAD | 229e2dac585eb6c1644cf277e3a7807883f10d13 | [
"MIT"
] | 1 | 2021-11-22T23:11:03.000Z | 2021-11-22T23:11:03.000Z | import pandas as pd
from tabulate import tabulate
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs
# Policies per Mechanism
def p1m1(_g, step, sH, s):
return {'policy1': 1}
def p2m1(_g, step, sH, s):
return {'policy2': 2}
def p1m2(_g, step, sH, s):
return {'policy1': 2, 'policy2': 2}
def p2m2(_g, step, sH, s):
return {'policy1': 2, 'policy2': 2}
def p1m3(_g, step, sH, s):
return {'policy1': 1, 'policy2': 2, 'policy3': 3}
def p2m3(_g, step, sH, s):
return {'policy1': 1, 'policy2': 2, 'policy3': 3}
# Internal States per Mechanism
def add(y, x):
return lambda _g, step, sH, s, _input: (y, s[y] + x)
def policies(_g, step, sH, s, _input):
y = 'policies'
x = _input
return (y, x)
# Genesis States
genesis_states = {
'policies': {},
's1': 0
}
variables = {
's1': add('s1', 1),
"policies": policies
}
psubs = {
"m1": {
"policies": {
"p1": p1m1,
"p2": p2m1
},
"variables": variables
},
"m2": {
"policies": {
"p1": p1m2,
"p2": p2m2
},
"variables": variables
},
"m3": {
"policies": {
"p1": p1m3,
"p2": p2m3
},
"variables": variables
}
}
sim_config = config_sim(
{
"N": 1,
"T": range(3),
}
)
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
partial_state_update_blocks=psubs,
policy_ops=[lambda a, b: a + b, lambda y: y * 2] # Default: lambda a, b: a + b
)
exec_mode = ExecutionMode()
local_proc_ctx = ExecutionContext(context=exec_mode.local_mode)
run = Executor(exec_context=local_proc_ctx, configs=configs)
raw_result, tensor_field, sessions = run.execute()
result = pd.DataFrame(raw_result)
print()
print("Tensor Field:")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()
| 21.333333 | 82 | 0.597538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.190814 |
6813a323f69361605daa7bd7874ef3440ba67ca6 | 2,667 | py | Python | lib/Agent.py | mbhatt1/Mofosploit | d21295253b9cfe730da22bea9eeb4bcc7b872154 | [
"Apache-2.0"
] | 6 | 2019-01-21T13:09:29.000Z | 2019-10-02T07:58:57.000Z | lib/Agent.py | mbhatt1/Mofosploit | d21295253b9cfe730da22bea9eeb4bcc7b872154 | [
"Apache-2.0"
] | null | null | null | lib/Agent.py | mbhatt1/Mofosploit | d21295253b9cfe730da22bea9eeb4bcc7b872154 | [
"Apache-2.0"
] | 3 | 2019-01-21T13:15:50.000Z | 2020-09-28T11:30:51.000Z |
from lib.imports import *
from lib.Constants import *
from lib.Environment import *
from lib.ML_Modules import ML_Nnet
from lib.parameter_server import Server as ParameterServer
'''
Single Agent
'''
class Agent:
def __init__(self, name, parameter_server):
self.brain = ML_NNet(name, parameter_server)
self.memory = [] # Memory of s,a,r,s_
self.R = 0. # Time discounted total reward.
def act(self, s, available_action_list, eps_steps):
# Decide action using epsilon greedy.
if frames >= eps_steps:
eps = EPS_END
else:
# Linearly interpolate
eps = EPS_START + frames * (EPS_END - EPS_START) / eps_steps
if random.random() < eps:
# Randomly select action.
if len(available_action_list) != 0:
return available_action_list[random.randint(0, len(available_action_list) - 1)], None, None
else:
return 'no payload', None, None
else:
# Select action according to probability p[0] (greedy).
s = np.array([s])
p = self.brain.predict_p(s)
if len(available_action_list) != 0:
prob = []
for action in available_action_list:
prob.append([action, p[0][action]])
prob.sort(key=lambda s: -s[1])
return prob[0][0], prob[0][1], prob
else:
return 'no payload', p[0][len(p[0]) - 1], None
# Push s,a,r,s considering advantage to LocalBrain.
def advantage_push_local_brain(self, s, a, r, s_):
def get_sample(memory, n):
s, a, _, _ = memory[0]
_, _, _, s_ = memory[n - 1]
return s, a, self.R, s_
# Create a_cats (one-hot encoding)
a_cats = np.zeros(NUM_ACTIONS)
a_cats[a] = 1
self.memory.append((s, a_cats, r, s_))
# Calculate R using previous time discounted total reward.
self.R = (self.R + r * GAMMA_N) / GAMMA
# Input experience considering advantage to LocalBrain.
if s_ is None:
while len(self.memory) > 0:
n = len(self.memory)
s, a, r, s_ = get_sample(self.memory, n)
self.brain.train_push(s, a, r, s_)
self.R = (self.R - self.memory[0][2]) / GAMMA
self.memory.pop(0)
self.R = 0
if len(self.memory) >= N_STEP_RETURN:
s, a, r, s_ = get_sample(self.memory, N_STEP_RETURN)
self.brain.train_push(s, a, r, s_)
self.R = self.R - self.memory[0][2]
self.memory.pop(0)
| 35.092105 | 107 | 0.548556 | 2,464 | 0.923885 | 0 | 0 | 0 | 0 | 0 | 0 | 432 | 0.16198 |
6813cdd606ade45f43873ba2c99142d90f8a961a | 16,267 | py | Python | wrt/wrt-packertool-android-tests/test.py | tiwanek/crosswalk-test-suite | 82d12e6187c81caeb56e12c5ec3483f35d939982 | [
"BSD-3-Clause"
] | null | null | null | wrt/wrt-packertool-android-tests/test.py | tiwanek/crosswalk-test-suite | 82d12e6187c81caeb56e12c5ec3483f35d939982 | [
"BSD-3-Clause"
] | null | null | null | wrt/wrt-packertool-android-tests/test.py | tiwanek/crosswalk-test-suite | 82d12e6187c81caeb56e12c5ec3483f35d939982 | [
"BSD-3-Clause"
] | null | null | null | import sys, os, os.path, time, shutil
import commands
from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import SubElement as SE
import metacomm.combinatorics.all_pairs2
all_pairs = metacomm.combinatorics.all_pairs2.all_pairs2
totalNum = 0
failNum = 0
passNum = 0
Flag = "positive"
ConstPath = os.getcwd()
Start = time.strftime("%Y-%m-%d %H:%M:%S")
ResultList = []
Direc = "./"
def lineCount(fp):
fileTmp = open(fp)
count = len(fileTmp.readlines())
fileTmp.close()
return count
def genSelfcom(combIn, combOut):
try:
fp = open(combIn)
comb = open(combOut, 'a+')
comb.write(fp.read())
fp.close()
comb.close()
print "Update selfcomb.txt ---------------->O.k"
return
except Exception,e:
print Exception,":",e
print "Update selfcomb.txt ---------------->Error"
sys.exit(1)
def processMain(seedIn):
try:
print "Input Seed :" + os.path.basename(seedIn)
print "Excute " + Flag + " cases ------------------------->Start"
row = 0
sectionList = []
fp = open(seedIn)
for line in fp:
items = line.strip('\n\r').split(":")
sectionName = items[0].split("--")[0]
if sectionName not in sectionList:
sectionList.append(sectionName)
inputTxt = open(ConstPath + "/self/" + sectionName + "_input.txt", "a+")
inputTxt.write(line)
inputTxt.close()
fp.close()
for section in sectionList:
caseline = ""
counters = lineCount(ConstPath + "/self/" + section + "_input.txt")
if counters >= 2:
lists = [[] for m in range(counters)]
inputTxt = open(ConstPath + "/self/" + section + "_input.txt")
for line in inputTxt:
items = line.strip('\n\r').split(":")
values = items[1].split(",")
lists[row].extend(values)
row = row + 1
inputTxt.close()
pairs = all_pairs(lists)
outTxt = open(ConstPath + "/self/" + section + "_output.txt", 'w+')
for e, v in enumerate(pairs):
for c in range(len(v)):
caseline = caseline + v[c] + ","
outTxt.write(section + ":" + caseline[:-1] + "\n")
outTxt.close()
else:
shutil.copy(ConstPath + "/self/" + section + "_input.txt", ConstPath + "/self/" + section + "_output.txt")
#1*********XX_output.txt -> selfcomb.txt
genSelfcom(ConstPath + "/self/" + section + "_output.txt", ConstPath + "/allpairs/selfcomb.txt")
#2*********selfcomb.txt -> caseXX.txt
genCases(ConstPath + "/allpairs/selfcomb.txt")
#3*********output -> manifest.json
caseExecute(ConstPath + "/allpairs/case_" + Flag + ".txt")
print "Excute " + Flag + " cases ------------------------->O.K"
except Exception,e:
print Exception,":",e
print "Excute " + Flag + " cases ------------------------->Error"
sys.exit(1)
def genCases(selfcomb):
try:
print "Genarate " + Flag + " case.txt file ---------------->Start"
caseFile = open(ConstPath + "/allpairs/case_" + Flag + ".txt", 'w+')
names = ""
row = 0
counters = lineCount(selfcomb)
lists = [[] for m in range(counters)]
fobj = open(selfcomb)
for line in fobj:
items = line.strip('\n\r').split(":")
names = names + items[0] + "\t"
caseFile.write(names.rstrip("\t") + "\n")
fobj.seek(0)
for line in fobj:
items = line.strip('\n\r').split(":")
values = items[1:]
lists[row].extend(":".join(values).split(","))
row = row + 1
fobj.close()
pairs = all_pairs(lists)
for e, v in enumerate(pairs):
case = ""
for c in range(0,len(v)):
case = case + v[c] +"\t"
caseFile.write(case.rstrip("\t") + "\n")
caseFile.close()
print "Genarate " + Flag + " case.txt file ---------------->O.k"
except Exception,e:
print "Generate " + Flag + " case.txt file ---------------->Error"
print Exception,":",e
sys.exit(1)
def caseExecute(caseInput):
try:
global totalNum
global failNum
global passNum
global ResultList
global Flag
global Direc
print "Excute cases ------------------------->Start"
caseIn = open(caseInput)
line = caseIn.readline().strip('\n\r')
sectionList = line.split("\t")
os.chdir(ConstPath + "/tools/crosswalk")
toolstatus = commands.getstatusoutput("python make_apk.py")
if toolstatus[0] != 0:
print "Crosswalk Binary is not ready, Please attention"
sys.exit(1)
for line in caseIn:
totalNum = totalNum + 1
items = line.strip("\t\n").split("\t")
command = "python make_apk.py "
data = {"id":"","result":"","entry":"","start":"","end":"","set":""}
data["start"] = time.strftime("%Y-%m-%d %H:%M:%S")
for i in range(len(sectionList)):
items[i] = items[i].replace("000", " ")
command = command + "--" + sectionList[i] + "=" + '"' + items[i] + '" '
command = command.strip()
if "target-dir" in sectionList:
dirIndex = sectionList.index("target-dir")
Direc = items[dirIndex]
else:
Direc = "./"
nameIndex = sectionList.index("name")
packIndex = sectionList.index("package")
name = items[nameIndex]
package = items[packIndex]
print "##########"
print "Case" + str(totalNum) + " :"
print "Packer Tool Command:"
print command
print "Genarate APK ---------------->Start"
packstatus = commands.getstatusoutput(command)
if Flag == "negative":
if packstatus[0] == 0:
print "Genarate APK ---------------->O.K"
result = "FAIL"
failNum = failNum + 1
else:
print "Genarate APK ---------------->Error"
result = "PASS"
passNum = passNum + 1
else:
if packstatus[0] != 0:
print "Genarate APK ---------------->Error"
result = "FAIL"
failNum = failNum + 1
else:
print "Genarate APK ---------------->O.K"
result = tryRunApp(name, package)
data["end"] = time.strftime("%Y-%m-%d %H:%M:%S")
data["id"] = "Case" + str(totalNum)
data["result"] = result
data["entry"] = command
data["set"] = Flag
ResultList.append(data)
os.system("rm -rf " + ConstPath + "/tools/crosswalk/" + Direc + "/*apk")
print "Case Result :",result
print "##########"
caseIn.close()
print "Excute cases ------------------------->O.K"
except Exception,e:
print Exception,":",e
print "Execute case ---------------->Error"
sys.exit(1)
def tryRunApp(name, package):
try:
global failNum
global passNum
result = "PASS"
message = ""
print "Install APK ---------------->Start"
instatus = commands.getstatusoutput("adb install " + ConstPath + "/tools/crosswalk/" + Direc + "/*apk")
if instatus[0] == 0:
print "Install APK ---------------->O.K"
print "Find Package in device ---------------->Start"
pmstatus = commands.getstatusoutput("adb shell pm list packages |grep " + package)
if pmstatus[0] == 0:
print "Find Package in device ---------------->O.K"
print "Launch APK ---------------->Start"
launchstatus = commands.getstatusoutput("adb shell am start -n " + package + "/." + name + "Acivity")
if launchstatus[0] != 0:
print "Launch APK ---------------->Error"
os.system("adb uninstall " + package)
failNum = failNum + 1
result = "FAIL"
else:
print "Launch APK ---------------->O.K"
print "Stop APK ---------------->Start"
stopstatus = commands.getstatusoutput("adb shell am force-stop " + package)
if stopstatus[0] == 0:
print "Stop APK ---------------->O.K"
print "Uninstall APK ---------------->Start"
unistatus = commands.getstatusoutput("adb uninstall " + package)
if unistatus[0] == 0:
print "Uninstall APK ---------------->O.K"
passNum = passNum + 1
else:
print "Uninstall APK ---------------->Error"
failNum = failNum + 1
result = "FAIL"
else:
print "Stop APK ---------------->Error"
failNum = failNum + 1
result = "FAIL"
os.system("adb uninstall " + package)
else:
print "Find Package in device ---------------->Error"
os.system("adb uninstall " + package)
failNum = failNum + 1
result = "FAIL"
else:
print "Install APK ---------------->Error"
result = "FAIL"
failNum = failNum + 1
os.system("rm -rf " + ConstPath + "/tools/crosswalk/" + Direc + "/*apk" + "&>/dev/null")
return result
except Exception,e:
print Exception,":",e
print "Try run webapp ---------------->Error"
sys.exit(1)
def updateXmlTitle(fp,title):
fobj = open(fp, "r+")
lines = fobj.readlines()
fobj.seek(0)
fobj.truncate()
lines.insert(0,title)
fobj.writelines(lines)
fobj.close()
def genResultXml():
try:
tree = ElementTree()
root = Element("test_definition")
tree._setroot(root)
env = Element("environment", {"build_id":"","device_id":"","device_name":"","host":"",\
"lite_version":"","manufacturer":"","resolution":"","screen_size":""})
root.append(env)
#summary element
summary = Element("summary", {"test_plan_name":""})
root.append(summary)
tStart = SE(summary, "start_at")
tEnd = SE(summary, "end_at")
tStart.text = Start
tEnd.text = End
#suite element
suite = SE(root, "suite", {"category":"Crosswalk_Packer_Tool","launcher":"xwalk",\
"name":"wrt-packertool-android-tests"})
setPositive = SE(suite, "set", {"name":"positive","set_debug_msg":""})
setNegitive = SE(suite, "set", {"name":"negitive","set_debug_msg":""})
#testcase element
for case in ResultList:
setElement = setPositive
if case["set"] == "negative":
setElement = setNegitive
pur = "Check if packer tool work properly"
testcase = SE(setElement, "testcase", {"component":"Crosswalk Packer Tool",\
"execution_type":"auto","id":case["id"],"purpose":pur,"result":case["result"]},)
desc = SE(testcase, "description")
entry = Element("test_script_entry")
entry.text = "pack command: " + case["entry"].decode("utf-8")
desc.append(entry)
resultInfo = SE(testcase, "result_info")
actualResult = SE(resultInfo, "actual_result")
actualResult.text = case["result"]
caseStart = SE(resultInfo, "start")
caseStart.text = case["start"]
caseEnd = SE(resultInfo, "end")
caseEnd.text = case["end"]
SE(resultInfo, "stdout")
SE(resultInfo, "stderr")
tree.write(ConstPath + "/report/wrt-packertool-android-tests.xml")
updateXmlTitle(ConstPath + "/report/wrt-packertool-android-tests.xml",'<?xml version="1.0" encoding="UTF-8"?>\n<?xml-stylesheet type="text/xsl" href="./style/testresult.xsl"?>\n<?xml-stylesheet type="text/xsl" href="testresult.xsl"?>\n')
print "Generate test.result.xml file ------------------------->O.K"
except Exception,e:
print Exception,"Generate test.result.xml error:",e
def genSummaryXml():
try:
tree = ElementTree()
root = Element("result_summary", {"plan_name":""})
tree._setroot(root)
env = SE(root,"environment",{"build_id":"","cts_version":"","device_id":"","device_model":"","device_name":"","host":"","resolution":"","screen_size":"","manufacturer":""})
summary = SE(root, "summary")
startTime = SE(summary, "start_at")
endTime = SE(summary, "end_at")
startTime.text = Start
endTime.text = End
suite = SE(root, "suite", {"name":"wrt-packertool-android-tests"})
total_case = SE(suite, "total_case")
total_case.text = str(totalNum)
pass_case = SE(suite, "pass_case")
pass_case.text = str(passNum)
pass_rate = SE(suite, "pass_rate")
pass_rate.text = str(float(passNum) / totalNum * 100)
fail_case = SE(suite, "fail_case")
fail_case.text = str(failNum)
fail_rate = SE(suite, "fail_rate")
fail_rate.text = str(float(failNum) / totalNum * 100)
SE(suite, "block_case")
SE(suite, "block_rate")
SE(suite, "na_case")
SE(suite, "na_rate")
tree.write(ConstPath + "/report/summary.xml")
updateXmlTitle(ConstPath + "/report/summary.xml",'<?xml version="1.0" encoding="UTF-8"?>\n<?xml-stylesheet type="text/xsl" href="./style/summary.xsl"?>\n')
print "Generate summary.xml file ------------------------->O.K"
except Exception,e:
print Exception,"Generate summary.xml error:",e
def devicesConform():
try:
deviceList = os.popen("adb devices").readlines()
if len(deviceList) == 2:
print "No test devices connected, Please attention"
sys.exit(1)
except Exception,e:
print Exception,"Device Connect error:",e
sys.exit(1)
def main():
try:
global End
global Flag
os.system("rm -rf " + ConstPath + "/allpairs/negative/*~ &>/dev/null")
os.system("rm -rf " + ConstPath + "/allpairs/positive/*~ &>/dev/null")
os.system("rm -rf " + ConstPath + "/allpairs/positive/case*txt &>/dev/null")
os.system("rm -rf " + ConstPath + "/tools/crosswalk/*apk &>/dev/null")
os.system("rm -rf " + ConstPath + "/self &>/dev/null")
os.system("mkdir -p " + ConstPath + "/self")
devicesConform()
#positive test
for seed in os.listdir(ConstPath + "/allpairs/positive/"):
os.system("rm -rf " + ConstPath + "/allpairs/selfcomb.txt &>/dev/null")
os.system("rm -rf " + ConstPath + "/self &>/dev/null")
os.system("mkdir -p " + ConstPath + "/self")
processMain(ConstPath + "/allpairs/positive/" + seed)
#negative case
Flag = "negative"
for seed in os.listdir(ConstPath + "/allpairs/negative/"):
os.system("rm -rf " + ConstPath + "/allpairs/selfcomb.txt &>/dev/null")
os.system("rm -rf " + ConstPath + "/self &>/dev/null")
os.system("mkdir -p " + ConstPath + "/self")
processMain(ConstPath + "/allpairs/negative/" + seed)
End = time.strftime("%Y-%m-%d %H:%M:%S")
genResultXml()
genSummaryXml()
except Exception,e:
print Exception,":",e
sys.exit(1)
finally:
os.system("rm -rf " + ConstPath + "/self &>/dev/null")
if __name__=="__main__":
main()
| 40.165432 | 245 | 0.500645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,817 | 0.296121 |
6814898e651034adbd68ccc0b6b8b8dad8f99277 | 584 | py | Python | bigcommerce/resources/options.py | sebaacuna/bigcommerce-api-python | 59ef206d7296c196a0ae0400b6bf9bdb5c2f72af | [
"MIT"
] | null | null | null | bigcommerce/resources/options.py | sebaacuna/bigcommerce-api-python | 59ef206d7296c196a0ae0400b6bf9bdb5c2f72af | [
"MIT"
] | null | null | null | bigcommerce/resources/options.py | sebaacuna/bigcommerce-api-python | 59ef206d7296c196a0ae0400b6bf9bdb5c2f72af | [
"MIT"
] | null | null | null | from .base import *
class Options(ListableApiResource, CreateableApiResource, UpdateableApiResource, DeleteableApiResource):
resource_name = 'options'
def values(self, id=None):
if id:
return OptionValues.get(self.id, id, connection=self._connection)
else:
return OptionValues.all(self.id, connection=self._connection)
class OptionValues(ListableApiSubResource, CreateableApiSubResource, UpdateableApiSubResource, DeleteableApiSubResource):
resource_name = 'values'
parent_resource = 'options'
parent_key = 'option_id'
| 32.444444 | 121 | 0.739726 | 558 | 0.955479 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.063356 |
6814efafa8b0436d0c07705dbb35b7e3e7d2d5ab | 61 | py | Python | itscsapp/admision/models/__init__.py | danyRivC/itscsapp | 485309f41f477fcebf66899740a0b4a954f4b98b | [
"MIT"
] | null | null | null | itscsapp/admision/models/__init__.py | danyRivC/itscsapp | 485309f41f477fcebf66899740a0b4a954f4b98b | [
"MIT"
] | null | null | null | itscsapp/admision/models/__init__.py | danyRivC/itscsapp | 485309f41f477fcebf66899740a0b4a954f4b98b | [
"MIT"
] | null | null | null | from .admision_carrer import *
from .admision_event import *
| 20.333333 | 30 | 0.803279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6815fb78cca0f6a2e9ed9085014fe961cd3f8369 | 3,649 | py | Python | paperswithcode/models/evaluation/result.py | lambdaofgod/paperswithcode-client | 2edc0288724400a17c66ba0f07c4a680d0aa907e | [
"Apache-2.0"
] | 1 | 2021-11-21T12:01:44.000Z | 2021-11-21T12:01:44.000Z | paperswithcode/models/evaluation/result.py | lambdaofgod/paperswithcode-client | 2edc0288724400a17c66ba0f07c4a680d0aa907e | [
"Apache-2.0"
] | null | null | null | paperswithcode/models/evaluation/result.py | lambdaofgod/paperswithcode-client | 2edc0288724400a17c66ba0f07c4a680d0aa907e | [
"Apache-2.0"
] | 1 | 2021-11-21T12:01:46.000Z | 2021-11-21T12:01:46.000Z | from datetime import datetime
from typing import Optional
from tea_client.models import TeaClientModel
class Result(TeaClientModel):
"""Evaluation table row object.
Attributes:
id (str): Result id.
best_rank (int, optional): Best rank of the row.
metrics (dict): Dictionary of metrics and metric values.
methodology (str): Methodology used for this implementation.
uses_additional_data (bool): Does this evaluation uses additional data
not provided in the dataset used for other evaluations.
paper (str, optional): Paper describing the evaluation.
best_metric (str, optional): Name of the best metric.
evaluated_on (str, optional): Date of the result evaluation in YYYY-MM-DD format.
external_source_url (str, option): The URL to the external source (eg competition)
"""
id: str
best_rank: Optional[int]
metrics: dict
methodology: str
uses_additional_data: bool
paper: Optional[str]
best_metric: Optional[str]
evaluated_on: Optional[str]
external_source_url: Optional[str]
class _ResultRequest(TeaClientModel):
def dict(
self,
*,
include=None,
exclude=None,
by_alias: bool = False,
skip_defaults: bool = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
):
d = super().dict(
include=include,
exclude=exclude,
by_alias=by_alias,
skip_defaults=skip_defaults,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
evaluated_on = d.get("evaluated_on")
if isinstance(evaluated_on, datetime):
d["evaluated_on"] = evaluated_on.strftime("%Y-%m-%d")
return d
class ResultCreateRequest(_ResultRequest):
"""Evaluation table row object.
Attributes:
metrics (dict): Dictionary of metrics and metric values.
methodology (str): Methodology used for this implementation.
uses_additional_data (bool, optional): Does this evaluation uses additional data
not provided in the dataset used for other evaluations.
paper (str, optional): Paper describing the evaluation.
evaluated_on (str, optional): Date of the result evaluation: YYYY-MM-DD format
external_source_url (str, option): The URL to the external source (eg competition)
"""
metrics: dict
methodology: str
uses_additional_data: Optional[bool] = False
paper: Optional[str] = None
evaluated_on: Optional[str] = None
external_source_url: Optional[str] = None
class ResultUpdateRequest(_ResultRequest):
"""Evaluation table row object.
Attributes:
metrics (dict, optional): Dictionary of metrics and metric values.
methodology (str, optional): Methodology used for this implementation.
uses_additional_data (bool, optional): Does this evaluation uses
additional data not provided in the dataset used for other
evaluations.
paper (str, optional): Paper describing the evaluation.
evaluated_on (datetime, optional): Date of the result evaluation: YYYY-MM-DD format
external_source_url (str, option): The URL to the external source (eg competition)
"""
metrics: Optional[dict] = None
methodology: Optional[str] = None
uses_additional_data: Optional[bool] = None
paper: Optional[str] = None
evaluated_on: Optional[str] = None
external_source_url: Optional[str] = None
| 35.77451 | 91 | 0.671965 | 3,533 | 0.96821 | 0 | 0 | 0 | 0 | 0 | 0 | 1,983 | 0.543437 |
681a1cf3ad99f3d849dfcabbe5d567ca687824c1 | 3,300 | py | Python | tests/api_resources/test_porting_order.py | rjkboyle/telnyx-python | bee8bdef9efd43076e63693876f7a423f3853180 | [
"MIT"
] | 35 | 2019-07-22T20:44:09.000Z | 2022-02-15T05:52:15.000Z | tests/api_resources/test_porting_order.py | rjkboyle/telnyx-python | bee8bdef9efd43076e63693876f7a423f3853180 | [
"MIT"
] | 15 | 2019-08-05T21:16:41.000Z | 2022-01-14T00:14:35.000Z | tests/api_resources/test_porting_order.py | rjkboyle/telnyx-python | bee8bdef9efd43076e63693876f7a423f3853180 | [
"MIT"
] | 6 | 2019-08-19T17:27:39.000Z | 2022-03-15T18:36:23.000Z | from __future__ import absolute_import, division, print_function
import pytest
import telnyx
TEST_RESOURCE_ID = "f1486bae-f067-460c-ad43-73a92848f902"
class TestPortingOrder(object):
def test_is_listable(self, request_mock):
resources = telnyx.PortingOrder.list()
request_mock.assert_requested("get", "/v2/porting_orders")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], telnyx.PortingOrder)
def test_is_retrievable(self, request_mock):
resource = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested("get", "/v2/porting_orders/%s" % TEST_RESOURCE_ID)
assert isinstance(resource, telnyx.PortingOrder)
def test_is_creatable(self, request_mock):
resource = telnyx.PortingOrder.create(
phone_numbers=["13035550000", "13035550001", "13035550002"],
)
request_mock.assert_requested("post", "/v2/porting_orders")
assert isinstance(resource.data[0], telnyx.PortingOrder)
def test_is_saveable(self, request_mock):
porting_order = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
porting_order.webhook_event = "https://update.com"
porting_order.customer_reference = "updated name"
resource = porting_order.save()
request_mock.assert_requested(
"patch", "/v2/porting_orders/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, telnyx.PortingOrder)
assert resource is porting_order
def test_is_modifiable(self, request_mock):
resource = telnyx.PortingOrder.modify(
TEST_RESOURCE_ID,
webhook_event="https://update.com",
customer_reference="updated name",
)
request_mock.assert_requested(
"patch", "/v2/porting_orders/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, telnyx.PortingOrder)
def test_is_deletable(self, request_mock):
resource = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
resource.delete()
request_mock.assert_requested(
"delete", "/v2/porting_orders/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, telnyx.PortingOrder)
def test_can_confirm_porting_order_action(self, request_mock):
resource = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
resource.confirm()
request_mock.assert_requested(
"post", "/v2/porting_orders/%s/actions/confirm" % TEST_RESOURCE_ID
)
assert isinstance(resource, telnyx.PortingOrder)
@pytest.mark.skip(reason="PDF endpoint not supported by mock currently")
def test_can_get_loa_template(self, request_mock):
resource = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
resource.loaTemplate()
request_mock.assert_requested(
"get", "/v2/porting_orders/%s/loa_template" % TEST_RESOURCE_ID
)
assert isinstance(resource, telnyx.PortingOrder)
def test_can_list_porting_phone_numbers(self, request_mock):
resource = telnyx.PortingPhoneNumber.list()
request_mock.assert_requested("get", "/v2/porting_phone_numbers")
assert isinstance(resource.data, list)
assert isinstance(resource.data[0], telnyx.PortingPhoneNumber)
| 40.740741 | 88 | 0.698485 | 3,143 | 0.952424 | 0 | 0 | 405 | 0.122727 | 0 | 0 | 479 | 0.145152 |
681b7c879fe3f2d09f237ea35bdbdb7e97ac6241 | 2,002 | py | Python | tests/garage/replay_buffer/test_replay_buffer.py | Maltimore/garage | a3f44b37eeddca37d157766a9a72e8772f104bcd | [
"MIT"
] | 1 | 2019-07-31T06:53:38.000Z | 2019-07-31T06:53:38.000Z | tests/garage/replay_buffer/test_replay_buffer.py | Maltimore/garage | a3f44b37eeddca37d157766a9a72e8772f104bcd | [
"MIT"
] | null | null | null | tests/garage/replay_buffer/test_replay_buffer.py | Maltimore/garage | a3f44b37eeddca37d157766a9a72e8772f104bcd | [
"MIT"
] | 1 | 2020-02-05T00:34:07.000Z | 2020-02-05T00:34:07.000Z | import numpy as np
from garage.replay_buffer import SimpleReplayBuffer
from tests.fixtures.envs.dummy import DummyDiscreteEnv
class TestReplayBuffer:
def test_add_transition_dtype(self):
env = DummyDiscreteEnv()
obs = env.reset()
replay_buffer = SimpleReplayBuffer(
env_spec=env, size_in_transitions=3, time_horizon=1)
replay_buffer.add_transition(
observation=obs, action=env.action_space.sample())
sample = replay_buffer.sample(1)
sample_obs = sample['observation']
sample_action = sample['action']
assert sample_obs.dtype == env.observation_space.dtype
assert sample_action.dtype == env.action_space.dtype
def test_add_transitions_dtype(self):
env = DummyDiscreteEnv()
obs = env.reset()
replay_buffer = SimpleReplayBuffer(
env_spec=env, size_in_transitions=3, time_horizon=1)
replay_buffer.add_transitions(
observation=[obs], action=[env.action_space.sample()])
sample = replay_buffer.sample(1)
sample_obs = sample['observation']
sample_action = sample['action']
assert sample_obs.dtype == env.observation_space.dtype
assert sample_action.dtype == env.action_space.dtype
def test_eviction_policy(self):
env = DummyDiscreteEnv()
obs = env.reset()
replay_buffer = SimpleReplayBuffer(
env_spec=env, size_in_transitions=3, time_horizon=1)
replay_buffer.add_transitions(observation=[obs, obs], action=[1, 2])
assert not replay_buffer.full
replay_buffer.add_transitions(observation=[obs, obs], action=[3, 4])
assert replay_buffer.full
replay_buffer.add_transitions(observation=[obs, obs], action=[5, 6])
replay_buffer.add_transitions(observation=[obs, obs], action=[7, 8])
assert np.array_equal(replay_buffer._buffer['action'], [[7], [8], [6]])
assert replay_buffer.n_transitions_stored == 3
| 39.254902 | 79 | 0.679321 | 1,872 | 0.935065 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.024975 |
a838ac6842aba9c31c47292a8f156a76646dc315 | 3,238 | py | Python | python/paddle/fluid/tests/unittests/dygraph_to_static/test_isinstance.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 17,085 | 2016-11-18T06:40:52.000Z | 2022-03-31T22:52:32.000Z | python/paddle/fluid/tests/unittests/dygraph_to_static/test_isinstance.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 29,769 | 2016-11-18T06:35:22.000Z | 2022-03-31T16:46:15.000Z | python/paddle/fluid/tests/unittests/dygraph_to_static/test_isinstance.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 4,641 | 2016-11-18T07:43:33.000Z | 2022-03-31T15:15:02.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import numpy as np
import unittest
import paddle
import paddle.nn as nn
class SimpleReturnLayer(nn.Layer):
def forward(self, x):
return x
class AddAttrLayer(nn.Layer):
def __init__(self):
super(AddAttrLayer, self).__init__()
self.attr = None
def forward(self, x):
out = x + self.attr
return out
class IsInstanceLayer(nn.Layer):
def __init__(self, layer):
super(IsInstanceLayer, self).__init__()
self.layer = layer
@paddle.jit.to_static
def forward(self, x):
if isinstance(self.layer, (AddAttrLayer, )):
self.layer.attr = x
res = self.layer(x)
return res
class SequentialLayer(nn.Layer):
def __init__(self, layers):
super(SequentialLayer, self).__init__()
self.layers = nn.LayerList(layers)
@paddle.jit.to_static
def forward(self, x):
res = x
for layer in self.layers:
if isinstance(layer, AddAttrLayer):
layer.attr = x
res = layer(res)
return res
def train(model, to_static):
prog_trans = paddle.jit.ProgramTranslator.get_instance()
prog_trans.enable(to_static)
x = paddle.ones(shape=[2, 3], dtype='int32')
out = model(x)
return out.numpy()
class TestIsinstance(unittest.TestCase):
def test_isinstance_simple_return_layer(self):
model = IsInstanceLayer(SimpleReturnLayer())
self._test_model(model)
def test_isinstance_add_attr_layer(self):
model = IsInstanceLayer(AddAttrLayer())
self._test_model(model)
def test_sequential_layer(self):
layers = []
for i in range(5):
layers.append(SimpleReturnLayer())
layers.append(AddAttrLayer())
model = SequentialLayer(layers)
self._test_model(model)
def _test_model(self, model):
st_out = train(model, to_static=True)
dy_out = train(model, to_static=False)
self.assertTrue(
np.allclose(dy_out, st_out),
msg="dy_out:\n {}\n st_out:\n{}".format(dy_out, st_out))
if __name__ == "__main__":
unittest.main()
| 28.654867 | 74 | 0.67202 | 1,794 | 0.554046 | 0 | 0 | 403 | 0.12446 | 0 | 0 | 1,109 | 0.342495 |
a8394db96022de367ff157a8a8c8008d7bc2a5be | 6,244 | py | Python | dlf/core/registry.py | scheckmedia/dl-framework | 8fea39e166fda0ff8fa51696831bf5cb42f3ed10 | [
"Apache-2.0"
] | null | null | null | dlf/core/registry.py | scheckmedia/dl-framework | 8fea39e166fda0ff8fa51696831bf5cb42f3ed10 | [
"Apache-2.0"
] | null | null | null | dlf/core/registry.py | scheckmedia/dl-framework | 8fea39e166fda0ff8fa51696831bf5cb42f3ed10 | [
"Apache-2.0"
] | null | null | null | """ The task of the registry is to register complex objects
by an keyword/alias that you easily can build and instanciate
these objects with a single keyword. This allows it in a easy
manner to parse a yaml configuration file and use these values
to instanciate the available objects.
"""
import tensorflow as tf
from importlib import import_module
from dlf.core.preprocessing import PreprocessingMethod
from dlf.core.callback import Callback
from dlf.core.evaluator import Evaluator
FRAMEWORK_CALLBACKS = {}
FRAMEWORK_DATA_GENERATORS = {}
FRAMEWORK_LOSSES = {}
FRAMEWORK_METRICS = {}
FRAMEWORK_MODELS = {}
FRAMEWORK_PREPROCESSING_METHODS = {}
FRAMEWORK_EVALUATORS = {}
FRAMEWORK_ACTIVE_EXPERIMENT = None
def import_framework_modules(module_folder, package):
""" Auto import of all files in module folder
# Note
This is necessary for the register_* decorator to work properly.
# Args
module_folder: str.path to folder where files to import are located
package: str. module path e.g. dlf.metrics
"""
# auto import all files and register metrics
# Path(__file__).parent
for module in module_folder.iterdir():
if module.name == '__init__.py' or module.suffix != '.py':
continue
module = f'{package}.{module.stem}'
import_module(module)
def register_preprocessing_method(*names):
"""Decorator to register a preprocessing object to the framework
# Args
*names: Tuple(str). List of aliases for this preprocessing object
# Raises
ValueError: If the parent of this method is not of type [PreprocessingMethod](/dlf/core/preprocessing)
"""
def decorator(cls):
if not issubclass(cls, PreprocessingMethod):
raise ValueError("invalid base class for class {}".format(cls))
for name in names:
FRAMEWORK_PREPROCESSING_METHODS[name] = cls
return cls
return decorator
def register_metric(*names):
"""Decorator to register a custom metric to the framework
# Args
*names: Tuple(str). List of aliases for this metric
# Raises
ValueError: If the parent of this method is not of type `tf.keras.metrics.Metrics`
ValueError: If a given alias is not valid
"""
def decorator(cls):
if not issubclass(cls, tf.keras.metrics.Metric):
raise ValueError("invalid base class for class {}".format(cls))
FRAMEWORK_METRICS[cls.__name__] = cls # alias
for name in names:
if not isinstance(name, str):
raise ValueError(
"Invalid type of name '{}' for register_metric decorator".format(name))
FRAMEWORK_METRICS[name] = cls
return cls
return decorator
def register_loss(*names):
"""Decorator to register a custom loss to the framework
# Args
*names: Tuple(str) List of aliases for this loss
# Raises
Exception: If object is not subclass of `tf.keras.losses.Loss`
ValueError: If a given alias is not valid
"""
def decorator(cls):
if not issubclass(cls, tf.keras.losses.Loss):
raise Exception("invalid base class for class {}".format(cls))
FRAMEWORK_LOSSES[cls.__name__] = cls # alias
for name in names:
if not isinstance(name, str):
raise ValueError(
"Invalid type of name '{}' for register_loss decorator".format(name))
FRAMEWORK_LOSSES[name] = cls
return cls
return decorator
def register_data_generator(*names):
"""Decorator to register a data reader to the framework
# Args
*names: Tuple(str). List of aliases for this data reader
# Raises
ValueError: If a given alias is not valid
"""
def decorator(cls):
for name in names:
if not isinstance(name, str):
raise ValueError(
"Invalid type of name '{}' for register_data_generator decorator".format(name))
FRAMEWORK_DATA_GENERATORS[name] = cls
return cls
return decorator
def register_model(*names):
"""Decorator to register a custom model to the framework
# Args
*names: Tuple(str). List of aliases for this model
# Raises
ValueError: If a given alias is not valid
"""
def decorator(cls):
for name in names:
if not isinstance(name, str):
raise ValueError(
"Invalid type of name '{}' for register_model decorator".format(name))
FRAMEWORK_MODELS[name] = cls
return cls
return decorator
def register_callback(*names):
"""Decorator to register a callback to the framework
# Args
*names: Tuple(str). List of aliases for this callback
# Raises
ValueError: If a given alias is not valid
"""
def decorator(cls):
for name in names:
if not issubclass(cls, Callback):
raise ValueError(
"Invalid type of name '{}' for register_callback decorator".format(name))
FRAMEWORK_CALLBACKS[name] = cls
return cls
return decorator
def register_evaluator(*names):
"""Decorator to register an evaluator to the framework
# Args
*names: Tuple(str). List of aliases for this evaluator
# Raises
ValueError: If a given alias is not valid
"""
def decorator(cls):
for name in names:
if not issubclass(cls, Evaluator):
raise ValueError(
"Invalid type of name '{}' for register_evaluator decorator".format(name))
FRAMEWORK_EVALUATORS[name] = cls
return cls
return decorator
def set_active_experiment(exp):
"""Sets active experiment to global state and
allows all modules to access it
# Arguments
exp: dlf.core.Experiment. Active experiment
"""
global FRAMEWORK_ACTIVE_EXPERIMENT
FRAMEWORK_ACTIVE_EXPERIMENT = exp
def get_active_experiment():
"""Gets the current, active, experiment
# Returns
dlf.core.Experiment. Active experiment
"""
global FRAMEWORK_ACTIVE_EXPERIMENT
return FRAMEWORK_ACTIVE_EXPERIMENT
| 28.774194 | 110 | 0.648302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,055 | 0.48927 |
a839c728217667721506b5ef85d6e58de7793368 | 1,017 | py | Python | project/dev/tuning.py | maple1eaf/data_mining_inf553 | fba0c19f46aac5882e103dbe53155e7128a9290f | [
"MIT"
] | 1 | 2021-05-04T05:17:57.000Z | 2021-05-04T05:17:57.000Z | project/dev/tuning.py | maple1eaf/data_mining_inf553 | fba0c19f46aac5882e103dbe53155e7128a9290f | [
"MIT"
] | null | null | null | project/dev/tuning.py | maple1eaf/data_mining_inf553 | fba0c19f46aac5882e103dbe53155e7128a9290f | [
"MIT"
] | 1 | 2021-09-24T08:17:23.000Z | 2021-09-24T08:17:23.000Z | import os
import json
import rmse
TUNING_FILE = "/Users/markduan/duan/USC_course/USC_APDS/INF553/project/predict/tuning.json"
CORATED_LIMIT = [3, 5, 7, 10]
LONELY_THRESHOLD = [2, 3, 5, 7]
N_NEIGHBORS_ITEMBASED = [5, 7, 10, 12]
WEIGHT = [0.2, 0.4, 0.6, 0.8]
def writeRes(c, l, n, w, res):
with open(TUNING_FILE, 'a', encoding='utf-8') as fp:
x = {
'c': c,
'l': l,
'n': n,
'w': w,
'rmse': res
}
x_j = json.dumps(x)
fp.write(x_j)
fp.write('\n')
if os.path.exists(TUNING_FILE):
os.remove(TUNING_FILE)
for c in CORATED_LIMIT:
for l in LONELY_THRESHOLD:
train_comm = "spark-submit train.py %d %d %d" % (c, l, l)
os.system(train_comm)
for n in N_NEIGHBORS_ITEMBASED:
for w in WEIGHT:
test_comm = "spark-submit predict.py %d %f" % (n, w)
os.system(test_comm)
res = rmse.getRmse()
writeRes(c, l, n, w, res)
| 25.425 | 91 | 0.52999 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.169125 |
a83aab63534247c9aaa5317711686c9c7891a302 | 2,736 | py | Python | tests/test_weightings.py | matchup-ir/whooshy | 3e8730f4cbb559fd59971c5a2e6d01924d3968c0 | [
"BSD-2-Clause-FreeBSD"
] | 319 | 2016-09-22T15:54:48.000Z | 2022-03-18T02:36:58.000Z | tests/test_weightings.py | matchup-ir/whooshy | 3e8730f4cbb559fd59971c5a2e6d01924d3968c0 | [
"BSD-2-Clause-FreeBSD"
] | 85 | 2018-12-11T06:53:06.000Z | 2021-07-30T20:39:12.000Z | tests/test_weightings.py | matchup-ir/whooshy | 3e8730f4cbb559fd59971c5a2e6d01924d3968c0 | [
"BSD-2-Clause-FreeBSD"
] | 62 | 2015-07-03T12:34:32.000Z | 2022-02-08T08:21:12.000Z | from __future__ import with_statement
import inspect
from random import choice, randint
import sys
from whoosh import fields, query, scoring
from whoosh.compat import u, xrange, permutations
from whoosh.filedb.filestore import RamStorage
def _weighting_classes(ignore):
# Get all the subclasses of Weighting in whoosh.scoring
return [c for _, c in inspect.getmembers(scoring, inspect.isclass)
if scoring.Weighting in c.__bases__ and c not in ignore]
def test_all():
domain = [u("alfa"), u("bravo"), u("charlie"), u("delta"), u("echo"),
u("foxtrot")]
schema = fields.Schema(text=fields.TEXT)
storage = RamStorage()
ix = storage.create_index(schema)
w = ix.writer()
for _ in xrange(100):
w.add_document(text=u(" ").join(choice(domain)
for _ in xrange(randint(10, 20))))
w.commit()
# List ABCs that should not be tested
abcs = ()
# provide initializer arguments for any weighting classes that require them
init_args = {"MultiWeighting": ([scoring.BM25F()],
{"text": scoring.Frequency()}),
"ReverseWeighting": ([scoring.BM25F()], {})}
for wclass in _weighting_classes(abcs):
try:
if wclass.__name__ in init_args:
args, kwargs = init_args[wclass.__name__]
weighting = wclass(*args, **kwargs)
else:
weighting = wclass()
except TypeError:
e = sys.exc_info()[1]
raise TypeError("Error instantiating %r: %s" % (wclass, e))
with ix.searcher(weighting=weighting) as s:
try:
for word in domain:
s.search(query.Term("text", word))
except Exception:
e = sys.exc_info()[1]
e.msg = "Error searching with %r: %s" % (wclass, e)
raise
def test_compatibility():
from whoosh.scoring import Weighting
# This is the old way of doing a custom weighting model, check that
# it's still supported...
class LegacyWeighting(Weighting):
use_final = True
def score(self, searcher, fieldname, text, docnum, weight):
return weight + 0.5
def final(self, searcher, docnum, score):
return score * 1.5
schema = fields.Schema(text=fields.TEXT)
ix = RamStorage().create_index(schema)
w = ix.writer()
domain = "alfa bravo charlie delta".split()
for ls in permutations(domain, 3):
w.add_document(text=u(" ").join(ls))
w.commit()
s = ix.searcher(weighting=LegacyWeighting())
r = s.search(query.Term("text", u("bravo")))
assert r.score(0) == 2.25
| 33.365854 | 79 | 0.593567 | 241 | 0.088085 | 0 | 0 | 0 | 0 | 0 | 0 | 451 | 0.164839 |
a83b6d55a2a7a1cbce269d3312e7f896d2b471a1 | 1,133 | py | Python | Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/10_CubeClass/cubeProperties.py | jeffvswanson/CodingPractice | 9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb | [
"MIT"
] | null | null | null | Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/10_CubeClass/cubeProperties.py | jeffvswanson/CodingPractice | 9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb | [
"MIT"
] | null | null | null | Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/10_CubeClass/cubeProperties.py | jeffvswanson/CodingPractice | 9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb | [
"MIT"
] | null | null | null | # cubeProperties.py
# A program to calculate the volume and surface area of a cube.
"""Same as Chapter 10 Programming Exercise 9, but for a cube. The constructor
should accept the length of a side as a parameter."""
from cubeClass import Cube
def main():
edge = 0
while edge <= 0:
try:
edge = float(input("Please enter the edge length of the cube: "))
if radius <= 0:
print("You have to enter a number greater than zero.")
except(SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a number greater than zero.")
continue
cube = Cube(edge)
volume = cube.volume()
surfaceArea = cube.surfaceArea()
if volume == 1:
print("\nThe volume of the cube is {0:.2f} unit.".format(volume))
else:
print("\nThe volume of the cube is: {0:.2f} units.".format(volume))
if surfaceArea == 1:
print("\nThe surface area of the cube is: {0:.2f} unit."
.format(surfaceArea))
else:
print("\nThe surface area of the cube is: {0:.2f} units."
.format(surfaceArea))
main()
| 29.815789 | 77 | 0.613416 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 540 | 0.476611 |
a83cd6c3e2c44f9770befd49c93d195219322f27 | 5,768 | py | Python | pharmrep/product/models.py | boyombo/pharmrep | 2293ceb235dec949c58fa40d1ee43fce172e0ceb | [
"MIT"
] | null | null | null | pharmrep/product/models.py | boyombo/pharmrep | 2293ceb235dec949c58fa40d1ee43fce172e0ceb | [
"MIT"
] | null | null | null | pharmrep/product/models.py | boyombo/pharmrep | 2293ceb235dec949c58fa40d1ee43fce172e0ceb | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from datetime import datetime, date
from django.db import models
from django.contrib.auth.models import User
from django.db.models.aggregates import Sum
AGE_LIMIT = 7 # 7 days age limit
class PriceTemplate(models.Model):
name = models.CharField(max_length=200)
def __unicode__(self):
return self.name
class Product(models.Model):
name = models.CharField(max_length=200)
rate = models.IntegerField('Default price of product')
template = models.ManyToManyField(
PriceTemplate,
through='ProductPriceTemplate',
through_fields=('product', 'template'))
def __unicode__(self):
return self.name
@property
def quantity(self):
return self.product_sales.aggregate(
Sum('quantity'))['quantity__sum'] or 0
@property
def amount(self):
return self.product_sales.aggregate(
Sum('amount'))['amount__sum'] or 0
class ProductPriceTemplate(models.Model):
product = models.ForeignKey(Product, on_delete=models.CASCADE)
template = models.ForeignKey(PriceTemplate, on_delete=models.CASCADE)
price = models.IntegerField()
def __unicode__(self):
return "{0} - {1}".format(self.product, self.template)
class BatchSize(models.Model):
name = models.CharField(max_length=100)
quantity = models.PositiveIntegerField()
def __unicode__(self):
return self.name
class Rep(models.Model):
name = models.CharField(max_length=200)
user = models.OneToOneField(User, on_delete=models.CASCADE)
supervisor = models.ForeignKey('Rep', null=True, blank=True)
last_activity = models.DateTimeField(blank=True, null=True)
def __unicode__(self):
return self.name
@property
def is_old(self):
if not self.last_activity:
return True
if (date.today() - self.last_activity.date()) <= AGE_LIMIT:
return False
return True
class Customer(models.Model):
HOSPITAL = 0
INSTITUTION = 1
PHARMACY = 2
WHOLESELLER = 3
HEALTH_PERSONNEL = 4
CUSTOMER_TYPE = (
(0, 'Hospital'),
(1, 'Institution'),
(2, 'Pharmacy'),
(3, 'Wholeseller'),
(4, 'Health Personnel'))
name = models.CharField(max_length=200)
address = models.TextField(blank=True)
contact_person = models.CharField(max_length=200, blank=True)
phone1 = models.CharField(max_length=20, blank=True)
email = models.EmailField(blank=True, null=True)
customer_type = models.PositiveIntegerField(choices=CUSTOMER_TYPE)
price_template = models.ForeignKey(PriceTemplate, null=True, blank=True)
def __unicode__(self):
return self.name
@property
def balance(self):
sales = Sale.objects.filter(invoice__customer=self).aggregate(
Sum('amount'))['amount__sum'] or 0
#sales = self.customer_sales.aggregate(
# Sum('amount'))['amount__sum'] or 0
paymt = self.customer_payments.aggregate(
Sum('amount'))['amount__sum'] or 0
return sales - paymt
class Invoice(models.Model):
ACTUAL_SALES = 0
SOR = 1
SAMPLES = 2
INVOICE_TYPES = ((0, 'Actual Sales'), (1, 'SOR'), (2, 'Samples'))
rep = models.ForeignKey(Rep, related_name='rep_invoices')
customer = models.ForeignKey(Customer, related_name='customer_invoices')
invoice_no = models.CharField(max_length=200, blank=True)
invoice_date = models.DateField(blank=True, null=True)
sales_type = models.PositiveIntegerField(choices=INVOICE_TYPES)
recorded_date = models.DateTimeField(default=datetime.now)
def __unicode__(self):
return unicode(self.invoice_no)
@property
def amount(self):
return sum([sale.amount for sale in self.invoice_sales.all()])
class Sale(models.Model):
invoice = models.ForeignKey(
Invoice, related_name='invoice_sales', null=True)
product = models.ForeignKey(Product, related_name='product_sales')
batch_size = models.ForeignKey(BatchSize, null=True)
quantity = models.PositiveIntegerField()
amount = models.IntegerField()
recorded_date = models.DateTimeField(default=datetime.now)
def __unicode__(self):
return unicode(self.invoice)
@property
def rate(self):
templ = self.invoice.customer.price_template
if not templ:
price = self.product.rate
else:
try:
prod_price_templ = ProductPriceTemplate.objects.get(
product=self.product, template=templ)
except ProductPriceTemplate.DoesNotExist:
price = self.product.rate
else:
price = prod_price_templ.price
return price
class Payment(models.Model):
EPAYMENT = 0
CHEQUE = 1
TELLER = 2
MODE_OF_PAYMENT = ((0, 'E-Payment'), (1, 'Cheque'), (2, 'Teller'))
rep = models.ForeignKey(Rep, related_name='rep_payments')
customer = models.ForeignKey(Customer, related_name='customer_payments')
amount = models.PositiveIntegerField()
receipt_no = models.CharField(max_length=50, blank=True)
payment_date = models.DateField()
receipt_date = models.DateField()
recorded_date = models.DateTimeField(default=datetime.now)
balance = models.IntegerField()
bank_of_payment = models.CharField(max_length=200, blank=True)
mode_of_payment = models.PositiveIntegerField(choices=MODE_OF_PAYMENT)
teller_number = models.CharField(max_length=50, blank=True)
teller_date = models.DateField(blank=True, null=True)
cheque_date = models.DateField(blank=True, null=True)
remarks = models.TextField(blank=True)
def __unicode__(self):
return unicode(self.customer)
| 32.044444 | 76 | 0.677358 | 5,512 | 0.955617 | 0 | 0 | 1,411 | 0.244626 | 0 | 0 | 482 | 0.083564 |
a83d95d0bdb34524a4d9657cff33c959ab96b482 | 19,189 | py | Python | src/apscheduler/datastores/async_/sqlalchemy.py | spaceack/apscheduler | ce5262c05a663677fd74a43c7a315bd5e3def902 | [
"MIT"
] | null | null | null | src/apscheduler/datastores/async_/sqlalchemy.py | spaceack/apscheduler | ce5262c05a663677fd74a43c7a315bd5e3def902 | [
"MIT"
] | null | null | null | src/apscheduler/datastores/async_/sqlalchemy.py | spaceack/apscheduler | ce5262c05a663677fd74a43c7a315bd5e3def902 | [
"MIT"
] | null | null | null | from __future__ import annotations
import json
import logging
from contextlib import AsyncExitStack, closing
from datetime import datetime, timedelta, timezone
from json import JSONDecodeError
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Type, Union
from uuid import UUID
import sniffio
from anyio import TASK_STATUS_IGNORED, create_task_group, sleep
from attr import asdict
from sqlalchemy import (
Column, DateTime, Integer, LargeBinary, MetaData, Table, Unicode, and_, bindparam, func, or_,
select)
from sqlalchemy.engine import URL
from sqlalchemy.exc import CompileError, IntegrityError
from sqlalchemy.ext.asyncio import AsyncConnection, create_async_engine
from sqlalchemy.ext.asyncio.engine import AsyncConnectable
from sqlalchemy.sql.ddl import DropTable
from ... import events as events_module
from ...abc import AsyncDataStore, Job, Schedule, Serializer
from ...events import (
AsyncEventHub, DataStoreEvent, Event, JobAdded, JobDeserializationFailed, ScheduleAdded,
ScheduleDeserializationFailed, ScheduleRemoved, ScheduleUpdated, SubscriptionToken)
from ...exceptions import ConflictingIdError, SerializationError
from ...policies import ConflictPolicy
from ...serializers.pickle import PickleSerializer
from ...util import reentrant
logger = logging.getLogger(__name__)
def default_json_handler(obj: Any) -> Any:
if isinstance(obj, datetime):
return obj.timestamp()
elif isinstance(obj, UUID):
return obj.hex
elif isinstance(obj, frozenset):
return list(obj)
raise TypeError(f'Cannot JSON encode type {type(obj)}')
def json_object_hook(obj: Dict[str, Any]) -> Any:
for key, value in obj.items():
if key == 'timestamp':
obj[key] = datetime.fromtimestamp(value, timezone.utc)
elif key == 'job_id':
obj[key] = UUID(value)
elif key == 'tags':
obj[key] = frozenset(value)
return obj
@reentrant
class SQLAlchemyDataStore(AsyncDataStore):
_metadata = MetaData()
t_metadata = Table(
'metadata',
_metadata,
Column('schema_version', Integer, nullable=False)
)
t_schedules = Table(
'schedules',
_metadata,
Column('id', Unicode, primary_key=True),
Column('task_id', Unicode, nullable=False),
Column('serialized_data', LargeBinary, nullable=False),
Column('next_fire_time', DateTime(timezone=True), index=True),
Column('acquired_by', Unicode),
Column('acquired_until', DateTime(timezone=True))
)
t_jobs = Table(
'jobs',
_metadata,
Column('id', Unicode(32), primary_key=True),
Column('task_id', Unicode, nullable=False, index=True),
Column('serialized_data', LargeBinary, nullable=False),
Column('created_at', DateTime(timezone=True), nullable=False),
Column('acquired_by', Unicode),
Column('acquired_until', DateTime(timezone=True))
)
def __init__(self, bind: AsyncConnectable, *, schema: Optional[str] = None,
serializer: Optional[Serializer] = None,
lock_expiration_delay: float = 30, max_poll_time: Optional[float] = 1,
max_idle_time: float = 60, start_from_scratch: bool = False,
notify_channel: Optional[str] = 'apscheduler'):
self.bind = bind
self.schema = schema
self.serializer = serializer or PickleSerializer()
self.lock_expiration_delay = lock_expiration_delay
self.max_poll_time = max_poll_time
self.max_idle_time = max_idle_time
self.start_from_scratch = start_from_scratch
self._logger = logging.getLogger(__name__)
self._exit_stack = AsyncExitStack()
self._events = AsyncEventHub()
# Find out if the dialect supports RETURNING
statement = self.t_jobs.update().returning(self.t_schedules.c.id)
try:
statement.compile(bind=self.bind)
except CompileError:
self._supports_update_returning = False
else:
self._supports_update_returning = True
self.notify_channel = notify_channel
if notify_channel:
if self.bind.dialect.name != 'postgresql' or self.bind.dialect.driver != 'asyncpg':
self.notify_channel = None
@classmethod
def from_url(cls, url: Union[str, URL], **options) -> 'SQLAlchemyDataStore':
engine = create_async_engine(url, future=True)
return cls(engine, **options)
async def __aenter__(self):
asynclib = sniffio.current_async_library() or '(unknown)'
if asynclib != 'asyncio':
raise RuntimeError(f'This data store requires asyncio; currently running: {asynclib}')
# Verify that the schema is in place
async with self.bind.begin() as conn:
if self.start_from_scratch:
for table in self._metadata.sorted_tables:
await conn.execute(DropTable(table, if_exists=True))
await conn.run_sync(self._metadata.create_all)
query = select(self.t_metadata.c.schema_version)
result = await conn.execute(query)
version = result.scalar()
if version is None:
await conn.execute(self.t_metadata.insert(values={'schema_version': 1}))
elif version > 1:
raise RuntimeError(f'Unexpected schema version ({version}); '
f'only version 1 is supported by this version of APScheduler')
await self._exit_stack.enter_async_context(self._events)
if self.notify_channel:
task_group = create_task_group()
await self._exit_stack.enter_async_context(task_group)
await task_group.start(self._listen_notifications)
self._exit_stack.callback(task_group.cancel_scope.cancel)
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self._exit_stack.__aexit__(exc_type, exc_val, exc_tb)
async def _publish(self, conn: AsyncConnection, event: DataStoreEvent) -> None:
if self.notify_channel:
event_type = event.__class__.__name__
event_data = json.dumps(asdict(event), ensure_ascii=False,
default=default_json_handler)
notification = event_type + ' ' + event_data
if len(notification) < 8000:
await conn.execute(func.pg_notify(self.notify_channel, notification))
return
self._logger.warning(
'Could not send %s notification because it is too long (%d >= 8000)',
event_type, len(notification))
self._events.publish(event)
async def _listen_notifications(self, *, task_status=TASK_STATUS_IGNORED) -> None:
def callback(connection, pid, channel: str, payload: str) -> None:
self._logger.debug('Received notification on channel %s: %s', channel, payload)
event_type, _, json_data = payload.partition(' ')
try:
event_data = json.loads(json_data, object_hook=json_object_hook)
except JSONDecodeError:
self._logger.exception('Failed decoding JSON payload of notification: %s', payload)
return
event_class = getattr(events_module, event_type)
event = event_class(**event_data)
self._events.publish(event)
task_started_sent = False
while True:
with closing(await self.bind.raw_connection()) as conn:
asyncpg_conn = conn.connection._connection
await asyncpg_conn.add_listener(self.notify_channel, callback)
if not task_started_sent:
task_status.started()
task_started_sent = True
try:
while True:
await sleep(self.max_idle_time)
await asyncpg_conn.execute('SELECT 1')
finally:
await asyncpg_conn.remove_listener(self.notify_channel, callback)
def _deserialize_jobs(self, serialized_jobs: Iterable[Tuple[UUID, bytes]]) -> List[Job]:
jobs: List[Job] = []
for job_id, serialized_data in serialized_jobs:
try:
jobs.append(self.serializer.deserialize(serialized_data))
except SerializationError as exc:
self._events.publish(JobDeserializationFailed(job_id=job_id, exception=exc))
return jobs
def _deserialize_schedules(
self, serialized_schedules: Iterable[Tuple[str, bytes]]) -> List[Schedule]:
jobs: List[Schedule] = []
for schedule_id, serialized_data in serialized_schedules:
try:
jobs.append(self.serializer.deserialize(serialized_data))
except SerializationError as exc:
self._events.publish(
ScheduleDeserializationFailed(schedule_id=schedule_id, exception=exc))
return jobs
def subscribe(self, callback: Callable[[Event], Any],
event_types: Optional[Iterable[Type[Event]]] = None) -> SubscriptionToken:
return self._events.subscribe(callback, event_types)
def unsubscribe(self, token: SubscriptionToken) -> None:
self._events.unsubscribe(token)
async def clear(self) -> None:
async with self.bind.begin() as conn:
await conn.execute(self.t_schedules.delete())
await conn.execute(self.t_jobs.delete())
async def add_schedule(self, schedule: Schedule, conflict_policy: ConflictPolicy) -> None:
serialized_data = self.serializer.serialize(schedule)
statement = self.t_schedules.insert().\
values(id=schedule.id, task_id=schedule.task_id, serialized_data=serialized_data,
next_fire_time=schedule.next_fire_time)
try:
async with self.bind.begin() as conn:
await conn.execute(statement)
event = ScheduleAdded(schedule_id=schedule.id,
next_fire_time=schedule.next_fire_time)
await self._publish(conn, event)
except IntegrityError:
if conflict_policy is ConflictPolicy.exception:
raise ConflictingIdError(schedule.id) from None
elif conflict_policy is ConflictPolicy.replace:
statement = self.t_schedules.update().\
where(self.t_schedules.c.id == schedule.id).\
values(serialized_data=serialized_data,
next_fire_time=schedule.next_fire_time)
async with self.bind.begin() as conn:
await conn.execute(statement)
event = ScheduleUpdated(schedule_id=schedule.id,
next_fire_time=schedule.next_fire_time)
await self._publish(conn, event)
async def remove_schedules(self, ids: Iterable[str]) -> None:
async with self.bind.begin() as conn:
now = datetime.now(timezone.utc)
conditions = and_(self.t_schedules.c.id.in_(ids),
or_(self.t_schedules.c.acquired_until.is_(None),
self.t_schedules.c.acquired_until < now))
statement = self.t_schedules.delete(conditions)
if self._supports_update_returning:
statement = statement.returning(self.t_schedules.c.id)
removed_ids = [row[0] for row in await conn.execute(statement)]
else:
await conn.execute(statement)
for schedule_id in removed_ids:
await self._publish(conn, ScheduleRemoved(schedule_id=schedule_id))
async def get_schedules(self, ids: Optional[Set[str]] = None) -> List[Schedule]:
query = select([self.t_schedules.c.id, self.t_schedules.c.serialized_data]).\
order_by(self.t_schedules.c.id)
if ids:
query = query.where(self.t_schedules.c.id.in_(ids))
async with self.bind.begin() as conn:
result = await conn.execute(query)
return self._deserialize_schedules(result)
async def acquire_schedules(self, scheduler_id: str, limit: int) -> List[Schedule]:
async with self.bind.begin() as conn:
now = datetime.now(timezone.utc)
acquired_until = datetime.fromtimestamp(
now.timestamp() + self.lock_expiration_delay, timezone.utc)
schedules_cte = select(self.t_schedules.c.id).\
where(and_(self.t_schedules.c.next_fire_time.isnot(None),
self.t_schedules.c.next_fire_time <= now,
or_(self.t_schedules.c.acquired_until.is_(None),
self.t_schedules.c.acquired_until < now))).\
limit(limit).cte()
subselect = select([schedules_cte.c.id])
statement = self.t_schedules.update().where(self.t_schedules.c.id.in_(subselect)).\
values(acquired_by=scheduler_id, acquired_until=acquired_until)
if self._supports_update_returning:
statement = statement.returning(self.t_schedules.c.id,
self.t_schedules.c.serialized_data)
result = await conn.execute(statement)
else:
await conn.execute(statement)
statement = select([self.t_schedules.c.id, self.t_schedules.c.serialized_data]).\
where(and_(self.t_schedules.c.acquired_by == scheduler_id))
result = await conn.execute(statement)
return self._deserialize_schedules(result)
async def release_schedules(self, scheduler_id: str, schedules: List[Schedule]) -> None:
update_events: List[ScheduleUpdated] = []
finished_schedule_ids: List[str] = []
async with self.bind.begin() as conn:
update_args: List[Dict[str, Any]] = []
for schedule in schedules:
if schedule.next_fire_time is not None:
try:
serialized_data = self.serializer.serialize(schedule)
except SerializationError:
self._logger.exception('Error serializing schedule %r – '
'removing from data store', schedule.id)
finished_schedule_ids.append(schedule.id)
continue
update_args.append({
'p_id': schedule.id,
'p_serialized_data': serialized_data,
'p_next_fire_time': schedule.next_fire_time
})
else:
finished_schedule_ids.append(schedule.id)
# Update schedules that have a next fire time
if update_args:
p_id = bindparam('p_id')
p_serialized = bindparam('p_serialized_data')
p_next_fire_time = bindparam('p_next_fire_time')
statement = self.t_schedules.update().\
where(and_(self.t_schedules.c.id == p_id,
self.t_schedules.c.acquired_by == scheduler_id)).\
values(serialized_data=p_serialized, next_fire_time=p_next_fire_time)
next_fire_times = {arg['p_id']: arg['p_next_fire_time'] for arg in update_args}
if self._supports_update_returning:
statement = statement.returning(self.t_schedules.c.id)
updated_ids = [row[0] for row in await conn.execute(statement, update_args)]
for schedule_id in updated_ids:
event = ScheduleUpdated(schedule_id=schedule_id,
next_fire_time=next_fire_times[schedule_id])
update_events.append(event)
# Remove schedules that have no next fire time or failed to serialize
if finished_schedule_ids:
statement = self.t_schedules.delete().\
where(and_(self.t_schedules.c.id.in_(finished_schedule_ids),
self.t_schedules.c.acquired_by == scheduler_id))
await conn.execute(statement)
for event in update_events:
await self._publish(conn, event)
for schedule_id in finished_schedule_ids:
await self._publish(conn, ScheduleRemoved(schedule_id=schedule_id))
async def add_job(self, job: Job) -> None:
now = datetime.now(timezone.utc)
serialized_data = self.serializer.serialize(job)
statement = self.t_jobs.insert().values(id=job.id.hex, task_id=job.task_id,
created_at=now, serialized_data=serialized_data)
async with self.bind.begin() as conn:
await conn.execute(statement)
event = JobAdded(job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id,
tags=job.tags)
await self._publish(conn, event)
async def get_jobs(self, ids: Optional[Iterable[UUID]] = None) -> List[Job]:
query = select([self.t_jobs.c.id, self.t_jobs.c.serialized_data]).\
order_by(self.t_jobs.c.id)
if ids:
job_ids = [job_id.hex for job_id in ids]
query = query.where(self.t_jobs.c.id.in_(job_ids))
async with self.bind.begin() as conn:
result = await conn.execute(query)
return self._deserialize_jobs(result)
async def acquire_jobs(self, worker_id: str, limit: Optional[int] = None) -> List[Job]:
async with self.bind.begin() as conn:
now = datetime.now(timezone.utc)
acquired_until = now + timedelta(seconds=self.lock_expiration_delay)
query = select([self.t_jobs.c.id, self.t_jobs.c.serialized_data]).\
where(or_(self.t_jobs.c.acquired_until.is_(None),
self.t_jobs.c.acquired_until < now)).\
order_by(self.t_jobs.c.created_at).\
limit(limit)
serialized_jobs: Dict[str, bytes] = {row[0]: row[1]
for row in await conn.execute(query)}
if serialized_jobs:
query = self.t_jobs.update().\
values(acquired_by=worker_id, acquired_until=acquired_until).\
where(self.t_jobs.c.id.in_(serialized_jobs))
await conn.execute(query)
return self._deserialize_jobs(serialized_jobs.items())
async def release_jobs(self, worker_id: str, jobs: List[Job]) -> None:
job_ids = [job.id.hex for job in jobs]
statement = self.t_jobs.delete().\
where(and_(self.t_jobs.c.acquired_by == worker_id, self.t_jobs.c.id.in_(job_ids)))
async with self.bind.begin() as conn:
await conn.execute(statement)
| 45.471564 | 99 | 0.614675 | 17,218 | 0.897191 | 0 | 0 | 17,229 | 0.897765 | 13,316 | 0.693867 | 1,053 | 0.054869 |
a83deef93d0c43dab99c0c6171aa58bee51fc329 | 185 | py | Python | financeiro/views.py | Antonio-Neves/Gestao-Escolar | a97052beb571a32619d4e6b6f5e7c3aae3bc8e9b | [
"MIT"
] | 7 | 2021-05-21T00:23:40.000Z | 2021-12-09T12:35:00.000Z | financeiro/views.py | Antonio-Neves/Gestao-Escolar | a97052beb571a32619d4e6b6f5e7c3aae3bc8e9b | [
"MIT"
] | null | null | null | financeiro/views.py | Antonio-Neves/Gestao-Escolar | a97052beb571a32619d4e6b6f5e7c3aae3bc8e9b | [
"MIT"
] | 7 | 2021-08-03T22:28:36.000Z | 2022-03-13T20:08:40.000Z | from django.shortcuts import render
from django.views.generic.base import TemplateView
class IndexFinanceiroView(TemplateView):
template_name = 'financeiro/index-financeiro.html'
| 18.5 | 51 | 0.827027 | 92 | 0.497297 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.183784 |
a83e29d363c3ca50085e97a39444917b28289d0b | 26 | py | Python | __init__.py | fleximus/pelican-fancyindex | 305a953ed42c3b9f6c43dbd2d20751ac4f11deaf | [
"BSD-2-Clause"
] | null | null | null | __init__.py | fleximus/pelican-fancyindex | 305a953ed42c3b9f6c43dbd2d20751ac4f11deaf | [
"BSD-2-Clause"
] | null | null | null | __init__.py | fleximus/pelican-fancyindex | 305a953ed42c3b9f6c43dbd2d20751ac4f11deaf | [
"BSD-2-Clause"
] | null | null | null | from .fancyindex import *
| 13 | 25 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a83e32ca63ea15b59111ab7fa07808949fb07688 | 11,444 | py | Python | SLpackage/private/pacbio/pythonpkgs/pbreports/lib/python2.7/site-packages/pbreports/report/structural_variants.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | 5 | 2022-02-20T07:10:02.000Z | 2022-03-18T17:47:53.000Z | SLpackage/private/pacbio/pythonpkgs/pbreports/lib/python2.7/site-packages/pbreports/report/structural_variants.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | null | null | null | SLpackage/private/pacbio/pythonpkgs/pbreports/lib/python2.7/site-packages/pbreports/report/structural_variants.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | null | null | null |
"""
Structural Variants Report
"""
import os
import os.path as op
import logging
import sys
import json
import itertools
import collections
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.ticker as ticker
from matplotlib import rcParams
from matplotlib.transforms import offset_copy
from pbcommand.models.report import Report, Table, Column, PlotGroup, Plot
from pbcommand.models import FileTypes, get_pbparser
from pbcommand.cli import pbparser_runner
from pbcommand.utils import setup_log
from pbreports.io.specs import *
from pbreports.plot.helper import (get_fig_axes_lpr,
save_figure_with_thumbnail, DEFAULT_DPI)
__version__ = '0.1.0'
class Constants(object):
    """Identifiers and layout constants for the structural variants report.

    Everything here is interface: tool-contract IDs, report/table/column IDs
    consumed by the report spec, and the histogram binning/tick layout used
    by the plotting code. Do not rename or re-value without updating the
    corresponding spec.
    """
    # Tool contract / driver identifiers
    TOOL_ID = "pbreports.tasks.structural_variants_report"
    DRIVER_EXE = ("python -m pbreports.report.structural_variants "
                  "--resolved-tool-contract ")
    R_ID = "structural_variants"
    # Per-sample summary table: JSON input key, table ID, and column IDs
    SAMPLE_KEY = "CountBySample"
    T_SAMPLE = "sample_table"
    C_SAMPLE = "sample"
    C_INS = "insertions"
    C_DEL = "deletions"
    C_HOM = "homozygous"
    C_HET = "heterozygous"
    C_TOTAL = "total"
    # By-annotation summary table: JSON input key, table ID, column IDs,
    # and the fixed display order of annotation row labels
    ANNO_KEY = "CountByAnnotation"
    T_ANNO = "anno_table"
    C_ANNO = "annotation"
    R_TANDEM = "Tandem Repeat"
    R_ALU = "Alu"
    R_L1 = "L1"
    R_SVA = "SVA"
    R_UNANNOTATED = "Unannotated"
    R_TOTAL = "Total"
    # Plot group / plot IDs and panel titles (C_LONG uses mathtext for >=)
    PG_SV = "sv_plot_group"
    P_SV = "sv_plot"
    C_SHORT = 'Variants <1 kb'
    C_LONG = 'Variants ' + r'$\geq$' + '1 kb'
    # Short-variant histogram (< 1 kb): 50 bp bins, ticks every 50 bp,
    # labels only on the 100 bp ticks. range() returns a list here (Python 2),
    # so the [:-1] slice drops the final 1050 tick.
    SV_LEN_CUTOFF_S = 1000
    BIN_WIDTH_S = 50
    X_TICKS_S = range(0, SV_LEN_CUTOFF_S + 100, 50)[:-1]
    X_LIMS_S = [X_TICKS_S[0], X_TICKS_S[-1]]
    X_LABELS_S = list(itertools.chain(
        *[[str(x), ""] for x in xrange(0, 1000, 100)])) + ["1,000"]
    X_LABEL_S = "variant length (bp)"
    # Integer division under Python 2: 1000 / 50 = 20 bins
    N_BINS_S = X_LIMS_S[1] / BIN_WIDTH_S
    # Long-variant histogram (>= 1 kb): 500 bp bins up to 10 kb, with a
    # single overflow bin drawn at x = 11250 for lengths above the cutoff
    OVERFLOW_BIN_X = 11250
    SV_LEN_CUTOFF_L = 10000
    BIN_WIDTH_L = 500
    X_TICKS_L = range(0, SV_LEN_CUTOFF_L + 500,
                      500) + [OVERFLOW_BIN_X]
    X_LIMS_L = [0, 12000]
    X_LABELS_L = list(itertools.chain(
        *[[str(x), ""] for x in xrange(0, 11)]))[:-1] + [">10"]
    X_LABEL_L = "variant length (kb)"
    # Integer division under Python 2: 12000 / 500 = 24 bins
    N_BINS_L = X_LIMS_L[1] / BIN_WIDTH_L
# Module-level logger and the loaded report spec (titles/labels keyed by
# report ID; load_spec is provided by the pbreports.io.specs star import).
log = logging.getLogger(__name__)
spec = load_spec(Constants.R_ID)
def _comma_formatter(x, pos=0):
return ("{0:,d}".format(int(x)))
def _my_combine(n, t):
    """Render two counts as the string ``"n (t)"``, each comma-formatted."""
    return "{} ({})".format(_comma_formatter(str(n)), _comma_formatter(str(t)))
def to_sample_table(table_json):
    """Build the per-sample report Table from the ``CountBySample`` JSON section.

    Each input row is reduced to six display columns; count/total pairs at
    indices (1,2), (3,4) and (7,8) are merged into "n (t)" strings.

    Args:
        table_json (dict): parsed JSON containing ``Constants.SAMPLE_KEY``.

    Returns:
        pbcommand Table with the per-sample summary columns.
    """
    col_ids = [Constants.C_SAMPLE, Constants.C_INS, Constants.C_DEL,
               Constants.C_HOM, Constants.C_HET, Constants.C_TOTAL]
    sample_table = table_json[Constants.SAMPLE_KEY]
    rows = []
    if len(sample_table) == 0:
        # No samples: one empty value list per column
        table = [[], [], [], [], [], []]
    else:
        for row in sample_table:
            rows.append([
                row[0],
                _my_combine(row[1], row[2]),
                _my_combine(row[3], row[4]),
                row[5],
                row[6],
                _my_combine(row[7], row[8]),
            ])
        # list() so the transposed table is indexable under Python 3,
        # where zip() returns a lazy iterator
        table = list(zip(*rows))
    columns = []
    for i, col_id in enumerate(col_ids):
        columns.append(Column(col_id, values=table[i]))
    return Table(Constants.T_SAMPLE, columns=columns)
def to_anno_table(table_json):
    """Build the per-annotation report Table from the ``CountByAnnotation``
    JSON section, ordering rows Tandem Repeat / Alu / L1 / SVA /
    Unannotated / Total.

    Args:
        table_json (dict): parsed JSON containing ``Constants.ANNO_KEY``.

    Returns:
        pbcommand Table with annotation / insertions / deletions / total columns.
    """
    col_ids = [Constants.C_ANNO, Constants.C_INS,
               Constants.C_DEL, Constants.C_TOTAL]
    row_ids = [Constants.R_TANDEM, Constants.R_ALU, Constants.R_L1,
               Constants.R_SVA, Constants.R_UNANNOTATED, Constants.R_TOTAL]
    anno_table = table_json[Constants.ANNO_KEY]
    rows = []
    for _id in row_ids:
        for row in anno_table:
            if _id == row[0]:
                r = [row[0]]
                # merge value pairs at indices (1,2), (3,4), (5,6);
                # range() instead of the Python-2-only xrange()
                for i in range(1, 6, 2):
                    r.append(_my_combine(row[i], row[i + 1]))
                rows.append(r)
    # list() so the transposed table is indexable under Python 3
    table = list(zip(*rows))
    columns = []
    for i, col_id in enumerate(col_ids):
        columns.append(Column(col_id, values=table[i]))
    return Table(Constants.T_ANNO, columns=columns)
def process_short_data(data):
    """Return ``(insertions, deletions)`` strictly shorter than the 1 kb cutoff."""
    cutoff = Constants.SV_LEN_CUTOFF_S
    short_ins = [length for length in data.get("Insertion", []) if length < cutoff]
    short_del = [length for length in data.get("Deletion", []) if length < cutoff]
    return short_ins, short_del
def process_long_data(data):
    """Return ``(insertions, deletions)`` at or above the 1 kb cutoff, with
    lengths beyond 10 kb clamped into the single overflow bin position."""
    def _select_and_clamp(lengths):
        kept = [x for x in lengths if x >= Constants.SV_LEN_CUTOFF_S]
        # map everything above 10 kb onto the overflow bin's x position
        return [Constants.OVERFLOW_BIN_X if x > Constants.SV_LEN_CUTOFF_L else x
                for x in kept]
    return (_select_and_clamp(data.get("Insertion", [])),
            _select_and_clamp(data.get("Deletion", [])))
def add_subplot(fig, ax, sample, data, counter, y_max, position):
    """Draw one histogram panel into row *counter* of the subplot grid.

    *position* selects the column: 0 = short-variant (<1 kb) panel,
    1 = long-variant (>=1 kb) panel. *data* is ``[insertions, deletions]``.
    ``y_max`` is a two-element list mutated in place to record the tallest
    y-limit seen per column, so the caller can later equalize row scales.
    """
    insertions = data[0]
    deletions = data[1]
    y_label = get_plot_ylabel(spec, Constants.PG_SV, Constants.P_SV)
    # Column-specific axis configuration pulled from Constants
    if position == 0:
        x_ticks = Constants.X_TICKS_S
        x_lims = Constants.X_LIMS_S
        x_labels = Constants.X_LABELS_S
        n_bins = Constants.N_BINS_S
        x_label = Constants.X_LABEL_S
    if position == 1:
        x_ticks = Constants.X_TICKS_L
        x_lims = Constants.X_LIMS_L
        x_labels = Constants.X_LABELS_L
        n_bins = Constants.N_BINS_L
        x_label = Constants.X_LABEL_L
    ax = ax[counter, position]
    # Only call hist() when there is data; empty panels still get styled below
    if insertions or deletions:
        ax.hist([deletions, insertions], label=["Deletions", "Insertions"], histtype='barstacked',
                color=["#FF7E79", "#A9D18E"], edgecolor="none", bins=n_bins,
                width=0.85 * (x_lims[1] - x_lims[0]) / n_bins, range=[x_lims[0], x_lims[1]])
    ax.set_xlabel(x_label, size=20)
    ax.set_ylabel(y_label, size=20)
    ax.set_ylim(bottom=0)
    ax.set_xlim(left=x_lims[0], right=x_lims[1])
    # Integer-only, comma-formatted y tick labels
    ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
    ax.yaxis.set_major_formatter(ticker.FuncFormatter(_comma_formatter))
    ax.grid(color='#e0e0e0', linewidth=0.9, linestyle='-')
    ax.xaxis.grid(False)
    ax.set_axisbelow(True)
    ax.set_xticks(x_ticks)
    ax.set_xticklabels(x_labels, size=15)
    ax.tick_params(axis='y', labelsize=15)
    # NOTE(review): mutating rcParams here changes tick direction for all
    # subsequent figures process-wide, not just this axes.
    rcParams['xtick.direction'] = 'out'
    rcParams['ytick.direction'] = 'out'
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    # Record this panel's height so rows can later share a common y scale
    y_top = ax.get_ylim()[1]
    if y_top > y_max[position]:
        y_max[position] = y_top
def add_subplots(fig, ax, sample, data, counter, y_max):
    """Draw both panels (short at column 0, long at column 1) for one sample row."""
    for position, splitter in enumerate((process_short_data, process_long_data)):
        ins, dels = splitter(data)
        add_subplot(fig, ax, sample, [ins, dels], counter, y_max, position)
def label_rows(fig, axes, rows):
    """Annotate each subplot row with its sample name along the left margin."""
    pad_pts = 5  # extra horizontal padding, in points
    for axis, row_label in zip(axes[:, 0], rows):
        axis.annotate(row_label, xy=(0, 0.5),
                      xytext=(-axis.yaxis.labelpad - pad_pts, 0),
                      xycoords=axis.yaxis.label, textcoords='offset points',
                      size=25, ha='right', va='center')
    fig.tight_layout()
    fig.subplots_adjust(left=0.15, top=0.95)
def label_columns(fig, axes):
    """Put the short/long variant headings above the two subplot columns."""
    pad_pts = 5  # vertical padding above each column, in points
    headings = [Constants.C_SHORT, Constants.C_LONG]
    for axis, heading in zip(axes[0], headings):
        axis.annotate(heading, xy=(0.5, 1), xytext=(0, pad_pts),
                      xycoords='axes fraction', textcoords='offset points',
                      size=25, ha='center', va='baseline')
def to_plotgroup(plot_json, output_dir):
    """Render the per-sample variant-length histograms and wrap them in a
    PlotGroup, saving the PNG and its thumbnail under *output_dir*.

    Args:
        plot_json (dict): sample name -> per-type variant length lists.
        output_dir (str): directory the PNG/thumbnail are written to.

    Returns:
        pbcommand PlotGroup holding the single combined plot.
    """
    n_samples = len(plot_json)
    if n_samples > 0:
        fig, ax = plt.subplots(n_samples, 2, figsize=(
            15, n_samples * 5), squeeze=False)
        od = collections.OrderedDict(sorted(plot_json.items()))
        counter = 0
        y_max = [0, 0]
        # items() (not the Python-2-only iteritems()) keeps this working on
        # both Python 2 and Python 3
        for sample, data in od.items():
            add_subplots(fig, ax, sample, data, counter, y_max)
            counter += 1
        label_rows(fig, ax, list(od.keys()))
        label_columns(fig, ax)
        # Give every row the same y scale per column, with 10% headroom
        for row in range(0, n_samples):
            ax[row, 0].set_ylim(top=y_max[0] * 1.1)
            ax[row, 1].set_ylim(top=y_max[1] * 1.1)
        p1 = mpatches.Patch(color='#FF7E79', linewidth=0)
        p2 = mpatches.Patch(color='#A9D18E', linewidth=0)
        fig.legend((p1, p2), ("Deletions", "Insertions"),
                   loc="upper left", fontsize=15)
    else:
        # No samples: still emit an (empty) figure so report links stay valid
        fig = plt.figure()
    plot_name = get_plot_title(spec, Constants.PG_SV, Constants.P_SV)
    png_fn = os.path.join(output_dir, "{p}.png".format(p=Constants.P_SV))
    png_base, thumbnail_base = save_figure_with_thumbnail(
        fig, png_fn, dpi=DEFAULT_DPI, bbox_inches='tight')
    plot = Plot(Constants.P_SV, os.path.relpath(png_base, output_dir),
                title=plot_name, caption=plot_name,
                thumbnail=os.path.relpath(thumbnail_base, output_dir))
    return PlotGroup(Constants.PG_SV, plots=[plot])
def to_report(table_json_file, plot_json_file, output_dir):
    """Load the table/plot JSON inputs and assemble the full report object."""
    log.info("Starting {f} v{v}".format(f=os.path.basename(__file__),
                                        v=__version__))
    with open(table_json_file) as table_fh:
        table_json = json.load(table_fh)
    with open(plot_json_file) as plot_fh:
        plot_json = json.load(plot_fh)
    report = Report(
        Constants.R_ID,
        tables=[to_sample_table(table_json), to_anno_table(table_json)],
        plotgroups=[to_plotgroup(plot_json, output_dir)])
    # apply_view() attaches the display spec loaded at module import
    return spec.apply_view(report)
def _args_runner(args):
    """Entry point for plain argparse invocation; returns 0 on success."""
    report_dir = os.path.dirname(args.report)
    report = to_report(args.table_json, args.plot_json, report_dir)
    report.write_json(args.report)
    return 0
def _resolved_tool_contract_runner(rtc):
    """Entry point for resolved-tool-contract invocation; returns 0 on success.

    Input file 0 is the table JSON, input file 1 the plot JSON; output file 0
    is the report JSON.
    """
    report_path = rtc.task.output_files[0]
    report = to_report(rtc.task.input_files[0],
                       rtc.task.input_files[1],
                       os.path.dirname(report_path))
    report.write_json(report_path)
    return 0
def _add_options_to_parser(p):
    """Register the two JSON inputs and the report output on parser *p*.

    The call order fixes the positional input-file indices relied on by
    ``_resolved_tool_contract_runner`` (0 = table JSON, 1 = plot JSON).
    """
    p.add_input_file_type(
        FileTypes.JSON,
        file_id="json_table",
        name="JSON Table Data",
        description="JSON of table data")
    p.add_input_file_type(
        FileTypes.JSON,
        file_id="json_plot",
        name="JSON Plot Data",
        description="JSON of plot data")
    p.add_output_file_type(FileTypes.REPORT, "report", spec.title,
                           description=("Filename of JSON output report. Should be name only, "
                                        "and will be written to output dir"),
                           default_name="report")
    return p
def _get_parser():
    """Construct the tool-contract-aware parser for this report tool."""
    parser = get_pbparser(
        Constants.TOOL_ID,
        __version__,
        "Report",
        __doc__,
        Constants.DRIVER_EXE,
        is_distributed=False)
    return _add_options_to_parser(parser)
def main(argv=sys.argv):
    """Dispatch command-line arguments through the pbcommand runner,
    wiring up both the argparse and tool-contract execution modes."""
    return pbparser_runner(
        argv[1:],
        _get_parser(),
        _args_runner,
        _resolved_tool_contract_runner,
        log,
        setup_log,
    )
# Script entry point: exit with the runner's return code.
if __name__ == "__main__":
    sys.exit(main())
| 33.267442 | 98 | 0.625306 | 1,513 | 0.132209 | 0 | 0 | 0 | 0 | 0 | 0 | 1,201 | 0.104946 |
a83fc38633f6c95bca36f69963bdbc69b5abb27d | 976 | py | Python | logger.py | K-Molloy/nrdf-feed-python | 8b4ed87cc112154f43aae3d885a2085a2f28a74f | [
"MIT"
] | null | null | null | logger.py | K-Molloy/nrdf-feed-python | 8b4ed87cc112154f43aae3d885a2085a2f28a74f | [
"MIT"
] | null | null | null | logger.py | K-Molloy/nrdf-feed-python | 8b4ed87cc112154f43aae3d885a2085a2f28a74f | [
"MIT"
] | null | null | null | import logging
import os
from datetime import datetime
class Logger :
logger = None
def myLogger(self):
if None == self.logger:
self.logger=logging.getLogger('nrdf')
self.logger.setLevel(logging.DEBUG)
log_folder = r"logs/"
os.makedirs(os.path.dirname(log_folder), exist_ok=True)
output_file = os.path.join(log_folder, datetime.now().strftime("%Y_%m_%d-%H_%M_%S"))
file_handler=logging.FileHandler(output_file + '.log', mode="w", encoding=None, delay=False)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
file_handler.setFormatter(formatter)
self.logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
file_handler.setFormatter(formatter)
self.logger.addHandler(stream_handler)
self.logger.propagate = False
return self.logger | 39.04 | 104 | 0.621926 | 920 | 0.942623 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.082992 |
a840832e20eb91945756ce9b6d98a5d6d6a25541 | 1,689 | py | Python | rest_framework_auth0/models.py | robindebois/djangorestframework-auth0 | a203dcd9e067bc411b852b9f3ad3e7b1d0b843aa | [
"MIT"
] | 107 | 2016-03-28T22:45:40.000Z | 2021-06-28T01:46:38.000Z | rest_framework_auth0/models.py | robindebois/djangorestframework-auth0 | a203dcd9e067bc411b852b9f3ad3e7b1d0b843aa | [
"MIT"
] | 41 | 2016-09-03T05:15:47.000Z | 2021-01-02T12:47:36.000Z | rest_framework_auth0/models.py | robindebois/djangorestframework-auth0 | a203dcd9e067bc411b852b9f3ad3e7b1d0b843aa | [
"MIT"
] | 26 | 2016-04-16T22:01:29.000Z | 2021-05-07T14:01:55.000Z | # Just to keep things like ./manage.py test happy
from django.contrib.auth.models import AbstractUser
# class Group(models.Model):
# """
# Groups are a generic way of categorizing users to apply permissions, or
# some other label, to those users. A user can belong to any number of
# groups.
# A user in a group automatically has all the permissions granted to that
# group. For example, if the group Site editors has the permission
# can_edit_home_page, any user in that group will have that permission.
# Beyond permissions, groups are a convenient way to categorize users to
# apply some label, or extended functionality, to them. For example, you
# could create a group 'Special users', and you could write code that would
# do special things to those users -- such as giving them access to a
# members-only portion of your site, or sending them members-only email
# messages.
# """
# name = models.CharField(_('name'), max_length=80, unique=True)
# permissions = models.ManyToManyField(
# Permission,
# verbose_name=_('permissions'),
# blank=True,
# )
#
# objects = GroupManager()
#
# class Meta:
# verbose_name = _('group')
# verbose_name_plural = _('groups')
#
# def __str__(self):
# return self.name
#
# def natural_key(self):
# return (self.name,)
# class User(AbstractUser):
# """
# Users within the Django authentication system are represented by this
# model.
# Username, password and email are required. Other fields are optional.
# """
# class Meta(AbstractUser.Meta):
# swappable = 'AUTH_USER_MODEL'
| 36.717391 | 79 | 0.666075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,593 | 0.943162 |
a8416b3fb1eb6b1559c7209666065c8baea36cb7 | 7,494 | py | Python | inference.py | quantori/prj-covid-scoring | 98d80498ffbab1fdbbeec0fc4016f9a199dbf88e | [
"MIT"
] | null | null | null | inference.py | quantori/prj-covid-scoring | 98d80498ffbab1fdbbeec0fc4016f9a199dbf88e | [
"MIT"
] | null | null | null | inference.py | quantori/prj-covid-scoring | 98d80498ffbab1fdbbeec0fc4016f9a199dbf88e | [
"MIT"
] | null | null | null | import os
import argparse
import cv2
import torch
import pandas as pd
from tqdm import tqdm
from pathlib import Path
import segmentation_models_pytorch as smp
from tools.datasets import InferenceDataset
from tools.models import CovidScoringNet, SegmentationModel
from tools.utils import extract_model_opts, get_list_of_files
def inference(
    model: CovidScoringNet,
    dataset: InferenceDataset,
    output_dir: str,
    csv_name: str,
) -> None:
    """Run the scoring model over *dataset*, saving lung and COVID masks as
    PNGs and writing a CSV of scores plus per-segment probabilities.

    Args:
        model: network exposing ``eval()`` and ``predict(img)`` returning
            ``(score, lungs_mask, covid_mask, raw_pred)``.
        dataset: iterable of ``(image, image_path)`` pairs.
        output_dir: root directory for the ``lungs/``/``covid/`` masks and CSV.
        csv_name: filename of the output CSV inside *output_dir*.
    """
    model.eval()

    output_lungs_dir = os.path.join(output_dir, 'lungs')
    output_covid_dir = os.path.join(output_dir, 'covid')
    # exist_ok=True replaces the racy check-then-create
    # ``os.makedirs(...) if not os.path.exists(...) else False`` expressions
    os.makedirs(output_lungs_dir, exist_ok=True)
    os.makedirs(output_covid_dir, exist_ok=True)

    data = {
        'dataset': [],
        'filename': [],
        'lungs_mask': [],
        'covid_mask': [],
        'score': [],
    }
    segment_keys = ['lung_segment_{:d}'.format(idx + 1) for idx in range(6)]
    data.update({key: [] for key in segment_keys})

    for source_img, img_path in tqdm(dataset, desc='Prediction', unit=' images'):
        image_path = os.path.normpath(img_path)
        filename = os.path.split(image_path)[-1]
        # assumes paths look like .../<dataset>/<subdir>/<file> -- TODO confirm
        dataset_name = image_path.split(os.sep)[-3]

        predicted_score, mask_lungs, mask_covid, raw_pred = model.predict(source_img)

        # Masks are saved as 8-bit images (binary mask * 255)
        cv2.imwrite(os.path.join(output_lungs_dir, filename), mask_lungs * 255)
        cv2.imwrite(os.path.join(output_covid_dir, filename), mask_covid * 255)

        data['dataset'].append(dataset_name)
        data['filename'].append(filename)
        data['lungs_mask'].append(os.path.join(output_lungs_dir, filename))
        data['covid_mask'].append(os.path.join(output_covid_dir, filename))
        data['score'].append(predicted_score)
        for idx, segment_prob in enumerate(raw_pred):
            data['lung_segment_{:d}'.format(idx + 1)].append(segment_prob)

    csv_save_path = os.path.join(output_dir, csv_name)
    pd.DataFrame(data).to_csv(csv_save_path, index=False)
# Command-line driver: parse options, build the lungs and COVID segmentation
# models, compose them into a CovidScoringNet and run inference over data_dir.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Inference pipeline')

    # Dataset settings
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--output_dir', default='dataset/inference_output', type=str)
    parser.add_argument('--csv_name', default='model_outputs.csv', type=str)

    # COVID model settings
    # NOTE(review): argparse ``type=bool`` converts any non-empty string
    # (including "False") to True -- these flags cannot be disabled from the
    # command line as written; confirm intended usage.
    parser.add_argument('--covid_model_path', type=str)
    parser.add_argument('--covid_model_name', default='Unet', type=str)
    parser.add_argument('--covid_encoder_name', default='se_resnet101', type=str)
    parser.add_argument('--covid_encoder_weights', default='imagenet', type=str)
    parser.add_argument('--covid_in_channels', default=3, type=int)
    parser.add_argument('--covid_num_classes', default=1, type=int)
    parser.add_argument('--covid_activation', default='sigmoid', type=str)
    parser.add_argument('--covid_dropout', default=0.2, type=float)
    parser.add_argument('--covid_aux_params', default=True, type=bool)
    parser.add_argument('--covid_input_size', nargs='+', default=(480, 480), type=int)

    # Lungs model settings
    parser.add_argument('--lungs_model_path', type=str)
    parser.add_argument('--lungs_model_name', default='Unet', type=str)
    parser.add_argument('--lungs_encoder_name', default='se_resnext101_32x4d', type=str)
    parser.add_argument('--lungs_encoder_weights', default='imagenet', type=str)
    parser.add_argument('--lungs_in_channels', default=3, type=int)
    parser.add_argument('--lungs_num_classes', default=1, type=int)
    parser.add_argument('--lungs_activation', default='sigmoid', type=str)
    parser.add_argument('--lungs_dropout', default=0.2, type=float)
    parser.add_argument('--lungs_aux_params', default=False, type=bool)
    parser.add_argument('--lungs_input_size', nargs='+', default=(384, 384), type=int)

    # Additional settings
    parser.add_argument('--automatic_parser', action='store_true')
    parser.add_argument('--threshold', default=0.5, type=float)
    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # nargs='+' yields lists; the models expect (H, W) tuples
    args.covid_input_size = tuple(args.covid_input_size)
    args.lungs_input_size = tuple(args.lungs_input_size)

    # Optionally derive model/encoder names from the checkpoint filenames
    if args.automatic_parser:
        covid_model_opts = extract_model_opts(args.covid_model_path)
        lungs_model_opts = extract_model_opts(args.lungs_model_path)

        args.covid_model_name = covid_model_opts['model_name']
        args.covid_encoder_name = covid_model_opts['encoder_name']
        args.covid_encoder_weights = covid_model_opts['encoder_weights']

        args.lungs_model_name = lungs_model_opts['model_name']
        args.lungs_encoder_name = lungs_model_opts['encoder_name']
        args.lungs_encoder_weights = lungs_model_opts['encoder_weights']

    # Namespace outputs per COVID model and tag the CSV with the model name
    args.output_dir = os.path.join(args.output_dir, args.covid_model_name)
    args.csv_name = '{:s}_{:s}{:s}'.format(
        Path(args.csv_name).stem,
        args.covid_model_name,
        Path(args.csv_name).suffix
    )

    covid_aux_params = None
    if args.covid_aux_params:
        covid_aux_params = dict(
            pooling='avg',
            dropout=args.covid_dropout,
            activation=args.covid_activation,
            classes=args.covid_num_classes,
        )

    lungs_aux_params = None
    if args.lungs_aux_params:
        # NOTE(review): this block reads args.covid_activation and
        # args.covid_num_classes rather than the lungs_* equivalents --
        # looks like a copy-paste slip; confirm before changing.
        lungs_aux_params = dict(
            pooling='avg',
            dropout=args.lungs_dropout,
            activation=args.covid_activation,
            classes=args.covid_num_classes,
        )

    covid_model = SegmentationModel(
        model_name=args.covid_model_name,
        encoder_name=args.covid_encoder_name,
        aux_params=covid_aux_params,
        encoder_weights=args.covid_encoder_weights,
        in_channels=args.covid_in_channels,
        num_classes=args.covid_num_classes,
        activation=args.covid_activation,
        wandb_api_key=None,
    )
    lungs_model = SegmentationModel(
        model_name=args.lungs_model_name,
        encoder_name=args.lungs_encoder_name,
        aux_params=lungs_aux_params,
        encoder_weights=args.lungs_encoder_weights,
        in_channels=args.lungs_in_channels,
        num_classes=args.lungs_num_classes,
        activation=args.lungs_activation,
        wandb_api_key=None,
    )

    # Instantiate the networks and load the trained weights
    covid_model = covid_model.build_model()
    lungs_model = lungs_model.build_model()
    covid_model.load_state_dict(torch.load(args.covid_model_path, map_location=device))
    lungs_model.load_state_dict(torch.load(args.lungs_model_path, map_location=device))

    # Per-encoder normalization parameters used when preparing inputs
    covid_preprocessing_params = smp.encoders.get_preprocessing_params(
        encoder_name=args.covid_encoder_name, pretrained=args.covid_encoder_weights
    )
    lung_preprocessing_params = smp.encoders.get_preprocessing_params(
        encoder_name=args.lungs_encoder_name, pretrained=args.lungs_encoder_weights
    )

    # Build the dataset (excluding files matched by 'mask') and run inference
    img_paths = get_list_of_files(args.data_dir, ['mask'])
    dataset = InferenceDataset(img_paths, input_size=args.lungs_input_size)
    model = CovidScoringNet(
        lungs_model,
        covid_model,
        device,
        args.threshold,
        args.lungs_input_size,
        args.covid_input_size,
        covid_preprocessing_params,
        lung_preprocessing_params,
        crop_type='single_crop',
    )
    inference(model, dataset, args.output_dir, args.csv_name)
| 39.235602 | 88 | 0.70403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,058 | 0.14118 |
a8422d5e6c8ade1475ebc70a2c91559349806cf9 | 4,267 | py | Python | uproot/const.py | guiguem/uproot | 42173c5a25b5d5858f42788090327030d6aa9b8d | [
"BSD-3-Clause"
] | null | null | null | uproot/const.py | guiguem/uproot | 42173c5a25b5d5858f42788090327030d6aa9b8d | [
"BSD-3-Clause"
] | null | null | null | uproot/const.py | guiguem/uproot | 42173c5a25b5d5858f42788090327030d6aa9b8d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""ROOT constants used in deserialization."""
import numpy
# Bit masks and tags used when unmarshaling ROOT buffers
# (values match ROOT's serialization format).
# used in unmarshaling
kByteCountMask = numpy.int64(0x40000000)
kByteCountVMask = numpy.int64(0x4000)
kClassMask = numpy.int64(0x80000000)
kNewClassTag = numpy.int64(0xFFFFFFFF)
kIsOnHeap = numpy.uint32(0x01000000)
kIsReferenced = numpy.uint32(1 << 4)
kMapOffset = 2
# not used?
kNullTag = 0
kNotDeleted = 0x02000000
kZombie = 0x04000000
kBitMask = 0x00ffffff
kDisplacementMask = 0xFF000000
################################################################ core/zip/inc/Compression.h
# Compression algorithm identifiers stored in compressed-record headers
kZLIB = 1
kLZMA = 2
kOldCompressionAlgo = 3
kLZ4 = 4
kUndefinedCompressionAlgorithm = 5
################################################################ constants for streamers
# Streamer element type codes; kOffsetL/kOffsetP are added to a base code
# to mark fixed-size-array and pointer variants respectively
kBase = 0
kChar = 1
kShort = 2
kInt = 3
kLong = 4
kFloat = 5
kCounter = 6
kCharStar = 7
kDouble = 8
kDouble32 = 9
kLegacyChar = 10
kUChar = 11
kUShort = 12
kUInt = 13
kULong = 14
kBits = 15
kLong64 = 16
kULong64 = 17
kBool = 18
kFloat16 = 19
kOffsetL = 20
kOffsetP = 40
kObject = 61
kAny = 62
kObjectp = 63
kObjectP = 64
kTString = 65
kTObject = 66
kTNamed = 67
kAnyp = 68
kAnyP = 69
kAnyPnoVT = 70
kSTLp = 71
kSkip = 100
kSkipL = 120
kSkipP = 140
kConv = 200
kConvL = 220
kConvP = 240
kSTL = 300
kSTLstring = 365
kStreamer = 500
kStreamLoop = 501
################################################################ constants from core/foundation/inc/ESTLType.h
# STL container type codes used by streamers for STL collections
kNotSTL = 0
kSTLvector = 1
kSTLlist = 2
kSTLdeque = 3
kSTLmap = 4
kSTLmultimap = 5
kSTLset = 6
kSTLmultiset = 7
kSTLbitset = 8
kSTLforwardlist = 9
kSTLunorderedset = 10
kSTLunorderedmultiset = 11
kSTLunorderedmap = 12
kSTLunorderedmultimap = 13
kSTLend = 14
kSTLany = 300
################################################################ IOFeatures
kGenerateOffsetMap = 1
a84343d8f9a70e258120f78238c2960222f6b4ed | 554 | py | Python | LeetCode/LostBoy/771. Jewels and Stones.py | nikku1234/Code-Practise | 94eb6680ea36efd10856c377000219285f77e5a4 | [
"Apache-2.0"
] | 9 | 2020-07-02T06:06:17.000Z | 2022-02-26T11:08:09.000Z | LeetCode/LostBoy/771. Jewels and Stones.py | nikku1234/Code-Practise | 94eb6680ea36efd10856c377000219285f77e5a4 | [
"Apache-2.0"
] | 1 | 2021-11-04T17:26:36.000Z | 2021-11-04T17:26:36.000Z | LeetCode/LostBoy/771. Jewels and Stones.py | nikku1234/Code-Practise | 94eb6680ea36efd10856c377000219285f77e5a4 | [
"Apache-2.0"
] | 8 | 2021-01-31T10:31:12.000Z | 2022-03-13T09:15:55.000Z | #771. Jewels and Stones
class Solution:
def numJewelsInStones(self, jewels: str, stones: str) -> int:
# count = 0
# jewl = {}
# for i in jewels:
# if i not in jewl:
# jewl[i] = 0
# for j in stones:
# if j in jewl:
# count += 1
# return count
# return sum(s in jewels for s in stones)
count = 0
jewl = set(jewels)
for s in stones:
if s in jewl:
count += 1
return count | 25.181818 | 65 | 0.435018 | 529 | 0.954874 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.395307 |
a84368b717e48b36eb701f61e858302d7ec5883b | 277 | py | Python | djblog/comments/admin.py | rewalkerof/microblog | 2ef1652dcb9a7d35eed228f1d74587f4b0fa8912 | [
"MIT"
] | null | null | null | djblog/comments/admin.py | rewalkerof/microblog | 2ef1652dcb9a7d35eed228f1d74587f4b0fa8912 | [
"MIT"
] | null | null | null | djblog/comments/admin.py | rewalkerof/microblog | 2ef1652dcb9a7d35eed228f1d74587f4b0fa8912 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Comment
# Register your models here.
# Admin configuration for blog comments.
class CommentsAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist
    list_display = ['id', "user", "content", "timestamp"]
    # NOTE(review): ModelAdmin does not consume an inner ``Meta`` class the
    # way ModelForm does -- this block appears to have no effect; confirm
    # before relying on it or removing it.
    class Meta:
        model = Comment
admin.site.register(Comment, CommentsAdmin)
a84380c1f97670fcb660541a388530604b2cd9bd | 1,180 | py | Python | bot/audio_trial2.py | Nova-Striker/discord-bot | 1c977711f348467bd73e1886c27ad4a9a93c779b | [
"Apache-2.0"
] | 1 | 2020-11-10T06:33:49.000Z | 2020-11-10T06:33:49.000Z | bot/audio_trial2.py | Nova-Striker/discord-bot | 1c977711f348467bd73e1886c27ad4a9a93c779b | [
"Apache-2.0"
] | null | null | null | bot/audio_trial2.py | Nova-Striker/discord-bot | 1c977711f348467bd73e1886c27ad4a9a93c779b | [
"Apache-2.0"
] | 1 | 2020-11-13T17:12:00.000Z | 2020-11-13T17:12:00.000Z | ##incompleted yt tuitorial
##import discord
##import json
##import asyncio
##import youtube_dl
##import shell
##import os
##from discord.utils import get
##from discord.ext import commands
##
##@client.command(pass_context=True)
##async def join(ctx):
## global voice
## channel=ctx.message.author.voice.channel
## voice=get(client.voice_clients,guild=ctx.guild)
##
## if voice and voice.is_connected():
## await voice.move_to(channel)
## else:
## voice=await chqannel.connect()
## await ctx.send(f"Joined {channel}")
##
##@client.command(pass_context=True)
##async def leave(ctx):
## channel=ctx.message.author.voice.channel
## voice=get(client.voice_clients,guild=ctx.guild)
##
## if voice and voice.is_connected():
## await voice.disconnect()
## await ctx.send(f"Left {channel}")
##
##@client.command(pass_context=True,aliases=["p"])
##async def play(ctx,url:str):
## def check_queue():
## Queue_infile=os.path.indir("./Queue")
## if Queue_infile is True:
## DIR =os.path.abspath(os.path.realpath("Queue"))
## length=len(os.
##
| 28.095238 | 62 | 0.626271 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,137 | 0.963559 |
a8465518470f69aed3c070276b30d62403ad5fa0 | 14,426 | py | Python | pyeem/augmentation/base.py | drewmee/PyEEM | 283c01405bf51da6827ba434be53acd580b7642b | [
"MIT"
] | 4 | 2020-09-01T08:27:28.000Z | 2022-03-12T09:11:15.000Z | pyeem/augmentation/base.py | drewmee/PyEEM | 283c01405bf51da6827ba434be53acd580b7642b | [
"MIT"
] | 2 | 2021-06-11T18:20:47.000Z | 2021-11-19T14:11:09.000Z | pyeem/augmentation/base.py | drewmee/PyEEM | 283c01405bf51da6827ba434be53acd580b7642b | [
"MIT"
] | null | null | null | import itertools
import os
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
def _get_steps():
hdf_subdir = "augmentation/"
steps = {"step_name": ["prototypical", "single_sources", "mixtures"]}
steps_df = pd.DataFrame(steps)
steps_df["hdf_path"] = hdf_subdir + steps_df["step_name"]
# Impose order on the augmentation steps:
steps_df["step_name"] = pd.Categorical(
steps_df["step_name"], ["prototypical", "single_sources", "mixtures"]
)
steps_df.sort_values("step_name", inplace=True, ignore_index=True)
return steps_df
def prototypical_spectrum(dataset, source_df):
    """Average of the prototypical calibration spectra for one source.

    As written this is a plain (unweighted) mean over the samples flagged
    ``prototypical_sample``; the random-weight variant is left commented out
    below. The result is stored back into the dataset's HDF5 store under
    ``augmentation/prototypical/<source>`` and also returned.

    Args:
        dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
        source_df (pandas.DataFrame): Calibration information for a single source.

    Returns:
        pandas.DataFrame: A prototypical Excitation Emission Matrix for a single source.
    """
    aug_steps_df = _get_steps()
    # Source metadata carried in the calibration index levels
    source_name = source_df.index.get_level_values("source").unique().item()
    source_units = source_df.index.get_level_values("source_units").unique().item()
    intensity_units = (
        source_df.index.get_level_values("intensity_units").unique().item()
    )
    # Load every EEM flagged as a prototypical sample
    proto_eems = []
    for index, row in source_df[source_df["prototypical_sample"]].iterrows():
        eem_path = row["hdf_path"]
        eem = pd.read_hdf(dataset.hdf, key=eem_path)
        proto_eems.append(eem)
    # TODO - IMPORTANT: This can't just be the mean of the prototypical samples...
    # Need to use the same weighted average as the intensity values!
    proto_concentration = source_df[source_df["prototypical_sample"]][
        "concentration"
    ].mean()
    # Disabled random-weight averaging, kept as a bare string (no-op):
    """
    weights = []
    for i in range(len(proto_eems)):
        weights.append(random.uniform(0, 1))

    proto_eem = np.average([eem.values for eem in proto_eems], axis=0, weights=weights)
    """
    # Unweighted mean across the prototypical EEMs
    proto_eem = np.average([eem.values for eem in proto_eems], axis=0)
    # Re-wrap as a DataFrame on the first EEM's wavelength grid (assumes all
    # prototypical EEMs share the same axes -- TODO confirm)
    proto_eem = pd.DataFrame(
        data=proto_eem, index=proto_eems[0].index, columns=proto_eems[0].columns
    )
    proto_eem.index.name = "emission_wavelength"
    # Destination path inside the HDF5 store
    hdf_path = aug_steps_df[aug_steps_df["step_name"] == "prototypical"][
        "hdf_path"
    ].item()
    hdf_path = os.path.join(hdf_path, source_name)
    # Attach metadata as extra index levels, with emission_wavelength last
    new_indices = np.array(
        ["source", "proto_conc", "source_units", "intensity_units", "hdf_path"]
    )
    proto_eem = proto_eem.assign(
        **{
            "source": source_name,
            "proto_conc": proto_concentration,
            "source_units": source_units,
            "intensity_units": intensity_units,
            "hdf_path": hdf_path,
        }
    )
    proto_eem.set_index(new_indices.tolist(), append=True, inplace=True)
    new_indices = np.append(new_indices, ("emission_wavelength"))
    proto_eem = proto_eem.reorder_levels(new_indices)
    # Persist and return
    proto_eem.to_hdf(dataset.hdf, key=hdf_path)
    return proto_eem
def create_prototypical_spectra(dataset, cal_df):
    """Create a prototypical spectrum for every calibration source.

    Args:
        dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
        cal_df (pandas.DataFrame): Calibration information for the dataset, as
            returned by :meth:`pyeem.preprocessing.calibration()`.

    Returns:
        pandas.DataFrame: One row per source describing the prototypical
        spectrum and its path inside the HDF5 store, indexed by ``source``.
    """
    summary_rows = []
    for _, source_group in cal_df.groupby(level="source", as_index=False):
        proto_eem_df = prototypical_spectrum(dataset, source_group)
        # Collapse the EEM's metadata index levels into a single summary row
        meta_index = proto_eem_df.index.droplevel("emission_wavelength").unique()
        summary_rows.append(dict(zip(list(meta_index.names),
                                     list(meta_index.item()))))
    summary_df = pd.DataFrame(summary_rows)
    summary_df.set_index("source", inplace=True)
    return summary_df
def single_source(dataset, source_df, conc_range, num_spectra):
    """Creates augmented single source spectra for a single calibration source.

    Each augmented EEM is the source's prototypical EEM scaled by the ratio of
    the calibration line evaluated at a new concentration vs. at the
    prototypical concentration; results are stored under
    ``augmentation/single_sources/<source>`` and returned.

    Args:
        dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
        source_df (pandas.DataFrame): Calibration information for a single source.
        conc_range (tuple of (int, float)): The concentration range which the
            augmented single source spectra will occupy.
        num_spectra (int): The number of augmented single source spectra to create.

    Returns:
        pandas.DataFrame: A table describing the source's augmented spectra and their
        paths within the HDF5 store.
    """
    aug_steps_df = _get_steps()
    # Get the source's name
    source_name = source_df.index.get_level_values("source").unique().item()
    # Get the HDF5 path to the source's prototypical EEM
    proto_hdf_path = aug_steps_df[aug_steps_df["step_name"] == "prototypical"][
        "hdf_path"
    ].item()
    proto_hdf_path = os.path.join(proto_hdf_path, source_name)
    # Read in the prototypical EEM
    proto_eem = pd.read_hdf(dataset.hdf, key=proto_hdf_path)
    # Get the source's prototypical concentration
    proto_concentration = proto_eem.index.get_level_values("proto_conc").unique().item()
    # Remove the concentration index from the dataframe
    proto_eem.reset_index(level=["proto_conc"], drop=True, inplace=True)
    # Get the slope and intercept of the source's calibration function
    slope = source_df.index.get_level_values("slope").unique().item()
    y_intercept = source_df.index.get_level_values("intercept").unique().item()
    # Superseded lookup via cal_df, kept as a bare string (no-op):
    """
    slope = (
        cal_df.xs(source_name, level="source")
        .index.get_level_values("slope")
        .unique()
        .item()
    )
    y_intercept = (
        cal_df.xs(source_name, level="source")
        .index.get_level_values("intercept")
        .unique()
        .item()
    )
    """
    # Generate the 1D polynomial
    cal_func = np.poly1d([slope, y_intercept])
    # Generate the concentration range based on the argument's
    concentration_range = np.linspace(conc_range[0], conc_range[1], num=num_spectra)
    # Create a new HDF5 path for the single source spectra
    hdf_path = aug_steps_df[aug_steps_df["step_name"] == "single_sources"][
        "hdf_path"
    ].item()
    hdf_path = os.path.join(hdf_path, source_name)
    # aug_ss_dfs: A list which we will iteratively append single source spectra to. For each
    # concentration in the concentration range. Then we will turn the list of DFs
    # into a single DF by using concat()
    aug_ss_dfs = []
    sources = list(dataset.calibration_sources)
    for new_concentration in concentration_range:
        # Scale intensities by the calibration-line ratio at the new vs.
        # prototypical concentration
        scalar = cal_func(new_concentration) / cal_func(proto_concentration)
        ss_eem = proto_eem * scalar
        # Make sure there are no negative values
        ss_eem.clip(lower=0, inplace=True)
        # One-hot-style concentration label: only this source is non-zero
        label = np.zeros(len(sources))
        source_index = sources.index(source_name)
        label[source_index] = new_concentration
        ss_eem.index.name = "emission_wavelength"
        ss_eem = ss_eem.assign(**dict(zip(sources, label)))
        new_indices = sources
        ss_eem.set_index(new_indices, append=True, inplace=True)
        # Rebuild the full level order; note the list concatenation creates a
        # new list, so ``sources`` itself is not mutated by the append below
        new_indices = [
            "source",
            "source_units",
            "intensity_units",
            "hdf_path",
        ] + new_indices
        new_indices.append("emission_wavelength")
        ss_eem = ss_eem.reorder_levels(new_indices)
        # Point the hdf_path index level at the single-source location
        ss_eem.rename(index={proto_hdf_path: hdf_path}, inplace=True)
        aug_ss_dfs.append(ss_eem)
    aug_ss_df = pd.concat(aug_ss_dfs)
    aug_ss_df.to_hdf(dataset.hdf, key=hdf_path)
    return aug_ss_df
def create_single_source_spectra(dataset, cal_df, conc_range, num_spectra):
    """Build augmented single-source spectra for every calibration source.

    Args:
        dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
        cal_df (pandas.DataFrame): Calibration information for your dataset
            returned from :meth:`pyeem.preprocessing.calibration()`
        conc_range (tuple of (int, float)): The concentration range which the
            augmented single source spectra will occupy.
        num_spectra (int): The number of augmented single source spectra for
            each calibration source.

    Returns:
        pandas.DataFrame: A table describing the augmented single source
        spectra and their paths within the HDF5 store.
    """
    summaries = []
    for source_name, source_group in tqdm(cal_df.groupby(level="source", as_index=False)):
        spectra = single_source(
            dataset, source_group, conc_range=conc_range, num_spectra=num_spectra
        )
        # Collapse the per-wavelength rows down to one summary row per spectrum.
        summary = (
            spectra.index.droplevel(["emission_wavelength"])
            .unique()
            .to_frame()
            .reset_index(drop=True)
        )
        summary = summary.set_index(
            ["source", "source_units", "intensity_units", "hdf_path"]
        )
        summaries.append(summary)
    return pd.concat(summaries)
"""
def mixture():
return
"""
def create_mixture_spectra(dataset, cal_df, conc_range, num_steps, scale="logarithmic"):
    """Creates augmented mixture spectra by summing together augmented single
    source spectra. The number of augmented mixtures created is the Cartesian
    product of the concentration steps taken by each calibration source.

    Args:
        dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
        cal_df (pandas.DataFrame): Calibration information for your dataset
            returned from :meth:`pyeem.preprocessing.calibration()`
        conc_range (tuple of (int, float)): The concentration range which the
            augmented spectra mixtures will occupy.
        num_steps (int): The number of intervals within the concentration range.
        scale (str, optional): Determines how the concentrations will be spaced
            along the given concentration range. Options are "linear" and
            "logarithmic". Defaults to "logarithmic".

    Raises:
        Exception: Raised if calibration sources are reported in different units.
        ValueError: Raised if the scale argument is a value other than "linear"
            or "logarithmic".

    Returns:
        pandas.DataFrame: A table describing the augmented mixture spectra
        and their paths within the HDF5 store.
    """
    if cal_df.index.get_level_values("source_units").nunique() != 1:
        raise Exception(
            "Sources must be reported in the same units in order create augmented mixtures."
        )
    sources = cal_df.index.get_level_values(level="source").unique().to_list()
    source_units = cal_df.index.get_level_values("source_units").unique().item()
    intensity_units = (
        cal_df.index.get_level_values(level="intensity_units").unique().item()
    )

    aug_steps_df = _get_steps()
    hdf_path = aug_steps_df[aug_steps_df["step_name"] == "mixtures"]["hdf_path"].item()
    proto_base_path = aug_steps_df[aug_steps_df["step_name"] == "prototypical"][
        "hdf_path"
    ].item()

    # Hoist all per-source loop invariants (calibration polynomial, prototypical
    # EEM with its concentration) out of the Cartesian-product loop below, which
    # previously recomputed them num_steps ** len(sources) times.
    cal_funcs = {}
    proto_eems = {}
    proto_concentrations = {}
    for source_name, group in cal_df.groupby(level="source", as_index=False):
        slope = group.index.get_level_values("slope").unique().item()
        y_intercept = group.index.get_level_values("intercept").unique().item()
        cal_funcs[source_name] = np.poly1d([slope, y_intercept])
        # Read in the source's prototypical EEM.
        proto_eem = pd.read_hdf(
            dataset.hdf, key=os.path.join(proto_base_path, source_name)
        )
        proto_concentrations[source_name] = (
            proto_eem.index.get_level_values("proto_conc").unique().item()
        )
        proto_eem.reset_index(level=["proto_conc"], drop=True, inplace=True)
        proto_eems[source_name] = proto_eem

    if scale == "logarithmic":
        number_range = np.geomspace(conc_range[0], conc_range[1], num=num_steps)
    elif scale == "linear":
        number_range = np.linspace(conc_range[0], conc_range[1], num=num_steps)
    else:
        raise ValueError("scale must be 'logarithmic' or 'linear'")

    cartesian_product = list(
        itertools.product(number_range.tolist(), repeat=len(sources))
    )

    aug = []
    for conc_set in tqdm(cartesian_product, desc="Creating Augmented Mixtures"):
        mix = []
        for source_name, new_concentration in zip(sources, conc_set):
            cal_func = cal_funcs[source_name]
            # Scale the prototypical EEM so its fluorescence corresponds to the
            # new concentration on the source's calibration line.
            scalar = cal_func(new_concentration) / cal_func(
                proto_concentrations[source_name]
            )
            new_eem = proto_eems[source_name] * scalar
            # Make sure there are no negative values
            new_eem.clip(lower=0, inplace=True)
            mix.append(new_eem)
        # Sum the scaled single-source EEMs into one mixture EEM.
        mix_eem = pd.concat(mix).sum(level="emission_wavelength")
        mix_eem = mix_eem.assign(**dict(zip(sources, conc_set)))
        mix_eem["hdf_path"] = hdf_path
        mix_eem["source"] = "mixture"
        mix_eem["source_units"] = source_units
        mix_eem["intensity_units"] = intensity_units
        new_indices = [
            "source",
            "source_units",
            "intensity_units",
            "hdf_path",
        ] + sources
        mix_eem.set_index(new_indices, append=True, inplace=True)
        mix_eem = mix_eem.reorder_levels(new_indices + ["emission_wavelength"])
        aug.append(mix_eem)

    aug_mix_df = pd.concat(aug)
    aug_mix_df.to_hdf(dataset.hdf, key=hdf_path)
    # Collapse per-wavelength rows into one summary row per mixture spectrum.
    aug_mix_df = (
        aug_mix_df.index.droplevel(["emission_wavelength"])
        .unique()
        .to_frame()
        .reset_index(drop=True)
    )
    aug_mix_df.set_index(
        ["source", "source_units", "intensity_units", "hdf_path"], inplace=True
    )
    return aug_mix_df
| 37.665796 | 109 | 0.661722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,252 | 0.433384 |
a8475c65df4075b3b08496dd18fcba18f180aafe | 4,306 | py | Python | pgoapi/protos/POGOProtos/Networking/Responses/SetAvatarResponse_pb2.py | PogoHop/pgoapi-hsvr | b5761159e0240bbb81ef6c257fe2eb1bc1ce2d47 | [
"MIT"
] | null | null | null | pgoapi/protos/POGOProtos/Networking/Responses/SetAvatarResponse_pb2.py | PogoHop/pgoapi-hsvr | b5761159e0240bbb81ef6c257fe2eb1bc1ce2d47 | [
"MIT"
] | null | null | null | pgoapi/protos/POGOProtos/Networking/Responses/SetAvatarResponse_pb2.py | PogoHop/pgoapi-hsvr | b5761159e0240bbb81ef6c257fe2eb1bc1ce2d47 | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/SetAvatarResponse.proto
import sys
# Py2/Py3 shim: on Python 3 the serialized descriptor literal must be
# latin-1 encoded bytes; on Python 2 it is passed through unchanged.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Data import PlayerData_pb2 as POGOProtos_dot_Data_dot_PlayerData__pb2
# File descriptor rebuilt from the compiled .proto (serialized_pb holds the
# wire-format FileDescriptorProto). Generated code -- do not hand-edit logic.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='POGOProtos/Networking/Responses/SetAvatarResponse.proto',
  package='POGOProtos.Networking.Responses',
  syntax='proto3',
  serialized_pb=_b('\n7POGOProtos/Networking/Responses/SetAvatarResponse.proto\x12\x1fPOGOProtos.Networking.Responses\x1a POGOProtos/Data/PlayerData.proto\"\xd7\x01\n\x11SetAvatarResponse\x12I\n\x06status\x18\x01 \x01(\x0e\x32\x39.POGOProtos.Networking.Responses.SetAvatarResponse.Status\x12\x30\n\x0bplayer_data\x18\x02 \x01(\x0b\x32\x1b.POGOProtos.Data.PlayerData\"E\n\x06Status\x12\t\n\x05UNSET\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x16\n\x12\x41VATAR_ALREADY_SET\x10\x02\x12\x0b\n\x07\x46\x41ILURE\x10\x03\x62\x06proto3')
  ,
  dependencies=[POGOProtos_dot_Data_dot_PlayerData__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Descriptor for the nested SetAvatarResponse.Status enum
# (UNSET / SUCCESS / AVATAR_ALREADY_SET / FAILURE).
_SETAVATARRESPONSE_STATUS = _descriptor.EnumDescriptor(
  name='Status',
  full_name='POGOProtos.Networking.Responses.SetAvatarResponse.Status',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='UNSET', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SUCCESS', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='AVATAR_ALREADY_SET', index=2, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='FAILURE', index=3, number=3,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=273,
  serialized_end=342,
)
_sym_db.RegisterEnumDescriptor(_SETAVATARRESPONSE_STATUS)
# Descriptor for the SetAvatarResponse message: field 1 is the Status enum,
# field 2 is the embedded PlayerData message.
_SETAVATARRESPONSE = _descriptor.Descriptor(
  name='SetAvatarResponse',
  full_name='POGOProtos.Networking.Responses.SetAvatarResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='status', full_name='POGOProtos.Networking.Responses.SetAvatarResponse.status', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='player_data', full_name='POGOProtos.Networking.Responses.SetAvatarResponse.player_data', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _SETAVATARRESPONSE_STATUS,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=127,
  serialized_end=342,
)
# Wire up cross-references that could not be resolved while building above.
_SETAVATARRESPONSE.fields_by_name['status'].enum_type = _SETAVATARRESPONSE_STATUS
_SETAVATARRESPONSE.fields_by_name['player_data'].message_type = POGOProtos_dot_Data_dot_PlayerData__pb2._PLAYERDATA
_SETAVATARRESPONSE_STATUS.containing_type = _SETAVATARRESPONSE
DESCRIPTOR.message_types_by_name['SetAvatarResponse'] = _SETAVATARRESPONSE
# Concrete message class synthesized from the descriptor via reflection.
SetAvatarResponse = _reflection.GeneratedProtocolMessageType('SetAvatarResponse', (_message.Message,), dict(
  DESCRIPTOR = _SETAVATARRESPONSE,
  __module__ = 'POGOProtos.Networking.Responses.SetAvatarResponse_pb2'
  # @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.SetAvatarResponse)
  ))
_sym_db.RegisterMessage(SetAvatarResponse)
# @@protoc_insertion_point(module_scope)
| 38.106195 | 529 | 0.760102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,351 | 0.313748 |
a8480382db69714973a4b7c38f535e4ba7563557 | 376 | py | Python | control/src/robocup/motion-control/VelocityKalmanTuner/dynamics.py | RRRekkitRalph/robocup-firmware | a1502fdb3401e7e998a29c5fae22ef597dcc2dc5 | [
"Apache-2.0"
] | 26 | 2016-07-06T11:25:43.000Z | 2022-02-27T00:24:02.000Z | control/src/robocup/motion-control/VelocityKalmanTuner/dynamics.py | RRRekkitRalph/robocup-firmware | a1502fdb3401e7e998a29c5fae22ef597dcc2dc5 | [
"Apache-2.0"
] | 98 | 2016-07-04T22:43:46.000Z | 2021-11-10T00:12:02.000Z | control/src/robocup/motion-control/VelocityKalmanTuner/dynamics.py | RRRekkitRalph/robocup-firmware | a1502fdb3401e7e998a29c5fae22ef597dcc2dc5 | [
"Apache-2.0"
] | 16 | 2016-07-07T10:50:11.000Z | 2021-12-06T11:20:28.000Z | from abc import ABC, abstractmethod
class Dynamics(ABC):
    """Abstract base for objects that simulate dynamics.

    Concrete subclasses must implement a time step plus accessors for the
    internal state and the observable measurements.
    """

    @abstractmethod
    def step(self, dt):
        """Advance the simulation by the time increment ``dt``."""

    @abstractmethod
    def get_state(self):
        """Return the simulator's current internal state."""

    @abstractmethod
    def get_measurements(self):
        """Return the current observable measurements."""
a8487bc9414b8343a3c74e1a9dd0ffa5bd1bc6e4 | 1,012 | py | Python | algs4/max_pq.py | dumpmemory/algs4-py | 8555076b554583b5438ed5180e2815cf049fb233 | [
"MIT"
] | 230 | 2018-02-27T02:26:44.000Z | 2022-03-29T10:26:57.000Z | algs4/max_pq.py | dumpmemory/algs4-py | 8555076b554583b5438ed5180e2815cf049fb233 | [
"MIT"
] | 5 | 2018-04-06T12:08:56.000Z | 2021-12-19T09:44:58.000Z | algs4/max_pq.py | dumpmemory/algs4-py | 8555076b554583b5438ed5180e2815cf049fb233 | [
"MIT"
class MaxPQ:
    """Array-backed binary max-heap priority queue (0-indexed).

    pq[0] is always the maximum; the children of index k live at
    2*k + 1 and 2*k + 2, and the parent of k is (k - 1) // 2.
    """

    def __init__(self):
        self.pq = []

    def insert(self, v):
        """Add v, then restore heap order by swimming it up from the end."""
        self.pq.append(v)
        self.swim(len(self.pq) - 1)

    def max(self):
        """Return (without removing) the largest element."""
        return self.pq[0]

    def del_max(self):
        """Remove and return the largest element."""
        m = self.pq[0]
        # Move the last element to the root, shrink, then sink it into place.
        self.pq[0], self.pq[-1] = self.pq[-1], self.pq[0]
        self.pq = self.pq[:-1]
        self.sink(0)
        return m

    def is_empty(self):
        """True when the queue holds no elements."""
        return not self.pq

    def size(self):
        """Number of elements currently stored."""
        return len(self.pq)

    def swim(self, k):
        """Bubble pq[k] up while it is larger than its parent.

        Bug fix: in a 0-indexed heap the parent of k is (k - 1) // 2, but the
        old loop advanced with ``k = k // 2`` — wrong for even k, which could
        leave the heap invariant broken (e.g. max() returning a non-maximal
        element after inserting 4, 3, 2, 1, 5).
        """
        while k > 0 and self.pq[(k - 1) // 2] < self.pq[k]:
            parent = (k - 1) // 2
            self.pq[k], self.pq[parent] = self.pq[parent], self.pq[k]
            k = parent

    def sink(self, k):
        """Push pq[k] down while it is smaller than its larger child."""
        N = len(self.pq)
        while 2 * k + 1 <= N - 1:
            j = 2 * k + 1
            # Pick the larger of the two children.
            if j < N - 1 and self.pq[j] < self.pq[j + 1]:
                j += 1
            if self.pq[k] > self.pq[j]:
                break
            self.pq[k], self.pq[j] = self.pq[j], self.pq[k]
            k = j
a8498638fe63f75b8fb7831a168989a3237bda41 | 887 | py | Python | PP4E-Examples-1.4/Examples/PP4E/System/Threads/four-threads.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
] | null | null | null | PP4E-Examples-1.4/Examples/PP4E/System/Threads/four-threads.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
] | null | null | null | PP4E-Examples-1.4/Examples/PP4E/System/Threads/four-threads.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
] | null | null | null | """
four different ways to run an action in a thread; all print 4294967296,
but prints should be synchronized with a mutex here to avoid overlap
"""
import threading, _thread
def action(i):
    """Demo workload: print ``i`` raised to the 32nd power."""
    result = i ** 32
    print(result)
# subclass with state
class Mythread(threading.Thread):
    """Thread subclass that carries its operand as instance state and
    prints ``i ** 32`` when run."""

    def __init__(self, i):
        self.i = i  # state carried into the thread
        threading.Thread.__init__(self)

    def run(self):
        # start() arranges for run() to execute in the new thread.
        print(self.i ** 32)
# Kick off the same computation four ways; each thread prints 2 ** 32.
Mythread(2).start()                                    # start invokes run()
# pass action in
thread = threading.Thread(target=(lambda: action(2)))  # run invokes target
thread.start()
# same but no lambda wrapper for state
threading.Thread(target=action, args=(2,)).start()     # callable plus its args
# basic thread module
_thread.start_new_thread(action, (2,))                 # all-function interface
| 31.678571 | 83 | 0.621195 | 231 | 0.260428 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.403608 |
a84a24163aa53482623f2284b32fdd15724ce41b | 9,399 | py | Python | src/server_dgram/server.py | kn1m/LocalizationTDOA | 203f1afdd5ae01329f218e962ccf2cf648bb26db | [
"MIT"
] | 36 | 2016-08-31T06:30:08.000Z | 2021-01-19T13:36:51.000Z | src/server_dgram/server.py | kn1m/LocalizationTDOA | 203f1afdd5ae01329f218e962ccf2cf648bb26db | [
"MIT"
] | 1 | 2018-05-31T15:07:28.000Z | 2019-05-03T10:10:14.000Z | src/server_dgram/server.py | kn1m/LocalizationTDOA | 203f1afdd5ae01329f218e962ccf2cf648bb26db | [
"MIT"
] | 17 | 2016-12-01T09:04:48.000Z | 2021-08-29T01:43:51.000Z | import logging
import socket
import numpy
import time
from cPickle import loads
from scipy import linalg
from matplotlib import pyplot
from multiprocessing import Array
from src.logic import helpers
from src.logic.parallel_process import ProcessParallel
from scipy import *
from numpy import *
class Server:
    """UDP server for TDOA (time-difference-of-arrival) sound localization.

    Collects pickled audio tracks from ``microphone_amount`` remote
    microphones over UDP, then estimates the source position for each trial
    by hyperbolic multilateration (least-squares via pseudo-inverse).
    Delay computation can be parallelized across ``cores_amount`` processes.
    """
    def __init__(self,
                 server_address,
                 server_port,
                 true_positions,
                 estimated_positions,
                 sensor_positions,
                 microphone_amount,
                 trials,
                 coordinates,
                 cores_amount):
        # coordinates is an (x, y, z) triple; stored but not read by the
        # methods below.
        self.__x, self.__y, self.__z = coordinates
        self.__server_address = server_address
        self.__microphone_amount = microphone_amount
        self.__server_port = server_port
        self.__true_positions = true_positions            # (trials, 3), filled by generate_source_positions()
        self.__estimated_positions = estimated_positions  # (trials, 3), filled by handle_retrieved_data()
        self.__trials = trials
        self.__sensor_positions = sensor_positions        # (microphone_amount, 3) sensor coordinates
        self.__distances = []
        self.__time_delays = []
        self.__padding = []
        self.__cores_amount = cores_amount
        self.__microphone_data = None
        self.__raw_microphone_data = []
    def generate_data(self):
        """Run the synthetic-data pipeline: positions, distances, delays."""
        self.generate_source_positions()
        self.generate_distances()
        self.prepare()
    def run(self, received_data):
        """Receive pickled audio data from all microphones over UDP.

        Protocol (as implemented below): each datagram starts with a 36-byte
        microphone id followed by a payload chunk; a datagram that is exactly
        36 bytes (id only) marks that microphone's stream as complete, at
        which point its accumulated payload is appended to ``received_data``.
        """
        # Create a UDP (datagram) socket. (The original comment said TCP,
        # but SOCK_DGRAM is UDP.)
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Bind the socket to the port
        server_address = (self.__server_address, self.__server_port)
        logging.info('Starting up on %s port %s', self.__server_address, self.__server_port)
        sock.bind(server_address)
        microphones_data = {}
        received_data_count = 0
        while received_data_count < self.__microphone_amount:
            logging.info('Waiting to receive message...')
            # Buffer size 65535 - 28: presumably the UDP maximum minus the
            # 20-byte IP + 8-byte UDP headers -- TODO confirm.
            data, address = sock.recvfrom(65535 - 28)
            logging.info("Received %s", len(data))
            if len(data) == 36:
                # An id-only datagram terminates that microphone's stream.
                received_data[received_data_count] = microphones_data[data]
                received_data_count += 1
                logging.info("Received data from %s microphones", received_data_count)
            else:
                microphone_id = data[0:36]
                if not microphone_id in microphones_data:
                    microphones_data[microphone_id] = data[36:]
                else:
                    microphones_data[microphone_id] += data[36:]
        logging.info("Received data from all microphones")
    def generate_source_positions(self):
        """Fill __true_positions with synthetic source locations.

        Randomization is commented out; every trial currently uses the same
        fixed polar radius/angle (r, t) and height z.
        """
        logging.info('Generating sources positions.')
        for i in range(self.__trials):
            #r = numpy.random.rand(1) * 50
            #t = numpy.random.rand(1) * 2 * math.pi
            r = 0.1 * 50
            t = 0.2 * 50
            z = 0.3 * 20
            # Polar -> Cartesian in the XY plane.
            # NOTE(review): `math` is not imported directly in this module;
            # it is only reachable through the star imports -- confirm it
            # resolves at runtime.
            x = r * math.cos(t)
            y = r * math.sin(t)
            #z = numpy.random.rand(1) * 20
            self.__true_positions[i, 0] = x
            self.__true_positions[i, 1] = y
            self.__true_positions[i, 2] = z
        logging.info('Generated sources positions.')
    def generate_distances(self):
        """Compute the Euclidean distance from every true position to every sensor."""
        logging.info('Generating distances.')
        self.__distances = numpy.zeros((self.__trials, self.__microphone_amount))
        for i in range(self.__trials):
            for j in range(self.__microphone_amount):
                x1 = self.__true_positions[i, 0]
                y1 = self.__true_positions[i, 1]
                z1 = self.__true_positions[i, 2]
                x2 = self.__sensor_positions[j, 0]
                y2 = self.__sensor_positions[j, 1]
                z2 = self.__sensor_positions[j, 2]
                self.__distances[i, j] = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2)
        logging.info('Generated distances.')
    def log_results(self):
        """Log estimated vs. true source position for every trial."""
        for trial_number in range(self.__trials):
            logging.info('Trial number: %d', trial_number + 1)
            logging.info('Estimated X = %.15f, Estimated Y = %.15f, Estimated Z = %.15f',
                         float(self.__estimated_positions[trial_number][0]),
                         float(self.__estimated_positions[trial_number][1]),
                         float(self.__estimated_positions[trial_number][2]))
            logging.info('True X = %.15f, True Y = %.15f, True Z = %.15f',
                         float(self.__true_positions[trial_number][0]),
                         float(self.__true_positions[trial_number][1]),
                         float(self.__true_positions[trial_number][2]))
    def draw_plot(self):
        """Scatter-plot true vs. estimated positions in the XY plane."""
        pyplot.plot(self.__true_positions[:, 0], self.__true_positions[:, 1], 'bd', label='True position')
        pyplot.plot(self.__estimated_positions[:, 0], self.__estimated_positions[:, 1], 'r+',
                    label='Estimated position')
        pyplot.legend(loc='upper right', numpoints=1)
        pyplot.xlabel('X coordinate of target')
        pyplot.ylabel('Y coordinate of target')
        pyplot.title('TDOA Hyperbolic Localization')
        pyplot.axis([-50, 50, -50, 50])
        pyplot.show()
    def prepare(self):
        """Convert distances to time delays and sample-count padding.

        340.29 is presumably the speed of sound in m/s and 44100 the audio
        sample rate in Hz -- TODO confirm against the capture side.
        """
        logging.info('Preparing stage started.')
        self.__time_delays = numpy.divide(self.__distances, 340.29)
        self.__padding = numpy.multiply(self.__time_delays, 44100)
        logging.info('Preparing stage ended.')
    def handle_retrieved_data(self, received_data):
        """Unpickle each microphone's track and localize the source per trial."""
        for i in range(self.__trials):
            # Dead stores: x, y, z are overwritten by locate() below.
            x = self.__true_positions[i, 0]
            y = self.__true_positions[i, 1]
            z = self.__true_positions[i, 2]
            data = []
            for j in range(self.__microphone_amount):
                data.append(received_data[j])
            # NOTE(review): unpickling data received from the network is
            # unsafe if the peers are untrusted (arbitrary code execution).
            multi_track = numpy.array([loads(raw) for raw in data])
            logging.info('Prepared all data.')
            logging.info('Started source localization.')
            x, y, z = self.locate(self.__sensor_positions, multi_track)
            logging.info('Localized source.')
            self.__estimated_positions[i, 0] = x
            self.__estimated_positions[i, 1] = y
            self.__estimated_positions[i, 2] = z
    def locate(self, sensor_positions, multi_track):
        """Estimate the source position from the recorded tracks.

        Computes each track's delay relative to reference track 0 (serially
        or across worker processes), builds a linear system from the pairwise
        TDOA equations, and solves it in the least-squares sense with the
        Moore-Penrose pseudo-inverse.

        Returns:
            (x, y, z) estimated source coordinates.
        """
        s = sensor_positions.shape
        len = s[0]  # NOTE(review): shadows the builtin `len` for the rest of this method.
        time_delays = numpy.zeros((len, 1))
        starts = time.time()
        if self.__cores_amount == 1:
            # Serial path: delay of each track relative to track 0.
            for p in range(len):
                time_delays[p] = helpers.time_delay_function(multi_track[0,], multi_track[p,])
        else:
            # Parallel path: split the index range across worker processes
            # that write into a shared-memory Array.
            pp = ProcessParallel()
            outs = Array('d', range(len))
            ranges = []
            for result in helpers.per_delta(0, len, len / self.__cores_amount):
                ranges.append(result)
            for start, end in ranges:
                pp.add_task(helpers.time_delay_function_optimized, (start, end, outs, multi_track))
            pp.start_all()
            pp.join_all()
            for idx, res in enumerate(outs):
                time_delays[idx] = res
        ends = time.time()
        logging.info('%.15f passed for localization computation trial.', ends - starts)
        # Coefficients of the linearized TDOA equations: one row per sensor
        # i >= 2, each formed relative to sensors 0 and 1.
        Amat = numpy.zeros((len, 1))
        Bmat = numpy.zeros((len, 1))
        Cmat = numpy.zeros((len, 1))
        Dmat = numpy.zeros((len, 1))
        for i in range(2, len):
            x1 = sensor_positions[0, 0]
            y1 = sensor_positions[0, 1]
            z1 = sensor_positions[0, 2]
            x2 = sensor_positions[1, 0]
            y2 = sensor_positions[1, 1]
            z2 = sensor_positions[1, 2]
            xi = sensor_positions[i, 0]
            yi = sensor_positions[i, 1]
            zi = sensor_positions[i, 2]
            if time_delays[i] == 0 and time_delays[1] == 0:
                # No usable delay information for this row; leave it zero.
                Amat[i] = 0
                Bmat[i] = 0
                Cmat[i] = 0
                Dmat[i] = 0
                continue
            # 1 / (c * delay) terms; a zero delay contributes nothing.
            if time_delays[i] == 0:
                ti_value = 0
            else:
                ti_value = 1 / (340.29 * time_delays[i])
            if time_delays[1] == 0:
                t1_value = 0
            else:
                t1_value = 1 / (340.29 * time_delays[1])
            Amat[i] = ti_value * (-2 * x1 + 2 * xi) - t1_value * (
                -2 * x1 + 2 * x2)
            Bmat[i] = ti_value * (-2 * y1 + 2 * yi) - t1_value * (
                -2 * y1 + 2 * y2)
            Cmat[i] = ti_value * (-2 * z1 + 2 * zi) - t1_value * (
                -2 * z1 + 2 * z2)
            Sum1 = (x1 ** 2) + (y1 ** 2) + (z1 ** 2) - (xi ** 2) - (yi ** 2) - (zi ** 2)
            Sum2 = (x1 ** 2) + (y1 ** 2) + (z1 ** 2) - (x2 ** 2) - (y2 ** 2) - (z2 ** 2)
            Dmat[i] = 340.29 * (time_delays[i] - time_delays[1]) + ti_value * Sum1 - t1_value * Sum2
        # Assemble M * [x, y, z]^T = D; the always-zero rows 0-1 are sliced
        # away below before solving.
        M = numpy.zeros((len + 1, 3))
        D = numpy.zeros((len + 1, 1))
        for i in range(len):
            M[i, 0] = Amat[i]
            M[i, 1] = Bmat[i]
            M[i, 2] = Cmat[i]
            D[i] = Dmat[i]
        M = numpy.array(M[2:len, :])
        D = numpy.array(D[2:len])
        D = numpy.multiply(-1, D)
        # Least-squares solution via the Moore-Penrose pseudo-inverse.
        Minv = linalg.pinv(M)
        T = numpy.dot(Minv, D)
        x = T[0]
        y = T[1]
        z = T[2]
        return x, y, z
    @property
    def padding(self):
        # Per-(trial, microphone) delay expressed in samples (delay * 44100).
        return self.__padding
    @property
    def distances(self):
        # Per-(trial, microphone) distances from generate_distances().
        return self.__distances
| 34.428571 | 106 | 0.543249 | 9,102 | 0.968401 | 0 | 0 | 128 | 0.013618 | 0 | 0 | 835 | 0.088839 |
a84a5b5438a8189b9258fa08dad350324920becc | 1,139 | py | Python | tests/test_comparisons.py | opensafely/covid19-vaccine-coverage-tpp-emis | 79195309b06608baffb0d3eb57f7b99bfec17fcc | [
"MIT"
] | null | null | null | tests/test_comparisons.py | opensafely/covid19-vaccine-coverage-tpp-emis | 79195309b06608baffb0d3eb57f7b99bfec17fcc | [
"MIT"
] | null | null | null | tests/test_comparisons.py | opensafely/covid19-vaccine-coverage-tpp-emis | 79195309b06608baffb0d3eb57f7b99bfec17fcc | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from analysis.comparisons import gt, gte, lt, lte
def test_comparisons():
# a | b | gt | gte | lt | lte
# ---+---+----+-----+----+-----
# 1 | 1 | F | T | F | T
# 1 | 2 | F | F | T | T
# 2 | 1 | T | T | F | F
# 1 | - | T | T | F | F
# - | 1 | F | F | T | T
# - | - | F | F | F | F
# This makes things line up nicely
T = True
F = False
df = pd.DataFrame.from_records(
[
(1, 1, F, T, F, T),
(1, 2, F, F, T, T),
(2, 1, T, T, F, F),
(1, 0, T, T, F, F),
(0, 1, F, F, T, T),
(0, 0, F, F, F, F),
],
columns=["a", "b", "gt", "gte", "lt", "lte"],
).replace(0, np.nan)
assert_series_equal(gt(df["a"], df["b"]), df["gt"], check_names=False)
assert_series_equal(gte(df["a"], df["b"]), df["gte"], check_names=False)
assert_series_equal(lt(df["a"], df["b"]), df["lt"], check_names=False)
assert_series_equal(lte(df["a"], df["b"]), df["lte"], check_names=False)
| 29.973684 | 76 | 0.437226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 335 | 0.294118 |
a84ae539adfe720fa4cf764a55105118bf3a75ad | 1,127 | py | Python | tests/conftest.py | Shaza-Is/Re-IDOL | 5db3d950b2d3d8dbdbcafa77803b271c01e3ada1 | [
"Apache-2.0"
] | 2 | 2021-11-16T02:23:09.000Z | 2022-03-01T07:40:35.000Z | tests/conftest.py | Shaza-Is/Re-IDOL | 5db3d950b2d3d8dbdbcafa77803b271c01e3ada1 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | Shaza-Is/Re-IDOL | 5db3d950b2d3d8dbdbcafa77803b271c01e3ada1 | [
"Apache-2.0"
] | null | null | null | import pytest
import numpy as np
import os
import pyarrow as pa
import pyarrow.feather as feather
import pandas as pd
from app.services.preprocessor import PreProcessor
from typing import List
@pytest.fixture
def preprocessor() -> PreProcessor:
return PreProcessor("datasets/csvs/train.csv", "datasets/csvs/building1.csv")
@pytest.fixture
def generic_csv() -> str:
arr = np.random.rand(20, 20)
path = "datasets/csvs/dummy.csv"
np.savetxt(path, arr)
yield path
os.remove(path)
@pytest.fixture
def generic_feathers() -> List[str]:
base_path = "datasets/gen"
files = []
n_files = 30
col_rows = 20
rows = [f"row{x}" for x in range(0, col_rows)]
columns = [f"column{x}" for x in range(0, col_rows)]
for number in range(0, n_files):
arr = np.random.rand(col_rows , col_rows)
df = pd.DataFrame(arr, index = rows, columns = columns)
file_path = f"{base_path}/gen_{number}.feather"
files.append(file_path)
feather.write_feather(df, file_path)
yield (files, n_files, col_rows)
for file in files:
os.remove(file)
| 22.54 | 81 | 0.668146 | 0 | 0 | 757 | 0.671695 | 922 | 0.818101 | 0 | 0 | 149 | 0.132209 |
a84b4fb4bb163f73f874300aa9b30127b25e1b1a | 2,811 | py | Python | color_histogram/results/hist_2d.py | waragai-katsunori/ColorHistogram | f57c1115a94aa72387a6e40aef88b1861eb470ab | [
"MIT"
] | 73 | 2016-03-09T06:38:47.000Z | 2021-08-20T10:12:10.000Z | color_histogram/results/hist_2d.py | waragai-katsunori/ColorHistogram | f57c1115a94aa72387a6e40aef88b1861eb470ab | [
"MIT"
] | 3 | 2017-10-31T14:11:54.000Z | 2021-04-06T15:51:37.000Z | color_histogram/results/hist_2d.py | waragai-katsunori/ColorHistogram | f57c1115a94aa72387a6e40aef88b1861eb470ab | [
"MIT"
] | 32 | 2016-10-22T06:17:44.000Z | 2021-10-30T16:20:11.000Z |
# -*- coding: utf-8 -*-
# # @package color_histogram.results.hist_2d
#
# Compute 2D color histogram result.
# @author tody
# @date 2015/08/28
import os
import numpy as np
import matplotlib.pyplot as plt
from color_histogram.io_util.image import loadRGB
from color_histogram.cv.image import rgb, to32F
from color_histogram.datasets.datasets import dataFile
from color_histogram.results.results import resultFile, batchResults
from color_histogram.plot.window import showMaximize
from color_histogram.core.hist_2d import Hist2D
from color_histogram.util.timer import timing_func
# # Plot 2D color histograms for the target image, color space, channels.
@timing_func
def plotHistogram2D(image, num_bins, color_space, channels, ax):
    """Plot a 2D color histogram of *image* on *ax* for one channel pair."""
    title = "%s (%s, %s): %s bins" % (
        color_space,
        color_space[channels[0]],
        color_space[channels[1]],
        num_bins,
    )
    plt.title(title, fontsize=15)
    histogram = Hist2D(image, num_bins=num_bins, color_space=color_space, channels=channels)
    histogram.plot(ax)
# # Create histogram 2D result function.
def histogram2DResultFunc(num_bins=32):
    """Return a one-argument callback running histogram2DResult with num_bins bound."""
    def run_result(image_file):
        histogram2DResult(image_file, num_bins)
    return run_result
# # Compute histogram 2D result for the image file.
def histogram2DResult(image_file, num_bins=32, image=None, tile=None):
    """Render and save the 2D-histogram result figure for one image.

    Args:
        image_file: Path to the source image (also used to name the result).
        num_bins: Number of bins per histogram axis.
        image: Optional pre-loaded RGB image; loaded from image_file if None.
        tile: Optional thumbnail shown in the figure; defaults to image.
    """
    image_name = os.path.basename(image_file)
    if image is None:
        # Strip the extension only when we load the file ourselves
        # (matches the original control flow).
        image_name = os.path.splitext(image_name)[0]
        image = loadRGB(image_file)
    if tile is None:
        tile = image
    fig = plt.figure(figsize=(10, 6))
    fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.95, wspace=0.3, hspace=0.2)
    font_size = 15
    # Fixed typo in the displayed title ("Hisotogram" -> "Histogram").
    fig.suptitle("Histogram 2D", fontsize=font_size)
    h, w = image.shape[:2]
    fig.add_subplot(231)
    plt.title("Original Image: %s x %s" % (w, h), fontsize=font_size)
    plt.imshow(tile)
    plt.axis('off')
    color_space = "hsv"
    channels_list = [[0, 1], [0, 2], [1, 2]]
    # Subplots 234-236 hold the three channel-pair histograms.
    for plot_id, channels in enumerate(channels_list, start=234):
        ax = fig.add_subplot(plot_id)
        plotHistogram2D(image, num_bins, color_space, channels, ax)
    result_file = resultFile(image_name + "_hist2D")
    plt.savefig(result_file, transparent=True)
# # Compute histogram 2D results for the given data names, ids.
def histogram2DResults(data_names, data_ids, num_bins=32):
    """Compute histogram 2D results for the given data names and ids."""
    result_func = histogram2DResultFunc(num_bins)
    batchResults(data_names, data_ids, result_func, "Histogram 2D")
if __name__ == '__main__':
    # Demo entry point: render 2D-histogram figures for the first three
    # "flower" dataset images.
    data_names = ["flower"]
    data_ids = [0, 1, 2]
    histogram2DResults(data_names, data_ids)
| 30.554348 | 90 | 0.677695 | 0 | 0 | 0 | 0 | 464 | 0.165066 | 0 | 0 | 491 | 0.174671 |
a84ba5a5805b33c3b1ad0afc0fc822a27f2c5d05 | 2,926 | py | Python | tests/test_python.py | sfeltman/libsmf | a70eb477732d2e8584af59389b4909541cf5bc98 | [
"BSD-2-Clause"
] | 1 | 2019-04-15T01:37:55.000Z | 2019-04-15T01:37:55.000Z | tests/test_python.py | sfeltman/libsmf | a70eb477732d2e8584af59389b4909541cf5bc98 | [
"BSD-2-Clause"
] | null | null | null | tests/test_python.py | sfeltman/libsmf | a70eb477732d2e8584af59389b4909541cf5bc98 | [
"BSD-2-Clause"
] | null | null | null | import os
import unittest
import tempfile
from gi.repository import Smf
class Test(unittest.TestCase):
    """Round-trip tests for the Smf (Standard MIDI File) GObject bindings."""
    def setUp(self):
        # Fixture .mid files live next to this test module.
        self.path = os.path.dirname(__file__)
    def compare_smf_files(self, a, b):
        # Assert two Smf.File objects are structurally identical: file
        # header, every track, and every event (including raw MIDI buffers).
        self.assertEqual(a.format, b.format)
        self.assertEqual(a.ppqn, b.ppqn)
        self.assertEqual(a.frames_per_second, b.frames_per_second)
        self.assertEqual(a.resolution, b.resolution)
        self.assertEqual(a.number_of_tracks, b.number_of_tracks)
        self.assertEqual(len(a.tracks_array), len(b.tracks_array))
        for i in range(a.number_of_tracks):
            tracka = a.tracks_array[i]
            trackb = b.tracks_array[i]
            # Each track must point back at its own file.
            self.assertEqual(tracka.smf, a)
            self.assertEqual(trackb.smf, b)
            self.assertEqual(tracka.track_number, trackb.track_number)
            self.assertEqual(tracka.number_of_events, trackb.number_of_events)
            self.assertEqual(tracka.file_buffer_length, trackb.file_buffer_length)
            self.assertEqual(tracka.last_status, trackb.last_status)
            self.assertEqual(tracka.next_event_offset, trackb.next_event_offset)
            #self.assertEqual(tracka.next_event_number, trackb.next_event_number)
            #self.assertEqual(tracka.time_of_next_event, trackb.time_of_next_event)
            tracka_events = tracka.events_array
            trackb_events = trackb.events_array
            for j in range(tracka.number_of_events):
                eventa = tracka_events[j]
                eventb = trackb_events[j]
                # Each event must point back at its own track.
                self.assertEqual(tracka, eventa.track)
                self.assertEqual(trackb, eventb.track)
                self.assertEqual(eventa.event_number, eventb.event_number)
                self.assertEqual(eventa.delta_time_pulses, eventb.delta_time_pulses)
                self.assertEqual(eventa.time_pulses, eventb.time_pulses)
                self.assertEqual(eventa.time_seconds, eventb.time_seconds)
                self.assertEqual(eventa.track_number, eventb.track_number)
                self.assertEqual(eventa.midi_buffer_length, eventb.midi_buffer_length)
                self.assertEqual(eventa.get_buffer(), eventb.get_buffer())
    @unittest.expectedFailure
    def test_tempo_ref_counts(self):
        # Known failure: tempo ref-count bookkeeping is not yet as expected.
        bach = Smf.File.load(os.path.join(self.path, 'chpn_op53.mid'))
        tempo = bach.get_last_tempo()
        #self.assertEqual(tempo.ref_count, 2)
        bach.remove_tempo(tempo)
        self.assertEqual(tempo.ref_count, 1)
    def test_file_ref_count(self):
        # Placeholder: file ref-count checks not implemented yet.
        pass
    def test_bach_read_write_read_compare(self):
        # Load a fixture, save it to a temp file, reload it, and compare
        # the two in-memory structures field by field.
        orig = Smf.File.load(os.path.join(self.path, 'chpn_op53.mid'))
        handle, temp_filename = tempfile.mkstemp('mid')
        os.close(handle)
        orig.save(temp_filename)
        new = Smf.File.load(temp_filename)
        self.compare_smf_files(orig, new)
if __name__ == '__main__':
    # Allow running this test module directly with the stock unittest runner.
    unittest.main()
| 38 | 86 | 0.669515 | 2,800 | 0.956938 | 0 | 0 | 295 | 0.10082 | 0 | 0 | 222 | 0.075871 |