Column schema (⌀ = nullable):

| Column | Type | Observed range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
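The last three columns are statistics derived from `content`; their exact definitions are not stated in the schema above. The sketch below shows one plausible way to reproduce them for a row. The function name `content_stats` and the details (line lengths measured without newline characters, alphanumeric fraction taken over all characters) are assumptions for illustration, not taken from the dataset.

```python
def content_stats(content: str) -> dict:
    """Plausible derivation of the per-row content statistics (assumed, not confirmed)."""
    lines = content.splitlines() or [""]
    lengths = [len(line) for line in lines]
    alnum = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
        "alphanum_fraction": alnum / len(content) if content else 0.0,
    }

# For example, the first row below reports avg_line_length 28.234043,
# max_line_length 79 and alphanum_fraction 0.60412 for
# scikitplot/tests/test_clustering.py.
```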
Row 1
hexsha: c9a65c9c39bb1b0664afb6f88c492d94f51b41c5 | size: 3,981 | ext: py | lang: Python
max_stars: path scikitplot/tests/test_clustering.py | repo leozhoujf/scikit-plot | head 2dd3e6a76df77edcbd724c4db25575f70abb57cb | licenses ["MIT"] | stars 2,360 (2017-02-12T01:43:09.000Z to 2022-03-31T10:06:31.000Z)
max_issues: path scikitplot/tests/test_clustering.py | repo leozhoujf/scikit-plot | head 2dd3e6a76df77edcbd724c4db25575f70abb57cb | licenses ["MIT"] | issues 79 (2017-02-12T21:42:08.000Z to 2022-02-28T03:00:44.000Z)
max_forks: path scikitplot/tests/test_clustering.py | repo leozhoujf/scikit-plot | head 2dd3e6a76df77edcbd724c4db25575f70abb57cb | licenses ["MIT"] | forks 302 (2017-02-17T19:36:33.000Z to 2022-01-28T16:22:06.000Z)
content:
from __future__ import absolute_import
import unittest
import scikitplot
import warnings
import numpy as np
from sklearn.datasets import load_iris as load_data
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt


class TestClassifierFactory(unittest.TestCase):
    def setUp(self):
        class Clusterer:
            def __init__(self):
                pass

            def fit(self):
                pass

            def fit_predict(self):
                pass

        class NotClusterer:
            def __init__(self):
                pass

        self.Clusterer = Clusterer
        self.NotClusterer = NotClusterer

    def test_instance_validation(self):
        clf = self.Clusterer()
        scikitplot.clustering_factory(clf)
        not_clf = self.NotClusterer()
        self.assertRaises(TypeError, scikitplot.clustering_factory, not_clf)

    def test_method_insertion(self):
        clf = self.Clusterer()
        scikitplot.clustering_factory(clf)
        assert hasattr(clf, 'plot_silhouette')
        assert hasattr(clf, 'plot_elbow_curve')

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            scikitplot.clustering_factory(clf)
            assert len(w) >= 2
            for warning in w[1:]:
                assert issubclass(warning.category, UserWarning)
                assert ' method already in clf. ' \
                       'Overriding anyway. This may ' \
                       'result in unintended behavior.' in str(warning.message)


class TestPlotSilhouette(unittest.TestCase):
    def setUp(self):
        np.random.seed(0)
        self.X, self.y = load_data(return_X_y=True)
        p = np.random.permutation(len(self.X))
        self.X, self.y = self.X[p], self.y[p]

    def tearDown(self):
        plt.close("all")

    def test_copy(self):
        np.random.seed(0)
        clf = KMeans()
        scikitplot.clustering_factory(clf)
        ax = clf.plot_silhouette(self.X)
        assert not hasattr(clf, "cluster_centers_")
        ax = clf.plot_silhouette(self.X, copy=False)
        assert hasattr(clf, "cluster_centers_")

    def test_cmap(self):
        np.random.seed(0)
        clf = KMeans()
        scikitplot.clustering_factory(clf)
        ax = clf.plot_silhouette(self.X, cmap='Spectral')
        ax = clf.plot_silhouette(self.X, cmap=plt.cm.Spectral)

    def test_ax(self):
        np.random.seed(0)
        clf = KMeans()
        scikitplot.clustering_factory(clf)
        fig, ax = plt.subplots(1, 1)
        out_ax = clf.plot_silhouette(self.X)
        assert ax is not out_ax
        out_ax = clf.plot_silhouette(self.X, ax=ax)
        assert ax is out_ax


class TestPlotElbow(unittest.TestCase):
    def setUp(self):
        np.random.seed(0)
        self.X, self.y = load_data(return_X_y=True)
        p = np.random.permutation(len(self.X))
        self.X, self.y = self.X[p], self.y[p]

    def tearDown(self):
        plt.close("all")

    def test_n_clusters_in_clf(self):
        np.random.seed(0)

        class DummyClusterer:
            def __init__(self):
                pass

            def fit(self):
                pass

            def fit_predict(self):
                pass

        clf = DummyClusterer()
        scikitplot.clustering_factory(clf)
        self.assertRaises(TypeError, clf.plot_elbow_curve, self.X)

    def test_cluster_ranges(self):
        np.random.seed(0)
        clf = KMeans()
        scikitplot.clustering_factory(clf)
        ax = clf.plot_elbow_curve(self.X, cluster_ranges=range(1, 10))
        ax = clf.plot_elbow_curve(self.X)

    def test_ax(self):
        np.random.seed(0)
        clf = KMeans()
        scikitplot.clustering_factory(clf)
        fig, ax = plt.subplots(1, 1)
        out_ax = clf.plot_elbow_curve(self.X)
        assert ax is not out_ax
        out_ax = clf.plot_elbow_curve(self.X, ax=ax)
        assert ax is out_ax


if __name__ == '__main__':
    unittest.main()
avg_line_length: 28.234043 | max_line_length: 79 | alphanum_fraction: 0.60412
Row 2
hexsha: 48aa586eb94948416c769721ea3d9bc286163e95 | size: 4,994 | ext: py | lang: Python
max_stars: path PROJ/LEVY/Credit_Default_Swaps/Script_Credit_Default_Swap.py | repo mattslezak-shell/PROJ_Option_Pricing_Matlab | head 6105bd00ba3471802180c122fdf81e90833a91c4 | licenses ["MIT"] | stars null (no event dates)
max_issues: path PROJ/LEVY/Credit_Default_Swaps/Script_Credit_Default_Swap.py | repo mattslezak-shell/PROJ_Option_Pricing_Matlab | head 6105bd00ba3471802180c122fdf81e90833a91c4 | licenses ["MIT"] | issues null (no event dates)
max_forks: path PROJ/LEVY/Credit_Default_Swaps/Script_Credit_Default_Swap.py | repo mattslezak-shell/PROJ_Option_Pricing_Matlab | head 6105bd00ba3471802180c122fdf81e90833a91c4 | licenses ["MIT"] | forks 1 (2022-01-07T15:31:45.000Z to 2022-01-07T15:31:45.000Z)
content:
# Generated with SMOP 0.41-beta
try:
    from smop.libsmop import *
except ImportError:
    raise ImportError('File compiled with `smop3`, please install `smop3` to run it.') from None
# Script_Credit_Default_Swap.m

##################################################################
### CREDIT DEFAULT SWAP / DEFAULT PROBABILITY CALCULATOR
##################################################################
# Description: Script to Calc Fair Spread of Credit Default Swaps (and default probabilities) in Levy Models
# using the PROJ method
# Author: Justin Kirkby
# References: (1) Efficient Option Pricing By Frame Duality with The Fast
# Fourier Transform, SIAM J. Financial Math., 2015
# (2) American and exotic option pricing with jump diffusions and other Levy Processes,
# J. Computational Finance, 2018
##################################################################

folder,name,ext=fileparts(which(mfilename('fullpath')),nargout=3)
# Script_Credit_Default_Swap.m:12
cd(folder)
addpath('../RN_CHF')
addpath('../Helper_Functions')

##############################################
### Step 1) CHOOSE CONTRACT/GENERAL PARAMETERS
##############################################
# For details on CDS model, see reference (2) above
r=0.04
# Script_Credit_Default_Swap.m:21
T=1
# Script_Credit_Default_Swap.m:22
M=52
# Script_Credit_Default_Swap.m:23
R=0.4
# Script_Credit_Default_Swap.m:24
L=0.4
# Script_Credit_Default_Swap.m:25

##############################################
### Step 2) CHOOSE MODEL PARAMETERS
##############################################
model=1
# Script_Credit_Default_Swap.m:30
params=cellarray([])
# Script_Credit_Default_Swap.m:31
if model == 1:
    params.sigmaBSM = copy(0.2)
    # Script_Credit_Default_Swap.m:34
else:
    if model == 2:
        params.C = copy(0.02)
        # Script_Credit_Default_Swap.m:37
        params.G = copy(5)
        # Script_Credit_Default_Swap.m:38
        params.MM = copy(15)
        # Script_Credit_Default_Swap.m:39
        params.Y = copy(1.2)
        # Script_Credit_Default_Swap.m:40
    else:
        if model == 3:
            params.alpha = copy(15)
            # Script_Credit_Default_Swap.m:43
            params.beta = copy(- 5)
            # Script_Credit_Default_Swap.m:44
            params.delta = copy(0.5)
            # Script_Credit_Default_Swap.m:45
        else:
            if model == 4:
                params.sigma = copy(0.12)
                # Script_Credit_Default_Swap.m:48
                params.lam = copy(0.4)
                # Script_Credit_Default_Swap.m:49
                params.muj = copy(- 0.12)
                # Script_Credit_Default_Swap.m:50
                params.sigmaj = copy(0.18)
                # Script_Credit_Default_Swap.m:51
            else:
                if model == 5:
                    params.sigma = copy(0.15)
                    # Script_Credit_Default_Swap.m:54
                    params.lam = copy(3)
                    # Script_Credit_Default_Swap.m:55
                    params.p_up = copy(0.2)
                    # Script_Credit_Default_Swap.m:56
                    params.eta1 = copy(25)
                    # Script_Credit_Default_Swap.m:57
                    params.eta2 = copy(10)
                    # Script_Credit_Default_Swap.m:58

##############################################
### Step 3) CHOOSE PROJ PARAMETERS
##############################################
UseCumulant=1
# Script_Credit_Default_Swap.m:65
mult=2
# Script_Credit_Default_Swap.m:66

#---------------------
# APPROACH 1: Cumulant Based approach for grid width
# (see "Robust Option Pricing with Characteristic Functions and the BSpline Order of Density Projection")
#---------------------
if UseCumulant == 1:
    logN=14
    # Script_Credit_Default_Swap.m:73
    L1=12
    # Script_Credit_Default_Swap.m:74
#---------------------
# APPROACH 2: Manual GridWidth approach
#---------------------
else:
    P=10
    # Script_Credit_Default_Swap.m:79
    Pbar=3
    # Script_Credit_Default_Swap.m:80

##############################################
### PRICE
##############################################
### Note: rnCHF is the risk neutral CHF, c1,c2,c4 are the cumulants
modelInput=getModelInput(model,T / M,r,0,params)
# Script_Credit_Default_Swap.m:87
if UseCumulant == 1:
    alpha=getTruncationAlpha(T,L1,modelInput,model)
    # Script_Credit_Default_Swap.m:90
else:
    logN=P + Pbar
    # Script_Credit_Default_Swap.m:92
    alpha=2 ** Pbar / 2
    # Script_Credit_Default_Swap.m:93
N=2 ** logN
# Script_Credit_Default_Swap.m:95

tic
prob,spread=PROJ_CDS(R,L,M,T,r,N,alpha,mult,modelInput.rnCHF,nargout=2)
# Script_Credit_Default_Swap.m:98
toc
fprintf('Default Prob: %.8f \n',prob)
fprintf('CDS Spread: %.8f \n',spread)
avg_line_length: 34.441379 | max_line_length: 109 | alphanum_fraction: 0.536444
Row 3
hexsha: 903c70142d3a0859ba4ad1d0da17c99ad9affdd4 | size: 1,302 | ext: py | lang: Python
max_stars: path tests/test_time.py | repo User3574/hyperqueue | head d4dea5a805925cb624eb81da65840d5a8226d4a9 | licenses ["MIT"] | stars null (no event dates)
max_issues: path tests/test_time.py | repo User3574/hyperqueue | head d4dea5a805925cb624eb81da65840d5a8226d4a9 | licenses ["MIT"] | issues 21 (2021-03-31T16:00:39.000Z to 2021-05-06T08:41:26.000Z)
max_forks: path tests/test_time.py | repo User3574/hyperqueue | head d4dea5a805925cb624eb81da65840d5a8226d4a9 | licenses ["MIT"] | forks 1 (2021-05-04T07:40:58.000Z to 2021-05-04T07:40:58.000Z)
content:
import time

from .conftest import HqEnv
from .utils import wait_for_job_state


def test_job_time_request1(hq_env: HqEnv):
    # Tests that tasks are sent only to worker3 and worker4 (because of time requests)
    hq_env.start_server()
    hq_env.start_worker(args=["--time-limit", "2s"])
    hq_env.start_worker(args=["--time-limit", "4s"])
    hq_env.start_worker(args=["--time-limit", "10s"])
    hq_env.start_worker()
    hq_env.command(["submit", "--array=1-20", "--time-request=5s", "--", "ls"])
    wait_for_job_state(hq_env, 1, "FINISHED")
    table = hq_env.command(["job", "1"], as_table=True)
    assert {"worker3", "worker4"} == set(table.get_row_value("Workers").split(", "))


def test_job_time_request2(hq_env: HqEnv):
    # Tests that a task with a time request is not sent to a worker without enough remaining lifetime
    hq_env.start_server()
    hq_env.start_worker(args=["--time-limit", "4s"])
    hq_env.command(["submit", "--time-request=2s", "--", "ls"])
    time.sleep(2.2)
    hq_env.command(["submit", "--time-request=2s", "--", "ls"])
    time.sleep(1.0)
    # hq_env.start_worker(args=["--time-limit", "5s"])
    wait_for_job_state(hq_env, 1, "FINISHED")
    table = hq_env.command(["jobs"], as_table=True)
    assert table[1][2] == "FINISHED"
    assert table[2][2] == "WAITING"
avg_line_length: 37.2 | max_line_length: 90 | alphanum_fraction: 0.655146
Row 4
hexsha: 5e848cefa51531281aca4628861c0876202cb02e | size: 1,312 | ext: py | lang: Python
max_stars: path tests/test_client.py | repo symvo/udsi | head 715a3bed866af628fa217504e2fd41377555f0f2 | licenses ["MIT"] | stars 7 (2019-07-08T01:05:51.000Z to 2019-07-09T11:57:51.000Z)
max_issues: path tests/test_client.py | repo symvo/udsi | head 715a3bed866af628fa217504e2fd41377555f0f2 | licenses ["MIT"] | issues 1 (2019-07-07T03:12:48.000Z to 2019-07-07T04:12:38.000Z)
max_forks: path tests/test_client.py | repo rlygud/udsi | head 715a3bed866af628fa217504e2fd41377555f0f2 | licenses ["MIT"] | forks 1 (2019-03-02T03:58:24.000Z to 2019-03-02T03:58:24.000Z)
content:
"""
tests.test_client
~~~~~~~~~~~~~~~~~
This module implements the unit tests for the `client` module.
"""
import json
from tests.utils import async_test, make_client, make_file, cleanup
class TestClient:
""" Test class for the `client` module. """
@async_test
async def test_init(self):
client = make_client()
assert "aperio-root-folder" in json.dumps(client.root)
@async_test
async def test_upload(self):
client = make_client()
file, sheet = await make_file(client)
assert type(sheet) is dict
assert "temp" in json.dumps(sheet)
await cleanup(client, sheet.get("spreadsheetId"))
@async_test
async def test_get(self):
client = make_client()
file, sheet = await make_file(client)
id = sheet.get("spreadsheetId")
sheet, data = await client.get(id)
assert type(sheet) is dict
assert "temp" in json.dumps(sheet)
await cleanup(client, sheet.get("spreadsheetId"))
@async_test
async def test_list(self):
client = make_client()
file, sheet = await make_file(client)
files = await client.list()
assert type(files) is list
assert "temp" in json.dumps(files)
await cleanup(client, sheet.get("spreadsheetId"))
avg_line_length: 23.428571 | max_line_length: 67 | alphanum_fraction: 0.628049
Row 5
hexsha: d6ecbf212234d97a9d5afe1102eabbc8e2ee2aff | size: 468 | ext: py | lang: Python
max_stars: path stubs/micropython-v1_10-esp8266/zlib.py | repo mattytrentini/micropython-stubs | head 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | licenses ["MIT"] | stars null (no event dates)
max_issues: path stubs/micropython-v1_10-esp8266/zlib.py | repo mattytrentini/micropython-stubs | head 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | licenses ["MIT"] | issues null (no event dates)
max_forks: path stubs/micropython-v1_10-esp8266/zlib.py | repo mattytrentini/micropython-stubs | head 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | licenses ["MIT"] | forks null (no event dates)
content:
"""
Module: 'zlib' on esp8266 v1.10
"""
# MCU: (sysname='esp8266', nodename='esp8266', release='2.2.0-dev(9422289)', version='v1.10-8-g8b7039d7d on 2019-01-26', machine='ESP module with ESP8266')
# Stubber: 1.1.0 - updated
from typing import Any
class DecompIO:
""""""
def read(self, *args) -> Any:
pass
def readinto(self, *args) -> Any:
pass
def readline(self, *args) -> Any:
pass
def decompress(*args) -> Any:
pass
avg_line_length: 19.5 | max_line_length: 155 | alphanum_fraction: 0.600427
Row 6
hexsha: b76bb81952192c1adb7cdb422d3d29ad5cc0c17c | size: 6,795 | ext: py | lang: Python
max_stars: path pandapower/test/topology/test_graph_searches.py | repo mathildebadoual/pandapower | head 9ba4bcb78e84b644d2ba6df0c08e285c54af8ddc | licenses ["BSD-3-Clause"] | stars 1 (2020-10-19T06:39:15.000Z to 2020-10-19T06:39:15.000Z)
max_issues: path pandapower/test/topology/test_graph_searches.py | repo miek770/pandapower | head de004efc1b7432a633792af4f551f7635a02db47 | licenses ["BSD-3-Clause"] | issues null (no event dates)
max_forks: path pandapower/test/topology/test_graph_searches.py | repo miek770/pandapower | head de004efc1b7432a633792af4f551f7635a02db47 | licenses ["BSD-3-Clause"] | forks null (no event dates)
content:
# -*- coding: utf-8 -*-

# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.

import numpy as np
import pandapower as pp
import pytest

import pandapower.topology as top


@pytest.fixture
def feeder_network():
    net = pp.create_empty_network()
    current_bus = pp.create_bus(net, vn_kv=20.)
    pp.create_ext_grid(net, current_bus)
    for length in [12, 6, 8]:
        new_bus = pp.create_bus(net, vn_kv=20.)
        pp.create_line(net, current_bus, new_bus, length_km=length,
                       std_type="NA2XS2Y 1x185 RM/25 12/20 kV")
        current_bus = new_bus
    pp.create_line(net, current_bus, 0, length_km=5, std_type="NA2XS2Y 1x185 RM/25 12/20 kV")
    return net


def test_determine_stubs(feeder_network):
    net = feeder_network
    sec_bus = pp.create_bus(net, vn_kv=20.)
    sec_line = pp.create_line(net, 3, sec_bus, length_km=3, std_type="NA2XS2Y 1x185 RM/25 12/20 kV")
    top.determine_stubs(net)
    assert not np.any(net.bus.on_stub.loc[set(net.bus.index) - {sec_bus}].values)
    assert not np.any(net.line.is_stub.loc[set(net.line.index) - {sec_line}].values)
    assert net.bus.on_stub.at[sec_bus]
    assert net.line.is_stub.at[sec_line]


def test_distance(feeder_network):
    net = feeder_network
    dist = top.calc_distance_to_bus(net, 0)
    assert np.allclose(dist.sort_index().values, [0, 12, 13, 5])
    dist = top.calc_distance_to_bus(net, 0, notravbuses={3})
    assert np.allclose(dist.sort_index().values, [0, 12, 18, 5])
    pp.create_switch(net, bus=3, element=2, et="l", closed=False)
    dist = top.calc_distance_to_bus(net, 0)
    assert np.allclose(dist.sort_index().values, [0, 12, 18, 5])


def test_unsupplied_buses_with_in_service():
    # IS ext_grid --- open switch --- OOS bus --- open switch --- IS bus
    net = pp.create_empty_network()
    bus_sl = pp.create_bus(net, 0.4)
    pp.create_ext_grid(net, bus_sl)
    bus0 = pp.create_bus(net, 0.4, in_service=False)
    pp.create_switch(net, bus_sl, bus0, 'b', False)
    bus1 = pp.create_bus(net, 0.4, in_service=True)
    pp.create_switch(net, bus0, bus1, 'b', False)
    ub = top.unsupplied_buses(net)
    assert ub == {2}

    # OOS ext_grid --- closed switch --- IS bus
    net = pp.create_empty_network()
    bus_sl = pp.create_bus(net, 0.4)
    pp.create_ext_grid(net, bus_sl, in_service=False)
    bus0 = pp.create_bus(net, 0.4, in_service=True)
    pp.create_switch(net, bus_sl, bus0, 'b', True)
    ub = top.unsupplied_buses(net)
    assert ub == {0, 1}


def test_unsupplied_buses_with_switches():
    net = pp.create_empty_network()
    pp.create_buses(net, 8, 20)
    pp.create_buses(net, 5, 0.4)
    pp.create_ext_grid(net, 0)
    pp.create_line(net, 0, 1, 1.2, "NA2XS2Y 1x185 RM/25 12/20 kV")
    pp.create_switch(net, 0, 0, "l", closed=True)
    pp.create_switch(net, 1, 0, "l", closed=False)
    pp.create_line(net, 0, 2, 1.2, "NA2XS2Y 1x185 RM/25 12/20 kV")
    pp.create_switch(net, 0, 1, "l", closed=False)
    pp.create_switch(net, 2, 1, "l", closed=True)
    pp.create_line(net, 0, 3, 1.2, "NA2XS2Y 1x185 RM/25 12/20 kV")
    pp.create_switch(net, 0, 2, "l", closed=False)
    pp.create_switch(net, 3, 2, "l", closed=False)
    pp.create_line(net, 0, 4, 1.2, "NA2XS2Y 1x185 RM/25 12/20 kV")
    pp.create_switch(net, 0, 3, "l", closed=True)
    pp.create_switch(net, 4, 3, "l", closed=True)
    pp.create_line(net, 0, 5, 1.2, "NA2XS2Y 1x185 RM/25 12/20 kV")
    pp.create_switch(net, 0, 6, "b", closed=True)
    pp.create_switch(net, 0, 7, "b", closed=False)
    pp.create_transformer(net, 0, 8, "0.63 MVA 20/0.4 kV")
    pp.create_switch(net, 0, 0, "t", closed=True)
    pp.create_switch(net, 8, 0, "t", closed=False)
    pp.create_transformer(net, 0, 9, "0.63 MVA 20/0.4 kV")
    pp.create_switch(net, 0, 1, "t", closed=False)
    pp.create_switch(net, 9, 1, "t", closed=True)
    pp.create_transformer(net, 0, 10, "0.63 MVA 20/0.4 kV")
    pp.create_switch(net, 0, 2, "t", closed=False)
    pp.create_switch(net, 10, 2, "t", closed=False)
    pp.create_transformer(net, 0, 11, "0.63 MVA 20/0.4 kV")
    pp.create_switch(net, 0, 3, "t", closed=True)
    pp.create_switch(net, 11, 3, "t", closed=True)
    pp.create_transformer(net, 0, 12, "0.63 MVA 20/0.4 kV")
    pp.create_buses(net, 2, 20)
    pp.create_impedance(net, 0, 13, 1, 1, 10)
    pp.create_impedance(net, 0, 14, 1, 1, 10, in_service=False)

    ub = top.unsupplied_buses(net)
    assert ub == {1, 2, 3, 7, 8, 9, 10, 14}
    ub = top.unsupplied_buses(net, respect_switches=False)
    assert ub == {14}


def test_graph_characteristics(feeder_network):
    # adapt network
    net = feeder_network
    bus0 = pp.create_bus(net, vn_kv=20.0)
    bus1 = pp.create_bus(net, vn_kv=20.0)
    bus2 = pp.create_bus(net, vn_kv=20.0)
    bus3 = pp.create_bus(net, vn_kv=20.0)
    bus4 = pp.create_bus(net, vn_kv=20.0)
    bus5 = pp.create_bus(net, vn_kv=20.0)
    bus6 = pp.create_bus(net, vn_kv=20.0)
    bus7 = pp.create_bus(net, vn_kv=20.0)
    bus8 = pp.create_bus(net, vn_kv=20.0)
    bus9 = pp.create_bus(net, vn_kv=20.0)
    new_connections = [(3, bus0), (bus0, bus1), (bus0, bus2), (1, bus3), (2, bus4), (bus3, bus4),
                       (bus4, bus5), (bus4, bus6), (bus5, bus6), (2, bus7), (bus7, bus8),
                       (bus8, bus9), (bus9, bus7)]
    for fb, tb in new_connections:
        pp.create_line(net, fb, tb, length_km=1.0, std_type="NA2XS2Y 1x185 RM/25 12/20 kV")

    # get characteristics
    mg = top.create_nxgraph(net, respect_switches=False)
    characteristics = ["bridges", "articulation_points", "connected", "stub_buses",
                       "required_bridges", "notn1_areas"]
    char_dict = top.find_graph_characteristics(mg, net.ext_grid.bus, characteristics)
    bridges = char_dict["bridges"]
    articulation_points = char_dict["articulation_points"]
    connected = char_dict["connected"]
    stub_buses = char_dict["stub_buses"]
    required_bridges = char_dict["required_bridges"]
    notn1_areas = char_dict["notn1_areas"]
    assert bridges == {(3, 4), (4, 5), (4, 6), (2, 11)}
    assert articulation_points == {8, 3, 4, 2, 11}
    assert connected == {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}
    assert stub_buses == {4, 5, 6, 11, 12, 13}
    assert required_bridges == {4: [(3, 4)], 5: [(3, 4), (4, 5)], 6: [(3, 4), (4, 6)], 11: [(2, 11)],
                                12: [(2, 11)], 13: [(2, 11)]}
    assert notn1_areas == {8: {9, 10}, 3: {4, 5, 6}, 2: {11, 12, 13}}


if __name__ == '__main__':
    pass
    # pytest.main(["test_graph_searches.py"])
avg_line_length: 40.446429 | max_line_length: 102 | alphanum_fraction: 0.62443
Row 7
hexsha: 36f9027be726a5817af930bcce359eae07b4a862 | size: 1,010 | ext: py | lang: Python
max_stars: path dynamicArray.py | repo eagletusk/pythonPractice | head 4ac8d8c2f45072407b8f25514a3e54bddcd38a33 | licenses ["MIT"] | stars null (no event dates)
max_issues: path dynamicArray.py | repo eagletusk/pythonPractice | head 4ac8d8c2f45072407b8f25514a3e54bddcd38a33 | licenses ["MIT"] | issues null (no event dates)
max_forks: path dynamicArray.py | repo eagletusk/pythonPractice | head 4ac8d8c2f45072407b8f25514a3e54bddcd38a33 | licenses ["MIT"] | forks null (no event dates)
content:
import ctypes
import sys


class DynamicArray(object):
    def __init__(self):
        self.n = 0
        self.capacity = 1
        self.A = self.make_array(self.capacity)

    def __len__(self):
        return self.n

    def __getitem__(self, k):
        if not 0 <= k < self.n:
            raise IndexError('this is an index error')
        return self.A[k]

    def append(self, ele):
        if self.n == self.capacity:
            self._resize(2 * self.capacity)  # 2x if capacity isn't enough
        self.A[self.n] = ele
        self.n += 1

    def _resize(self, new_cap):
        B = self.make_array(new_cap)
        for k in range(self.n):
            B[k] = self.A[k]
        self.A = B
        self.capacity = new_cap
        print(sys.getsizeof(self.A), self.capacity, self.n)

    def make_array(self, new_cap):
        # make raw array
        return (new_cap * ctypes.py_object)()


arr = DynamicArray()
arr.append(3)
arr.append(3)
arr.append(3)
arr.append(3)
arr.append(3)
arr.append(3)
arr.append(3)
arr.append(3)
print(arr.capacity)
print(sys.getsizeof(arr))
avg_line_length: 16.031746 | max_line_length: 64 | alphanum_fraction: 0.630693
Row 8
hexsha: 43dd52b92b3c64de56c0f3364f7b0d95ae9b79b9 | size: 46,710 | ext: py | lang: Python
max_stars: path ZER0/022.py | repo applevinc/Project-Euler | head 2755a00df43e65b99b964587e29621cc8ebcc50a | licenses ["MIT"] | stars null (no event dates)
max_issues: path ZER0/022.py | repo applevinc/Project-Euler | head 2755a00df43e65b99b964587e29621cc8ebcc50a | licenses ["MIT"] | issues null (no event dates)
max_forks: path ZER0/022.py | repo applevinc/Project-Euler | head 2755a00df43e65b99b964587e29621cc8ebcc50a | licenses ["MIT"] | forks 1 (2021-03-17T07:19:55.000Z to 2021-03-17T07:19:55.000Z)
content:
names = ["MARY","PATRICIA","LINDA","BARBARA","ELIZABETH","JENNIFER","MARIA","SUSAN","MARGARET","DOROTHY","LISA","NANCY","KAREN","BETTY","HELEN","SANDRA","DONNA","CAROL","RUTH","SHARON","MICHELLE","LAURA","SARAH","KIMBERLY","DEBORAH","JESSICA","SHIRLEY","CYNTHIA","ANGELA","MELISSA","BRENDA","AMY","ANNA","REBECCA","VIRGINIA","KATHLEEN","PAMELA","MARTHA","DEBRA","AMANDA","STEPHANIE","CAROLYN","CHRISTINE","MARIE","JANET","CATHERINE","FRANCES","ANN","JOYCE","DIANE","ALICE","JULIE","HEATHER","TERESA","DORIS","GLORIA","EVELYN","JEAN","CHERYL","MILDRED","KATHERINE","JOAN","ASHLEY","JUDITH","ROSE","JANICE","KELLY","NICOLE","JUDY","CHRISTINA","KATHY","THERESA","BEVERLY","DENISE","TAMMY","IRENE","JANE","LORI","RACHEL","MARILYN","ANDREA","KATHRYN","LOUISE","SARA","ANNE","JACQUELINE","WANDA","BONNIE","JULIA","RUBY","LOIS","TINA","PHYLLIS","NORMA","PAULA","DIANA","ANNIE","LILLIAN","EMILY","ROBIN","PEGGY","CRYSTAL","GLADYS","RITA","DAWN","CONNIE","FLORENCE","TRACY","EDNA","TIFFANY","CARMEN","ROSA","CINDY","GRACE","WENDY","VICTORIA","EDITH","KIM","SHERRY","SYLVIA","JOSEPHINE","THELMA","SHANNON","SHEILA","ETHEL","ELLEN","ELAINE","MARJORIE","CARRIE","CHARLOTTE","MONICA","ESTHER","PAULINE","EMMA","JUANITA","ANITA","RHONDA","HAZEL","AMBER","EVA","DEBBIE","APRIL","LESLIE","CLARA","LUCILLE","JAMIE","JOANNE","ELEANOR","VALERIE","DANIELLE","MEGAN","ALICIA","SUZANNE","MICHELE","GAIL","BERTHA","DARLENE","VERONICA","JILL","ERIN","GERALDINE","LAUREN","CATHY","JOANN","LORRAINE","LYNN","SALLY","REGINA","ERICA","BEATRICE","DOLORES","BERNICE","AUDREY","YVONNE","ANNETTE","JUNE","SAMANTHA","MARION","DANA","STACY","ANA","RENEE","IDA","VIVIAN","ROBERTA","HOLLY","BRITTANY","MELANIE","LORETTA","YOLANDA","JEANETTE","LAURIE","KATIE","KRISTEN","VANESSA","ALMA","SUE","ELSIE","BETH","JEANNE","VICKI","CARLA","TARA","ROSEMARY","EILEEN","TERRI","GERTRUDE","LUCY","TONYA","ELLA","STACEY","WILMA","GINA","KRISTIN","JESSIE","NATALIE","AGNES","VERA","WILLIE","CHARLENE","BESSIE","DELORES","MELINDA","PEARL","ARLENE","MAUREEN","COLLEEN","ALLISON","TAMARA","JOY","GEORGIA","CONSTANCE","LILLIE","CLAUDIA","JACKIE","MARCIA","TANYA","NELLIE","MINNIE","MARLENE","HEIDI","GLENDA","LYDIA","VIOLA","COURTNEY","MARIAN","STELLA","CAROLINE","DORA","JO","VICKIE","MATTIE","TERRY","MAXINE","IRMA","MABEL","MARSHA","MYRTLE","LENA","CHRISTY","DEANNA","PATSY","HILDA","GWENDOLYN","JENNIE","NORA","MARGIE","NINA","CASSANDRA","LEAH","PENNY","KAY","PRISCILLA","NAOMI","CAROLE","BRANDY","OLGA","BILLIE","DIANNE","TRACEY","LEONA","JENNY","FELICIA","SONIA","MIRIAM","VELMA","BECKY","BOBBIE","VIOLET","KRISTINA","TONI","MISTY","MAE","SHELLY","DAISY","RAMONA","SHERRI","ERIKA","KATRINA","CLAIRE","LINDSEY","LINDSAY","GENEVA","GUADALUPE","BELINDA","MARGARITA","SHERYL","CORA","FAYE","ADA","NATASHA","SABRINA","ISABEL","MARGUERITE","HATTIE","HARRIET","MOLLY","CECILIA","KRISTI","BRANDI","BLANCHE","SANDY","ROSIE","JOANNA","IRIS","EUNICE","ANGIE","INEZ","LYNDA","MADELINE","AMELIA","ALBERTA","GENEVIEVE","MONIQUE","JODI","JANIE","MAGGIE","KAYLA","SONYA","JAN","LEE","KRISTINE","CANDACE","FANNIE","MARYANN","OPAL","ALISON","YVETTE","MELODY","LUZ","SUSIE","OLIVIA","FLORA","SHELLEY","KRISTY","MAMIE","LULA","LOLA","VERNA","BEULAH","ANTOINETTE","CANDICE","JUANA","JEANNETTE","PAM","KELLI","HANNAH","WHITNEY","BRIDGET","KARLA","CELIA","LATOYA","PATTY","SHELIA","GAYLE","DELLA","VICKY","LYNNE","SHERI","MARIANNE","KARA","JACQUELYN","ERMA","BLANCA","MYRA","LETICIA","PAT","KRISTA","ROXANNE","ANGELICA","JOHNNIE","ROBYN","FRANCIS","ADRIENNE","ROSALIE","ALEXANDRA","BROOKE","BETHANY","SADIE","BERNADETTE","TRA
CI","JODY","KENDRA","JASMINE","NICHOLE","RACHAEL","CHELSEA","MABLE","ERNESTINE","MURIEL","MARCELLA","ELENA","KRYSTAL","ANGELINA","NADINE","KARI","ESTELLE","DIANNA","PAULETTE","LORA","MONA","DOREEN","ROSEMARIE","ANGEL","DESIREE","ANTONIA","HOPE","GINGER","JANIS","BETSY","CHRISTIE","FREDA","MERCEDES","MEREDITH","LYNETTE","TERI","CRISTINA","EULA","LEIGH","MEGHAN","SOPHIA","ELOISE","ROCHELLE","GRETCHEN","CECELIA","RAQUEL","HENRIETTA","ALYSSA","JANA","KELLEY","GWEN","KERRY","JENNA","TRICIA","LAVERNE","OLIVE","ALEXIS","TASHA","SILVIA","ELVIRA","CASEY","DELIA","SOPHIE","KATE","PATTI","LORENA","KELLIE","SONJA","LILA","LANA","DARLA","MAY","MINDY","ESSIE","MANDY","LORENE","ELSA","JOSEFINA","JEANNIE","MIRANDA","DIXIE","LUCIA","MARTA","FAITH","LELA","JOHANNA","SHARI","CAMILLE","TAMI","SHAWNA","ELISA","EBONY","MELBA","ORA","NETTIE","TABITHA","OLLIE","JAIME","WINIFRED","KRISTIE","MARINA","ALISHA","AIMEE","RENA","MYRNA","MARLA","TAMMIE","LATASHA","BONITA","PATRICE","RONDA","SHERRIE","ADDIE","FRANCINE","DELORIS","STACIE","ADRIANA","CHERI","SHELBY","ABIGAIL","CELESTE","JEWEL","CARA","ADELE","REBEKAH","LUCINDA","DORTHY","CHRIS","EFFIE","TRINA","REBA","SHAWN","SALLIE","AURORA","LENORA","ETTA","LOTTIE","KERRI","TRISHA","NIKKI","ESTELLA","FRANCISCA","JOSIE","TRACIE","MARISSA","KARIN","BRITTNEY","JANELLE","LOURDES","LAUREL","HELENE","FERN","ELVA","CORINNE","KELSEY","INA","BETTIE","ELISABETH","AIDA","CAITLIN","INGRID","IVA","EUGENIA","CHRISTA","GOLDIE","CASSIE","MAUDE","JENIFER","THERESE","FRANKIE","DENA","LORNA","JANETTE","LATONYA","CANDY","MORGAN","CONSUELO","TAMIKA","ROSETTA","DEBORA","CHERIE","POLLY","DINA","JEWELL","FAY","JILLIAN","DOROTHEA","NELL","TRUDY","ESPERANZA","PATRICA","KIMBERLEY","SHANNA","HELENA","CAROLINA","CLEO","STEFANIE","ROSARIO","OLA","JANINE","MOLLIE","LUPE","ALISA","LOU","MARIBEL","SUSANNE","BETTE","SUSANA","ELISE","CECILE","ISABELLE","LESLEY","JOCELYN","PAIGE","JONI","RACHELLE","LEOLA","DAPHNE","ALTA","ESTER","PETRA","GRACIELA","IMOGENE","JOLENE","KEISHA","LACEY","GLENNA","GABRIELA","KERI","URSULA","LIZZIE","KIRSTEN","SHANA","ADELINE","MAYRA","JAYNE","JACLYN","GRACIE","SONDRA","CARMELA","MARISA","ROSALIND","CHARITY","TONIA","BEATRIZ","MARISOL","CLARICE","JEANINE","SHEENA","ANGELINE","FRIEDA","LILY","ROBBIE","SHAUNA","MILLIE","CLAUDETTE","CATHLEEN","ANGELIA","GABRIELLE","AUTUMN","KATHARINE","SUMMER","JODIE","STACI","LEA","CHRISTI","JIMMIE","JUSTINE","ELMA","LUELLA","MARGRET","DOMINIQUE","SOCORRO","RENE","MARTINA","MARGO","MAVIS","CALLIE","BOBBI","MARITZA","LUCILE","LEANNE","JEANNINE","DEANA","AILEEN","LORIE","LADONNA","WILLA","MANUELA","GALE","SELMA","DOLLY","SYBIL","ABBY","LARA","DALE","IVY","DEE","WINNIE","MARCY","LUISA","JERI","MAGDALENA","OFELIA","MEAGAN","AUDRA","MATILDA","LEILA","CORNELIA","BIANCA","SIMONE","BETTYE","RANDI","VIRGIE","LATISHA","BARBRA","GEORGINA","ELIZA","LEANN","BRIDGETTE","RHODA","HALEY","ADELA","NOLA","BERNADINE","FLOSSIE","ILA","GRETA","RUTHIE","NELDA","MINERVA","LILLY","TERRIE","LETHA","HILARY","ESTELA","VALARIE","BRIANNA","ROSALYN","EARLINE","CATALINA","AVA","MIA","CLARISSA","LIDIA","CORRINE","ALEXANDRIA","CONCEPCION","TIA","SHARRON","RAE","DONA","ERICKA","JAMI","ELNORA","CHANDRA","LENORE","NEVA","MARYLOU","MELISA","TABATHA","SERENA","AVIS","ALLIE","SOFIA","JEANIE","ODESSA","NANNIE","HARRIETT","LORAINE","PENELOPE","MILAGROS","EMILIA","BENITA","ALLYSON","ASHLEE","TANIA","TOMMIE","ESMERALDA","KARINA","EVE","PEARLIE","ZELMA","MALINDA","NOREEN","TAMEKA","SAUNDRA","HILLARY","AMIE","ALTHEA","ROSALINDA","JORDAN","LILIA","ALANA","GAY","CLARE","ALEJANDRA","ELINOR","MICHA
EL","LORRIE","JERRI","DARCY","EARNESTINE","CARMELLA","TAYLOR","NOEMI","MARCIE","LIZA","ANNABELLE","LOUISA","EARLENE","MALLORY","CARLENE","NITA","SELENA","TANISHA","KATY","JULIANNE","JOHN","LAKISHA","EDWINA","MARICELA","MARGERY","KENYA","DOLLIE","ROXIE","ROSLYN","KATHRINE","NANETTE","CHARMAINE","LAVONNE","ILENE","KRIS","TAMMI","SUZETTE","CORINE","KAYE","JERRY","MERLE","CHRYSTAL","LINA","DEANNE","LILIAN","JULIANA","ALINE","LUANN","KASEY","MARYANNE","EVANGELINE","COLETTE","MELVA","LAWANDA","YESENIA","NADIA","MADGE","KATHIE","EDDIE","OPHELIA","VALERIA","NONA","MITZI","MARI","GEORGETTE","CLAUDINE","FRAN","ALISSA","ROSEANN","LAKEISHA","SUSANNA","REVA","DEIDRE","CHASITY","SHEREE","CARLY","JAMES","ELVIA","ALYCE","DEIRDRE","GENA","BRIANA","ARACELI","KATELYN","ROSANNE","WENDI","TESSA","BERTA","MARVA","IMELDA","MARIETTA","MARCI","LEONOR","ARLINE","SASHA","MADELYN","JANNA","JULIETTE","DEENA","AURELIA","JOSEFA","AUGUSTA","LILIANA","YOUNG","CHRISTIAN","LESSIE","AMALIA","SAVANNAH","ANASTASIA","VILMA","NATALIA","ROSELLA","LYNNETTE","CORINA","ALFREDA","LEANNA","CAREY","AMPARO","COLEEN","TAMRA","AISHA","WILDA","KARYN","CHERRY","QUEEN","MAURA","MAI","EVANGELINA","ROSANNA","HALLIE","ERNA","ENID","MARIANA","LACY","JULIET","JACKLYN","FREIDA","MADELEINE","MARA","HESTER","CATHRYN","LELIA","CASANDRA","BRIDGETT","ANGELITA","JANNIE","DIONNE","ANNMARIE","KATINA","BERYL","PHOEBE","MILLICENT","KATHERYN","DIANN","CARISSA","MARYELLEN","LIZ","LAURI","HELGA","GILDA","ADRIAN","RHEA","MARQUITA","HOLLIE","TISHA","TAMERA","ANGELIQUE","FRANCESCA","BRITNEY","KAITLIN","LOLITA","FLORINE","ROWENA","REYNA","TWILA","FANNY","JANELL","INES","CONCETTA","BERTIE","ALBA","BRIGITTE","ALYSON","VONDA","PANSY","ELBA","NOELLE","LETITIA","KITTY","DEANN","BRANDIE","LOUELLA","LETA","FELECIA","SHARLENE","LESA","BEVERLEY","ROBERT","ISABELLA","HERMINIA","TERRA","CELINA","TORI","OCTAVIA","JADE","DENICE","GERMAINE","SIERRA","MICHELL","CORTNEY","NELLY","DORETHA","SYDNEY","DEIDRA","MONIKA","LASHONDA","JUDI","CHELSEY","ANTIONETTE","MARGOT","BOBBY","ADELAIDE","NAN","LEEANN","ELISHA","DESSIE","LIBBY","KATHI","GAYLA","LATANYA","MINA","MELLISA","KIMBERLEE","JASMIN","RENAE","ZELDA","ELDA","MA","JUSTINA","GUSSIE","EMILIE","CAMILLA","ABBIE","ROCIO","KAITLYN","JESSE","EDYTHE","ASHLEIGH","SELINA","LAKESHA","GERI","ALLENE","PAMALA","MICHAELA","DAYNA","CARYN","ROSALIA","SUN","JACQULINE","REBECA","MARYBETH","KRYSTLE","IOLA","DOTTIE","BENNIE","BELLE","AUBREY","GRISELDA","ERNESTINA","ELIDA","ADRIANNE","DEMETRIA","DELMA","CHONG","JAQUELINE","DESTINY","ARLEEN","VIRGINA","RETHA","FATIMA","TILLIE","ELEANORE","CARI","TREVA","BIRDIE","WILHELMINA","ROSALEE","MAURINE","LATRICE","YONG","JENA","TARYN","ELIA","DEBBY","MAUDIE","JEANNA","DELILAH","CATRINA","SHONDA","HORTENCIA","THEODORA","TERESITA","ROBBIN","DANETTE","MARYJANE","FREDDIE","DELPHINE","BRIANNE","NILDA","DANNA","CINDI","BESS","IONA","HANNA","ARIEL","WINONA","VIDA","ROSITA","MARIANNA","WILLIAM","RACHEAL","GUILLERMINA","ELOISA","CELESTINE","CAREN","MALISSA","LONA","CHANTEL","SHELLIE","MARISELA","LEORA","AGATHA","SOLEDAD","MIGDALIA","IVETTE","CHRISTEN","ATHENA","JANEL","CHLOE","VEDA","PATTIE","TESSIE","TERA","MARILYNN","LUCRETIA","KARRIE","DINAH","DANIELA","ALECIA","ADELINA","VERNICE","SHIELA","PORTIA","MERRY","LASHAWN","DEVON","DARA","TAWANA","OMA","VERDA","CHRISTIN","ALENE","ZELLA","SANDI","RAFAELA","MAYA","KIRA","CANDIDA","ALVINA","SUZAN","SHAYLA","LYN","LETTIE","ALVA","SAMATHA","ORALIA","MATILDE","MADONNA","LARISSA","VESTA","RENITA","INDIA","DELOIS","SHANDA","PHILLIS","LORRI","ERLINDA","CRUZ","CATHRINE","BARB","ZOE","I
SABELL","IONE","GISELA","CHARLIE","VALENCIA","ROXANNA","MAYME","KISHA","ELLIE","MELLISSA","DORRIS","DALIA","BELLA","ANNETTA","ZOILA","RETA","REINA","LAURETTA","KYLIE","CHRISTAL","PILAR","CHARLA","ELISSA","TIFFANI","TANA","PAULINA","LEOTA","BREANNA","JAYME","CARMEL","VERNELL","TOMASA","MANDI","DOMINGA","SANTA","MELODIE","LURA","ALEXA","TAMELA","RYAN","MIRNA","KERRIE","VENUS","NOEL","FELICITA","CRISTY","CARMELITA","BERNIECE","ANNEMARIE","TIARA","ROSEANNE","MISSY","CORI","ROXANA","PRICILLA","KRISTAL","JUNG","ELYSE","HAYDEE","ALETHA","BETTINA","MARGE","GILLIAN","FILOMENA","CHARLES","ZENAIDA","HARRIETTE","CARIDAD","VADA","UNA","ARETHA","PEARLINE","MARJORY","MARCELA","FLOR","EVETTE","ELOUISE","ALINA","TRINIDAD","DAVID","DAMARIS","CATHARINE","CARROLL","BELVA","NAKIA","MARLENA","LUANNE","LORINE","KARON","DORENE","DANITA","BRENNA","TATIANA","SAMMIE","LOUANN","LOREN","JULIANNA","ANDRIA","PHILOMENA","LUCILA","LEONORA","DOVIE","ROMONA","MIMI","JACQUELIN","GAYE","TONJA","MISTI","JOE","GENE","CHASTITY","STACIA","ROXANN","MICAELA","NIKITA","MEI","VELDA","MARLYS","JOHNNA","AURA","LAVERN","IVONNE","HAYLEY","NICKI","MAJORIE","HERLINDA","GEORGE","ALPHA","YADIRA","PERLA","GREGORIA","DANIEL","ANTONETTE","SHELLI","MOZELLE","MARIAH","JOELLE","CORDELIA","JOSETTE","CHIQUITA","TRISTA","LOUIS","LAQUITA","GEORGIANA","CANDI","SHANON","LONNIE","HILDEGARD","CECIL","VALENTINA","STEPHANY","MAGDA","KAROL","GERRY","GABRIELLA","TIANA","ROMA","RICHELLE","RAY","PRINCESS","OLETA","JACQUE","IDELLA","ALAINA","SUZANNA","JOVITA","BLAIR","TOSHA","RAVEN","NEREIDA","MARLYN","KYLA","JOSEPH","DELFINA","TENA","STEPHENIE","SABINA","NATHALIE","MARCELLE","GERTIE","DARLEEN","THEA","SHARONDA","SHANTEL","BELEN","VENESSA","ROSALINA","ONA","GENOVEVA","COREY","CLEMENTINE","ROSALBA","RENATE","RENATA","MI","IVORY","GEORGIANNA","FLOY","DORCAS","ARIANA","TYRA","THEDA","MARIAM","JULI","JESICA","DONNIE","VIKKI","VERLA","ROSELYN","MELVINA","JANNETTE","GINNY","DEBRAH","CORRIE","ASIA","VIOLETA","MYRTIS","LATRICIA","COLLETTE","CHARLEEN","ANISSA","VIVIANA","TWYLA","PRECIOUS","NEDRA","LATONIA","LAN","HELLEN","FABIOLA","ANNAMARIE","ADELL","SHARYN","CHANTAL","NIKI","MAUD","LIZETTE","LINDY","KIA","KESHA","JEANA","DANELLE","CHARLINE","CHANEL","CARROL","VALORIE","LIA","DORTHA","CRISTAL","SUNNY","LEONE","LEILANI","GERRI","DEBI","ANDRA","KESHIA","IMA","EULALIA","EASTER","DULCE","NATIVIDAD","LINNIE","KAMI","GEORGIE","CATINA","BROOK","ALDA","WINNIFRED","SHARLA","RUTHANN","MEAGHAN","MAGDALENE","LISSETTE","ADELAIDA","VENITA","TRENA","SHIRLENE","SHAMEKA","ELIZEBETH","DIAN","SHANTA","MICKEY","LATOSHA","CARLOTTA","WINDY","SOON","ROSINA","MARIANN","LEISA","JONNIE","DAWNA","CATHIE","BILLY","ASTRID","SIDNEY","LAUREEN","JANEEN","HOLLI","FAWN","VICKEY","TERESSA","SHANTE","RUBYE","MARCELINA","CHANDA","CARY","TERESE","SCARLETT","MARTY","MARNIE","LULU","LISETTE","JENIFFER","ELENOR","DORINDA","DONITA","CARMAN","BERNITA","ALTAGRACIA","ALETA","ADRIANNA","ZORAIDA","RONNIE","NICOLA","LYNDSEY","KENDALL","JANINA","CHRISSY","AMI","STARLA","PHYLIS","PHUONG","KYRA","CHARISSE","BLANCH","SANJUANITA","RONA","NANCI","MARILEE","MARANDA","CORY","BRIGETTE","SANJUANA","MARITA","KASSANDRA","JOYCELYN","IRA","FELIPA","CHELSIE","BONNY","MIREYA","LORENZA","KYONG","ILEANA","CANDELARIA","TONY","TOBY","SHERIE","OK","MARK","LUCIE","LEATRICE","LAKESHIA","GERDA","EDIE","BAMBI","MARYLIN","LAVON","HORTENSE","GARNET","EVIE","TRESSA","SHAYNA","LAVINA","KYUNG","JEANETTA","SHERRILL","SHARA","PHYLISS","MITTIE","ANABEL","ALESIA","THUY","TAWANDA","RICHARD","JOANIE","TIFFANIE","LASHANDA","KARISSA","ENRIQUETA","DARIA","
DANIELLA","CORINNA","ALANNA","ABBEY","ROXANE","ROSEANNA","MAGNOLIA","LIDA","KYLE","JOELLEN","ERA","CORAL","CARLEEN","TRESA","PEGGIE","NOVELLA","NILA","MAYBELLE","JENELLE","CARINA","NOVA","MELINA","MARQUERITE","MARGARETTE","JOSEPHINA","EVONNE","DEVIN","CINTHIA","ALBINA","TOYA","TAWNYA","SHERITA","SANTOS","MYRIAM","LIZABETH","LISE","KEELY","JENNI","GISELLE","CHERYLE","ARDITH","ARDIS","ALESHA","ADRIANE","SHAINA","LINNEA","KAROLYN","HONG","FLORIDA","FELISHA","DORI","DARCI","ARTIE","ARMIDA","ZOLA","XIOMARA","VERGIE","SHAMIKA","NENA","NANNETTE","MAXIE","LOVIE","JEANE","JAIMIE","INGE","FARRAH","ELAINA","CAITLYN","STARR","FELICITAS","CHERLY","CARYL","YOLONDA","YASMIN","TEENA","PRUDENCE","PENNIE","NYDIA","MACKENZIE","ORPHA","MARVEL","LIZBETH","LAURETTE","JERRIE","HERMELINDA","CAROLEE","TIERRA","MIRIAN","META","MELONY","KORI","JENNETTE","JAMILA","ENA","ANH","YOSHIKO","SUSANNAH","SALINA","RHIANNON","JOLEEN","CRISTINE","ASHTON","ARACELY","TOMEKA","SHALONDA","MARTI","LACIE","KALA","JADA","ILSE","HAILEY","BRITTANI","ZONA","SYBLE","SHERRYL","RANDY","NIDIA","MARLO","KANDICE","KANDI","DEB","DEAN","AMERICA","ALYCIA","TOMMY","RONNA","NORENE","MERCY","JOSE","INGEBORG","GIOVANNA","GEMMA","CHRISTEL","AUDRY","ZORA","VITA","VAN","TRISH","STEPHAINE","SHIRLEE","SHANIKA","MELONIE","MAZIE","JAZMIN","INGA","HOA","HETTIE","GERALYN","FONDA","ESTRELLA","ADELLA","SU","SARITA","RINA","MILISSA","MARIBETH","GOLDA","EVON","ETHELYN","ENEDINA","CHERISE","CHANA","VELVA","TAWANNA","SADE","MIRTA","LI","KARIE","JACINTA","ELNA","DAVINA","CIERRA","ASHLIE","ALBERTHA","TANESHA","STEPHANI","NELLE","MINDI","LU","LORINDA","LARUE","FLORENE","DEMETRA","DEDRA","CIARA","CHANTELLE","ASHLY","SUZY","ROSALVA","NOELIA","LYDA","LEATHA","KRYSTYNA","KRISTAN","KARRI","DARLINE","DARCIE","CINDA","CHEYENNE","CHERRIE","AWILDA","ALMEDA","ROLANDA","LANETTE","JERILYN","GISELE","EVALYN","CYNDI","CLETA","CARIN","ZINA","ZENA","VELIA","TANIKA","PAUL","CHARISSA","THOMAS","TALIA","MARGARETE","LAVONDA","KAYLEE","KATHLENE","JONNA","IRENA","ILONA","IDALIA","CANDIS","CANDANCE","BRANDEE","ANITRA","ALIDA","SIGRID","NICOLETTE","MARYJO","LINETTE","HEDWIG","CHRISTIANA","CASSIDY","ALEXIA","TRESSIE","MODESTA","LUPITA","LITA","GLADIS","EVELIA","DAVIDA","CHERRI","CECILY","ASHELY","ANNABEL","AGUSTINA","WANITA","SHIRLY","ROSAURA","HULDA","EUN","BAILEY","YETTA","VERONA","THOMASINA","SIBYL","SHANNAN","MECHELLE","LUE","LEANDRA","LANI","KYLEE","KANDY","JOLYNN","FERNE","EBONI","CORENE","ALYSIA","ZULA","NADA","MOIRA","LYNDSAY","LORRETTA","JUAN","JAMMIE","HORTENSIA","GAYNELL","CAMERON","ADRIA","VINA","VICENTA","TANGELA","STEPHINE","NORINE","NELLA","LIANA","LESLEE","KIMBERELY","ILIANA","GLORY","FELICA","EMOGENE","ELFRIEDE","EDEN","EARTHA","CARMA","BEA","OCIE","MARRY","LENNIE","KIARA","JACALYN","CARLOTA","ARIELLE","YU","STAR","OTILIA","KIRSTIN","KACEY","JOHNETTA","JOEY","JOETTA","JERALDINE","JAUNITA","ELANA","DORTHEA","CAMI","AMADA","ADELIA","VERNITA","TAMAR","SIOBHAN","RENEA","RASHIDA","OUIDA","ODELL","NILSA","MERYL","KRISTYN","JULIETA","DANICA","BREANNE","AUREA","ANGLEA","SHERRON","ODETTE","MALIA","LORELEI","LIN","LEESA","KENNA","KATHLYN","FIONA","CHARLETTE","SUZIE","SHANTELL","SABRA","RACQUEL","MYONG","MIRA","MARTINE","LUCIENNE","LAVADA","JULIANN","JOHNIE","ELVERA","DELPHIA","CLAIR","CHRISTIANE","CHAROLETTE","CARRI","AUGUSTINE","ASHA","ANGELLA","PAOLA","NINFA","LEDA","LAI","EDA","SUNSHINE","STEFANI","SHANELL","PALMA","MACHELLE","LISSA","KECIA","KATHRYNE","KARLENE","JULISSA","JETTIE","JENNIFFER","HUI","CORRINA","CHRISTOPHER","CAROLANN","ALENA","TESS","ROSARIA","MYRTICE","MARYLEE","LIANE
","KENYATTA","JUDIE","JANEY","IN","ELMIRA","ELDORA","DENNA","CRISTI","CATHI","ZAIDA","VONNIE","VIVA","VERNIE","ROSALINE","MARIELA","LUCIANA","LESLI","KARAN","FELICE","DENEEN","ADINA","WYNONA","TARSHA","SHERON","SHASTA","SHANITA","SHANI","SHANDRA","RANDA","PINKIE","PARIS","NELIDA","MARILOU","LYLA","LAURENE","LACI","JOI","JANENE","DOROTHA","DANIELE","DANI","CAROLYNN","CARLYN","BERENICE","AYESHA","ANNELIESE","ALETHEA","THERSA","TAMIKO","RUFINA","OLIVA","MOZELL","MARYLYN","MADISON","KRISTIAN","KATHYRN","KASANDRA","KANDACE","JANAE","GABRIEL","DOMENICA","DEBBRA","DANNIELLE","CHUN","BUFFY","BARBIE","ARCELIA","AJA","ZENOBIA","SHAREN","SHAREE","PATRICK","PAGE","MY","LAVINIA","KUM","KACIE","JACKELINE","HUONG","FELISA","EMELIA","ELEANORA","CYTHIA","CRISTIN","CLYDE","CLARIBEL","CARON","ANASTACIA","ZULMA","ZANDRA","YOKO","TENISHA","SUSANN","SHERILYN","SHAY","SHAWANDA","SABINE","ROMANA","MATHILDA","LINSEY","KEIKO","JOANA","ISELA","GRETTA","GEORGETTA","EUGENIE","DUSTY","DESIRAE","DELORA","CORAZON","ANTONINA","ANIKA","WILLENE","TRACEE","TAMATHA","REGAN","NICHELLE","MICKIE","MAEGAN","LUANA","LANITA","KELSIE","EDELMIRA","BREE","AFTON","TEODORA","TAMIE","SHENA","MEG","LINH","KELI","KACI","DANYELLE","BRITT","ARLETTE","ALBERTINE","ADELLE","TIFFINY","STORMY","SIMONA","NUMBERS","NICOLASA","NICHOL","NIA","NAKISHA","MEE","MAIRA","LOREEN","KIZZY","JOHNNY","JAY","FALLON","CHRISTENE","BOBBYE","ANTHONY","YING","VINCENZA","TANJA","RUBIE","RONI","QUEENIE","MARGARETT","KIMBERLI","IRMGARD","IDELL","HILMA","EVELINA","ESTA","EMILEE","DENNISE","DANIA","CARL","CARIE","ANTONIO","WAI","SANG","RISA","RIKKI","PARTICIA","MUI","MASAKO","MARIO","LUVENIA","LOREE","LONI","LIEN","KEVIN","GIGI","FLORENCIA","DORIAN","DENITA","DALLAS","CHI","BILLYE","ALEXANDER","TOMIKA","SHARITA","RANA","NIKOLE","NEOMA","MARGARITE","MADALYN","LUCINA","LAILA","KALI","JENETTE","GABRIELE","EVELYNE","ELENORA","CLEMENTINA","ALEJANDRINA","ZULEMA","VIOLETTE","VANNESSA","THRESA","RETTA","PIA","PATIENCE","NOELLA","NICKIE","JONELL","DELTA","CHUNG","CHAYA","CAMELIA","BETHEL","ANYA","ANDREW","THANH","SUZANN","SPRING","SHU","MILA","LILLA","LAVERNA","KEESHA","KATTIE","GIA","GEORGENE","EVELINE","ESTELL","ELIZBETH","VIVIENNE","VALLIE","TRUDIE","STEPHANE","MICHEL","MAGALY","MADIE","KENYETTA","KARREN","JANETTA","HERMINE","HARMONY","DRUCILLA","DEBBI","CELESTINA","CANDIE","BRITNI","BECKIE","AMINA","ZITA","YUN","YOLANDE","VIVIEN","VERNETTA","TRUDI","SOMMER","PEARLE","PATRINA","OSSIE","NICOLLE","LOYCE","LETTY","LARISA","KATHARINA","JOSELYN","JONELLE","JENELL","IESHA","HEIDE","FLORINDA","FLORENTINA","FLO","ELODIA","DORINE","BRUNILDA","BRIGID","ASHLI","ARDELLA","TWANA","THU","TARAH","SUNG","SHEA","SHAVON","SHANE","SERINA","RAYNA","RAMONITA","NGA","MARGURITE","LUCRECIA","KOURTNEY","KATI","JESUS","JESENIA","DIAMOND","CRISTA","AYANA","ALICA","ALIA","VINNIE","SUELLEN","ROMELIA","RACHELL","PIPER","OLYMPIA","MICHIKO","KATHALEEN","JOLIE","JESSI","JANESSA","HANA","HA","ELEASE","CARLETTA","BRITANY","SHONA","SALOME","ROSAMOND","REGENA","RAINA","NGOC","NELIA","LOUVENIA","LESIA","LATRINA","LATICIA","LARHONDA","JINA","JACKI","HOLLIS","HOLLEY","EMMY","DEEANN","CORETTA","ARNETTA","VELVET","THALIA","SHANICE","NETA","MIKKI","MICKI","LONNA","LEANA","LASHUNDA","KILEY","JOYE","JACQULYN","IGNACIA","HYUN","HIROKO","HENRY","HENRIETTE","ELAYNE","DELINDA","DARNELL","DAHLIA","COREEN","CONSUELA","CONCHITA","CELINE","BABETTE","AYANNA","ANETTE","ALBERTINA","SKYE","SHAWNEE","SHANEKA","QUIANA","PAMELIA","MIN","MERRI","MERLENE","MARGIT","KIESHA","KIERA","KAYLENE","JODEE","JENISE","ERLENE","EMMIE","ELSE","DARYL"
,"DALILA","DAISEY","CODY","CASIE","BELIA","BABARA","VERSIE","VANESA","SHELBA","SHAWNDA","SAM","NORMAN","NIKIA","NAOMA","MARNA","MARGERET","MADALINE","LAWANA","KINDRA","JUTTA","JAZMINE","JANETT","HANNELORE","GLENDORA","GERTRUD","GARNETT","FREEDA","FREDERICA","FLORANCE","FLAVIA","DENNIS","CARLINE","BEVERLEE","ANJANETTE","VALDA","TRINITY","TAMALA","STEVIE","SHONNA","SHA","SARINA","ONEIDA","MICAH","MERILYN","MARLEEN","LURLINE","LENNA","KATHERIN","JIN","JENI","HAE","GRACIA","GLADY","FARAH","ERIC","ENOLA","EMA","DOMINQUE","DEVONA","DELANA","CECILA","CAPRICE","ALYSHA","ALI","ALETHIA","VENA","THERESIA","TAWNY","SONG","SHAKIRA","SAMARA","SACHIKO","RACHELE","PAMELLA","NICKY","MARNI","MARIEL","MAREN","MALISA","LIGIA","LERA","LATORIA","LARAE","KIMBER","KATHERN","KAREY","JENNEFER","JANETH","HALINA","FREDIA","DELISA","DEBROAH","CIERA","CHIN","ANGELIKA","ANDREE","ALTHA","YEN","VIVAN","TERRESA","TANNA","SUK","SUDIE","SOO","SIGNE","SALENA","RONNI","REBBECCA","MYRTIE","MCKENZIE","MALIKA","MAIDA","LOAN","LEONARDA","KAYLEIGH","FRANCE","ETHYL","ELLYN","DAYLE","CAMMIE","BRITTNI","BIRGIT","AVELINA","ASUNCION","ARIANNA","AKIKO","VENICE","TYESHA","TONIE","TIESHA","TAKISHA","STEFFANIE","SINDY","SANTANA","MEGHANN","MANDA","MACIE","LADY","KELLYE","KELLEE","JOSLYN","JASON","INGER","INDIRA","GLINDA","GLENNIS","FERNANDA","FAUSTINA","ENEIDA","ELICIA","DOT","DIGNA","DELL","ARLETTA","ANDRE","WILLIA","TAMMARA","TABETHA","SHERRELL","SARI","REFUGIO","REBBECA","PAULETTA","NIEVES","NATOSHA","NAKITA","MAMMIE","KENISHA","KAZUKO","KASSIE","GARY","EARLEAN","DAPHINE","CORLISS","CLOTILDE","CAROLYNE","BERNETTA","AUGUSTINA","AUDREA","ANNIS","ANNABELL","YAN","TENNILLE","TAMICA","SELENE","SEAN","ROSANA","REGENIA","QIANA","MARKITA","MACY","LEEANNE","LAURINE","KYM","JESSENIA","JANITA","GEORGINE","GENIE","EMIKO","ELVIE","DEANDRA","DAGMAR","CORIE","COLLEN","CHERISH","ROMAINE","PORSHA","PEARLENE","MICHELINE","MERNA","MARGORIE","MARGARETTA","LORE","KENNETH","JENINE","HERMINA","FREDERICKA","ELKE","DRUSILLA","DORATHY","DIONE","DESIRE","CELENA","BRIGIDA","ANGELES","ALLEGRA","THEO","TAMEKIA","SYNTHIA","STEPHEN","SOOK","SLYVIA","ROSANN","REATHA","RAYE","MARQUETTA","MARGART","LING","LAYLA","KYMBERLY","KIANA","KAYLEEN","KATLYN","KARMEN","JOELLA","IRINA","EMELDA","ELENI","DETRA","CLEMMIE","CHERYLL","CHANTELL","CATHEY","ARNITA","ARLA","ANGLE","ANGELIC","ALYSE","ZOFIA","THOMASINE","TENNIE","SON","SHERLY","SHERLEY","SHARYL","REMEDIOS","PETRINA","NICKOLE","MYUNG","MYRLE","MOZELLA","LOUANNE","LISHA","LATIA","LANE","KRYSTA","JULIENNE","JOEL","JEANENE","JACQUALINE","ISAURA","GWENDA","EARLEEN","DONALD","CLEOPATRA","CARLIE","AUDIE","ANTONIETTA","ALISE","ALEX","VERDELL","VAL","TYLER","TOMOKO","THAO","TALISHA","STEVEN","SO","SHEMIKA","SHAUN","SCARLET","SAVANNA","SANTINA","ROSIA","RAEANN","ODILIA","NANA","MINNA","MAGAN","LYNELLE","LE","KARMA","JOEANN","IVANA","INELL","ILANA","HYE","HONEY","HEE","GUDRUN","FRANK","DREAMA","CRISSY","CHANTE","CARMELINA","ARVILLA","ARTHUR","ANNAMAE","ALVERA","ALEIDA","AARON","YEE","YANIRA","VANDA","TIANNA","TAM","STEFANIA","SHIRA","PERRY","NICOL","NANCIE","MONSERRATE","MINH","MELYNDA","MELANY","MATTHEW","LOVELLA","LAURE","KIRBY","KACY","JACQUELYNN","HYON","GERTHA","FRANCISCO","ELIANA","CHRISTENA","CHRISTEEN","CHARISE","CATERINA","CARLEY","CANDYCE","ARLENA","AMMIE","YANG","WILLETTE","VANITA","TUYET","TINY","SYREETA","SILVA","SCOTT","RONALD","PENNEY","NYLA","MICHAL","MAURICE","MARYAM","MARYA","MAGEN","LUDIE","LOMA","LIVIA","LANELL","KIMBERLIE","JULEE","DONETTA","DIEDRA","DENISHA","DEANE","DAWNE","CLARINE","CHERRYL","BRONWYN","BRANDON","A
LLA","VALERY","TONDA","SUEANN","SORAYA","SHOSHANA","SHELA","SHARLEEN","SHANELLE","NERISSA","MICHEAL","MERIDITH","MELLIE","MAYE","MAPLE","MAGARET","LUIS","LILI","LEONILA","LEONIE","LEEANNA","LAVONIA","LAVERA","KRISTEL","KATHEY","KATHE","JUSTIN","JULIAN","JIMMY","JANN","ILDA","HILDRED","HILDEGARDE","GENIA","FUMIKO","EVELIN","ERMELINDA","ELLY","DUNG","DOLORIS","DIONNA","DANAE","BERNEICE","ANNICE","ALIX","VERENA","VERDIE","TRISTAN","SHAWNNA","SHAWANA","SHAUNNA","ROZELLA","RANDEE","RANAE","MILAGRO","LYNELL","LUISE","LOUIE","LOIDA","LISBETH","KARLEEN","JUNITA","JONA","ISIS","HYACINTH","HEDY","GWENN","ETHELENE","ERLINE","EDWARD","DONYA","DOMONIQUE","DELICIA","DANNETTE","CICELY","BRANDA","BLYTHE","BETHANN","ASHLYN","ANNALEE","ALLINE","YUKO","VELLA","TRANG","TOWANDA","TESHA","SHERLYN","NARCISA","MIGUELINA","MERI","MAYBELL","MARLANA","MARGUERITA","MADLYN","LUNA","LORY","LORIANN","LIBERTY","LEONORE","LEIGHANN","LAURICE","LATESHA","LARONDA","KATRICE","KASIE","KARL","KALEY","JADWIGA","GLENNIE","GEARLDINE","FRANCINA","EPIFANIA","DYAN","DORIE","DIEDRE","DENESE","DEMETRICE","DELENA","DARBY","CRISTIE","CLEORA","CATARINA","CARISA","BERNIE","BARBERA","ALMETA","TRULA","TEREASA","SOLANGE","SHEILAH","SHAVONNE","SANORA","ROCHELL","MATHILDE","MARGARETA","MAIA","LYNSEY","LAWANNA","LAUNA","KENA","KEENA","KATIA","JAMEY","GLYNDA","GAYLENE","ELVINA","ELANOR","DANUTA","DANIKA","CRISTEN","CORDIE","COLETTA","CLARITA","CARMON","BRYNN","AZUCENA","AUNDREA","ANGELE","YI","WALTER","VERLIE","VERLENE","TAMESHA","SILVANA","SEBRINA","SAMIRA","REDA","RAYLENE","PENNI","PANDORA","NORAH","NOMA","MIREILLE","MELISSIA","MARYALICE","LARAINE","KIMBERY","KARYL","KARINE","KAM","JOLANDA","JOHANA","JESUSA","JALEESA","JAE","JACQUELYNE","IRISH","ILUMINADA","HILARIA","HANH","GENNIE","FRANCIE","FLORETTA","EXIE","EDDA","DREMA","DELPHA","BEV","BARBAR","ASSUNTA","ARDELL","ANNALISA","ALISIA","YUKIKO","YOLANDO","WONDA","WEI","WALTRAUD","VETA","TEQUILA","TEMEKA","TAMEIKA","SHIRLEEN","SHENITA","PIEDAD","OZELLA","MIRTHA","MARILU","KIMIKO","JULIANE","JENICE","JEN","JANAY","JACQUILINE","HILDE","FE","FAE","EVAN","EUGENE","ELOIS","ECHO","DEVORAH","CHAU","BRINDA","BETSEY","ARMINDA","ARACELIS","APRYL","ANNETT","ALISHIA","VEOLA","USHA","TOSHIKO","THEOLA","TASHIA","TALITHA","SHERY","RUDY","RENETTA","REIKO","RASHEEDA","OMEGA","OBDULIA","MIKA","MELAINE","MEGGAN","MARTIN","MARLEN","MARGET","MARCELINE","MANA","MAGDALEN","LIBRADA","LEZLIE","LEXIE","LATASHIA","LASANDRA","KELLE","ISIDRA","ISA","INOCENCIA","GWYN","FRANCOISE","ERMINIA","ERINN","DIMPLE","DEVORA","CRISELDA","ARMANDA","ARIE","ARIANE","ANGELO","ANGELENA","ALLEN","ALIZA","ADRIENE","ADALINE","XOCHITL","TWANNA","TRAN","TOMIKO","TAMISHA","TAISHA","SUSY","SIU","RUTHA","ROXY","RHONA","RAYMOND","OTHA","NORIKO","NATASHIA","MERRIE","MELVIN","MARINDA","MARIKO","MARGERT","LORIS","LIZZETTE","LEISHA","KAILA","KA","JOANNIE","JERRICA","JENE","JANNET","JANEE","JACINDA","HERTA","ELENORE","DORETTA","DELAINE","DANIELL","CLAUDIE","CHINA","BRITTA","APOLONIA","AMBERLY","ALEASE","YURI","YUK","WEN","WANETA","UTE","TOMI","SHARRI","SANDIE","ROSELLE","REYNALDA","RAGUEL","PHYLICIA","PATRIA","OLIMPIA","ODELIA","MITZIE","MITCHELL","MISS","MINDA","MIGNON","MICA","MENDY","MARIVEL","MAILE","LYNETTA","LAVETTE","LAURYN","LATRISHA","LAKIESHA","KIERSTEN","KARY","JOSPHINE","JOLYN","JETTA","JANISE","JACQUIE","IVELISSE","GLYNIS","GIANNA","GAYNELLE","EMERALD","DEMETRIUS","DANYELL","DANILLE","DACIA","CORALEE","CHER","CEOLA","BRETT","BELL","ARIANNE","ALESHIA","YUNG","WILLIEMAE","TROY","TRINH","THORA","TAI","SVETLANA","SHERIKA","SHEMEKA","SHAUNDA","ROS
ELINE","RICKI","MELDA","MALLIE","LAVONNA","LATINA","LARRY","LAQUANDA","LALA","LACHELLE","KLARA","KANDIS","JOHNA","JEANMARIE","JAYE","HANG","GRAYCE","GERTUDE","EMERITA","EBONIE","CLORINDA","CHING","CHERY","CAROLA","BREANN","BLOSSOM","BERNARDINE","BECKI","ARLETHA","ARGELIA","ARA","ALITA","YULANDA","YON","YESSENIA","TOBI","TASIA","SYLVIE","SHIRL","SHIRELY","SHERIDAN","SHELLA","SHANTELLE","SACHA","ROYCE","REBECKA","REAGAN","PROVIDENCIA","PAULENE","MISHA","MIKI","MARLINE","MARICA","LORITA","LATOYIA","LASONYA","KERSTIN","KENDA","KEITHA","KATHRIN","JAYMIE","JACK","GRICELDA","GINETTE","ERYN","ELINA","ELFRIEDA","DANYEL","CHEREE","CHANELLE","BARRIE","AVERY","AURORE","ANNAMARIA","ALLEEN","AILENE","AIDE","YASMINE","VASHTI","VALENTINE","TREASA","TORY","TIFFANEY","SHERYLL","SHARIE","SHANAE","SAU","RAISA","PA","NEDA","MITSUKO","MIRELLA","MILDA","MARYANNA","MARAGRET","MABELLE","LUETTA","LORINA","LETISHA","LATARSHA","LANELLE","LAJUANA","KRISSY","KARLY","KARENA","JON","JESSIKA","JERICA","JEANELLE","JANUARY","JALISA","JACELYN","IZOLA","IVEY","GREGORY","EUNA","ETHA","DREW","DOMITILA","DOMINICA","DAINA","CREOLA","CARLI","CAMIE","BUNNY","BRITTNY","ASHANTI","ANISHA","ALEEN","ADAH","YASUKO","WINTER","VIKI","VALRIE","TONA","TINISHA","THI","TERISA","TATUM","TANEKA","SIMONNE","SHALANDA","SERITA","RESSIE","REFUGIA","PAZ","OLENE","NA","MERRILL","MARGHERITA","MANDIE","MAN","MAIRE","LYNDIA","LUCI","LORRIANE","LORETA","LEONIA","LAVONA","LASHAWNDA","LAKIA","KYOKO","KRYSTINA","KRYSTEN","KENIA","KELSI","JUDE","JEANICE","ISOBEL","GEORGIANN","GENNY","FELICIDAD","EILENE","DEON","DELOISE","DEEDEE","DANNIE","CONCEPTION","CLORA","CHERILYN","CHANG","CALANDRA","BERRY","ARMANDINA","ANISA","ULA","TIMOTHY","TIERA","THERESSA","STEPHANIA","SIMA","SHYLA","SHONTA","SHERA","SHAQUITA","SHALA","SAMMY","ROSSANA","NOHEMI","NERY","MORIAH","MELITA","MELIDA","MELANI","MARYLYNN","MARISHA","MARIETTE","MALORIE","MADELENE","LUDIVINA","LORIA","LORETTE","LORALEE","LIANNE","LEON","LAVENIA","LAURINDA","LASHON","KIT","KIMI","KEILA","KATELYNN","KAI","JONE","JOANE","JI","JAYNA","JANELLA","JA","HUE","HERTHA","FRANCENE","ELINORE","DESPINA","DELSIE","DEEDRA","CLEMENCIA","CARRY","CAROLIN","CARLOS","BULAH","BRITTANIE","BOK","BLONDELL","BIBI","BEAULAH","BEATA","ANNITA","AGRIPINA","VIRGEN","VALENE","UN","TWANDA","TOMMYE","TOI","TARRA","TARI","TAMMERA","SHAKIA","SADYE","RUTHANNE","ROCHEL","RIVKA","PURA","NENITA","NATISHA","MING","MERRILEE","MELODEE","MARVIS","LUCILLA","LEENA","LAVETA","LARITA","LANIE","KEREN","ILEEN","GEORGEANN","GENNA","GENESIS","FRIDA","EWA","EUFEMIA","EMELY","ELA","EDYTH","DEONNA","DEADRA","DARLENA","CHANELL","CHAN","CATHERN","CASSONDRA","CASSAUNDRA","BERNARDA","BERNA","ARLINDA","ANAMARIA","ALBERT","WESLEY","VERTIE","VALERI","TORRI","TATYANA","STASIA","SHERISE","SHERILL","SEASON","SCOTTIE","SANDA","RUTHE","ROSY","ROBERTO","ROBBI","RANEE","QUYEN","PEARLY","PALMIRA","ONITA","NISHA","NIESHA","NIDA","NEVADA","NAM","MERLYN","MAYOLA","MARYLOUISE","MARYLAND","MARX","MARTH","MARGENE","MADELAINE","LONDA","LEONTINE","LEOMA","LEIA","LAWRENCE","LAURALEE","LANORA","LAKITA","KIYOKO","KETURAH","KATELIN","KAREEN","JONIE","JOHNETTE","JENEE","JEANETT","IZETTA","HIEDI","HEIKE","HASSIE","HAROLD","GIUSEPPINA","GEORGANN","FIDELA","FERNANDE","ELWANDA","ELLAMAE","ELIZ","DUSTI","DOTTY","CYNDY","CORALIE","CELESTA","ARGENTINA","ALVERTA","XENIA","WAVA","VANETTA","TORRIE","TASHINA","TANDY","TAMBRA","TAMA","STEPANIE","SHILA","SHAUNTA","SHARAN","SHANIQUA","SHAE","SETSUKO","SERAFINA","SANDEE","ROSAMARIA","PRISCILA","OLINDA","NADENE","MUOI","MICHELINA","MERCEDEZ","MARYROSE","MAR
IN","MARCENE","MAO","MAGALI","MAFALDA","LOGAN","LINN","LANNIE","KAYCE","KAROLINE","KAMILAH","KAMALA","JUSTA","JOLINE","JENNINE","JACQUETTA","IRAIDA","GERALD","GEORGEANNA","FRANCHESCA","FAIRY","EMELINE","ELANE","EHTEL","EARLIE","DULCIE","DALENE","CRIS","CLASSIE","CHERE","CHARIS","CAROYLN","CARMINA","CARITA","BRIAN","BETHANIE","AYAKO","ARICA","AN","ALYSA","ALESSANDRA","AKILAH","ADRIEN","ZETTA","YOULANDA","YELENA","YAHAIRA","XUAN","WENDOLYN","VICTOR","TIJUANA","TERRELL","TERINA","TERESIA","SUZI","SUNDAY","SHERELL","SHAVONDA","SHAUNTE","SHARDA","SHAKITA","SENA","RYANN","RUBI","RIVA","REGINIA","REA","RACHAL","PARTHENIA","PAMULA","MONNIE","MONET","MICHAELE","MELIA","MARINE","MALKA","MAISHA","LISANDRA","LEO","LEKISHA","LEAN","LAURENCE","LAKENDRA","KRYSTIN","KORTNEY","KIZZIE","KITTIE","KERA","KENDAL","KEMBERLY","KANISHA","JULENE","JULE","JOSHUA","JOHANNE","JEFFREY","JAMEE","HAN","HALLEY","GIDGET","GALINA","FREDRICKA","FLETA","FATIMAH","EUSEBIA","ELZA","ELEONORE","DORTHEY","DORIA","DONELLA","DINORAH","DELORSE","CLARETHA","CHRISTINIA","CHARLYN","BONG","BELKIS","AZZIE","ANDERA","AIKO","ADENA","YER","YAJAIRA","WAN","VANIA","ULRIKE","TOSHIA","TIFANY","STEFANY","SHIZUE","SHENIKA","SHAWANNA","SHAROLYN","SHARILYN","SHAQUANA","SHANTAY","SEE","ROZANNE","ROSELEE","RICKIE","REMONA","REANNA","RAELENE","QUINN","PHUNG","PETRONILA","NATACHA","NANCEY","MYRL","MIYOKO","MIESHA","MERIDETH","MARVELLA","MARQUITTA","MARHTA","MARCHELLE","LIZETH","LIBBIE","LAHOMA","LADAWN","KINA","KATHELEEN","KATHARYN","KARISA","KALEIGH","JUNIE","JULIEANN","JOHNSIE","JANEAN","JAIMEE","JACKQUELINE","HISAKO","HERMA","HELAINE","GWYNETH","GLENN","GITA","EUSTOLIA","EMELINA","ELIN","EDRIS","DONNETTE","DONNETTA","DIERDRE","DENAE","DARCEL","CLAUDE","CLARISA","CINDERELLA","CHIA","CHARLESETTA","CHARITA","CELSA","CASSY","CASSI","CARLEE","BRUNA","BRITTANEY","BRANDE","BILLI","BAO","ANTONETTA","ANGLA","ANGELYN","ANALISA","ALANE","WENONA","WENDIE","VERONIQUE","VANNESA","TOBIE","TEMPIE","SUMIKO","SULEMA","SPARKLE","SOMER","SHEBA","SHAYNE","SHARICE","SHANEL","SHALON","SAGE","ROY","ROSIO","ROSELIA","RENAY","REMA","REENA","PORSCHE","PING","PEG","OZIE","ORETHA","ORALEE","ODA","NU","NGAN","NAKESHA","MILLY","MARYBELLE","MARLIN","MARIS","MARGRETT","MARAGARET","MANIE","LURLENE","LILLIA","LIESELOTTE","LAVELLE","LASHAUNDA","LAKEESHA","KEITH","KAYCEE","KALYN","JOYA","JOETTE","JENAE","JANIECE","ILLA","GRISEL","GLAYDS","GENEVIE","GALA","FREDDA","FRED","ELMER","ELEONOR","DEBERA","DEANDREA","DAN","CORRINNE","CORDIA","CONTESSA","COLENE","CLEOTILDE","CHARLOTT","CHANTAY","CECILLE","BEATRIS","AZALEE","ARLEAN","ARDATH","ANJELICA","ANJA","ALFREDIA","ALEISHA","ADAM","ZADA","YUONNE","XIAO","WILLODEAN","WHITLEY","VENNIE","VANNA","TYISHA","TOVA","TORIE","TONISHA","TILDA","TIEN","TEMPLE","SIRENA","SHERRIL","SHANTI","SHAN","SENAIDA","SAMELLA","ROBBYN","RENDA","REITA","PHEBE","PAULITA","NOBUKO","NGUYET","NEOMI","MOON","MIKAELA","MELANIA","MAXIMINA","MARG","MAISIE","LYNNA","LILLI","LAYNE","LASHAUN","LAKENYA","LAEL","KIRSTIE","KATHLINE","KASHA","KARLYN","KARIMA","JOVAN","JOSEFINE","JENNELL","JACQUI","JACKELYN","HYO","HIEN","GRAZYNA","FLORRIE","FLORIA","ELEONORA","DWANA","DORLA","DONG","DELMY","DEJA","DEDE","DANN","CRYSTA","CLELIA","CLARIS","CLARENCE","CHIEKO","CHERLYN","CHERELLE","CHARMAIN","CHARA","CAMMY","BEE","ARNETTE","ARDELLE","ANNIKA","AMIEE","AMEE","ALLENA","YVONE","YUKI","YOSHIE","YEVETTE","YAEL","WILLETTA","VONCILE","VENETTA","TULA","TONETTE","TIMIKA","TEMIKA","TELMA","TEISHA","TAREN","TA","STACEE","SHIN","SHAWNTA","SATURNINA","RICARDA","POK","PASTY","ONIE","NUBIA","MORA","MI
KE","MARIELLE","MARIELLA","MARIANELA","MARDELL","MANY","LUANNA","LOISE","LISABETH","LINDSY","LILLIANA","LILLIAM","LELAH","LEIGHA","LEANORA","LANG","KRISTEEN","KHALILAH","KEELEY","KANDRA","JUNKO","JOAQUINA","JERLENE","JANI","JAMIKA","JAME","HSIU","HERMILA","GOLDEN","GENEVIVE","EVIA","EUGENA","EMMALINE","ELFREDA","ELENE","DONETTE","DELCIE","DEEANNA","DARCEY","CUC","CLARINDA","CIRA","CHAE","CELINDA","CATHERYN","CATHERIN","CASIMIRA","CARMELIA","CAMELLIA","BREANA","BOBETTE","BERNARDINA","BEBE","BASILIA","ARLYNE","AMAL","ALAYNA","ZONIA","ZENIA","YURIKO","YAEKO","WYNELL","WILLOW","WILLENA","VERNIA","TU","TRAVIS","TORA","TERRILYN","TERICA","TENESHA","TAWNA","TAJUANA","TAINA","STEPHNIE","SONA","SOL","SINA","SHONDRA","SHIZUKO","SHERLENE","SHERICE","SHARIKA","ROSSIE","ROSENA","RORY","RIMA","RIA","RHEBA","RENNA","PETER","NATALYA","NANCEE","MELODI","MEDA","MAXIMA","MATHA","MARKETTA","MARICRUZ","MARCELENE","MALVINA","LUBA","LOUETTA","LEIDA","LECIA","LAURAN","LASHAWNA","LAINE","KHADIJAH","KATERINE","KASI","KALLIE","JULIETTA","JESUSITA","JESTINE","JESSIA","JEREMY","JEFFIE","JANYCE","ISADORA","GEORGIANNE","FIDELIA","EVITA","EURA","EULAH","ESTEFANA","ELSY","ELIZABET","ELADIA","DODIE","DION","DIA","DENISSE","DELORAS","DELILA","DAYSI","DAKOTA","CURTIS","CRYSTLE","CONCHA","COLBY","CLARETTA","CHU","CHRISTIA","CHARLSIE","CHARLENA","CARYLON","BETTYANN","ASLEY","ASHLEA","AMIRA","AI","AGUEDA","AGNUS","YUETTE","VINITA","VICTORINA","TYNISHA","TREENA","TOCCARA","TISH","THOMASENA","TEGAN","SOILA","SHILOH","SHENNA","SHARMAINE","SHANTAE","SHANDI","SEPTEMBER","SARAN","SARAI","SANA","SAMUEL","SALLEY","ROSETTE","ROLANDE","REGINE","OTELIA","OSCAR","OLEVIA","NICHOLLE","NECOLE","NAIDA","MYRTA","MYESHA","MITSUE","MINTA","MERTIE","MARGY","MAHALIA","MADALENE","LOVE","LOURA","LOREAN","LEWIS","LESHA","LEONIDA","LENITA","LAVONE","LASHELL","LASHANDRA","LAMONICA","KIMBRA","KATHERINA","KARRY","KANESHA","JULIO","JONG","JENEVA","JAQUELYN","HWA","GILMA","GHISLAINE","GERTRUDIS","FRANSISCA","FERMINA","ETTIE","ETSUKO","ELLIS","ELLAN","ELIDIA","EDRA","DORETHEA","DOREATHA","DENYSE","DENNY","DEETTA","DAINE","CYRSTAL","CORRIN","CAYLA","CARLITA","CAMILA","BURMA","BULA","BUENA","BLAKE","BARABARA","AVRIL","AUSTIN","ALAINE","ZANA","WILHEMINA","WANETTA","VIRGIL","VI","VERONIKA","VERNON","VERLINE","VASILIKI","TONITA","TISA","TEOFILA","TAYNA","TAUNYA","TANDRA","TAKAKO","SUNNI","SUANNE","SIXTA","SHARELL","SEEMA","RUSSELL","ROSENDA","ROBENA","RAYMONDE","PEI","PAMILA","OZELL","NEIDA","NEELY","MISTIE","MICHA","MERISSA","MAURITA","MARYLN","MARYETTA","MARSHALL","MARCELL","MALENA","MAKEDA","MADDIE","LOVETTA","LOURIE","LORRINE","LORILEE","LESTER","LAURENA","LASHAY","LARRAINE","LAREE","LACRESHA","KRISTLE","KRISHNA","KEVA","KEIRA","KAROLE","JOIE","JINNY","JEANNETTA","JAMA","HEIDY","GILBERTE","GEMA","FAVIOLA","EVELYNN","ENDA","ELLI","ELLENA","DIVINA","DAGNY","COLLENE","CODI","CINDIE","CHASSIDY","CHASIDY","CATRICE","CATHERINA","CASSEY","CAROLL","CARLENA","CANDRA","CALISTA","BRYANNA","BRITTENY","BEULA","BARI","AUDRIE","AUDRIA","ARDELIA","ANNELLE","ANGILA","ALONA","ALLYN","DOUGLAS","ROGER","JONATHAN","RALPH","NICHOLAS","BENJAMIN","BRUCE","HARRY","WAYNE","STEVE","HOWARD","ERNEST","PHILLIP","TODD","CRAIG","ALAN","PHILIP","EARL","DANNY","BRYAN","STANLEY","LEONARD","NATHAN","MANUEL","RODNEY","MARVIN","VINCENT","JEFFERY","JEFF","CHAD","JACOB","ALFRED","BRADLEY","HERBERT","FREDERICK","EDWIN","DON","RICKY","RANDALL","BARRY","BERNARD","LEROY","MARCUS","THEODORE","CLIFFORD","MIGUEL","JIM","TOM","CALVIN","BILL","LLOYD","DEREK","WARREN","DARRELL","JEROME","FLOYD","ALVIN","TIM",
"GORDON","GREG","JORGE","DUSTIN","PEDRO","DERRICK","ZACHARY","HERMAN","GLEN","HECTOR","RICARDO","RICK","BRENT","RAMON","GILBERT","MARC","REGINALD","RUBEN","NATHANIEL","RAFAEL","EDGAR","MILTON","RAUL","BEN","CHESTER","DUANE","FRANKLIN","BRAD","RON","ROLAND","ARNOLD","HARVEY","JARED","ERIK","DARRYL","NEIL","JAVIER","FERNANDO","CLINTON","TED","MATHEW","TYRONE","DARREN","LANCE","KURT","ALLAN","NELSON","GUY","CLAYTON","HUGH","MAX","DWAYNE","DWIGHT","ARMANDO","FELIX","EVERETT","IAN","WALLACE","KEN","BOB","ALFREDO","ALBERTO","DAVE","IVAN","BYRON","ISAAC","MORRIS","CLIFTON","WILLARD","ROSS","ANDY","SALVADOR","KIRK","SERGIO","SETH","KENT","TERRANCE","EDUARDO","TERRENCE","ENRIQUE","WADE","STUART","FREDRICK","ARTURO","ALEJANDRO","NICK","LUTHER","WENDELL","JEREMIAH","JULIUS","OTIS","TREVOR","OLIVER","LUKE","HOMER","GERARD","DOUG","KENNY","HUBERT","LYLE","MATT","ALFONSO","ORLANDO","REX","CARLTON","ERNESTO","NEAL","PABLO","LORENZO","OMAR","WILBUR","GRANT","HORACE","RODERICK","ABRAHAM","WILLIS","RICKEY","ANDRES","CESAR","JOHNATHAN","MALCOLM","RUDOLPH","DAMON","KELVIN","PRESTON","ALTON","ARCHIE","MARCO","WM","PETE","RANDOLPH","GARRY","GEOFFREY","JONATHON","FELIPE","GERARDO","ED","DOMINIC","DELBERT","COLIN","GUILLERMO","EARNEST","LUCAS","BENNY","SPENCER","RODOLFO","MYRON","EDMUND","GARRETT","SALVATORE","CEDRIC","LOWELL","GREGG","SHERMAN","WILSON","SYLVESTER","ROOSEVELT","ISRAEL","JERMAINE","FORREST","WILBERT","LELAND","SIMON","CLARK","IRVING","BRYANT","OWEN","RUFUS","WOODROW","KRISTOPHER","MACK","LEVI","MARCOS","GUSTAVO","JAKE","LIONEL","GILBERTO","CLINT","NICOLAS","ISMAEL","ORVILLE","ERVIN","DEWEY","AL","WILFRED","JOSH","HUGO","IGNACIO","CALEB","TOMAS","SHELDON","ERICK","STEWART","DOYLE","DARREL","ROGELIO","TERENCE","SANTIAGO","ALONZO","ELIAS","BERT","ELBERT","RAMIRO","CONRAD","NOAH","GRADY","PHIL","CORNELIUS","LAMAR","ROLANDO","CLAY","PERCY","DEXTER","BRADFORD","DARIN","AMOS","MOSES","IRVIN","SAUL","ROMAN","RANDAL","TIMMY","DARRIN","WINSTON","BRENDAN","ABEL","DOMINICK","BOYD","EMILIO","ELIJAH","DOMINGO","EMMETT","MARLON","EMANUEL","JERALD","EDMOND","EMIL","DEWAYNE","WILL","OTTO","TEDDY","REYNALDO","BRET","JESS","TRENT","HUMBERTO","EMMANUEL","STEPHAN","VICENTE","LAMONT","GARLAND","MILES","EFRAIN","HEATH","RODGER","HARLEY","ETHAN","ELDON","ROCKY","PIERRE","JUNIOR","FREDDY","ELI","BRYCE","ANTOINE","STERLING","CHASE","GROVER","ELTON","CLEVELAND","DYLAN","CHUCK","DAMIAN","REUBEN","STAN","AUGUST","LEONARDO","JASPER","RUSSEL","ERWIN","BENITO","HANS","MONTE","BLAINE","ERNIE","CURT","QUENTIN","AGUSTIN","MURRAY","JAMAL","ADOLFO","HARRISON","TYSON","BURTON","BRADY","ELLIOTT","WILFREDO","BART","JARROD","VANCE","DENIS","DAMIEN","JOAQUIN","HARLAN","DESMOND","ELLIOT","DARWIN","GREGORIO","BUDDY","XAVIER","KERMIT","ROSCOE","ESTEBAN","ANTON","SOLOMON","SCOTTY","NORBERT","ELVIN","WILLIAMS","NOLAN","ROD","QUINTON","HAL","BRAIN","ROB","ELWOOD","KENDRICK","DARIUS","MOISES","FIDEL","THADDEUS","CLIFF","MARCEL","JACKSON","RAPHAEL","BRYON","ARMAND","ALVARO","JEFFRY","DANE","JOESPH","THURMAN","NED","RUSTY","MONTY","FABIAN","REGGIE","MASON","GRAHAM","ISAIAH","VAUGHN","GUS","LOYD","DIEGO","ADOLPH","NORRIS","MILLARD","ROCCO","GONZALO","DERICK","RODRIGO","WILEY","RIGOBERTO","ALPHONSO","TY","NOE","VERN","REED","JEFFERSON","ELVIS","BERNARDO","MAURICIO","HIRAM","DONOVAN","BASIL","RILEY","NICKOLAS","MAYNARD","SCOT","VINCE","QUINCY","EDDY","SEBASTIAN","FEDERICO","ULYSSES","HERIBERTO","DONNELL","COLE","DAVIS","GAVIN","EMERY","WARD","ROMEO","JAYSON","DANTE","CLEMENT","COY","MAXWELL","JARVIS","BRUNO","ISSAC","DUDLEY","BROCK","SANFORD","CARMELO
","BARNEY","NESTOR","STEFAN","DONNY","ART","LINWOOD","BEAU","WELDON","GALEN","ISIDRO","TRUMAN","DELMAR","JOHNATHON","SILAS","FREDERIC","DICK","IRWIN","MERLIN","CHARLEY","MARCELINO","HARRIS","CARLO","TRENTON","KURTIS","HUNTER","AURELIO","WINFRED","VITO","COLLIN","DENVER","CARTER","LEONEL","EMORY","PASQUALE","MOHAMMAD","MARIANO","DANIAL","LANDON","DIRK","BRANDEN","ADAN","BUFORD","GERMAN","WILMER","EMERSON","ZACHERY","FLETCHER","JACQUES","ERROL","DALTON","MONROE","JOSUE","EDWARDO","BOOKER","WILFORD","SONNY","SHELTON","CARSON","THERON","RAYMUNDO","DAREN","HOUSTON","ROBBY","LINCOLN","GENARO","BENNETT","OCTAVIO","CORNELL","HUNG","ARRON","ANTONY","HERSCHEL","GIOVANNI","GARTH","CYRUS","CYRIL","RONNY","LON","FREEMAN","DUNCAN","KENNITH","CARMINE","ERICH","CHADWICK","WILBURN","RUSS","REID","MYLES","ANDERSON","MORTON","JONAS","FOREST","MITCHEL","MERVIN","ZANE","RICH","JAMEL","LAZARO","ALPHONSE","RANDELL","MAJOR","JARRETT","BROOKS","ABDUL","LUCIANO","SEYMOUR","EUGENIO","MOHAMMED","VALENTIN","CHANCE","ARNULFO","LUCIEN","FERDINAND","THAD","EZRA","ALDO","RUBIN","ROYAL","MITCH","EARLE","ABE","WYATT","MARQUIS","LANNY","KAREEM","JAMAR","BORIS","ISIAH","EMILE","ELMO","ARON","LEOPOLDO","EVERETTE","JOSEF","ELOY","RODRICK","REINALDO","LUCIO","JERROD","WESTON","HERSHEL","BARTON","PARKER","LEMUEL","BURT","JULES","GIL","ELISEO","AHMAD","NIGEL","EFREN","ANTWAN","ALDEN","MARGARITO","COLEMAN","DINO","OSVALDO","LES","DEANDRE","NORMAND","KIETH","TREY","NORBERTO","NAPOLEON","JEROLD","FRITZ","ROSENDO","MILFORD","CHRISTOPER","ALFONZO","LYMAN","JOSIAH","BRANT","WILTON","RICO","JAMAAL","DEWITT","BRENTON","OLIN","FOSTER","FAUSTINO","CLAUDIO","JUDSON","GINO","EDGARDO","ALEC","TANNER","JARRED","DONN","TAD","PRINCE","PORFIRIO","ODIS","LENARD","CHAUNCEY","TOD","MEL","MARCELO","KORY","AUGUSTUS","KEVEN","HILARIO","BUD","SAL","ORVAL","MAURO","ZACHARIAH","OLEN","ANIBAL","MILO","JED","DILLON","AMADO","NEWTON","LENNY","RICHIE","HORACIO","BRICE","MOHAMED","DELMER","DARIO","REYES","MAC","JONAH","JERROLD","ROBT","HANK","RUPERT","ROLLAND","KENTON","DAMION","ANTONE","WALDO","FREDRIC","BRADLY","KIP","BURL","WALKER","TYREE","JEFFEREY","AHMED","WILLY","STANFORD","OREN","NOBLE","MOSHE","MIKEL","ENOCH","BRENDON","QUINTIN","JAMISON","FLORENCIO","DARRICK","TOBIAS","HASSAN","GIUSEPPE","DEMARCUS","CLETUS","TYRELL","LYNDON","KEENAN","WERNER","GERALDO","COLUMBUS","CHET","BERTRAM","MARKUS","HUEY","HILTON","DWAIN","DONTE","TYRON","OMER","ISAIAS","HIPOLITO","FERMIN","ADALBERTO","BO","BARRETT","TEODORO","MCKINLEY","MAXIMO","GARFIELD","RALEIGH","LAWERENCE","ABRAM","RASHAD","KING","EMMITT","DARON","SAMUAL","MIQUEL","EUSEBIO","DOMENIC","DARRON","BUSTER","WILBER","RENATO","JC","HOYT","HAYWOOD","EZEKIEL","CHAS","FLORENTINO","ELROY","CLEMENTE","ARDEN","NEVILLE","EDISON","DESHAWN","NATHANIAL","JORDON","DANILO","CLAUD","SHERWOOD","RAYMON","RAYFORD","CRISTOBAL","AMBROSE","TITUS","HYMAN","FELTON","EZEQUIEL","ERASMO","STANTON","LONNY","LEN","IKE","MILAN","LINO","JAROD","HERB","ANDREAS","WALTON","RHETT","PALMER","DOUGLASS","CORDELL","OSWALDO","ELLSWORTH","VIRGILIO","TONEY","NATHANAEL","DEL","BENEDICT","MOSE","JOHNSON","ISREAL","GARRET","FAUSTO","ASA","ARLEN","ZACK","WARNER","MODESTO","FRANCESCO","MANUAL","GAYLORD","GASTON","FILIBERTO","DEANGELO","MICHALE","GRANVILLE","WES","MALIK","ZACKARY","TUAN","ELDRIDGE","CRISTOPHER","CORTEZ","ANTIONE","MALCOM","LONG","KOREY","JOSPEH","COLTON","WAYLON","VON","HOSEA","SHAD","SANTO","RUDOLF","ROLF","REY","RENALDO","MARCELLUS","LUCIUS","KRISTOFER","BOYCE","BENTON","HAYDEN","HARLAND","ARNOLDO","RUEBEN","LEANDRO","KRAIG","JERRELL","JER
OMY","HOBERT","CEDRICK","ARLIE","WINFORD","WALLY","LUIGI","KENETH","JACINTO","GRAIG","FRANKLYN","EDMUNDO","SID","PORTER","LEIF","JERAMY","BUCK","WILLIAN","VINCENZO","SHON","LYNWOOD","JERE","HAI","ELDEN","DORSEY","DARELL","BRODERICK","ALONSO"]
names.sort()
print(names[937])  # the 938th name alphabetically ("COLIN" in the Project Euler 22 name list)
def wordScore(name):
score = 0
for c in name:
score += ord(c) - ord('A') + 1
return score
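# Worked example (added as a quick sanity check; not part of the original solution):
# wordScore("COLIN") = 3 + 15 + 12 + 9 + 14 = 53, and since "COLIN" is the 938th
# name after sorting, it contributes 53 * 938 = 49714 to the total below.
assert wordScore("COLIN") == 53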
total_score = 0
for i in range(len(names)):
total_score += (wordScore(names[i]) * (i+1))
print(total_score) # 871198282
| 2,595
| 46,457
| 0.666388
|
2817ea9f93b3564d2899cd722a2895a06f379b9f
| 10,839
|
py
|
Python
|
sdks/python/apache_beam/io/external/xlang_kinesisio_it_test.py
|
jzju/beam
|
d9876ea6bdef22b959ded2c16751057a418468bb
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 5,279
|
2016-12-29T04:00:44.000Z
|
2022-03-31T22:56:45.000Z
|
sdks/python/apache_beam/io/external/xlang_kinesisio_it_test.py
|
jzju/beam
|
d9876ea6bdef22b959ded2c16751057a418468bb
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 14,149
|
2016-12-28T00:43:50.000Z
|
2022-03-31T23:50:22.000Z
|
sdks/python/apache_beam/io/external/xlang_kinesisio_it_test.py
|
damondouglas/beam
|
4774ac713f427fefb38114f661516faef26d8207
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 3,763
|
2016-12-29T04:06:10.000Z
|
2022-03-31T22:25:49.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Integration test for Python cross-language pipelines for Java KinesisIO.
To run the tests against localstack, run them with just the pipeline
options.
To test it on a real AWS account you need to pass some additional params, e.g.:
python setup.py nosetests \
--tests=apache_beam.io.external.xlang_kinesisio_it_test \
--test-pipeline-options="
--use_real_aws
--aws_kinesis_stream=<STREAM_NAME>
--aws_access_key=<AWS_ACCESS_KEY>
--aws_secret_key=<AWS_SECRET_KEY>
--aws_region=<AWS_REGION>
--runner=FlinkRunner"
"""
# pytype: skip-file
import argparse
import logging
import time
import unittest
import uuid
import apache_beam as beam
from apache_beam.io.kinesis import InitialPositionInStream
from apache_beam.io.kinesis import ReadDataFromKinesis
from apache_beam.io.kinesis import WatermarkPolicy
from apache_beam.io.kinesis import WriteToKinesis
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
import boto3
except ImportError:
boto3 = None
try:
from testcontainers.core.container import DockerContainer
except ImportError:
DockerContainer = None
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
LOCALSTACK_VERSION = '0.11.3'
NUM_RECORDS = 10
MAX_READ_TIME = 5 * 60 * 1000 # 5min
NOW_SECONDS = time.time()
NOW_MILLIS = NOW_SECONDS * 1000
REQUEST_RECORDS_LIMIT = 1000
RECORD = b'record' + str(uuid.uuid4()).encode()
@unittest.skipUnless(DockerContainer, 'testcontainers is not installed.')
@unittest.skipUnless(boto3, 'boto3 is not installed.')
@unittest.skipUnless(
TestPipeline().get_pipeline_options().view_as(StandardOptions).runner,
'Do not run this test on precommit suites.')
class CrossLanguageKinesisIOTest(unittest.TestCase):
@unittest.skipUnless(
TestPipeline().get_option('aws_kinesis_stream'),
'Cannot test on real aws without pipeline options provided')
def test_kinesis_io_roundtrip(self):
# TODO: enable this test for localstack once BEAM-10664 is resolved
self.run_kinesis_write()
self.run_kinesis_read()
@unittest.skipIf(
TestPipeline().get_option('aws_kinesis_stream'),
'Do not test on localstack when pipeline options were provided')
def test_kinesis_write(self):
# TODO: remove this test once BEAM-10664 is resolved
self.run_kinesis_write()
records = self.kinesis_helper.read_from_stream(self.aws_kinesis_stream)
self.assertEqual(
sorted(records),
sorted([RECORD + str(i).encode() for i in range(NUM_RECORDS)]))
def run_kinesis_write(self):
with TestPipeline(options=PipelineOptions(self.pipeline_args)) as p:
p.not_use_test_runner_api = True
_ = (
p
| 'Impulse' >> beam.Impulse()
| 'Generate' >> beam.FlatMap(lambda x: range(NUM_RECORDS)) # pylint: disable=bad-option-value
| 'Map to bytes' >>
beam.Map(lambda x: RECORD + str(x).encode()).with_output_types(bytes)
| 'WriteToKinesis' >> WriteToKinesis(
stream_name=self.aws_kinesis_stream,
aws_access_key=self.aws_access_key,
aws_secret_key=self.aws_secret_key,
region=self.aws_region,
service_endpoint=self.aws_service_endpoint,
verify_certificate=(not self.use_localstack),
partition_key='1',
producer_properties=self.producer_properties,
))
def run_kinesis_read(self):
records = [RECORD + str(i).encode() for i in range(NUM_RECORDS)]
with TestPipeline(options=PipelineOptions(self.pipeline_args)) as p:
result = (
p
| 'ReadFromKinesis' >> ReadDataFromKinesis(
stream_name=self.aws_kinesis_stream,
aws_access_key=self.aws_access_key,
aws_secret_key=self.aws_secret_key,
region=self.aws_region,
service_endpoint=self.aws_service_endpoint,
verify_certificate=not self.use_localstack,
max_num_records=NUM_RECORDS,
max_read_time=MAX_READ_TIME,
request_records_limit=REQUEST_RECORDS_LIMIT,
watermark_policy=WatermarkPolicy.ARRIVAL_TIME,
watermark_idle_duration_threshold=MAX_READ_TIME,
initial_position_in_stream=InitialPositionInStream.AT_TIMESTAMP,
initial_timestamp_in_stream=NOW_MILLIS,
).with_output_types(bytes))
assert_that(result, equal_to(records))
def set_localstack(self):
self.localstack = DockerContainer('localstack/localstack:{}'
.format(LOCALSTACK_VERSION))\
.with_env('SERVICES', 'kinesis')\
.with_env('KINESIS_PORT', '4568')\
.with_env('USE_SSL', 'true')\
.with_exposed_ports(4568)\
.with_volume_mapping('/var/run/docker.sock', '/var/run/docker.sock', 'rw')
# Repeat if ReadTimeout is raised.
for i in range(4):
try:
self.localstack.start()
break
      except Exception as e:  # pylint: disable=broad-except
if i == 3:
logging.error('Could not initialize localstack container')
raise e
self.aws_service_endpoint = 'https://{}:{}'.format(
self.localstack.get_container_host_ip(),
self.localstack.get_exposed_port('4568'),
)
def setUp(self):
parser = argparse.ArgumentParser()
parser.add_argument(
'--aws_kinesis_stream',
default='beam_kinesis_xlang',
help='Kinesis stream name',
)
parser.add_argument(
'--aws_access_key',
default='accesskey',
help=('Aws access key'),
)
parser.add_argument(
'--aws_secret_key',
default='secretkey',
help='Aws secret key',
)
parser.add_argument(
'--aws_region',
default='us-east-1',
help='Aws region',
)
parser.add_argument(
'--aws_service_endpoint',
default=None,
help='Url to external aws endpoint',
)
parser.add_argument(
'--use_real_aws',
default=False,
dest='use_real_aws',
action='store_true',
        help='Whether to use real AWS for the tests',
)
parser.add_argument(
'--expansion_service',
help='Url to externally launched expansion service.',
)
pipeline = TestPipeline()
argv = pipeline.get_full_options_as_args()
known_args, self.pipeline_args = parser.parse_known_args(argv)
self.aws_kinesis_stream = known_args.aws_kinesis_stream
self.aws_access_key = known_args.aws_access_key
self.aws_secret_key = known_args.aws_secret_key
self.aws_region = known_args.aws_region
self.aws_service_endpoint = known_args.aws_service_endpoint
self.use_localstack = not known_args.use_real_aws
self.expansion_service = known_args.expansion_service
self.producer_properties = {
'CollectionMaxCount': str(NUM_RECORDS),
'ConnectTimeout': str(MAX_READ_TIME),
}
if self.use_localstack:
self.set_localstack()
self.kinesis_helper = KinesisHelper(
self.aws_access_key,
self.aws_secret_key,
self.aws_region,
self.aws_service_endpoint.replace('https', 'http')
if self.aws_service_endpoint else None,
)
if self.use_localstack:
self.kinesis_helper.create_stream(self.aws_kinesis_stream)
def tearDown(self):
if self.use_localstack:
self.kinesis_helper.delete_stream(self.aws_kinesis_stream)
try:
self.localstack.stop()
except: # pylint: disable=bare-except
logging.error('Could not stop the localstack container')
class KinesisHelper:
def __init__(self, access_key, secret_key, region, service_endpoint):
self.kinesis_client = boto3.client(
service_name='kinesis',
region_name=region,
endpoint_url=service_endpoint,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
)
def create_stream(self, stream_name):
    # localstack may not have finished initializing in the container yet, so retry
retries = 10
for i in range(retries):
try:
self.kinesis_client.create_stream(
StreamName=stream_name,
ShardCount=1,
)
time.sleep(2)
break
except: # pylint: disable=bare-except
if i == retries - 1:
logging.error('Could not create kinesis stream')
raise
# Wait for the stream to be active
self.get_first_shard_id(stream_name)
def delete_stream(self, stream_name):
self.kinesis_client.delete_stream(
StreamName=stream_name,
EnforceConsumerDeletion=True,
)
def get_first_shard_id(self, stream_name):
retries = 10
stream = self.kinesis_client.describe_stream(StreamName=stream_name)
for i in range(retries):
if stream['StreamDescription']['StreamStatus'] == 'ACTIVE':
break
time.sleep(2)
if i == retries - 1:
logging.error('Could not initialize kinesis stream')
raise
stream = self.kinesis_client.describe_stream(StreamName=stream_name)
return stream['StreamDescription']['Shards'][0]['ShardId']
def read_from_stream(self, stream_name):
shard_id = self.get_first_shard_id(stream_name)
shard_iterator = self.kinesis_client.get_shard_iterator(
StreamName=stream_name,
ShardId=shard_id,
ShardIteratorType=InitialPositionInStream.AT_TIMESTAMP,
Timestamp=str(NOW_SECONDS),
)
result = self.kinesis_client.get_records(
ShardIterator=shard_iterator['ShardIterator'],
Limit=NUM_RECORDS,
)
return [record['Data'] for record in result['Records']]
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| 33.978056
| 104
| 0.69499
|
660646e541153b0568548f56a37ea99a055103d6
| 2,319
|
py
|
Python
|
tests/contrib/operators/test_redis_publish_operator.py
|
dyna-dot/airflow
|
02ef974e4b7c2a91b3074ddd8abcf4cd31d09e6f
|
[
"Apache-2.0"
] | 2
|
2020-09-30T01:06:15.000Z
|
2021-08-07T09:16:21.000Z
|
tests/contrib/operators/test_redis_publish_operator.py
|
dyna-dot/airflow
|
02ef974e4b7c2a91b3074ddd8abcf4cd31d09e6f
|
[
"Apache-2.0"
] | 22
|
2019-12-09T23:22:07.000Z
|
2021-05-12T23:15:40.000Z
|
tests/contrib/operators/test_redis_publish_operator.py
|
dyna-dot/airflow
|
02ef974e4b7c2a91b3074ddd8abcf4cd31d09e6f
|
[
"Apache-2.0"
] | 5
|
2019-11-18T13:19:29.000Z
|
2020-03-25T13:20:29.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow import DAG, configuration
from airflow.contrib.operators.redis_publish_operator import RedisPublishOperator
from airflow.contrib.hooks.redis_hook import RedisHook
from airflow.utils import timezone
from mock import MagicMock
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
class TestRedisPublishOperator(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG('test_redis_dag_id', default_args=args)
self.mock_context = MagicMock()
self.channel = 'test'
def test_execute_hello(self):
operator = RedisPublishOperator(
task_id='test_task',
dag=self.dag,
message='hello',
channel=self.channel,
redis_conn_id='redis_default'
)
hook = RedisHook(redis_conn_id='redis_default')
pubsub = hook.get_conn().pubsub()
pubsub.subscribe(self.channel)
operator.execute(self.mock_context)
context_calls = []
self.assertTrue(self.mock_context['ti'].method_calls == context_calls, "context calls should be same")
message = pubsub.get_message()
        self.assertEqual(message['type'], 'subscribe')
        message = pubsub.get_message()
        self.assertEqual(message['type'], 'message')
        self.assertEqual(message['data'], b'hello')
pubsub.unsubscribe(self.channel)
| 32.661972
| 110
| 0.693834
|
2c7db60ae54e81e174eaa430d9280b2be5e6a01f
| 108
|
py
|
Python
|
datafiles/__init__.py
|
sjml/synacor-challenge
|
b965e7c6330bcd3e4d4c9f6914720ecb67bbdf4f
|
[
"Unlicense"
] | null | null | null |
datafiles/__init__.py
|
sjml/synacor-challenge
|
b965e7c6330bcd3e4d4c9f6914720ecb67bbdf4f
|
[
"Unlicense"
] | null | null | null |
datafiles/__init__.py
|
sjml/synacor-challenge
|
b965e7c6330bcd3e4d4c9f6914720ecb67bbdf4f
|
[
"Unlicense"
] | null | null | null |
def get(name):
reader = __loader__.get_resource_reader(__name__)
return reader.open_resource(name)
| 21.6
| 53
| 0.759259
|
be56834f99d37619308cab91f487b06d6dd4c6a8
| 1,202
|
py
|
Python
|
lib/ansiblelint/rules/SudoRule.py
|
eahlstrom/ansible-lint
|
abc946f378a3690a4cb06eb9a7e9d60fb46fb35d
|
[
"MIT"
] | 1
|
2020-11-09T09:32:08.000Z
|
2020-11-09T09:32:08.000Z
|
lib/ansiblelint/rules/SudoRule.py
|
eahlstrom/ansible-lint
|
abc946f378a3690a4cb06eb9a7e9d60fb46fb35d
|
[
"MIT"
] | 5
|
2020-02-26T20:10:50.000Z
|
2021-09-23T23:23:18.000Z
|
lib/ansiblelint/rules/SudoRule.py
|
eahlstrom/ansible-lint
|
abc946f378a3690a4cb06eb9a7e9d60fb46fb35d
|
[
"MIT"
] | 2
|
2019-01-18T07:15:27.000Z
|
2019-01-18T08:04:45.000Z
|
from ansiblelint import AnsibleLintRule
class SudoRule(AnsibleLintRule):
id = '103'
shortdesc = 'Deprecated sudo'
description = 'Instead of ``sudo``/``sudo_user``, use ``become``/``become_user``.'
severity = 'VERY_HIGH'
tags = ['deprecated', 'ANSIBLE0008']
version_added = 'historic'
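    # Illustrative play fragment this rule would flag (hypothetical YAML, shown only
    # as a sketch of the deprecated keys):
    #   - hosts: all
    #     sudo: yes          # use "become: yes" instead
    #     sudo_user: deploy  # use "become_user: deploy" instead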
def _check_value(self, play_frag):
results = []
if isinstance(play_frag, dict):
if 'sudo' in play_frag:
results.append(({'sudo': play_frag['sudo']},
'Deprecated sudo feature'))
if 'sudo_user' in play_frag:
results.append(({'sudo_user': play_frag['sudo_user']},
'Deprecated sudo_user feature'))
if 'tasks' in play_frag:
output = self._check_value(play_frag['tasks'])
if output:
results += output
if isinstance(play_frag, list):
for item in play_frag:
output = self._check_value(item)
if output:
results += output
return results
def matchplay(self, file, play):
return self._check_value(play)
| 32.486486
| 86
| 0.542429
|
3815cd4e4f4b3c1afe5db340336f88e3d75868cc
| 2,250
|
py
|
Python
|
tx_salaries/utils/transformers/allen_isd.py
|
texastribune/tx_salaries
|
197d8da4e1783216830b8d0a5adb23c0200fd3e8
|
[
"Apache-2.0"
] | 6
|
2016-05-18T05:53:44.000Z
|
2019-06-13T18:27:50.000Z
|
tx_salaries/utils/transformers/allen_isd.py
|
texastribune/tx_salaries
|
197d8da4e1783216830b8d0a5adb23c0200fd3e8
|
[
"Apache-2.0"
] | 64
|
2015-02-13T18:29:04.000Z
|
2018-06-15T19:48:56.000Z
|
tx_salaries/utils/transformers/allen_isd.py
|
texastribune/tx_salaries
|
197d8da4e1783216830b8d0a5adb23c0200fd3e8
|
[
"Apache-2.0"
] | 2
|
2015-05-08T19:22:12.000Z
|
2016-07-11T16:57:49.000Z
|
from . import base
from . import mixins
from .. import cleaver
from datetime import date
import string
class TransformedRecord(
mixins.GenericCompensationMixin,
mixins.GenericDepartmentMixin, mixins.GenericIdentifierMixin,
mixins.GenericJobTitleMixin, mixins.GenericPersonMixin,
mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
mixins.RaceMixin, mixins.LinkMixin, base.BaseTransformedRecord):
MAP = {
'full_name': 'Full Name',
'department': 'Position Building Desc',
'job_title': 'Position Group Desc',
'hire_date': 'Hire Date',
'compensation': 'Position Contract Amt',
'gender': 'Gender',
'race': 'Race Desc',
'employee_type': 'Position FTE'
}
ORGANIZATION_NAME = 'Allen ISD'
ORGANIZATION_CLASSIFICATION = 'School District'
compensation_type = 'FT'
description = 'Position contract amount'
DATE_PROVIDED = date(2018, 5, 1)
URL = ('http://raw.texastribune.org.s3.amazonaws.com/'
'allen_isd/salaries/2018-04/request.xlsx')
gender_map = {'Female': 'F', 'Male': 'M'}
@property
def is_valid(self):
        # A record is valid only when the full name is non-empty; adjust this
        # check to return False on other invalid fields as needed.
return self.full_name.strip() != ''
@property
def hire_date(self):
raw_date = self.get_mapped_value('hire_date')
return '-'.join([raw_date[-4:], raw_date[:2], raw_date[3:5]])
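    # For instance, a raw value of '05/01/2018' (assuming the MM/DD/YYYY format the
    # slicing above implies) becomes '2018-05-01'.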
@property
def compensation(self):
return self.get_mapped_value('compensation').replace(',', '')
@property
def compensation_type(self):
employee_type = self.get_mapped_value('employee_type')
if float(employee_type) >= 1:
return 'FT'
return 'PT'
@property
def person(self):
name = self.get_name()
r = {
'family_name': name.last,
'given_name': name.first,
'additional_name': name.middle,
'name': unicode(name),
'gender': self.gender_map[self.gender.strip()]
}
return r
def get_name(self):
return cleaver.EmployeeNameCleaver(
self.get_mapped_value('full_name')).parse()
transform = base.transform_factory(TransformedRecord)
| 27.108434
| 71
| 0.632889
|
0725e45152165b81bc0c10ae77eedb66090b61e1
| 574
|
py
|
Python
|
examples/full-stack-example/api/todo-python-django/todos/migrations/__init__.py
|
riddopic/opta
|
25fa6435fdc7e2ea9c7963ed74100fffb0743063
|
[
"Apache-2.0"
] | null | null | null |
examples/full-stack-example/api/todo-python-django/todos/migrations/__init__.py
|
riddopic/opta
|
25fa6435fdc7e2ea9c7963ed74100fffb0743063
|
[
"Apache-2.0"
] | null | null | null |
examples/full-stack-example/api/todo-python-django/todos/migrations/__init__.py
|
riddopic/opta
|
25fa6435fdc7e2ea9c7963ed74100fffb0743063
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 BigBitBus
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 41
| 74
| 0.763066
|
1f4418c728158668491893f34af02eedbdfbf231
| 1,558
|
py
|
Python
|
megago/precompute_frequency_counts.py
|
MEGA-GO/Mega-GO
|
8921c1b220d24c53947c81b3c16b835f3e31f685
|
[
"MIT"
] | 4
|
2021-03-05T14:56:38.000Z
|
2021-08-19T18:16:10.000Z
|
megago/precompute_frequency_counts.py
|
MEGA-GO/Mega-GO
|
8921c1b220d24c53947c81b3c16b835f3e31f685
|
[
"MIT"
] | 14
|
2020-01-29T16:46:59.000Z
|
2020-06-12T10:11:25.000Z
|
megago/precompute_frequency_counts.py
|
MEGA-GO/Mega-Go
|
8921c1b220d24c53947c81b3c16b835f3e31f685
|
[
"MIT"
] | 3
|
2021-03-09T11:01:31.000Z
|
2022-01-13T10:53:42.000Z
|
import json
import os
from goatools.anno.idtogos_reader import IdToGosReader
from goatools.obo_parser import GODag
from goatools.semantic import TermCounts
from progress.bar import IncrementalBar
from .constants import FREQUENCY_COUNTS_FILE_PATH, UNIPROT_ASSOCIATIONS_FILE_PATH, GO_DAG_FILE_PATH
def _precompute_term_frequencies():
print("Start precomputations of term frequencies...")
go_freq_dict = dict()
go_dag = GODag(GO_DAG_FILE_PATH, prt=open(os.devnull, 'w'))
associations = IdToGosReader(UNIPROT_ASSOCIATIONS_FILE_PATH, godag=go_dag).get_id2gos('all')
term_counts = TermCounts(go_dag, associations)
for i in go_dag.values():
go_freq_dict[i.id] = term_counts.get_count(i.id)
for alt_id in i.alt_ids:
go_freq_dict[alt_id] = term_counts.get_count(i.id)
# write frequency dict to JSON file
with open(FREQUENCY_COUNTS_FILE_PATH, 'w') as json_file:
json.dump(go_freq_dict, json_file)
def get_frequency_counts():
""" This function precomputes the term frequency counts if these are outdated or not present. If they are present and
valid, it will directly return the frequency counts.
Returns
-------
    A dictionary that maps each GO term onto its frequency count.
"""
if not os.path.isfile(FREQUENCY_COUNTS_FILE_PATH):
_precompute_term_frequencies()
frequency_dict = json.load(open(FREQUENCY_COUNTS_FILE_PATH))
return frequency_dict
if __name__ == "__main__":
get_frequency_counts()
| 33.869565
| 122
| 0.725931
|
168c853fd2396c97b06e241eef5977c336152d83
| 781
|
py
|
Python
|
alembic/versions/20220322-2253_.py
|
marutoraman/fastapi-awesome-template
|
65613e2227e02633491f582625d9611af9235975
|
[
"MIT"
] | null | null | null |
alembic/versions/20220322-2253_.py
|
marutoraman/fastapi-awesome-template
|
65613e2227e02633491f582625d9611af9235975
|
[
"MIT"
] | null | null | null |
alembic/versions/20220322-2253_.py
|
marutoraman/fastapi-awesome-template
|
65613e2227e02633491f582625d9611af9235975
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 6d4151b10c3e
Revises: 5d2779598260
Create Date: 2022-03-22 22:53:12.719557
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "6d4151b10c3e"
down_revision = "5d2779598260"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("jobs", sa.Column("deleted_at", sa.DateTime(), nullable=True))
op.add_column("users", sa.Column("deleted_at", sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("users", "deleted_at")
op.drop_column("jobs", "deleted_at")
# ### end Alembic commands ###
| 25.193548
| 81
| 0.691421
|
ab6b94ebba07c371af3a43adf5033efc0ed41135
| 8,089
|
py
|
Python
|
models/unet3d.py
|
DSciLab/TDDA
|
193e5dc26c5c0b291c6323e2607a6eb77b2e067e
|
[
"MIT"
] | null | null | null |
models/unet3d.py
|
DSciLab/TDDA
|
193e5dc26c5c0b291c6323e2607a6eb77b2e067e
|
[
"MIT"
] | null | null | null |
models/unet3d.py
|
DSciLab/TDDA
|
193e5dc26c5c0b291c6323e2607a6eb77b2e067e
|
[
"MIT"
] | null | null | null |
import importlib
import torch.nn as nn
from .unet3d_part import DoubleConv, ExtResNetBlock, create_encoders, \
create_decoders
from .utils import number_of_features_per_level
class Abstract3DUNet(nn.Module):
"""
Base class for standard and residual UNet.
Args:
in_channels (int): number of input channels
out_channels (int): number of output segmentation masks;
            Note that out_channels might correspond either to different semantic classes or to
            different binary segmentation masks.
It's up to the user of the class to interpret the out_channels and
use the proper loss criterion during training (i.e. CrossEntropyLoss (multi-class)
or BCEWithLogitsLoss (two-class) respectively)
        f_maps (int, tuple): number of feature maps at each level of the encoder; if it's an integer the number
            of feature maps at level k is f_maps * 2^k, k=0,1,...,num_levels-1 (e.g. 64, 128, 256, 512 for f_maps=64)
final_sigmoid (bool): if True apply element-wise nn.Sigmoid after the
final 1x1 convolution, otherwise apply nn.Softmax. MUST be True if nn.BCELoss (two-class) is used
to train the model. MUST be False if nn.CrossEntropyLoss (multi-class) is used to train the model.
basic_module: basic model for the encoder/decoder (DoubleConv, ExtResNetBlock, ....)
layer_order (string): determines the order of layers
in `SingleConv` module. e.g. 'crg' stands for Conv3d+ReLU+GroupNorm3d.
See `SingleConv` for more info
num_groups (int): number of groups for the GroupNorm
num_levels (int): number of levels in the encoder/decoder path (applied only if f_maps is an int)
is_segmentation (bool): if True (semantic segmentation problem) Sigmoid/Softmax normalization is applied
after the final convolution; if False (regression problem) the normalization layer is skipped at the end
testing (bool): if True (testing mode) the `final_activation` (if present, i.e. `is_segmentation=true`)
will be applied as the last operation during the forward pass; if False the model is in training mode
and the `final_activation` (even if present) won't be applied; default: False
conv_kernel_size (int or tuple): size of the convolving kernel in the basic_module
pool_kernel_size (int or tuple): the size of the window
conv_padding (int or tuple): add zero-padding added to all three sides of the input
"""
def __init__(self, in_channels, out_channels, final_sigmoid, basic_module, f_maps=64, layer_order='gcr',
num_groups=8, num_levels=4, is_segmentation=True, testing=False,
conv_kernel_size=3, pool_kernel_size=2, conv_padding=1, **kwargs):
super(Abstract3DUNet, self).__init__()
self.testing = testing
if isinstance(f_maps, int):
f_maps = number_of_features_per_level(f_maps, num_levels=num_levels)
assert isinstance(f_maps, list) or isinstance(f_maps, tuple)
assert len(f_maps) > 1, "Required at least 2 levels in the U-Net"
# create encoder path
self.encoders = create_encoders(in_channels, f_maps, basic_module, conv_kernel_size, conv_padding, layer_order,
num_groups, pool_kernel_size)
# create decoder path
self.decoders = create_decoders(f_maps, basic_module, conv_kernel_size, conv_padding, layer_order, num_groups,
upsample=True)
# in the last layer a 1×1 convolution reduces the number of output
# channels to the number of labels
self.final_conv = nn.Conv3d(f_maps[0], out_channels, 1)
if is_segmentation:
# semantic segmentation problem
if final_sigmoid:
self.final_activation = nn.Sigmoid()
else:
self.final_activation = nn.Softmax(dim=1)
else:
# regression problem
self.final_activation = None
def forward(self, x):
# encoder part
encoders_features = []
for encoder in self.encoders:
x = encoder(x)
# reverse the encoder outputs to be aligned with the decoder
encoders_features.insert(0, x)
# remove the last encoder's output from the list
# !!remember: it's the 1st in the list
encoders_features = encoders_features[1:]
# decoder part
for decoder, encoder_features in zip(self.decoders, encoders_features):
# pass the output from the corresponding encoder and the output
# of the previous decoder
x = decoder(encoder_features, x)
x = self.final_conv(x)
# apply final_activation (i.e. Sigmoid or Softmax) only during prediction. During training the network outputs
# logits and it's up to the user to normalize it before visualising with tensorboard or computing validation metric
if self.testing and self.final_activation is not None:
x = self.final_activation(x)
return x
class UNet3D(Abstract3DUNet):
"""
3DUnet model from
`"3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation"
<https://arxiv.org/pdf/1606.06650.pdf>`.
Uses `DoubleConv` as a basic_module and nearest neighbor upsampling in the decoder
"""
# def __init__(self, in_channels, out_channels, final_sigmoid=True, f_maps=64, layer_order='gcr',
# num_groups=8, num_levels=4, is_segmentation=True, conv_padding=1, **kwargs):
def __init__(self, opt):
super(UNet3D, self).__init__(in_channels=opt.image_chan,
out_channels=opt.num_classes,
final_sigmoid=opt.get('final_sigmoid', True),
basic_module=DoubleConv,
f_maps=opt.get('f_maps', 64),
layer_order=opt.get('layer_order', 'gcr'),
num_groups=opt.get('num_groups', 8),
num_levels=opt.get('num_levels', 4),
pool_kernel_size=tuple(opt.get('pool_kernel_size', [2, 2, 2])),
is_segmentation=opt.get('is_segmentation', True),
conv_padding=opt.get('conv_padding', 1))
class ResidualUNet3D(Abstract3DUNet):
"""
Residual 3DUnet model implementation based on https://arxiv.org/pdf/1706.00120.pdf.
Uses ExtResNetBlock as a basic building block, summation joining instead
of concatenation joining and transposed convolutions for upsampling (watch out for block artifacts).
Since the model effectively becomes a residual net, in theory it allows for deeper UNet.
"""
# def __init__(self, in_channels, out_channels, final_sigmoid=True, f_maps=64, layer_order='gcr',
# num_groups=8, num_levels=5, is_segmentation=True, conv_padding=1, **kwargs):
def __init__(self, opt):
super(ResidualUNet3D, self).__init__(in_channels=opt.in_channels,
out_channels=opt.num_classes,
final_sigmoid=opt.get('final_sigmoid', True),
basic_module=ExtResNetBlock,
f_maps=opt.get('f_maps', 64),
layer_order=opt.get('layer_order', 'gcr'),
num_groups=opt.get('num_groups', 8),
num_levels=opt.get('num_levels', 5),
pool_kernel_size=tuple(opt.get('pool_kernel_size', [2, 2, 2])),
is_segmentation=opt.get('is_segmentation', True),
conv_padding=opt.get('conv_padding', 1))
| 52.869281
| 123
| 0.617629
|
8575961db0065271f1df6098396d77a99295fcb5
| 2,981
|
py
|
Python
|
selfdrive/sentry.py
|
powerscv1/openpilot
|
7290f5886db367a68c18ff64fad660b3a593bd96
|
[
"MIT"
] | 2
|
2020-10-20T12:44:16.000Z
|
2020-11-17T16:21:54.000Z
|
selfdrive/sentry.py
|
powerscv1/openpilot
|
7290f5886db367a68c18ff64fad660b3a593bd96
|
[
"MIT"
] | 1
|
2022-02-13T07:28:46.000Z
|
2022-02-13T07:40:59.000Z
|
selfdrive/sentry.py
|
powerscv1/openpilot
|
7290f5886db367a68c18ff64fad660b3a593bd96
|
[
"MIT"
] | 1
|
2022-02-27T06:04:07.000Z
|
2022-02-27T06:04:07.000Z
|
"""Install exception handler for process crash."""
import os
import traceback
import sentry_sdk
from enum import Enum
from sentry_sdk.integrations.threading import ThreadingIntegration
from common.params import Params
from selfdrive.athena.registration import is_registered_device
from selfdrive.hardware import HARDWARE, PC
from selfdrive.swaglog import cloudlog
from selfdrive.version import get_branch, get_commit, get_origin, get_version, \
is_comma_remote, is_dirty, is_tested_branch
class SentryProject(Enum):
# python project
SELFDRIVE = "https://6f3c7076c1e14b2aa10f5dde6dda0cc4@o33823.ingest.sentry.io/77924"
# native project
SELFDRIVE_NATIVE = "https://3e4b586ed21a4479ad5d85083b639bc6@o33823.ingest.sentry.io/157615"
def report_tombstone(fn: str, message: str, contents: str) -> None:
cloudlog.error({'tombstone': message})
with sentry_sdk.configure_scope() as scope:
scope.set_extra("tombstone_fn", fn)
scope.set_extra("tombstone", contents)
sentry_sdk.capture_message(message=message)
sentry_sdk.flush()
def capture_exception(*args, **kwargs) -> None:
# opkr
save_exception(traceback.format_exc())
cloudlog.error("crash", exc_info=kwargs.get('exc_info', 1))
try:
sentry_sdk.capture_exception(*args, **kwargs)
sentry_sdk.flush() # https://github.com/getsentry/sentry-python/issues/291
except Exception:
cloudlog.exception("sentry exception")
def set_tag(key: str, value: str) -> None:
sentry_sdk.set_tag(key, value)
# opkr
def save_exception(exc_text):
if not ("athenad.py" in exc_text or "mapd.py" in exc_text): # ignore athenad.py or mapd.py error
if not os.path.exists('/data/log'):
os.makedirs('/data/log')
log_file = '/data/log/error.txt'
with open(log_file, 'w') as f:
f.write(exc_text)
f.close()
def init(project: SentryProject) -> None:
# forks like to mess with this, so double check
comma_remote = is_comma_remote() and "commaai" in get_origin(default="")
if not comma_remote or not is_registered_device() or PC:
return
env = "release" if is_tested_branch() else "master"
dongle_id = Params().get("DongleId", encoding='utf-8')
integrations = []
if project == SentryProject.SELFDRIVE:
integrations.append(ThreadingIntegration(propagate_hub=True))
else:
sentry_sdk.utils.MAX_STRING_LENGTH = 8192
sentry_sdk.init(project.value,
default_integrations=False,
release=get_version(),
integrations=integrations,
traces_sample_rate=1.0,
environment=env)
sentry_sdk.set_user({"id": dongle_id})
sentry_sdk.set_tag("dirty", is_dirty())
sentry_sdk.set_tag("origin", get_origin())
sentry_sdk.set_tag("branch", get_branch())
sentry_sdk.set_tag("commit", get_commit())
sentry_sdk.set_tag("device", HARDWARE.get_device_type())
if project == SentryProject.SELFDRIVE:
sentry_sdk.Hub.current.start_session()
| 33.122222
| 98
| 0.719222
|
5266bd11e7dd8915232c19158100191b25f30ae6
| 797
|
py
|
Python
|
Django-ArquivosEstaticos/Static/urls.py
|
DjCod3r/PythonScripts
|
95e70ebb81d2bc37b0283daff8ee723c5d2a382c
|
[
"MIT"
] | null | null | null |
Django-ArquivosEstaticos/Static/urls.py
|
DjCod3r/PythonScripts
|
95e70ebb81d2bc37b0283daff8ee723c5d2a382c
|
[
"MIT"
] | null | null | null |
Django-ArquivosEstaticos/Static/urls.py
|
DjCod3r/PythonScripts
|
95e70ebb81d2bc37b0283daff8ee723c5d2a382c
|
[
"MIT"
] | null | null | null |
"""Static URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path , include
urlpatterns = [
path('', include('paginas.urls')),
path('admin/', admin.site.urls),
]
| 34.652174
| 77
| 0.70138
|
c750776ec9ca0fe9993a493b0650285a6b3f1ec3
| 3,742
|
py
|
Python
|
plugins/action/sg_to_vn_to_vlan_info.py
|
steinzi/ansible-ise
|
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
|
[
"MIT"
] | null | null | null |
plugins/action/sg_to_vn_to_vlan_info.py
|
steinzi/ansible-ise
|
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
|
[
"MIT"
] | null | null | null |
plugins/action/sg_to_vn_to_vlan_info.py
|
steinzi/ansible-ise
|
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
|
[
"MIT"
] | null | null | null |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
try:
from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
AnsibleArgSpecValidator,
)
except ImportError:
ANSIBLE_UTILS_IS_INSTALLED = False
else:
ANSIBLE_UTILS_IS_INSTALLED = True
from ansible.errors import AnsibleActionFail
from ansible_collections.cisco.ise.plugins.module_utils.ise import (
ISESDK,
ise_argument_spec,
)
# Get common arguments specification
argument_spec = ise_argument_spec()
# Add arguments specific for this module
argument_spec.update(dict(
id=dict(type="str"),
page=dict(type="int"),
size=dict(type="int"),
filter=dict(type="list"),
filterType=dict(type="str"),
))
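# Illustrative task using this lookup module (hypothetical values; the parameter
# names mirror the argument spec above):
#   - cisco.ise.sg_to_vn_to_vlan_info:
#       id: "{{ mapping_id }}"
#       page: 1
#       size: 20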
required_if = []
required_one_of = []
mutually_exclusive = []
required_together = []
class ActionModule(ActionBase):
def __init__(self, *args, **kwargs):
if not ANSIBLE_UTILS_IS_INSTALLED:
raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'")
super(ActionModule, self).__init__(*args, **kwargs)
self._supports_async = True
self._result = None
# Checks the supplied parameters against the argument spec for this module
def _check_argspec(self):
aav = AnsibleArgSpecValidator(
data=self._task.args,
schema=dict(argument_spec=argument_spec),
schema_format="argspec",
schema_conditionals=dict(
required_if=required_if,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
),
name=self._task.action,
)
valid, errors, self._task.args = aav.validate()
if not valid:
raise AnsibleActionFail(errors)
def get_object(self, params):
new_object = dict(
id=params.get("id"),
page=params.get("page"),
size=params.get("size"),
filter=params.get("filter"),
filter_type=params.get("filterType"),
)
return new_object
def run(self, tmp=None, task_vars=None):
self._task.diff = False
self._result = super(ActionModule, self).run(tmp, task_vars)
self._result["changed"] = False
self._check_argspec()
ise = ISESDK(params=self._task.args)
id = self._task.args.get("id")
name = self._task.args.get("name")
if id:
response = ise.exec(
family="security_group_to_virtual_network",
function='get_security_groups_to_vn_to_vlan_by_id',
params=self.get_object(self._task.args)
).response['SgtVNVlanContainer']
self._result.update(dict(ise_response=response))
self._result.update(ise.exit_json())
return self._result
if not name and not id:
response = []
generator = ise.exec(
family="security_group_to_virtual_network",
function='get_security_groups_to_vn_to_vlan_generator',
params=self.get_object(self._task.args),
)
for item in generator:
tmp_response = item.response['SearchResult']['resources']
if isinstance(tmp_response, list):
response += tmp_response
else:
response.append(tmp_response)
self._result.update(dict(ise_response=response))
self._result.update(ise.exit_json())
return self._result
| 35.301887
| 128
| 0.632015
|
c961983efd65b76f091dd18dbab2af3269c7b66a
| 8,359
|
py
|
Python
|
decoder.py
|
PRRAG/DeepSpeech-PyTorch
|
cc65e008dacce794309b55bc4d3d75e909731b56
|
[
"MIT"
] | 1
|
2020-05-31T13:41:44.000Z
|
2020-05-31T13:41:44.000Z
|
decoder.py
|
PRRAG/DeepSpeech-PyTorch
|
cc65e008dacce794309b55bc4d3d75e909731b56
|
[
"MIT"
] | null | null | null |
decoder.py
|
PRRAG/DeepSpeech-PyTorch
|
cc65e008dacce794309b55bc4d3d75e909731b56
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# Modified to support pytorch Tensors
"""
This file contains different decoder classes for decoding the output after training
"""
import Levenshtein as Lev
import torch
from six.moves import xrange
class Decoder(object):
"""
Basic decoder class from which all other decoders inherit. Implements several
helper functions. Subclasses should implement the decode() method.
Arguments:
labels (string): mapping from integers to characters.
blank_index (int, optional): index for the blank '_' character. Defaults to 0.
space_index (int, optional): index for the space ' ' character. Defaults to 28.
"""
def __init__(self, labels, blank_index=0):
# e.g. labels = "_'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs' '#"
self.labels = labels
self.int_to_char = dict([(i, c) for (i, c) in enumerate(labels)])
self.blank_index = blank_index
space_index = len(labels) # To prevent errors in decode, we add an out of bounds index for the space
if ' ' in labels:
space_index = labels.index(' ')
self.space_index = space_index
def wer(self, s1, s2):
"""
        Computes the word-level edit distance (the unnormalised numerator of the
        Word Error Rate) between the two provided sentences after tokenizing into words.
Arguments:
s1 (string): space-separated sentence
s2 (string): space-separated sentence
"""
# build mapping of words to integers
b = set(s1.split() + s2.split())
word2char = dict(zip(b, range(len(b))))
# map the words to a char array (Levenshtein packages only accepts
# strings)
w1 = [chr(word2char[w]) for w in s1.split()]
w2 = [chr(word2char[w]) for w in s2.split()]
return Lev.distance(''.join(w1), ''.join(w2))
def cer(self, s1, s2):
"""
        Computes the character-level edit distance (the unnormalised numerator of the Character Error Rate).
Arguments:
s1 (string): space-separated sentence
s2 (string): space-separated sentence
"""
s1, s2, = s1.replace(' ', ''), s2.replace(' ', '')
return Lev.distance(s1, s2)
def decode(self, probs, sizes=None):
"""
Given a matrix of character probabilities, returns the decoder's
best guess of the transcription
Arguments:
probs: Tensor of character probabilities, where probs[c,t]
is the probability of character c at time t
sizes(optional): Size of each sequence in the mini-batch
Returns:
string: sequence of the model's best guess for the transcription
"""
raise NotImplementedError
class BeamCTCDecoder(Decoder):
def __init__(self, labels, lm_path=None, alpha=0, beta=0, cutoff_top_n=40, cutoff_prob=1.0, beam_width=100,
num_processes=4, blank_index=0):
super(BeamCTCDecoder, self).__init__(labels)
try:
from ctcdecode import CTCBeamDecoder
except ImportError:
            raise ImportError("BeamCTCDecoder requires the ctcdecode package.")
self._decoder = CTCBeamDecoder(labels, lm_path, alpha, beta, cutoff_top_n, cutoff_prob, beam_width,
num_processes, blank_index)
    # Convert the decoded output integers to the corresponding strings.
def convert_to_strings(self, out, seq_len):
results = []
for b, batch in enumerate(out):
utterances = []
for p, utt in enumerate(batch):
size = seq_len[b][p]
if size > 0:
transcript = ''.join(map(lambda x: self.int_to_char[x.item()], utt[0:size]))
else:
transcript = ''
utterances.append(transcript)
results.append(utterances)
return results
def convert_tensor(self, offsets, sizes):
results = []
for b, batch in enumerate(offsets):
utterances = []
for p, utt in enumerate(batch):
size = sizes[b][p]
if sizes[b][p] > 0:
utterances.append(utt[0:size])
else:
utterances.append(torch.tensor([], dtype=torch.int))
results.append(utterances)
return results
def decode(self, probs, sizes=None):
"""
Decodes probability output using ctcdecode package.
Arguments:
probs: Tensor of character probabilities, where probs[c,t]
is the probability of character c at time t
sizes: Size of each sequence in the mini-batch
Returns:
string: sequences of the model's best guess for the transcription
"""
probs = probs.cpu()
out, scores, offsets, seq_lens = self._decoder.decode(probs, sizes)
strings = self.convert_to_strings(out, seq_lens)
offsets = self.convert_tensor(offsets, seq_lens)
return strings, offsets
class GreedyDecoder(Decoder):
    """
    This decoder takes the highest-likelihood output label at each time step.
    """
    def __init__(self, labels, blank_index=0):
        super(GreedyDecoder, self).__init__(labels, blank_index)
def convert_to_strings(self, sequences, sizes=None, remove_repetitions=False, return_offsets=False):
"""Given a list of numeric sequences, returns the corresponding strings"""
strings = []
offsets = [] if return_offsets else None
for x in xrange(len(sequences)):
seq_len = sizes[x] if sizes is not None else len(sequences[x])
string, string_offsets = self.process_string(sequences[x], seq_len, remove_repetitions)
strings.append([string]) # We only return one path
if return_offsets:
offsets.append([string_offsets])
if return_offsets:
return strings, offsets
else:
return strings
    def process_string(self, sequence, size, remove_repetitions=False):
string = ''
offsets = []
for i in range(size):
char = self.int_to_char[sequence[i].item()]
if char != self.int_to_char[self.blank_index]:
# if this char is a repetition and remove_repetitions=true, then skip
if remove_repetitions and i != 0 and char == self.int_to_char[sequence[i - 1].item()]:
pass
elif char == self.labels[self.space_index]:
string += ' '
offsets.append(i)
else:
string = string + char
offsets.append(i)
return string, torch.tensor(offsets, dtype=torch.int)
def decode(self, probs, sizes=None):
"""
Returns the argmax decoding given the probability matrix. Removes
repeated elements in the sequence, as well as blanks.
Arguments:
probs: Tensor of character probabilities from the network. Expected shape of batch x seq_length x output_dim
sizes(optional): Size of each sequence in the mini-batch
Returns:
strings: sequences of the model's best guess for the transcription on inputs
offsets: time step per character predicted
"""
_, max_probs = torch.max(probs, 2)
strings, offsets = self.convert_to_strings(max_probs.view(max_probs.size(0), max_probs.size(1)), sizes,
remove_repetitions=True, return_offsets=True)
return strings, offsets
| 40.57767
| 120
| 0.601986
|
f73dc04e73c6d5fd2b38b1c5b4f62630509cf49b
| 7,886
|
py
|
Python
|
tests/serializer/test_dumps.py
|
QueoLda/django-unicorn
|
01573cd65282c467bfb0925542b180ffa9efba05
|
[
"MIT"
] | null | null | null |
tests/serializer/test_dumps.py
|
QueoLda/django-unicorn
|
01573cd65282c467bfb0925542b180ffa9efba05
|
[
"MIT"
] | null | null | null |
tests/serializer/test_dumps.py
|
QueoLda/django-unicorn
|
01573cd65282c467bfb0925542b180ffa9efba05
|
[
"MIT"
] | null | null | null |
import json
from decimal import Decimal
from django.db import models
from django.utils.timezone import now
import pytest
from django_unicorn import serializer
from django_unicorn.utils import dicts_equal
from example.coffee.models import Flavor
class SimpleTestModel(models.Model):
name = models.CharField(max_length=10)
class Meta:
app_label = "tests"
class ComplicatedTestModel(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey("self", blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
app_label = "tests"
def test_int():
expected = '{"name":123}'
actual = serializer.dumps({"name": 123})
assert expected == actual
def test_decimal():
expected = '{"name":"123.1"}'
actual = serializer.dumps({"name": Decimal("123.1")})
assert expected == actual
def test_string():
expected = '{"name":"abc"}'
actual = serializer.dumps({"name": "abc"})
assert expected == actual
def test_list():
expected = '{"name":["abc","def"]}'
actual = serializer.dumps({"name": ["abc", "def",]})
assert expected == actual
def test_simple_model():
simple_test_model = SimpleTestModel(id=1, name="abc")
expected = '{"simple_test_model":{"name":"abc","pk":1}}'
actual = serializer.dumps({"simple_test_model": simple_test_model})
assert expected == actual
def test_model_with_datetime(db):
datetime = now()
flavor = Flavor(name="name1", datetime=datetime)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": datetime.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3],
"time": None,
"duration": None,
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_with_datetime_as_string(db):
datetime = now().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
flavor = Flavor(name="name1", datetime=datetime)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": datetime,
"time": None,
"duration": None,
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_with_time_as_string(db):
time = now().strftime("%H:%M:%S.%f")[:-3]
flavor = Flavor(name="name1", time=time)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": None,
"time": time,
"duration": None,
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_with_duration_as_string(db):
duration = "-1 day, 19:00:00"
flavor = Flavor(name="name1", duration=duration)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": None,
"time": None,
"duration": "-1 19:00:00",
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_foreign_key():
test_model_one = ComplicatedTestModel(id=1, name="abc")
test_model_two = ComplicatedTestModel(id=2, name="def", parent=test_model_one)
expected = '{"test_model_two":{"name":"def","parent":1,"pk":2}}'
actual = serializer.dumps({"test_model_two": test_model_two})
assert expected == actual
def test_model_foreign_key_recursive_parent():
test_model_one = ComplicatedTestModel(id=1, name="abc")
test_model_two = ComplicatedTestModel(id=2, name="def", parent=test_model_one)
test_model_one.parent = test_model_two
expected = '{"test_model_two":{"name":"def","parent":1,"pk":2}}'
actual = serializer.dumps({"test_model_two": test_model_two})
assert expected == actual
@pytest.mark.django_db
def test_dumps_queryset(db):
flavor_one = Flavor(name="name1", label="label1")
flavor_one.save()
flavor_two = Flavor(name="name2", label="label2", parent=flavor_one)
flavor_two.save()
flavors = Flavor.objects.all()
expected_data = {
"flavors": [
{
"name": "name1",
"label": "label1",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor_one.uuid),
"date": None,
"datetime": None,
"time": None,
"duration": None,
"pk": 1,
},
{
"name": "name2",
"label": "label2",
"parent": 1,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor_two.uuid),
"date": None,
"datetime": None,
"time": None,
"duration": None,
"pk": 2,
},
]
}
actual = serializer.dumps({"flavors": flavors})
assert expected_data == json.loads(actual)
def test_get_model_dict():
flavor_one = Flavor(name="name1", label="label1")
actual = serializer._get_model_dict(flavor_one)
expected = {
"pk": None,
"name": "name1",
"label": "label1",
"parent": None,
"decimal_value": None,
"float_value": None,
"uuid": str(flavor_one.uuid),
"date": None,
"datetime": None,
"time": None,
"duration": None,
}
assert expected == actual
def test_float():
expected = '{"name":"0.0"}'
actual = serializer.dumps({"name": 0.0})
assert expected == actual
def test_dict_float():
expected = '{"name":{"another":"0.0"}}'
actual = serializer.dumps({"name": {"another": 0.0}})
assert expected == actual
def test_list_float():
expected = '{"name":[1,2,"0.0"]}'
actual = serializer.dumps({"name": [1, 2, 0.0]})
assert expected == actual
def test_nested_list_float():
expected = '{"name":{"blob":[1,2,"0.0"]}}'
actual = serializer.dumps({"name": {"blob": [1, 2, 0.0]}})
assert expected == actual
def test_nested_list_float_complicated():
expected = '{"name":{"blob":[1,2,"0.0"]},"more":["1.9",2,5],"another":[{"great":"1.0","ok":["1.6","0.0",4]}]}'
actual = serializer.dumps(
{
"name": {"blob": [1, 2, 0.0]},
"more": [1.9, 2, 5],
"another": [{"great": 1.0, "ok": [1.6, 0.0, 4]}],
}
)
assert expected == actual
def test_nested_list_float_less_complicated():
expected = '{"another":[{"great":"1.0","ok":["1.6","0.0",4]}]}'
actual = serializer.dumps({"another": [{"great": 1.0, "ok": [1.6, 0.0, 4]}],})
assert expected == actual
def test_pydantic():
from pydantic import BaseModel
class Book(BaseModel):
title = "The Grapes of Wrath"
author = "John Steinbeck"
expected = '{"title":"The Grapes of Wrath","author":"John Steinbeck"}'
actual = serializer.dumps(Book())
assert expected == actual
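# --- Illustrative sketch, not part of the original file ---
# Following the pattern of the tests above: the serializer coerces floats and
# Decimals to strings while leaving ints untouched, including inside nested
# dicts. This extra case is an assumption based on the behaviour asserted
# above, not an additional upstream test.
def test_nested_dict_decimal_sketch():
    expected = '{"order":{"total":"19.99","items":2}}'
    actual = serializer.dumps({"order": {"total": Decimal("19.99"), "items": 2}})
    assert expected == actual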
| 25.603896
| 114
| 0.546031
|
641e222b25567c7063f788d0dd05c35211d3af39
| 4,031
|
py
|
Python
|
LogSystem_JE/venv/Lib/site-packages/pygments/lexers/chapel.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
LogSystem_JE/venv/Lib/site-packages/pygments/lexers/chapel.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
LogSystem_JE/venv/Lib/site-packages/pygments/lexers/chapel.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
pygments.lexers.chapel
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Chapel language.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['ChapelLexer']
class ChapelLexer(RegexLexer):
"""
For `Chapel <https://chapel-lang.org/>`_ source.
.. versionadded:: 2.0
"""
name = 'Chapel'
filenames = ['*.chpl']
aliases = ['chapel', 'chpl']
# mimetypes = ['text/x-chapel']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
(r'//(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'(config|const|in|inout|out|param|ref|type|var)\b',
Keyword.Declaration),
(r'(false|nil|none|true)\b', Keyword.Constant),
(r'(bool|bytes|complex|imag|int|nothing|opaque|range|real|string|uint|void)\b',
Keyword.Type),
(words((
'align', 'as', 'atomic',
'begin', 'borrowed', 'break', 'by',
'catch', 'cobegin', 'coforall', 'continue',
'defer', 'delete', 'dmapped', 'do', 'domain',
'else', 'enum', 'except', 'export', 'extern',
'for', 'forall', 'forwarding',
'if', 'import', 'index', 'init', 'inline',
                'label', 'lambda', 'let', 'lifetime', 'local', 'locale',
'new', 'noinit',
'on', 'only', 'otherwise', 'override', 'owned',
'pragma', 'private', 'prototype', 'public',
'reduce', 'require', 'return',
'scan', 'select', 'serial', 'shared', 'single', 'sparse', 'subdomain', 'sync',
'then', 'this', 'throw', 'throws', 'try',
'unmanaged', 'use',
'when', 'where', 'while', 'with',
'yield',
'zip'), suffix=r'\b'),
Keyword),
(r'(iter)((?:\s)+)', bygroups(Keyword, Text), 'procname'),
(r'(proc)((?:\s)+)', bygroups(Keyword, Text), 'procname'),
(r'(class|module|record|union)(\s+)', bygroups(Keyword, Text),
'classname'),
# imaginary integers
(r'\d+i', Number),
(r'\d+\.\d*([Ee][-+]\d+)?i', Number),
(r'\.\d+([Ee][-+]\d+)?i', Number),
(r'\d+[Ee][-+]\d+i', Number),
# reals cannot end with a period due to lexical ambiguity with
# .. operator. See reference for rationale.
(r'(\d*\.\d+)([eE][+-]?[0-9]+)?i?', Number.Float),
(r'\d+[eE][+-]?[0-9]+i?', Number.Float),
# integer literals
# -- binary
(r'0[bB][01]+', Number.Bin),
# -- hex
(r'0[xX][0-9a-fA-F]+', Number.Hex),
# -- octal
(r'0[oO][0-7]+', Number.Oct),
# -- decimal
(r'[0-9]+', Number.Integer),
# strings
(r'"(\\\\|\\"|[^"])*"', String),
(r"'(\\\\|\\'|[^'])*'", String),
# tokens
(r'(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|'
r'<=>|<~>|\.\.|by|#|\.\.\.|'
r'&&|\|\||!|&|\||\^|~|<<|>>|'
r'==|!=|<=|>=|<|>|'
r'[+\-*/%]|\*\*)', Operator),
(r'[:;,.?()\[\]{}]', Punctuation),
# identifiers
(r'[a-zA-Z_][\w$]*', Name.Other),
],
'classname': [
(r'[a-zA-Z_][\w$]*', Name.Class, '#pop'),
],
'procname': [
(r'([a-zA-Z_][.\w$]*|\~[a-zA-Z_][.\w$]*|[+*/!~%<>=&^|\-]{1,2})',
Name.Function, '#pop'),
],
}
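# --- Illustrative usage sketch, not part of the original file ---
# A minimal way to drive the lexer above from a script; the snippet of Chapel
# source is made up purely for demonstration.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    chapel_source = 'proc main() {\n  writeln("hello");\n}\n'
    print(highlight(chapel_source, ChapelLexer(), TerminalFormatter()))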
| 35.672566
| 95
| 0.392707
|
381b0fed467665516906aae53361958744425a77
| 755
|
py
|
Python
|
video.py
|
jamesGadoury/slack-alert
|
cd690ab9bf294ecbc2c015e4910c2c401aba2869
|
[
"MIT"
] | null | null | null |
video.py
|
jamesGadoury/slack-alert
|
cd690ab9bf294ecbc2c015e4910c2c401aba2869
|
[
"MIT"
] | null | null | null |
video.py
|
jamesGadoury/slack-alert
|
cd690ab9bf294ecbc2c015e4910c2c401aba2869
|
[
"MIT"
] | null | null | null |
import cv2
class VideoCaptureWindow:
QUIT_BUTTON = 'quit'
PAUSE_BUTTON = 'pause'
def __init__(self, name='VideoCapture', width=1280, height=720):
self.name = name
self.width = width
self.height = height
def generate(self):
cv2.namedWindow(self.name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(self.name, self.width, self.height)
def update_frame(self, frame):
cv2.imshow(self.name, frame)
def handle_button_event(self):
key = cv2.waitKey(1)
if key == ord('q'):
return VideoCaptureWindow.QUIT_BUTTON
if key == ord('p'):
return VideoCaptureWindow.PAUSE_BUTTON
return None
def destroy(self):
cv2.destroyWindow(self.name)
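# --- Illustrative usage sketch, not part of the original file ---
# One way the window class above might be driven with a webcam capture loop;
# camera index 0 and the pause handling are assumptions for demonstration.
if __name__ == '__main__':
    capture = cv2.VideoCapture(0)
    window = VideoCaptureWindow()
    window.generate()
    paused = False
    while capture.isOpened():
        if not paused:
            ok, frame = capture.read()
            if not ok:
                break
            window.update_frame(frame)
        event = window.handle_button_event()
        if event == VideoCaptureWindow.QUIT_BUTTON:
            break
        if event == VideoCaptureWindow.PAUSE_BUTTON:
            paused = not paused
    capture.release()
    window.destroy()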
| 27.962963
| 68
| 0.62649
|
c1d80650d5f4bc467983be6252ff40b231731a26
| 3,234
|
py
|
Python
|
tests/operators/ci_gpu/test_fused_bn_reduce.py
|
xqdan/akg
|
e28501611d73d3957a1f3c58eeb6b028f2f2765d
|
[
"Apache-2.0"
] | null | null | null |
tests/operators/ci_gpu/test_fused_bn_reduce.py
|
xqdan/akg
|
e28501611d73d3957a1f3c58eeb6b028f2f2765d
|
[
"Apache-2.0"
] | null | null | null |
tests/operators/ci_gpu/test_fused_bn_reduce.py
|
xqdan/akg
|
e28501611d73d3957a1f3c58eeb6b028f2f2765d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from __future__ import absolute_import
import numpy as np
from gen_random import random_gaussian
from akg.utils import kernel_exec as utils
from akg.utils.result_analysis import gpu_profiling
from akg.utils.format_transform import to_tvm_nd_array
from akg.ops.poly_gpu import fused_bn_reduce_manual, fused_bn_reduce_auto
def compute_fused_bn_reduce(data, layout, out_dtype):
if layout == "NCHW":
data = np.transpose(data, axes=(0, 2, 3, 1))
n, h, w, c = data.shape
inter_dtype = 'float32'
if data.dtype != inter_dtype:
data = data.astype(inter_dtype)
data = np.reshape(data, (n * h * w, c))
output1 = np.sum(data, axis=0)
output1 = output1.astype(out_dtype)
squared = np.multiply(data, data)
output2 = np.sum(squared, axis=0)
output2 = output2.astype(out_dtype)
return [output1, output2]
def gen_data(in_shape, in_dtype, layout, out_dtype):
support_list = {"float16": np.float16, "float32": np.float32}
data = random_gaussian(in_shape, miu=1, sigma=0.1).astype(support_list[in_dtype])
expect = compute_fused_bn_reduce(data, layout, out_dtype)
output = np.full(expect[0].shape, np.nan, out_dtype)
output = [output, output]
return data, output, expect
def test_fused_bn_reduce(in_shape, in_dtype='float16', layout='NHWC', out_dtype='float32', poly_sch=False):
if layout != "NHWC" and layout != "NCHW":
raise NotImplementedError(
'Layout not supported {} '.format(layout))
op_attrs = [layout, out_dtype]
if poly_sch:
mod = utils.op_build_test(
fused_bn_reduce_auto,
[in_shape],
[in_dtype],
kernel_name="fused_bn_reduce_auto",
op_attrs=op_attrs,
attrs={
"target": "cuda",
"enable_akg_reduce_lib": True})
else:
mod = utils.op_build_test(
fused_bn_reduce_manual,
[in_shape],
[in_dtype],
kernel_name="fused_bn_reduce_manual",
op_attrs=op_attrs)
data, outputs, expect = gen_data(in_shape, in_dtype, layout, out_dtype)
inputs = [data]
arglist = inputs + outputs
output = utils.mod_launch(mod, arglist, outputs=tuple(range(-len(outputs), 0)), expect=expect)
res = np.allclose(output, expect, rtol=5e-03, atol=1.e-8)
print("Test {}".format("Pass" if res else "Fail"))
if not res:
print("Error cuda:========================")
print(mod.imported_modules[0].get_source())
raise AssertionError("Test fail")
inputs = to_tvm_nd_array(inputs)
expect = to_tvm_nd_array(expect)
return True
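# --- Illustrative sketch, not part of the original file ---
# compute_fused_bn_reduce() above builds the NumPy reference for the fused op:
# a per-channel sum and a per-channel sum of squares over the N*H*W axis.
# The tiny self-check below only exercises that reference helper; the shape
# and values are made up and no kernel is built.
if __name__ == '__main__':
    x = np.arange(2 * 2 * 2 * 3, dtype=np.float16).reshape(2, 2, 2, 3)  # NHWC
    channel_sum, channel_sq_sum = compute_fused_bn_reduce(x, 'NHWC', 'float32')
    ref = x.astype('float32').reshape(-1, 3)
    assert np.allclose(channel_sum, ref.sum(axis=0))
    assert np.allclose(channel_sq_sum, (ref * ref).sum(axis=0))
    print("reference helper OK")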
| 35.933333
| 107
| 0.671305
|
0cceb13271728e47715f6a399bb5272e3e451ec4
| 681
|
py
|
Python
|
7-assets/past-student-repos/LambdaSchool-master/m6/63c1/recipe_batches/test_recipe_batches.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/LambdaSchool-master/m6/63c1/recipe_batches/test_recipe_batches.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/LambdaSchool-master/m6/63c1/recipe_batches/test_recipe_batches.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
import unittest
from recipe_batches import recipe_batches
class Test(unittest.TestCase):
def test_recipe_batches(self):
self.assertEqual(recipe_batches(
{'milk': 100, 'flour': 4, 'sugar': 10, 'butter': 5},
{ 'milk': 1288, 'flour': 9, 'sugar': 95 }), 0)
self.assertEqual(recipe_batches(
{'milk': 100, 'butter': 50, 'cheese': 10},
{ 'milk': 198, 'butter': 52, 'cheese': 10 }), 1)
self.assertEqual(recipe_batches(
{'milk': 2, 'sugar': 40, 'butter': 20},
{ 'milk': 5, 'sugar': 120, 'butter': 500 }), 2)
self.assertEqual(recipe_batches(
{'milk': 2},
{ 'milk': 200}), 100)
if __name__ == '__main__':
unittest.main()
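# --- Illustrative sketch, not part of the original file ---
# The recipe_batches() function under test is imported from another module and
# is not shown here. Based on the assertions above, one plausible
# implementation is the floor of the most limiting ingredient ratio; this
# sketch is an assumption, not the actual solution file.
def recipe_batches_sketch(recipe, ingredients):
    # Any missing or insufficient ingredient caps the number of whole batches.
    return min(ingredients.get(item, 0) // amount for item, amount in recipe.items())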
| 30.954545
| 58
| 0.591777
|
6a9de5eaace353499572daaff22cbef4217c0e9f
| 260
|
py
|
Python
|
MQ/main/urls.py
|
A-a2008/MasterQuiz
|
25c516f4029c5936ffe2e77bf21a63309aaf3cf1
|
[
"MIT"
] | null | null | null |
MQ/main/urls.py
|
A-a2008/MasterQuiz
|
25c516f4029c5936ffe2e77bf21a63309aaf3cf1
|
[
"MIT"
] | null | null | null |
MQ/main/urls.py
|
A-a2008/MasterQuiz
|
25c516f4029c5936ffe2e77bf21a63309aaf3cf1
|
[
"MIT"
] | null | null | null |
from cgi import test
from django.urls import path
from .views import *
create_paper = CreatePaper()
urlpatterns = [
path("", home, name="home"),
path("test/", test1, name="test"),
path("create/", create_paper.create_paper_1, name="view_paper")
]
| 21.666667
| 67
| 0.680769
|
15907712004a58c2de3ee73d4ab51d9121837d9d
| 1,227
|
py
|
Python
|
import.py
|
danrneal/books
|
2f933745aee9194a58c19f44703677e0f9dcea4d
|
[
"MIT"
] | null | null | null |
import.py
|
danrneal/books
|
2f933745aee9194a58c19f44703677e0f9dcea4d
|
[
"MIT"
] | null | null | null |
import.py
|
danrneal/books
|
2f933745aee9194a58c19f44703677e0f9dcea4d
|
[
"MIT"
] | null | null | null |
import csv
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
# Check for environment variable
if not os.getenv("DATABASE_URL"):
raise RuntimeError("DATABASE_URL is not set")
# Set up database
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
def main():
with open('books.csv') as csvfile:
reader = csv.reader(csvfile)
next(reader)
for isbn, title, author, year in reader:
author_id = db.execute(
"INSERT INTO authors (name) VALUES(:name) ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name RETURNING id", {
"name": author
}
).fetchone()[0]
db.execute(
"INSERT INTO books (isbn, title, author_id, year) VALUES(:isbn, :title, :author_id, :year) ON CONFLICT (isbn) DO NOTHING", {
"isbn": isbn,
"title": title,
"author_id": author_id,
"year": year
}
)
print(f"Added book: {isbn}, {title}, {author}, {year}")
db.commit()
if __name__ == "__main__":
main()
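# --- Illustrative sketch, not part of the original file ---
# main() expects a books.csv with a header row followed by isbn,title,author,
# year columns. The helper below writes a tiny file of that shape (the row is
# made up) so the import loop above has something to consume; it is not part
# of the original script.
def write_sample_csv(path="books.csv"):
    with open(path, "w", newline="") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["isbn", "title", "author", "year"])
        writer.writerow(["1111111111", "Example Title", "Example Author", "2001"])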
| 30.675
| 140
| 0.568052
|
97f69381dc5f71b32069099a5c9932fdf36f9897
| 264
|
py
|
Python
|
18/pizzeria/pizzerias/urls.py
|
liqiwa/python_work
|
3d1198d5616b28a37fee7dfba5bbef0e1d489c2d
|
[
"Apache-2.0"
] | null | null | null |
18/pizzeria/pizzerias/urls.py
|
liqiwa/python_work
|
3d1198d5616b28a37fee7dfba5bbef0e1d489c2d
|
[
"Apache-2.0"
] | null | null | null |
18/pizzeria/pizzerias/urls.py
|
liqiwa/python_work
|
3d1198d5616b28a37fee7dfba5bbef0e1d489c2d
|
[
"Apache-2.0"
] | null | null | null |
"""定义pizzerias的url模式"""
from . import views
from django.conf.urls import url
urlpatterns = [
    # Home page
url(r'^$',views.index,name = 'index'),
    # Show all pizzas
url(r'^pizzas/$',views.pizzas,name = 'pizzas'),
url(r'^pizzas/(?P<pizza_id>\d+)/$',views.pizza,name = 'pizza'),
]
| 22
| 64
| 0.647727
|
882140064ddda301a6892e6f7827b44e46d29a6c
| 6,363
|
py
|
Python
|
NodeGraphQt/base/commands.py
|
wow2006/NodeGraphQt
|
e083c32ece3261a29155ff2cefe538c1131216c1
|
[
"MIT"
] | null | null | null |
NodeGraphQt/base/commands.py
|
wow2006/NodeGraphQt
|
e083c32ece3261a29155ff2cefe538c1131216c1
|
[
"MIT"
] | null | null | null |
NodeGraphQt/base/commands.py
|
wow2006/NodeGraphQt
|
e083c32ece3261a29155ff2cefe538c1131216c1
|
[
"MIT"
] | 1
|
2018-11-05T14:21:21.000Z
|
2018-11-05T14:21:21.000Z
|
#!/usr/bin/python
from PySide2.QtWidgets import QUndoCommand
class PropertyChangedCmd(QUndoCommand):
"""
Node property changed command.
"""
def __init__(self, node, name, value):
QUndoCommand.__init__(self)
self.setText('set {} ({})'.format(name, node.name()))
self.node = node
self.name = name
self.old_val = node.get_property(name)
self.new_val = value
def set_node_prop(self, name, value):
# set model data.
model = self.node.model
if name in model.properties.keys():
setattr(model, name, value)
elif name in model.custom_properties.keys():
model.custom_properties[name] = value
else:
raise KeyError('No property "{}"'.format(name))
# set view data.
view = self.node.view
# view widgets.
if hasattr(view, 'widgets') and name in view.widgets.keys():
view.widgets[name].value = value
# view properties.
if name in view.properties.keys():
setattr(view, name, value)
def undo(self):
if self.old_val != self.new_val:
self.set_node_prop(self.name, self.old_val)
def redo(self):
if self.old_val != self.new_val:
self.set_node_prop(self.name, self.new_val)
class NodeMovedCmd(QUndoCommand):
"""
Node moved command.
"""
def __init__(self, node, pos, prev_pos):
QUndoCommand.__init__(self)
self.node = node
self.pos = pos
self.prev_pos = prev_pos
def undo(self):
self.node.view.pos = self.prev_pos
self.node.model.pos = self.prev_pos
def redo(self):
if self.pos == self.prev_pos:
return
self.node.view.pos = self.pos
self.node.model.pos = self.pos
class NodeAddedCmd(QUndoCommand):
"""
Node added command.
"""
def __init__(self, graph, node, pos=None):
QUndoCommand.__init__(self)
self.setText('added node')
self.graph = graph
self.node = node
self.pos = pos
def undo(self):
self.pos = self.pos or self.node.pos()
self.graph.model.nodes.pop(self.node.id)
self.node.view.delete()
def redo(self):
self.graph.model.nodes[self.node.id] = self.node
self.graph.viewer().add_node(self.node.view, self.pos)
class NodeRemovedCmd(QUndoCommand):
"""
Node deleted command.
"""
def __init__(self, graph, node):
QUndoCommand.__init__(self)
self.setText('deleted node')
self.graph = graph
self.node = node
self.inputs = []
self.outputs = []
if hasattr(self.node, 'inputs'):
input_ports = self.node.inputs().values()
self.inputs = [(p, p.connected_ports()) for p in input_ports]
if hasattr(self.node, 'outputs'):
output_ports = self.node.outputs().values()
self.outputs = [(p, p.connected_ports()) for p in output_ports]
def undo(self):
self.graph.model.nodes[self.node.id] = self.node
self.graph.scene().addItem(self.node.view)
for port, connected_ports in self.inputs:
[port.connect_to(p) for p in connected_ports]
for port, connected_ports in self.outputs:
[port.connect_to(p) for p in connected_ports]
def redo(self):
for port, connected_ports in self.inputs:
[port.disconnect_from(p) for p in connected_ports]
for port, connected_ports in self.outputs:
[port.disconnect_from(p) for p in connected_ports]
self.graph.model.nodes.pop(self.node.id)
self.node.view.delete()
class PortConnectedCmd(QUndoCommand):
"""
Port connected command.
"""
def __init__(self, src_port, trg_port):
QUndoCommand.__init__(self)
self.source = src_port
self.target = trg_port
def undo(self):
src_model = self.source.model
trg_model = self.target.model
src_id = self.source.node().id
trg_id = self.target.node().id
port_names = src_model.connected_ports.get(trg_id)
        if port_names == []:
del src_model.connected_ports[trg_id]
if port_names and self.target.name() in port_names:
port_names.remove(self.target.name())
port_names = trg_model.connected_ports.get(src_id)
        if port_names == []:
del trg_model.connected_ports[src_id]
if port_names and self.source.name() in port_names:
port_names.remove(self.source.name())
self.source.view.disconnect_from(self.target.view)
def redo(self):
src_model = self.source.model
trg_model = self.target.model
src_id = self.source.node().id
trg_id = self.target.node().id
src_model.connected_ports[trg_id].append(self.target.name())
trg_model.connected_ports[src_id].append(self.source.name())
self.source.view.connect_to(self.target.view)
class PortDisconnectedCmd(QUndoCommand):
"""
Port disconnected command.
"""
def __init__(self, src_port, trg_port):
QUndoCommand.__init__(self)
self.source = src_port
self.target = trg_port
def undo(self):
src_model = self.source.model
trg_model = self.target.model
src_id = self.source.node().id
trg_id = self.target.node().id
src_model.connected_ports[trg_id].append(self.target.name())
trg_model.connected_ports[src_id].append(self.source.name())
self.source.view.connect_to(self.target.view)
def redo(self):
src_model = self.source.model
trg_model = self.target.model
src_id = self.source.node().id
trg_id = self.target.node().id
port_names = src_model.connected_ports.get(trg_id)
        if port_names == []:
del src_model.connected_ports[trg_id]
if port_names and self.target.name() in port_names:
port_names.remove(self.target.name())
port_names = trg_model.connected_ports.get(src_id)
        if port_names == []:
del trg_model.connected_ports[src_id]
if port_names and self.source.name() in port_names:
port_names.remove(self.source.name())
self.source.view.disconnect_from(self.target.view)
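# --- Illustrative usage sketch, not part of the original file ---
# These QUndoCommand subclasses are meant to be pushed onto a QUndoStack so
# every graph edit becomes undoable; pushing a command runs its redo()
# immediately. "graph", "node" and "pos" below stand for NodeGraphQt objects
# created elsewhere, so this is left as commented pseudocode:
#
#   from PySide2.QtWidgets import QUndoStack
#   undo_stack = QUndoStack()
#   undo_stack.push(NodeAddedCmd(graph, node, pos))         # adds the node
#   undo_stack.push(PropertyChangedCmd(node, 'name', 'A'))  # renames it
#   undo_stack.undo()                                       # reverts the rename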
| 30.3
| 75
| 0.616533
|
7d6cb622036128f0eb9aa283a9868191158ee1a1
| 2,918
|
py
|
Python
|
alephclient/tests/test_tasks.py
|
mynameisfiber/alephclient
|
02ca43da406151034ac30433c2fa734933af21f8
|
[
"MIT"
] | null | null | null |
alephclient/tests/test_tasks.py
|
mynameisfiber/alephclient
|
02ca43da406151034ac30433c2fa734933af21f8
|
[
"MIT"
] | null | null | null |
alephclient/tests/test_tasks.py
|
mynameisfiber/alephclient
|
02ca43da406151034ac30433c2fa734933af21f8
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
from alephclient.tasks.crawldir import crawl_dir
from alephclient.api import AlephAPI
class TestCrawldir(object):
def setup_method(self):
self.api = AlephAPI(
host="http://aleph.test/api/2/", api_key="fake_key"
)
def test_new_collection(self, mocker):
mocker.patch.object(self.api, "filter_collections", return_value=[])
mocker.patch.object(self.api, "create_collection")
mocker.patch.object(self.api, "update_collection")
mocker.patch.object(self.api, "ingest_upload")
crawl_dir(self.api, "alephclient/tests/testdata", "test153", {})
self.api.create_collection.assert_called_once_with({
'category': 'other',
'foreign_id': 'test153',
'label': 'test153',
'languages': [],
'summary': '',
'casefile': False
})
def test_ingest(self, mocker):
mocker.patch.object(self.api, "ingest_upload",
return_value={"id": 42})
mocker.patch.object(self.api, "load_collection_by_foreign_id",
return_value={"id": 2})
mocker.patch.object(self.api, "update_collection")
crawl_dir(self.api, "alephclient/tests/testdata", "test153", {})
base_path = os.path.abspath("alephclient/tests/testdata")
assert self.api.ingest_upload.call_count == 5
expected_calls = [
mocker.call(
2,
Path(os.path.join(base_path, "feb")),
metadata={
'foreign_id': 'feb',
'file_name': 'feb'
}
),
mocker.call(
2,
Path(os.path.join(base_path, "jan")),
metadata={
'foreign_id': 'jan',
'file_name': 'jan'
}
),
mocker.call(
2,
Path(os.path.join(base_path, "feb/2.txt")),
metadata={
'parent_id': 42,
'foreign_id': 'feb/2.txt',
'file_name': '2.txt'
}
),
mocker.call(
2,
Path(os.path.join(base_path, "jan/week1")),
metadata={
'parent_id': 42,
'foreign_id': 'jan/week1',
'file_name': 'week1'
}
),
mocker.call(
2,
Path(os.path.join(base_path, "jan/week1/1.txt")),
metadata={
'parent_id': 42,
'foreign_id': 'jan/week1/1.txt',
'file_name': '1.txt'
}
),
]
for call in expected_calls:
assert call in self.api.ingest_upload.mock_calls
| 34.329412
| 76
| 0.47464
|
bea7e2d5940a386fd4a6447e41cd156e4ee2d257
| 2,538
|
py
|
Python
|
paddlenlp/datasets/experimental/lcqmc.py
|
Steffy-zxf/PaddleNLP
|
0c9681e88e6e2115ed894876a69c5140ddbbfa29
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/datasets/experimental/lcqmc.py
|
Steffy-zxf/PaddleNLP
|
0c9681e88e6e2115ed894876a69c5140ddbbfa29
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/datasets/experimental/lcqmc.py
|
Steffy-zxf/PaddleNLP
|
0c9681e88e6e2115ed894876a69c5140ddbbfa29
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import os
from paddle.dataset.common import md5file
from paddle.utils.download import get_path_from_url
from paddlenlp.utils.env import DATA_HOME
from . import DatasetBuilder
__all__ = ['LCQMC']
class LCQMC(DatasetBuilder):
"""
LCQMC:A Large-scale Chinese Question Matching Corpus
More information please refer to `https://www.aclweb.org/anthology/C18-1166/`
"""
URL = "https://bj.bcebos.com/paddlehub-dataset/lcqmc.tar.gz"
MD5 = "62a7ba36f786a82ae59bbde0b0a9af0c"
META_INFO = collections.namedtuple('META_INFO', ('file', 'md5'))
SPLITS = {
'train': META_INFO(
os.path.join('lcqmc', 'train.tsv'),
'2193c022439b038ac12c0ae918b211a1'),
'dev': META_INFO(
os.path.join('lcqmc', 'dev.tsv'),
'c5dcba253cb4105d914964fd8b3c0e94'),
'test': META_INFO(
os.path.join('lcqmc', 'test.tsv'),
'8f4b71e15e67696cc9e112a459ec42bd'),
}
def _get_data(self, mode, **kwargs):
default_root = DATA_HOME
filename, data_hash = self.SPLITS[mode]
fullname = os.path.join(default_root, filename)
if not os.path.exists(fullname) or (data_hash and
not md5file(fullname) == data_hash):
            get_path_from_url(self.URL, default_root, self.MD5)
return fullname
def _read(self, filename):
"""Reads data."""
with open(filename, 'r', encoding='utf-8') as f:
head = None
for line in f:
data = line.strip().split("\t")
if not head:
head = data
else:
query, title, label = data
yield {"query": query, "title": title, "label": label}
def get_labels(self):
"""
Return labels of the LCQMC object.
"""
return ["0", "1"]
| 33.394737
| 81
| 0.621749
|
ed1a6681a34f65cd9cf35dabc263ac11bd167455
| 23,199
|
py
|
Python
|
malandro/gui/assignMentTransferTab.py
|
jorenretel/Malandro
|
d03bd12b894b314a55662b8002c2d6fc396d375a
|
[
"MIT"
] | null | null | null |
malandro/gui/assignMentTransferTab.py
|
jorenretel/Malandro
|
d03bd12b894b314a55662b8002c2d6fc396d375a
|
[
"MIT"
] | null | null | null |
malandro/gui/assignMentTransferTab.py
|
jorenretel/Malandro
|
d03bd12b894b314a55662b8002c2d6fc396d375a
|
[
"MIT"
] | null | null | null |
'''This module contains only one class: the class
describing the tab in the GUI where assignments
can be transferred in bulk to the ccpn analysis
project. A difference is made between two types
of assignments:
1) spin systems to residues, which also
implies resonanceSets to atomSets.
2) resonances to peak dimensions.
The user is able to configure which assignments
should be transferred to the project.
'''
from memops.gui.CheckButton import CheckButton
from memops.gui.Label import Label
from memops.gui.RadioButtons import RadioButtons
from memops.gui.LabelFrame import LabelFrame
from memops.gui.FloatEntry import FloatEntry
from memops.gui.IntEntry import IntEntry
from memops.gui.PulldownList import PulldownList
from memops.gui.ButtonList import ButtonList
from ccpnmr.analysis.core.AssignmentBasic import assignResToDim
from Tkinter import VERTICAL
from malandro.gui.assignmentFunctions import assignSpinSystemstoResidues
class AssignMentTransferTab(object):
'''the tab in the GUI where assignments
can be transferred in bulk to the ccpn analysis
project. A difference is made between two types
of assignments:
1) spin systems to residues, which also
implies resonanceSets to atomSets.
2) resonances to peak dimensions.
The user is able to configure which assignments
should be transferred to the project.
Attributes:
guiParent: gui object this tab is part of.
frame: the frame in which this element lives.
dataModel(src.cython.malandro.DataModel): dataModel
object describing the assignment proposed by
the algorithm.
selectedSolution (int): The index of the solution/run
            that is used as the template to make the assignments.
resonanceToDimension (bool): True if resonances should
be assigned to peak dimensions. False if not.
spinSystemToResidue (bool): True if spin system to
residue assignment should be carried out.
minScore (float): The minimal score of a spin system
assignment to a residue to be allowed
to transfer this assignment to the project
intra (bool): True if intra-residual peaks should be
assigned.
sequential (bool): True if sequential peaks should be
assigned.
noDiagonal (bool): If True, purely diagonal peaks are
ignored during the transfer of assignments.
allSpectra (bool): If True, all spectra will be assigned.
If False, one specified spectrum will be assigned.
spectrum (src.cython.malandro.Spectrum): The spectrum
that should be assigned.
'''
def __init__(self, parent, frame):
'''Init. args: parent: the guiElement that this
tab is part of.
frame: the frame this part of the
GUI lives in.
'''
self.guiParent = parent
self.frame = frame
# Buttons and fields,
# will be set in body():
self.peaksCheckButton = None
self.residuesCheckButton = None
self.intraCheckButton = None
self.sequentialCheckButton = None
self.noDiagonalCheckButton = None
self.spinSystemTypeSelect = None
self.minScoreEntry = None
self.solutionNumberEntry = None
self.spectrumSelect = None
self.spectraPullDown = None
self.assignedResidueStrategySelect = None
self.transferButton = None
# Settings that determine how assignments
# are transferred to the analysis project:
self.minScore = 80.0
self.dataModel = None
self.spectrum = None
self.selectedSolution = 1
self.body()
self.resonanceToDimension = True
self.spinSystemToResidue = True
self.intra = True
self.sequential = True
self.noDiagonal = True
self.allSpectra = True
self.spinSystemType = 0
self.strategy = 0
def body(self):
'''Describes the body of this tab. It consists
out of a number of radio buttons, check buttons
and number entries that allow the user to
indicate which assignments should be transferred.
'''
# self.frame.expandColumn(0)
self.frame.expandGrid(8, 0)
self.frame.expandGrid(8, 1)
typeOfAssignmentFrame = LabelFrame(
self.frame, text='type of assignment')
typeOfAssignmentFrame.grid(row=0, column=0, sticky='nesw')
# typeOfAssignmentFrame.expandGrid(0,5)
peakSelectionFrame = LabelFrame(
self.frame, text='which peaks to assign')
peakSelectionFrame.grid(row=0, column=1, sticky='nesw', rowspan=2)
spinSystemSelectionFrame = LabelFrame(self.frame,
text='Which spin-systems to use')
spinSystemSelectionFrame.grid(row=2, column=0, sticky='nesw')
tipText = 'What to do when a residue has already a spin system assigned to it.'
assignedResidueFrame = LabelFrame(self.frame,
text='if residue already has spin-system',
tipText=tipText)
assignedResidueFrame.grid(row=2, column=1, sticky='nesw')
spectrumSelectionFrame = LabelFrame(self.frame, text='spectra')
spectrumSelectionFrame.grid(row=1, column=0, sticky='nesw')
row = 0
Label(typeOfAssignmentFrame,
text='Resonances to Peak Dimensions',
grid=(row, 0))
self.peaksCheckButton = CheckButton(typeOfAssignmentFrame,
selected=True,
grid=(row, 1))
row += 1
Label(typeOfAssignmentFrame,
text='SpinSystems to Residues',
grid=(row, 0))
self.residuesCheckButton = CheckButton(
typeOfAssignmentFrame, selected=True, grid=(row, 1))
row = 0
Label(peakSelectionFrame, text='Intra-Residual', grid=(row, 0))
self.intraCheckButton = CheckButton(
peakSelectionFrame, selected=True, grid=(row, 1))
row += 1
Label(peakSelectionFrame, text='Sequential', grid=(row, 0))
self.sequentialCheckButton = CheckButton(
peakSelectionFrame, selected=True, grid=(row, 1))
row += 1
Label(peakSelectionFrame,
text='Do not assign diagonal peaks',
grid=(row, 0))
self.noDiagonalCheckButton = CheckButton(
peakSelectionFrame, selected=True, grid=(row, 1))
entries = ['Only assigned spin systems',
'All that have a score of at least: ',
'User Defined',
'Solution number:']
tipTexts = ['Only assign resonances of spin systems that already have a sequential assignment for the assignment of peak dimensions. Spin system to residue assignment is not relevant in this case.',
'Assign all spin systems that have a score of at least a given percentage. 50% or lower is not possible, because than spin systems might have to be assigned to more than 1 residue, which is impossible.',
"As defined in the lower row of buttons in the 'results' tab.",
'One of the single solutions of the annealing.']
self.spinSystemTypeSelect = RadioButtons(spinSystemSelectionFrame,
entries=entries, grid=(0, 0),
select_callback=None,
direction=VERTICAL,
gridSpan=(4, 1),
tipTexts=tipTexts)
tipText = 'The minimal amount of colabelling the different nuclei should have in order to still give rise to a peak.'
self.minScoreEntry = FloatEntry(spinSystemSelectionFrame,
grid=(1, 1), width=7,
text=str(self.minScore),
returnCallback=self.changeMinScore,
tipText=tipText)
self.minScoreEntry.bind('<Leave>', self.changeMinScore, '+')
self.solutionNumberEntry = IntEntry(spinSystemSelectionFrame,
grid=(3, 1), width=7, text=1,
returnCallback=self.solutionUpdate,
tipText=tipText)
self.solutionNumberEntry.bind('<Leave>', self.solutionUpdate, '+')
#self.solutionPullDown = PulldownList(spinSystemSelectionFrame, None, grid=(3,1), sticky='w')
entries = ['all spectra', 'only:']
tipTexts = ['Assign peaks in all the spectra that where selected before the annealing ran.',
'Only assign peaks in one particular spectrum. You can of course repeat this multiple times for different spectra.']
self.spectrumSelect = RadioButtons(spectrumSelectionFrame,
entries=entries,
grid=(0, 0),
select_callback=None,
direction=VERTICAL,
gridSpan=(2, 1), tipTexts=tipTexts)
self.spectraPullDown = PulldownList(spectrumSelectionFrame,
self.changeSpectrum,
grid=(1, 1), sticky='w')
entries = ['skip this residue',
'de-assign old spin system from residue',
'assign, but never merge',
'warn to merge']
tipTexts = ["Don't assign the new spin system to the residue. The residue is not skipped when the old spin system does not contain any resonances",
"De-assign old spin system from residue, unless the old spin system is a spin system without any resonances.",
"Don't merge any spin systems, merging can be performed later if nescesary in the Resonance --> SpinSystems window.",
"Ask to merge individually for each spin system, this might result in clicking on a lot of popups."]
self.assignedResidueStrategySelect = RadioButtons(assignedResidueFrame,
entries=entries,
grid=(0, 0),
select_callback=None,
direction=VERTICAL,
gridSpan=(2, 1),
tipTexts=tipTexts)
texts = ['Transfer Assignments']
commands = [self.transferAssignments]
self.transferButton = ButtonList(
self.frame, commands=commands, texts=texts)
self.transferButton.grid(row=5, column=0, sticky='nsew', columnspan=2)
def update(self):
        '''Update the necessary elements in the
tab. Is called when the algorithm
has produced possible assignments.
The only thing that has to be updated
in practice in this tab is the pulldown
with spectra.
'''
self.dataModel = self.guiParent.connector.results
self.updateSpectra()
def setDataModel(self, dataModel):
'''Here the dataModel, which is the dataModel
containing the suggested assignments body
the algorithm, can be set.
'''
self.dataModel = dataModel
self.update()
def updateSpectra(self, *opt):
'''Updates the spectra shown in the spectra
pulldown. These are only the spectra that
were used by the algorithm. All other spectra
in the project are not relevant since for those
no simulated peaks have been matched to real
peaks.
'''
if not self.dataModel:
return
spectrum = self.spectrum
spectra = self.dataModel.getSpectra()
if spectra:
names = [spectrum.name for spectrum in spectra]
index = 0
if self.spectrum not in spectra:
self.spectrum = spectra[0]
else:
index = spectra.index(self.spectrum)
self.spectraPullDown.setup(names, spectra, index)
def changeSpectrum(self, spectrum):
'''Select a spectum to be assigned.'''
self.spectrum = spectrum
def solutionUpdate(self, event=None, value=None):
'''Select a solution. A solution is a
one to one mapping of spin systems
to residues produced by one run of
the algorithm.
args: event: event object, this is
one of the values the number
entry calls his callback
function with.
value: the index of the solution/run.
'''
if not self.dataModel:
return
Nsolutions = len(self.dataModel.chain.residues[0].solutions)
if value is None:
value = self.solutionNumberEntry.get()
if value == self.selectedSolution:
return
else:
self.selectedSolution = value
if value < 1:
self.solutionNumberEntry.set(1)
self.selectedSolution = 1
elif value > Nsolutions:
self.selectedSolution = Nsolutions
self.solutionNumberEntry.set(self.selectedSolution)
else:
self.solutionNumberEntry.set(self.selectedSolution)
def fetchOptions(self):
'''Fetches user set options from the gui in
one go and stores them in their corresponding
instance variables.
'''
self.resonanceToDimension = self.peaksCheckButton.get()
self.spinSystemToResidue = self.residuesCheckButton.get()
self.intra = self.intraCheckButton.get()
self.sequential = self.sequentialCheckButton.get()
self.noDiagonal = self.noDiagonalCheckButton.get()
self.spinSystemType = self.spinSystemTypeSelect.getIndex()
self.strategy = ['skip', 'remove', 'noMerge', None][
self.assignedResidueStrategySelect.getIndex()]
self.allSpectra = [True, False][self.spectrumSelect.getIndex()]
def changeMinScore(self, event=None):
'''Set the minimal score for which a spin system
to residue assignment gets transferred to the
ccpn analysis project.
'''
newMinScore = self.minScoreEntry.get()
if self.minScore != newMinScore:
if newMinScore <= 50.0:
self.minScore = 51.0
self.minScoreEntry.set(51.0)
elif newMinScore > 100.0:
self.minScore = 100.0
self.minScoreEntry.set(100.0)
else:
self.minScore = newMinScore
def transferAssignments(self):
'''Transfer assignments to project depending
on the settings from the GUI.
'''
self.fetchOptions()
if not self.dataModel or (not self.resonanceToDimension and not self.spinSystemToResidue):
return
strategy = self.strategy
lookupSpinSystem = [self.getAssignedSpinSystem,
self.getBestScoringSpinSystem,
self.getUserDefinedSpinSystem,
self.getSelectedSolutionSpinSystem][self.spinSystemType]
residues = self.dataModel.chain.residues
spinSystemSequence = [lookupSpinSystem(res) for res in residues]
ccpnSpinSystems = []
ccpnResidues = []
# if self.spinSystemType == 0 it means that it for sure already
# assigned like this
if self.spinSystemToResidue and not self.spinSystemType == 0:
for spinSys, res in zip(spinSystemSequence, residues):
if spinSys and res:
ccpnSpinSystems.append(spinSys.getCcpnResonanceGroup())
ccpnResidues.append(res.getCcpnResidue())
assignSpinSystemstoResidues(ccpnSpinSystems,
ccpnResidues,
strategy=strategy,
guiParent=self.guiParent)
if self.resonanceToDimension:
allSpectra = self.allSpectra
if self.intra:
for residue, spinSystem in zip(residues, spinSystemSequence):
if not spinSystem:
continue
intraLink = residue.getIntraLink(spinSystem)
for pl in intraLink.getPeakLinks():
peak = pl.getPeak()
if not allSpectra and peak.getSpectrum() is not self.spectrum:
continue
if not peak:
continue
resonances = pl.getResonances()
if self.noDiagonal and len(set(resonances)) < len(resonances):
continue
for resonance, dimension in zip(resonances, peak.getDimensions()):
ccpnResonance = resonance.getCcpnResonance()
ccpnDimension = dimension.getCcpnDimension()
assignResToDim(ccpnDimension, ccpnResonance)
if self.sequential:
for residue, spinSystemA, spinSystemB in zip(residues,
spinSystemSequence,
spinSystemSequence[1:]):
if not spinSystemA or not spinSystemB:
continue
link = residue.getLink(spinSystemA, spinSystemB)
for pl in link.getPeakLinks():
peak = pl.getPeak()
if not allSpectra and peak.getSpectrum() is not self.spectrum:
continue
if not peak:
continue
resonances = pl.getResonances()
if self.noDiagonal and len(set(resonances)) < len(resonances):
continue
for resonance, dimension in zip(resonances, peak.getDimensions()):
ccpnResonance = resonance.getCcpnResonance()
ccpnDimension = dimension.getCcpnDimension()
assignResToDim(ccpnDimension, ccpnResonance)
self.guiParent.resultsTab.update()
def getAssignedSpinSystem(self, residue):
'''Get the spinSystem that is assigned in the project
to a residue.
args: residue (src.cython.malandro.Residue)
return: spinSystem (src.cython.malandro.SpinSystem)
'''
ccpCode = residue.ccpCode
seqCode = residue.getSeqCode()
spinSystems = self.dataModel.getSpinSystems()[ccpCode]
ccpnResidue = residue.getCcpnResidue()
if ccpnResidue:
assignedResonanceGroups = ccpnResidue.getResonanceGroups()
if len(assignedResonanceGroups) > 1:
                print 'There is more than one spin system assigned to residue %s, did not know which one to use to assign peaks. Therefore this residue is skipped.' % (seqCode)
return
assignedResonanceGroup = ccpnResidue.findFirstResonanceGroup()
if assignedResonanceGroup:
for spinSystem in spinSystems:
if spinSystem.getSerial() == assignedResonanceGroup.serial:
# Just checking to make sure, analysis project could
# have changed
if not self.skipResidue(residue, spinSystem):
return spinSystem
def getBestScoringSpinSystem(self, residue):
'''Get the spinSystem that scores the highest,
i.e. is assigned in most of the runs to the
given residue.
args: residue (src.cython.malandro.Residue)
return: spinSystem (src.cython.malandro.SpinSystem)
'''
solutions = residue.solutions
        weight = 1.0 / len(solutions)
        score, bestSpinSystem = max([(solutions.count(solution) * weight * 100.0, solution) for solution in solutions])
if score >= self.minScore and not bestSpinSystem.getIsJoker() and not self.skipResidue(residue, bestSpinSystem):
return bestSpinSystem
return None
def getUserDefinedSpinSystem(self, residue):
'''Get the spinSystem that is defined by the user
(probably in the resultsTab) as the correct
assignment of the given residue.
args: residue (src.cython.malandro.Residue)
return: spinSystem (src.cython.malandro.SpinSystem)
'''
userDefinedSpinSystem = residue.userDefinedSolution
if userDefinedSpinSystem and not userDefinedSpinSystem.getIsJoker() and not self.skipResidue(residue, userDefinedSpinSystem):
return userDefinedSpinSystem
return None
def getSelectedSolutionSpinSystem(self, residue):
        '''If a solution corresponding to one specific run
of the algorithm is defined, return which spinSystem
in that run got assigned to the given residue.
args: residue (src.cython.malandro.Residue)
return: spinSystem (src.cython.malandro.SpinSystem)
'''
solutions = residue.solutions
spinSystem = solutions[self.selectedSolution - 1]
if not spinSystem.getIsJoker() and not self.skipResidue(residue, spinSystem):
return spinSystem
return None
def skipResidue(self, residue, spinSystem):
'''One strategy is to skip all residues that
already have a spin system assignment.
If that is the case determine whether to
skip the given residue.
args: residue (src.cython.malandro.Residue)
spinSystem (src.cython.malandro.SpinSystem)
return: boolean, True if residue should be skipped.
'''
if self.strategy == 0:
assignedGroups = residue.getCcpnResidue().getResonanceGroups()
assignedSerials = set([spinSys.serial for spinSys in assignedGroups])
if assignedSerials and spinSystem.getSerial() not in assignedSerials:
return True
return False
| 37.906863
| 223
| 0.575628
|
32015a9e5781c041089fabb183941cb6321fcf06
| 2,013
|
py
|
Python
|
athena/convert_main.py
|
leixiaoning/Athena-Giga
|
d599cee4027126fc4efd27cefd69ce89b77530e0
|
[
"Apache-2.0"
] | null | null | null |
athena/convert_main.py
|
leixiaoning/Athena-Giga
|
d599cee4027126fc4efd27cefd69ce89b77530e0
|
[
"Apache-2.0"
] | null | null | null |
athena/convert_main.py
|
leixiaoning/Athena-Giga
|
d599cee4027126fc4efd27cefd69ce89b77530e0
|
[
"Apache-2.0"
] | 1
|
2021-03-15T08:04:15.000Z
|
2021-03-15T08:04:15.000Z
|
# coding=utf-8
# Copyright (C) ATHENA AUTHORS
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support tensorflow 2.0
# pylint: disable=invalid-name, no-member, redefined-outer-name
""" starting point for conversion of VC models """
import sys
import json
import tensorflow as tf
from absl import logging
from athena import ConvertSolver
from athena.stargan_main import (
parse_config,
build_model_from_jsonfile_stargan,
SUPPORTED_DATASET_BUILDER
)
def convert(jsonfile):
""" entry point for speech conversion, do some preparation work """
p, model, checkpointer = build_model_from_jsonfile_stargan(jsonfile)
checkpointer.restore_from_best()
assert p.testset_config is not None
dataset_builder = SUPPORTED_DATASET_BUILDER[p.dataset_builder](p.testset_config)
solver = ConvertSolver(model, dataset_builder, config=p.convert_config)
solver.convert(dataset_builder.as_dataset(batch_size=1))
if __name__ == "__main__":
logging.set_verbosity(logging.INFO)
if len(sys.argv) < 2:
logging.warning('Usage: python {} config_json_file'.format(sys.argv[0]))
sys.exit()
tf.random.set_seed(1)
jsonfile = sys.argv[1]
with open(jsonfile) as file:
config = json.load(file)
p = parse_config(config)
ConvertSolver.initialize_devices(p.solver_gpu)
convert(jsonfile)
| 35.315789
| 85
| 0.695976
|
4ef504d9081e22d2a445b4c0d4a8f86422daef0a
| 1,589
|
py
|
Python
|
tests/lib/pg_tools.py
|
opbro/postgres-elasticsearch-fdw
|
d79155bd275b998b4976878816e5f3b4ed933ea7
|
[
"MIT"
] | 87
|
2016-09-20T19:58:55.000Z
|
2022-03-05T02:50:11.000Z
|
tests/lib/pg_tools.py
|
opbro/postgres-elasticsearch-fdw
|
d79155bd275b998b4976878816e5f3b4ed933ea7
|
[
"MIT"
] | 31
|
2016-09-20T19:19:37.000Z
|
2021-12-22T13:01:54.000Z
|
tests/lib/pg_tools.py
|
opbro/postgres-elasticsearch-fdw
|
d79155bd275b998b4976878816e5f3b4ed933ea7
|
[
"MIT"
] | 22
|
2016-10-03T15:05:07.000Z
|
2022-03-05T02:50:01.000Z
|
""" Commands for interacting with PostgreSQL """
# pylint: disable=broad-except, no-member
from os.path import join
import io
import psycopg2
import sh
from lib.tools import TEST_FOLDER
def pg_is_available():
""" Test if PostgreSQL is running """
try:
return sql("select 1 + 1;")[0][0] == 2
except Exception:
return False
def load_sql_file(filename):
""" Load SQL file into PostgreSQL """
path = join(TEST_FOLDER, "data", filename)
with open(path, "r") as handle:
sh.psql(
"postgres",
"--no-psqlrc",
host="localhost",
port="5432",
username="postgres",
_in=handle,
)
def run_sql_test(filename):
""" Run SQL test file """
path = join(TEST_FOLDER, "test", filename)
with open(path, "r") as handle:
with io.StringIO() as out, io.StringIO() as err:
sh.psql(
"postgres",
"--no-psqlrc",
"--tuples-only",
host="localhost",
port="5432",
username="postgres",
quiet=True,
_in=handle,
_out=out,
_err=err,
)
return out.getvalue().strip(), err.getvalue().strip()
def sql(statement):
""" Execute SQL statement """
with psycopg2.connect(
host="localhost", port=5432, user="postgres", dbname="postgres"
) as conn:
with conn.cursor() as cursor:
cursor.execute(statement)
return cursor.fetchall()
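# --- Illustrative usage sketch, not part of the original file ---
# Typical flow in the test suite: check the server is reachable, load fixture
# SQL, then assert on a test file's output. The file names here are made up.
if __name__ == '__main__':
    if pg_is_available():
        load_sql_file("schema.sql")             # hypothetical file under tests/data/
        out, err = run_sql_test("example.sql")  # hypothetical file under tests/test/
        print(out or err)
    else:
        print("PostgreSQL is not reachable on localhost:5432")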
| 23.716418
| 71
| 0.528634
|
67a7e6b794291916ae9a585fedf57d04005bf5c4
| 22,036
|
py
|
Python
|
dependencies/panda/Panda3D-1.10.0-x64/direct/tkwidgets/AppShell.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 3
|
2018-03-09T12:07:29.000Z
|
2021-02-25T06:50:25.000Z
|
direct/src/tkwidgets/AppShell.py
|
Sinkay/panda3d
|
16bfd3750f726a8831771b81649d18d087917fd5
|
[
"PHP-3.01",
"PHP-3.0"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
direct/src/tkwidgets/AppShell.py
|
Sinkay/panda3d
|
16bfd3750f726a8831771b81649d18d087917fd5
|
[
"PHP-3.01",
"PHP-3.0"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
"""
AppShell provides a GUI application framework.
This is an adaption of AppShell.py found in Python and Tkinter Programming
by John E. Grayson which is a streamlined adaptation of GuiAppD.py, originally
created by Doug Hellmann (doughellmann@mindspring.com).
"""
__all__ = ['AppShell']
from direct.showbase.DirectObject import DirectObject
from direct.showbase.TkGlobal import *
from tkFileDialog import *
from Tkinter import *
import Pmw
import Dial
import Floater
import Slider
import EntryScale
import VectorWidgets
import ProgressBar
"""
TO FIX:
Radiobutton ordering change
"""
# Create toplevel widget dictionary
try:
__builtins__["widgetDict"]
except KeyError:
__builtins__["widgetDict"] = {}
# Create toplevel variable dictionary
try:
__builtins__["variableDict"]
except KeyError:
__builtins__["variableDict"] = {}
def resetWidgetDict():
__builtins__["widgetDict"] = {}
def resetVariableDict():
__builtins__["variableDict"] = {}
# Inherit from MegaWidget instead of Toplevel so you can pass in a toplevel
# to use as a container if you wish. If no toplevel passed in, create one
class AppShell(Pmw.MegaWidget, DirectObject):
appversion = '1.0'
appname = 'Generic Application Frame'
copyright = ('Copyright 2004 Walt Disney Imagineering.' +
' All Rights Reserved')
contactname = 'Mark R. Mine'
contactphone = '(818) 544-2921'
contactemail = 'Mark.Mine@disney.com'
frameWidth = 450
frameHeight = 320
padx = 5
pady = 5
usecommandarea = 0
usestatusarea = 0
balloonState = 'none'
panelCount = 0
def __init__(self, parent = None, **kw):
optiondefs = (
('title', self.appname, None),
('padx', 1, Pmw.INITOPT),
('pady', 1, Pmw.INITOPT),
('framewidth', self.frameWidth, Pmw.INITOPT),
('frameheight', self.frameHeight, Pmw.INITOPT),
('usecommandarea', self.usecommandarea, Pmw.INITOPT),
('usestatusarea', self.usestatusarea, Pmw.INITOPT),
)
self.defineoptions(kw, optiondefs)
# If no toplevel passed in, create one
        if parent is None:
self.parent = Toplevel()
else:
self.parent = parent
# Initialize the base class
Pmw.MegaWidget.__init__(self, self.parent)
# Set window size
self.parent.geometry('%dx%d' % (self.frameWidth, self.frameHeight))
self.parent.title(self['title'])
# Create unique id
AppShell.panelCount += 1
self.id = self.appname + '-' + repr(AppShell.panelCount)
# Create a dictionary in the widgetDict to hold this panel's widgets
self.widgetDict = widgetDict[self.id] = {}
# And one to hold this panel's variables
self.variableDict = variableDict[self.id] = {}
# Get handle to the toplevels hull
self._hull = self.component('hull')
# Initialize the application
self.appInit()
# create the interface
self.__createInterface()
# Set focus to ourselves
self.focus_set()
# initialize our options
self.initialiseoptions(AppShell)
self.pack(fill = BOTH, expand = 1)
def __createInterface(self):
self.__createBalloon()
self.__createMenuBar()
self.__createDataArea()
self.__createCommandArea()
self.__createMessageBar()
self.__createAboutBox()
# Add binding for panel cleanup code
self.interior().bind('<Destroy>', self.onDestroy)
#
# Create the parts of the interface
# which can be modified by subclasses
#
self.createMenuBar()
self.createInterface()
def __createBalloon(self):
# Create the balloon help manager for the frame.
# Create the manager for the balloon help
self.__balloon = self.createcomponent('balloon', (), None,
Pmw.Balloon, (self._hull,))
self.__balloon.configure(state = self.balloonState)
def __createMenuBar(self):
self.menuFrame = Frame(self._hull)
self.menuBar = self.createcomponent('menubar', (), None,
Pmw.MenuBar,
(self.menuFrame,),
hull_relief=FLAT,
hull_borderwidth=0,
balloon=self.balloon())
self.menuBar.addmenu('Help', 'About %s' % self.appname, side = 'right')
self.menuBar.addmenu('File', 'File commands and Quit')
self.menuBar.pack(fill=X, side = LEFT)
# Force some space between pull down menus and other widgets
spacer = Label(self.menuFrame, text = ' ')
spacer.pack(side = LEFT, expand = 0)
self.menuFrame.pack(fill = X)
def __createDataArea(self):
# Create data area where data entry widgets are placed.
self.dataArea = self.createcomponent('dataarea',
(), None,
Frame, (self._hull,),
relief=GROOVE,
bd=1)
self.dataArea.pack(side=TOP, fill=BOTH, expand=YES,
padx=self['padx'], pady=self['pady'])
def __createCommandArea(self):
# Create a command area for application-wide buttons.
self.__commandFrame = self.createcomponent('commandframe', (), None,
Frame,
(self._hull,),
relief=SUNKEN,
bd=1)
self.__buttonBox = self.createcomponent('buttonbox', (), None,
Pmw.ButtonBox,
(self.__commandFrame,),
padx=0, pady=0)
self.__buttonBox.pack(side=TOP, expand=NO, fill=X)
if self['usecommandarea']:
self.__commandFrame.pack(side=TOP,
expand=NO,
fill=X,
padx=self['padx'],
pady=self['pady'])
def __createMessageBar(self):
# Create the message bar area for help and status messages.
frame = self.createcomponent('bottomtray', (), None,
Frame, (self._hull,), relief=SUNKEN)
self.__messageBar = self.createcomponent('messagebar',
(), None,
Pmw.MessageBar,
(frame,),
#entry_width = 40,
entry_relief=SUNKEN,
entry_bd=1,
labelpos=None)
self.__messageBar.pack(side=LEFT, expand=YES, fill=X)
self.__progressBar = ProgressBar.ProgressBar(
frame,
fillColor='slateblue',
doLabel=1,
width=150)
self.__progressBar.frame.pack(side=LEFT, expand=NO, fill=NONE)
self.updateProgress(0)
if self['usestatusarea']:
frame.pack(side=BOTTOM, expand=NO, fill=X)
self.__balloon.configure(statuscommand = \
self.__messageBar.helpmessage)
def __createAboutBox(self):
Pmw.aboutversion(self.appversion)
Pmw.aboutcopyright(self.copyright)
Pmw.aboutcontact(
'For more information, contact:\n %s\n Phone: %s\n Email: %s' %\
(self.contactname, self.contactphone,
self.contactemail))
self.about = Pmw.AboutDialog(self._hull,
applicationname=self.appname)
self.about.withdraw()
def toggleBalloon(self):
if self.toggleBalloonVar.get():
self.__balloon.configure(state = 'both')
else:
self.__balloon.configure(state = 'status')
def showAbout(self):
# Create the dialog to display about and contact information.
self.about.show()
self.about.focus_set()
def quit(self):
self.parent.destroy()
### USER METHODS ###
# To be overridden
def appInit(self):
# Called before interface is created (should be overridden).
pass
def createInterface(self):
# Override this method to create the interface for the app.
pass
def onDestroy(self, event):
# Override this method with actions to be performed on panel shutdown
pass
def createMenuBar(self):
# Creates default menus. Can be overridden or simply augmented
# Using button Add below
self.menuBar.addmenuitem('Help', 'command',
'Get information on application',
label='About...', command=self.showAbout)
self.toggleBalloonVar = IntVar()
if self.balloonState == 'none':
self.toggleBalloonVar.set(0)
else:
self.toggleBalloonVar.set(1)
self.menuBar.addmenuitem('Help', 'checkbutton',
'Toggle balloon help',
label='Balloon help',
variable = self.toggleBalloonVar,
command=self.toggleBalloon)
self.menuBar.addmenuitem('File', 'command', 'Quit this application',
label='Quit',
command=self.quit)
# Getters
def interior(self):
# Retrieve the interior site where widgets should go.
return self.dataArea
def balloon(self):
# Retrieve the panel's balloon widget
return self.__balloon
def buttonBox(self):
# Retrieve the button box.
return self.__buttonBox
def messageBar(self):
# Retieve the message bar
return self.__messageBar
# Utility functions
def buttonAdd(self, buttonName, helpMessage=None,
statusMessage=None, **kw):
# Add a button to the button box.
newBtn = self.__buttonBox.add(buttonName)
newBtn.configure(kw)
if helpMessage:
self.bind(newBtn, helpMessage, statusMessage)
return newBtn
def alignbuttons(self):
""" Make all buttons wide as widest """
self.__buttonBox.alignbuttons()
def bind(self, child, balloonHelpMsg, statusHelpMsg=None):
# Bind a help message and/or status message to a widget.
self.__balloon.bind(child, balloonHelpMsg, statusHelpMsg)
def updateProgress(self, newValue=0, newMax=0):
# Used to update progress bar
self.__progressBar.updateProgress(newValue, newMax)
## WIDGET UTILITY FUNCTIONS ##
def addWidget(self, category, text, widget):
self.widgetDict[category + '-' + text] = widget
def getWidget(self, category, text):
return self.widgetDict.get(category + '-' + text, None)
def addVariable(self, category, text, variable):
self.variableDict[category + '-' + text] = variable
def getVariable(self, category, text):
return self.variableDict.get(category + '-' + text, None)
def createWidget(self, parent, category, text, widgetClass,
help, command, side, fill, expand, kw):
# Update kw to reflect user inputs
kw['text'] = text
# Create widget
        widget = widgetClass(parent, **kw)  # direct call; apply() no longer exists in Python 3
# Do this after so command isn't called on widget creation
widget['command'] = command
# Pack widget
widget.pack(side = side, fill = fill, expand = expand)
# Bind help
self.bind(widget, help)
# Record widget
self.addWidget(category, text, widget)
return widget
def newCreateLabeledEntry(self, parent, category, text, help = '',
command = None, value = '',
width = 12, relief = SUNKEN,
side = LEFT, fill = X, expand = 0):
""" createLabeledEntry(parent, category, text, [options]) """
# Create labeled entry
frame = Frame(parent)
variable = StringVar()
variable.set(value)
label = Label(frame, text = text)
label.pack(side = LEFT, fill = X, expand = 0)
entry = Entry(frame, width = width, relief = relief,
textvariable = variable)
entry.pack(side = LEFT, fill = X, expand = 1)
frame.pack(side = side, fill = X, expand = expand)
if command:
entry.bind('<Return>', command)
# Add balloon help
self.bind(label, help)
self.bind(entry, help)
# Record widgets and variable
self.addWidget(category, text, entry)
self.addWidget(category, text + '-Label', label)
self.addVariable(category, text, variable)
return entry
def newCreateButton(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
""" createButton(parent, category, text, [options]) """
# Create the widget
widget = self.createWidget(parent, category, text, Button,
help, command, side, fill, expand, kw)
return widget
def newCreateCheckbutton(self, parent, category, text,
help = '', command = None,
initialState = 0, anchor = W,
side = LEFT, fill = X, expand = 0, **kw):
""" createCheckbutton(parent, category, text, [options]) """
# Create the widget
widget = self.createWidget(parent, category, text, Checkbutton,
help, command, side, fill, expand, kw)
# Perform extra customization
widget['anchor'] = anchor
variable = BooleanVar()
variable.set(initialState)
self.addVariable(category, text, variable)
widget['variable'] = variable
return widget
def newCreateRadiobutton(self, parent, category, text, variable, value,
command = None, help = '', anchor = W,
side = LEFT, fill = X, expand = 0, **kw):
"""
createRadiobutton(parent, category, text, variable, value, [options])
"""
# Create the widget
widget = self.createWidget(parent, category, text, Radiobutton,
help, command, side, fill, expand, kw)
# Perform extra customization
widget['anchor'] = anchor
widget['value'] = value
widget['variable'] = variable
return widget
def newCreateFloater(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
# Create the widget
widget = self.createWidget(parent, category, text,
Floater.Floater,
help, command, side, fill, expand, kw)
return widget
def newCreateDial(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
# Create the widget
widget = self.createWidget(parent, category, text,
Dial.Dial,
help, command, side, fill, expand, kw)
return widget
def newCreateSider(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
# Create the widget
widget = self.createWidget(parent, category, text,
Slider.Slider,
help, command, side, fill, expand, kw)
return widget
def newCreateEntryScale(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
# Create the widget
widget = self.createWidget(parent, category, text,
EntryScale.EntryScale,
help, command, side, fill, expand, kw)
return widget
def newCreateVector2Entry(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
# Create the widget
widget = self.createWidget(parent, category, text,
VectorWidgets.Vector2Entry,
                                   help, command, side, fill, expand, kw)
        return widget
def newCreateVector3Entry(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
# Create the widget
widget = self.createWidget(parent, category, text,
VectorWidgets.Vector3Entry,
help, command, side, fill, expand, kw)
return widget
def newCreateColorEntry(self, parent, category, text,
help = '', command = None,
side = LEFT, fill = X, expand = 0, **kw):
# Create the widget
widget = self.createWidget(parent, category, text,
VectorWidgets.ColorEntry,
help, command, side, fill, expand, kw)
return widget
def newCreateOptionMenu(self, parent, category, text,
help = '', command = None, items = [],
labelpos = W, label_anchor = W,
label_width = 16, menu_tearoff = 1,
side = LEFT, fill = X, expand = 0, **kw):
# Create variable
variable = StringVar()
if len(items) > 0:
variable.set(items[0])
# Update kw to reflect user inputs
kw['items'] = items
kw['label_text'] = text
kw['labelpos'] = labelpos
kw['label_anchor'] = label_anchor
kw['label_width'] = label_width
kw['menu_tearoff'] = menu_tearoff
kw['menubutton_textvariable'] = variable
# Create widget
        widget = Pmw.OptionMenu(parent, **kw)
# Do this after so command isn't called on widget creation
widget['command'] = command
# Pack widget
widget.pack(side = side, fill = fill, expand = expand)
# Bind help
self.bind(widget.component('menubutton'), help)
# Record widget and variable
self.addWidget(category, text, widget)
self.addVariable(category, text, variable)
return widget
def newCreateComboBox(self, parent, category, text,
help = '', command = None,
items = [], state = DISABLED, history = 0,
labelpos = W, label_anchor = W,
label_width = 16, entry_width = 16,
side = LEFT, fill = X, expand = 0, **kw):
# Update kw to reflect user inputs
kw['label_text'] = text
kw['labelpos'] = labelpos
kw['label_anchor'] = label_anchor
kw['label_width'] = label_width
kw['entry_width'] = entry_width
kw['scrolledlist_items'] = items
kw['entryfield_entry_state'] = state
# Create widget
        widget = Pmw.ComboBox(parent, **kw)
# Bind selection command
widget['selectioncommand'] = command
# Select first item if it exists
if len(items) > 0:
widget.selectitem(items[0])
# Pack widget
widget.pack(side = side, fill = fill, expand = expand)
# Bind help
self.bind(widget, help)
# Record widget
self.addWidget(category, text, widget)
return widget
def transformRGB(self, rgb, max = 1.0):
retval = '#'
for v in [rgb[0], rgb[1], rgb[2]]:
v = (v/max)*255
if v > 255:
v = 255
if v < 0:
v = 0
retval = "%s%02x" % (retval, int(v))
return retval
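    # Illustrative example (not from the original source): with the default
    # max of 1.0, transformRGB((1.0, 0.5, 0.25)) returns '#ff7f3f'; components
    # above max are clamped to 255 and negative components to 0.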
class TestAppShell(AppShell):
# Override class variables here
appname = 'Test Application Shell'
usecommandarea = 1
usestatusarea = 1
def __init__(self, parent = None, **kw):
# Call superclass initialization function
AppShell.__init__(self)
self.initialiseoptions(TestAppShell)
def createButtons(self):
self.buttonAdd('Ok',
helpMessage='Exit',
statusMessage='Exit',
command=self.quit)
def createMain(self):
self.label = self.createcomponent('label', (), None,
Label,
(self.interior(),),
text='Data Area')
self.label.pack()
self.bind(self.label, 'Space taker')
def createInterface(self):
self.createButtons()
self.createMain()
if __name__ == '__main__':
test = TestAppShell(balloon_state='none')
| 39.279857
| 79
| 0.527274
|
11bfc267931dfd1ff5b4dc7571e2f7bd36ec8b8d
| 3,687
|
py
|
Python
|
senteval/binary.py
|
idavidrein/SentEval
|
fcac5a5c0598f2cad4239cc98ed3415671372b58
|
[
"BSD-3-Clause"
] | null | null | null |
senteval/binary.py
|
idavidrein/SentEval
|
fcac5a5c0598f2cad4239cc98ed3415671372b58
|
[
"BSD-3-Clause"
] | null | null | null |
senteval/binary.py
|
idavidrein/SentEval
|
fcac5a5c0598f2cad4239cc98ed3415671372b58
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
Binary classifier and corresponding datasets : MR, CR, SUBJ, MPQA
'''
from __future__ import absolute_import, division, unicode_literals
import io
import logging
import os
import numpy as np
from tqdm import tqdm
from senteval.tools.validation import InnerKFoldClassifier
class BinaryClassifierEval(object):
def __init__(self, pos, neg, seed=1111):
self.seed = seed
self.samples, self.labels = pos + neg, [1] * len(pos) + [0] * len(neg)
self.n_samples = len(self.samples)
def do_prepare(self, params, prepare):
# prepare is given the whole text
return prepare(params, self.samples)
        # prepare puts everything it outputs in "params": params.word2id, etc.
        # That output is then used by "batcher".
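    # Illustrative sketch (not part of SentEval): a minimal prepare/batcher pair
    # following the protocol described above; the helper names are assumptions.
    #
    #     def prepare(params, samples):
    #         # build something once (e.g. a vocabulary) and stash it on params
    #         params.word2id = {w: i for i, w in
    #                           enumerate(sorted({w for s in samples for w in s}))}
    #
    #     def batcher(params, batch):
    #         # must return one embedding row (np.array) per sentence in batch
    #         return np.vstack([np.ones(300) for _ in batch])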
def loadFile(self, fpath):
with io.open(fpath, 'r', encoding='latin-1') as f:
return [line.split() for line in f.read().splitlines()]
def run(self, params, batcher):
enc_input = []
# Sort to reduce padding
sorted_corpus = sorted(zip(self.samples, self.labels), key=lambda z: (len(z[0]), z[1]))
sorted_samples = [x for (x, y) in sorted_corpus]
sorted_labels = [y for (x, y) in sorted_corpus]
print('Generating sentence embeddings')
for ii in tqdm(range(0, self.n_samples, params.batch_size)):
batch = sorted_samples[ii:ii + params.batch_size]
embeddings = batcher(params, batch)
enc_input.append(embeddings)
enc_input = np.vstack(enc_input)
print('Generated sentence embeddings')
config = {
'nclasses': 2,
'seed': self.seed,
'usepytorch': params.usepytorch,
'classifier': params.classifier,
'nhid': params.nhid,
'kfold': params.kfold
}
clf = InnerKFoldClassifier(enc_input, np.array(sorted_labels), config)
devacc, testacc = clf.run()
print('Dev acc : {0} Test acc : {1}\n'.format(devacc, testacc))
return {'devacc': devacc, 'acc': testacc, 'ndev': self.n_samples, 'ntest': self.n_samples}
class CREval(BinaryClassifierEval):
def __init__(self, task_path, seed=1111):
print('***** Transfer task : CR *****\n\n')
pos = self.loadFile(os.path.join(task_path, 'custrev.pos'))
neg = self.loadFile(os.path.join(task_path, 'custrev.neg'))
super(self.__class__, self).__init__(pos, neg, seed)
class MREval(BinaryClassifierEval):
def __init__(self, task_path, seed=1111):
print('***** Transfer task : MR *****\n\n')
pos = self.loadFile(os.path.join(task_path, 'rt-polarity.pos'))
neg = self.loadFile(os.path.join(task_path, 'rt-polarity.neg'))
super(self.__class__, self).__init__(pos, neg, seed)
class SUBJEval(BinaryClassifierEval):
def __init__(self, task_path, seed=1111):
print('***** Transfer task : SUBJ *****\n\n')
obj = self.loadFile(os.path.join(task_path, 'subj.objective'))
subj = self.loadFile(os.path.join(task_path, 'subj.subjective'))
super(self.__class__, self).__init__(obj, subj, seed)
class MPQAEval(BinaryClassifierEval):
def __init__(self, task_path, seed=1111):
print('***** Transfer task : MPQA *****\n\n')
pos = self.loadFile(os.path.join(task_path, 'mpqa.pos'))
neg = self.loadFile(os.path.join(task_path, 'mpqa.neg'))
super(self.__class__, self).__init__(pos, neg, seed)
| 36.147059
| 98
| 0.638459
|
b959e05a9d9c7e07e71fe81f1f4204f27fcf4364
| 13,046
|
py
|
Python
|
Tester/RCube.py
|
AthulJoseph27/Thistlewaites_Algorithm
|
904d0a889d86be6191335aef66f36938a621cff2
|
[
"MIT"
] | null | null | null |
Tester/RCube.py
|
AthulJoseph27/Thistlewaites_Algorithm
|
904d0a889d86be6191335aef66f36938a621cff2
|
[
"MIT"
] | null | null | null |
Tester/RCube.py
|
AthulJoseph27/Thistlewaites_Algorithm
|
904d0a889d86be6191335aef66f36938a621cff2
|
[
"MIT"
] | null | null | null |
import os
import sys
import time
cube = []
def R():
global cube
t = cube[0][5]
cube[0][5] = cube[3][5]
cube[3][5] = cube[6][5]
cube[6][5] = cube[9][5]
cube[9][5] = t
t = cube[2][5]
cube[2][5] = cube[5][5]
cube[5][5] = cube[8][5]
cube[8][5] = cube[11][5]
cube[11][5] = t
t = cube[1][5]
cube[1][5] = cube[4][5]
cube[4][5] = cube[7][5]
cube[7][5] = cube[10][5]
cube[10][5] = t
t = cube[3][6]
cube[3][6] = cube[5][6]
cube[5][6] = cube[5][8]
cube[5][8] = cube[3][8]
cube[3][8] = t
t = cube[4][6]
cube[4][6] = cube[5][7]
cube[5][7] = cube[4][8]
cube[4][8] = cube[3][7]
cube[3][7] = t
def r():
global cube
t = cube[0][5]
cube[0][5] = cube[9][5]
cube[9][5] = cube[6][5]
cube[6][5] = cube[3][5]
cube[3][5] = t
t = cube[1][5]
cube[1][5] = cube[10][5]
cube[10][5] = cube[7][5]
cube[7][5] = cube[4][5]
cube[4][5] = t
t = cube[2][5]
cube[2][5] = cube[11][5]
cube[11][5] = cube[8][5]
cube[8][5] = cube[5][5]
cube[5][5] = t
t = cube[3][6]
cube[3][6] = cube[3][8]
cube[3][8] = cube[5][8]
cube[5][8] = cube[5][6]
cube[5][6] = t
t = cube[3][7]
cube[3][7] = cube[4][8]
cube[4][8] = cube[5][7]
cube[5][7] = cube[4][6]
cube[4][6] = t
def F():
global cube
t = cube[3][4]
cube[3][4] = cube[4][3]
cube[4][3] = cube[5][4]
cube[5][4] = cube[4][5]
cube[4][5] = t
b = cube[3][5]
cube[3][5] = cube[3][3]
cube[3][3] = cube[5][3]
cube[5][3] = cube[5][5]
cube[5][5] = b
c = cube[2][4]
cube[2][4] = cube[4][2]
cube[4][2] = cube[6][4]
cube[6][4] = cube[4][6]
cube[4][6] = c
d = cube[2][5]
cube[2][5] = cube[3][2]
cube[3][2] = cube[6][3]
cube[6][3] = cube[5][6]
cube[5][6] = d
h = cube[2][3]
cube[2][3] = cube[5][2]
cube[5][2] = cube[6][5]
cube[6][5] = cube[3][6]
cube[3][6] = h
def f():
global cube
t = cube[3][4]
cube[3][4] = cube[4][5]
cube[4][5] = cube[5][4]
cube[5][4] = cube[4][3]
cube[4][3] = t
b = cube[3][5]
cube[3][5] = cube[5][5]
cube[5][5] = cube[5][3]
cube[5][3] = cube[3][3]
cube[3][3] = b
c = cube[2][4]
cube[2][4] = cube[4][6]
cube[4][6] = cube[6][4]
cube[6][4] = cube[4][2]
cube[4][2] = c
d = cube[2][5]
cube[2][5] = cube[5][6]
cube[5][6] = cube[6][3]
cube[6][3] = cube[3][2]
cube[3][2] = d
h = cube[2][3]
cube[2][3] = cube[3][6]
cube[3][6] = cube[6][5]
cube[6][5] = cube[5][2]
cube[5][2] = h
def L():
global cube
t = cube[0][3]
cube[0][3] = cube[9][3]
cube[9][3] = cube[6][3]
cube[6][3] = cube[3][3]
cube[3][3] = t
c = cube[2][3]
cube[2][3] = cube[11][3]
cube[11][3] = cube[8][3]
cube[8][3] = cube[5][3]
cube[5][3] = c
b = cube[1][3]
cube[1][3] = cube[10][3]
cube[10][3] = cube[7][3]
cube[7][3] = cube[4][3]
cube[4][3] = b
d = cube[3][0]
cube[3][0] = cube[5][0]
cube[5][0] = cube[5][2]
cube[5][2] = cube[3][2]
cube[3][2] = d
h = cube[3][1]
cube[3][1] = cube[4][0]
cube[4][0] = cube[5][1]
cube[5][1] = cube[4][2]
cube[4][2] = h
def l():
global cube
t = cube[0][3]
cube[0][3] = cube[3][3]
cube[3][3] = cube[6][3]
cube[6][3] = cube[9][3]
cube[9][3] = t
c = cube[2][3]
cube[2][3] = cube[5][3]
cube[5][3] = cube[8][3]
cube[8][3] = cube[11][3]
cube[11][3] = c
b = cube[1][3]
cube[1][3] = cube[4][3]
cube[4][3] = cube[7][3]
cube[7][3] = cube[10][3]
cube[10][3] = b
d = cube[3][0]
cube[3][0] = cube[3][2]
cube[3][2] = cube[5][2]
cube[5][2] = cube[5][0]
cube[5][0] = d
h = cube[3][1]
cube[3][1] = cube[4][2]
cube[4][2] = cube[5][1]
cube[5][1] = cube[4][0]
cube[4][0] = h
def U():
global cube
t = cube[3][0]
cube[3][0] = cube[3][3]
cube[3][3] = cube[3][6]
cube[3][6] = cube[11][5]
cube[11][5] = t
c = cube[3][2]
cube[3][2] = cube[3][5]
cube[3][5] = cube[3][8]
cube[3][8] = cube[11][3]
cube[11][3] = c
b = cube[3][1]
cube[3][1] = cube[3][4]
cube[3][4] = cube[3][7]
cube[3][7] = cube[11][4]
cube[11][4] = b
d = cube[2][3]
cube[2][3] = cube[2][5]
cube[2][5] = cube[0][5]
cube[0][5] = cube[0][3]
cube[0][3] = d
h = cube[2][4]
cube[2][4] = cube[1][5]
cube[1][5] = cube[0][4]
cube[0][4] = cube[1][3]
cube[1][3] = h
def u():
global cube
t = cube[3][0]
cube[3][0] = cube[11][5]
cube[11][5] = cube[3][6]
cube[3][6] = cube[3][3]
cube[3][3] = t
c = cube[3][2]
cube[3][2] = cube[11][3]
cube[11][3] = cube[3][8]
cube[3][8] = cube[3][5]
cube[3][5] = c
b = cube[3][1]
cube[3][1] = cube[11][4]
cube[11][4] = cube[3][7]
cube[3][7] = cube[3][4]
cube[3][4] = b
d = cube[2][3]
cube[2][3] = cube[0][3]
cube[0][3] = cube[0][5]
cube[0][5] = cube[2][5]
cube[2][5] = d
h = cube[2][4]
cube[2][4] = cube[1][3]
cube[1][3] = cube[0][4]
cube[0][4] = cube[1][5]
cube[1][5] = h
def B():
global cube
b = cube[0][5]
cube[0][5] = cube[5][8]
cube[5][8] = cube[8][3]
cube[8][3] = cube[3][0]
cube[3][0] = b
c = cube[0][4]
cube[0][4] = cube[4][8]
cube[4][8] = cube[8][4]
cube[8][4] = cube[4][0]
cube[4][0] = c
d = cube[0][3]
cube[0][3] = cube[3][8]
cube[3][8] = cube[8][5]
cube[8][5] = cube[5][0]
cube[5][0] = d
t = cube[9][4]
cube[9][4] = cube[10][3]
cube[10][3] = cube[11][4]
cube[11][4] = cube[10][5]
cube[10][5] = t
h = cube[9][3]
cube[9][3] = cube[11][3]
cube[11][3] = cube[11][5]
cube[11][5] = cube[9][5]
cube[9][5] = h
def b():
global cube
b = cube[0][5]
cube[0][5] = cube[3][0]
cube[3][0] = cube[8][3]
cube[8][3] = cube[5][8]
cube[5][8] = b
c = cube[0][4]
cube[0][4] = cube[4][0]
cube[4][0] = cube[8][4]
cube[8][4] = cube[4][8]
cube[4][8] = c
d = cube[0][3]
cube[0][3] = cube[5][0]
cube[5][0] = cube[8][5]
cube[8][5] = cube[3][8]
cube[3][8] = d
t = cube[9][4]
cube[9][4] = cube[10][5]
cube[10][5] = cube[11][4]
cube[11][4] = cube[10][3]
cube[10][3] = t
h = cube[9][3]
cube[9][3] = cube[9][5]
cube[9][5] = cube[11][5]
cube[11][5] = cube[11][3]
cube[11][3] = h
def D():
global cube
t = cube[5][0]
cube[5][0] = cube[9][5]
cube[9][5] = cube[5][6]
cube[5][6] = cube[5][3]
cube[5][3] = t
c = cube[5][2]
cube[5][2] = cube[9][3]
cube[9][3] = cube[5][8]
cube[5][8] = cube[5][5]
cube[5][5] = c
b = cube[5][1]
cube[5][1] = cube[9][4]
cube[9][4] = cube[5][7]
cube[5][7] = cube[5][4]
cube[5][4] = b
d = cube[6][3]
cube[6][3] = cube[8][3]
cube[8][3] = cube[8][5]
cube[8][5] = cube[6][5]
cube[6][5] = d
h = cube[6][4]
cube[6][4] = cube[7][3]
cube[7][3] = cube[8][4]
cube[8][4] = cube[7][5]
cube[7][5] = h
def d():
global cube
t = cube[5][0]
cube[5][0] = cube[5][3]
cube[5][3] = cube[5][6]
cube[5][6] = cube[9][5]
cube[9][5] = t
c = cube[5][2]
cube[5][2] = cube[5][5]
cube[5][5] = cube[5][8]
cube[5][8] = cube[9][3]
cube[9][3] = c
b = cube[5][1]
cube[5][1] = cube[5][4]
cube[5][4] = cube[5][7]
cube[5][7] = cube[9][4]
cube[9][4] = b
d = cube[6][3]
cube[6][3] = cube[6][5]
cube[6][5] = cube[8][5]
cube[8][5] = cube[8][3]
cube[8][3] = d
h = cube[6][4]
cube[6][4] = cube[7][5]
cube[7][5] = cube[8][4]
cube[8][4] = cube[7][3]
cube[7][3] = h
def U2():
U()
U()
def D2():
D()
D()
def F2():
F()
F()
def B2():
B()
B()
def R2():
R()
R()
def L2():
L()
L()
def display_cube(a, sol):
import pygame
global cube
cube = [rows[:] for rows in a]
pygame.font.init()
moves = ['R', 'L', 'F', 'B', 'U', 'D', 'r', 'l', 'f',
'b', 'u', 'd', 'R2', 'L2', 'F2', 'B2', 'U2', 'D2']
movesq = ['R()', 'L()', 'F()', 'B()', 'U()', 'D()', 'r()', 'l()', 'f()',
'b()', 'u()', 'd()', 'R2()', 'L2()', 'F2()', 'B2()', 'U2()', 'D2()']
clock = pygame.time.Clock()
STAT_FONT = pygame.font.SysFont("comicsans", 30)
gameExit = False
try:
pygame.quit()
finally:
gameDisplay = pygame.display.set_mode((500, 500))
pygame.display.set_caption("Cube")
while not gameExit:
clock.tick(3)
nxt_x = 150
nxt_y = 10
if len(sol) != 0:
if sol[0] != ' ':
eval(movesq[moves.index(sol[0])])
sol.pop(0)
a = [rows[:] for rows in cube]
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
gameExit = True
if gameExit:
break
# pygame.draw.rect(gameDisplay,(173,216,230),[nxt_x,nxt_y,30,30])
gameDisplay.fill((0, 0, 0))
for i in range(0, 3):
for j in range(3, 6):
if a[i][j] == 'B':
pygame.draw.rect(gameDisplay, (70, 216, 255), [
nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'R':
pygame.draw.rect(gameDisplay, (225, 0, 0),
[nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'G':
pygame.draw.rect(gameDisplay, (0, 255, 0),
[nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'Y':
pygame.draw.rect(gameDisplay, (255, 255, 0), [
nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'W':
pygame.draw.rect(gameDisplay, (255, 255, 255), [
nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'O':
pygame.draw.rect(gameDisplay, (255, 165, 0), [
nxt_x, nxt_y, 30, 30])
nxt_x += 40
nxt_y += 40
nxt_x = 150
nxt_x = 30
for i in range(3, 6):
for j in range(0, 9):
if a[i][j] == 'B':
pygame.draw.rect(gameDisplay, (70, 216, 255), [
nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'R':
pygame.draw.rect(gameDisplay, (225, 0, 0),
[nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'G':
pygame.draw.rect(gameDisplay, (0, 255, 0),
[nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'Y':
pygame.draw.rect(gameDisplay, (255, 255, 0), [
nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'W':
pygame.draw.rect(gameDisplay, (255, 255, 255), [
nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'O':
pygame.draw.rect(gameDisplay, (255, 165, 0), [
nxt_x, nxt_y, 30, 30])
nxt_x += 40
nxt_y += 40
nxt_x = 30
nxt_x = 150
for i in range(6, 12):
for j in range(3, 6):
if a[i][j] == 'B':
pygame.draw.rect(gameDisplay, (70, 216, 255), [
nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'R':
pygame.draw.rect(gameDisplay, (225, 0, 0),
[nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'G':
pygame.draw.rect(gameDisplay, (0, 255, 0),
[nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'Y':
pygame.draw.rect(gameDisplay, (255, 255, 0), [
nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'W':
pygame.draw.rect(gameDisplay, (255, 255, 255), [
nxt_x, nxt_y, 30, 30])
nxt_x += 40
elif a[i][j] == 'O':
pygame.draw.rect(gameDisplay, (255, 165, 0), [
nxt_x, nxt_y, 30, 30])
nxt_x += 40
nxt_y += 40
nxt_x = 150
pygame.display.update()
| 24.661626
| 82
| 0.389085
|
4860577c3e812a6e87bdcaf70cad66e843a69e03
| 1,484
|
py
|
Python
|
tribolium_clustering/data_visualisation/_plot_predictions_onto_UMAP.py
|
Cryaaa/tribolium-clustering
|
f5751ec8c007e95e8a9688d2d8e34508b04f0822
|
[
"BSD-3-Clause"
] | null | null | null |
tribolium_clustering/data_visualisation/_plot_predictions_onto_UMAP.py
|
Cryaaa/tribolium-clustering
|
f5751ec8c007e95e8a9688d2d8e34508b04f0822
|
[
"BSD-3-Clause"
] | null | null | null |
tribolium_clustering/data_visualisation/_plot_predictions_onto_UMAP.py
|
Cryaaa/tribolium-clustering
|
f5751ec8c007e95e8a9688d2d8e34508b04f0822
|
[
"BSD-3-Clause"
] | null | null | null |
def plot_predictions_onto_UMAP(embedding, prediction, title = ' ', HDBSCAN = True):
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
    np.random.seed(42)
rand_colours = np.random.rand((max(prediction)+3))
plt.figure(figsize = (10,10))
if HDBSCAN:
clustered = (prediction >= 0)
plt.scatter(embedding[~clustered, 0],
embedding[~clustered, 1],
c=(0.6, 0.6, 0.6), s=10, alpha=0.3)
try:
plt.scatter(embedding[clustered, 0],
embedding[clustered, 1],
c=[sns.color_palette()[int(x)] for x in prediction[clustered]],
s=10);
except IndexError:
plt.scatter(embedding[clustered, 0],
embedding[clustered, 1],
c=[rand_colours[x] for x in prediction[clustered]],
s=10);
else:
try:
plt.scatter(embedding[:, 0],
embedding[:, 1],
c=[sns.color_palette()[int(x)] for x in prediction],
s=10);
except IndexError:
plt.scatter(embedding[:, 0],
embedding[:, 1],
c=[rand_colours[x] for x in prediction],
s=10);
plt.gca().set_aspect('equal', 'datalim')
plt.title(title, fontsize=18)
| 37.1
| 87
| 0.481806
|
d356c6507be91d9c4e98cea34d64b47bc41d9cd0
| 714
|
py
|
Python
|
run/old_scripts/run_iteration.py
|
olavosamp/semiauto-video-annotation
|
b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd
|
[
"MIT"
] | null | null | null |
run/old_scripts/run_iteration.py
|
olavosamp/semiauto-video-annotation
|
b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd
|
[
"MIT"
] | 20
|
2019-07-15T21:49:29.000Z
|
2020-01-09T14:35:03.000Z
|
run/old_scripts/run_iteration.py
|
olavosamp/semiauto-video-annotation
|
b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd
|
[
"MIT"
] | null | null | null |
from libs.iteration_manager import IterationManager
from libs.index import IndexManager
import libs.dirs as dirs
from pathlib import Path
iterFolder = Path(dirs.iter_folder) / "test_loop/iteration_1/"
indexPath = iterFolder / "sampled_images.csv"
newLabelsPath = iterFolder / "sampled_images_labels.csv"
# Sample images
# Label images w/ interface
# Create sampled_images_labels.csv
# Add frame hash to labels file
# add_frame_hash_to_labels_file
# Merge interface labels file with index file
ind = IndexManager(indexPath)
# translate_interface_labels_file is assumed to be provided by the project's helper modules
newLabelsIndex = translate_interface_labels_file(newLabelsPath)
ind.merge_annotations(newLabelsIndex)
# Merge sampled_images index with existing labeled dataset
| 29.75
| 71
| 0.768908
|
4fb4834d0dd0b5f89cfcae47f21c371372995501
| 313
|
py
|
Python
|
tasks/zadanie_3.py
|
DIMITRY-GALYAS1/Rabota-10
|
fb9f84352af23a7324db9c332f6e11661777d53d
|
[
"MIT"
] | null | null | null |
tasks/zadanie_3.py
|
DIMITRY-GALYAS1/Rabota-10
|
fb9f84352af23a7324db9c332f6e11661777d53d
|
[
"MIT"
] | null | null | null |
tasks/zadanie_3.py
|
DIMITRY-GALYAS1/Rabota-10
|
fb9f84352af23a7324db9c332f6e11661777d53d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def main():
q = 1
while True:
        z = int(input("Enter a number: "))
q *= z
if q == 0:
print("Произведение = 0")
break
else:
print(f"Прозведение = {q}")
if __name__ == '__main__':
main()
| 16.473684
| 41
| 0.440895
|
24be616cb9a3dc57a259d5a2a5d4313f5c44776a
| 5,774
|
py
|
Python
|
src/cowrie/commands/uname.py
|
GreyNoise-Intelligence/cowrie
|
d2a9b30f5fd23428baf32e2de1d24e944cf8cde7
|
[
"BSD-3-Clause"
] | null | null | null |
src/cowrie/commands/uname.py
|
GreyNoise-Intelligence/cowrie
|
d2a9b30f5fd23428baf32e2de1d24e944cf8cde7
|
[
"BSD-3-Clause"
] | null | null | null |
src/cowrie/commands/uname.py
|
GreyNoise-Intelligence/cowrie
|
d2a9b30f5fd23428baf32e2de1d24e944cf8cde7
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2010 Upi Tamminen <desaster@gmail.com>
# See the COPYRIGHT file for more information
"""
uname command
"""
from cowrie.core.config import CowrieConfig
from cowrie.shell.command import HoneyPotCommand
commands = {}
def hardware_platform():
return CowrieConfig().get("shell", "hardware_platform", fallback="x86_64")
def kernel_name():
return CowrieConfig().get("shell", "kernel_name", fallback="Linux")
def kernel_version():
return CowrieConfig().get("shell", "kernel_version", fallback="3.2.0-4-amd64")
def kernel_build_string():
return CowrieConfig().get(
"shell", "kernel_build_string", fallback="#1 SMP Debian 3.2.68-1+deb7u1"
)
def operating_system():
return CowrieConfig().get("shell", "operating_system", fallback="GNU/Linux")
def uname_help():
return """Usage: uname [OPTION]...
Print certain system information. With no OPTION, same as -s.
-a, --all print all information, in the following order,
except omit -p and -i if unknown:
-s, --kernel-name print the kernel name
-n, --nodename print the network node hostname
-r, --kernel-release print the kernel release
-v, --kernel-version print the kernel version
-m, --machine print the machine hardware name
-p, --processor print the processor type (non-portable)
-i, --hardware-platform print the hardware platform (non-portable)
-o, --operating-system print the operating system
--help display this help and exit
--version output version information and exit
GNU coreutils online help: <http://www.gnu.org/software/coreutils/>
Full documentation at: <http://www.gnu.org/software/coreutils/uname>
or available locally via: info '(coreutils) uname invocation'\n
"""
def uname_get_some_help():
return "Try 'uname --help' for more information."
def uname_fail_long(arg):
return f"uname: unrecognized option '{arg}'\n{uname_get_some_help()}\n"
def uname_fail_short(arg):
return f"uname: invalid option -- '{arg}'\n{uname_get_some_help()}\n"
def uname_fail_extra(arg):
# Note: These are apostrophes, not single quotation marks.
return f"uname: extra operand ‘{arg}’\n{uname_get_some_help()}\n"
class command_uname(HoneyPotCommand):
def full_uname(self):
return "{} {} {} {} {} {}\n".format(
kernel_name(),
self.protocol.hostname,
kernel_version(),
kernel_build_string(),
hardware_platform(),
operating_system(),
)
def call(self):
opts = {
"name": False,
"release": False,
"version": False,
"os": False,
"node": False,
"machine": False,
}
flags = [
(["a", "all"], "__ALL__"),
(["s", "kernel-name"], "name"),
(["r", "kernel-release"], "release"),
(["v", "kernel-version"], "version"),
(["o", "operating-system"], "os"),
(["n", "nodename"], "node"),
(["m", "machine", "p", "processor", "i", "hardware-platform"], "machine"),
]
if not self.args:
# IF no params output default
self.write(f"{kernel_name()}\n")
return
# getopt-style parsing
for a in self.args:
a = a.strip()
arg_block = []
was_long = False
if a == "--help":
# Help overrides invalid args following --help
# There's no -h, invalid args before --help still fail.
self.write(uname_help())
return
elif a.startswith("--"):
# arg name w/o --
was_long = True
arg_block.append(a[2:])
elif a.startswith("-"):
# letter by letter
a = a[1:]
if len(a) == 0:
self.write(uname_fail_extra("-"))
return
for split_arg in a:
arg_block.append(split_arg)
else:
self.write(uname_fail_extra(a))
return
for arg in arg_block:
arg_parsed = False
# Find a possible flag for each arg.
for possible_args, target_opt in flags:
if arg not in possible_args:
continue
arg_parsed = True # Got a hit!
# Set all opts for -a/--all, single opt otherwise:
if target_opt == "__ALL__":
for key, value in opts.items():
opts[key] = True
else:
opts[target_opt] = True
break # Next arg please
if not arg_parsed:
self.write(
uname_fail_long(a) if was_long else uname_fail_short(arg)
)
return
# All the options set, let's get the output
output = []
if opts["name"]:
output.append(kernel_name())
if opts["node"]:
output.append(self.protocol.hostname)
if opts["release"]:
output.append(kernel_version())
if opts["version"]:
output.append(kernel_build_string())
if opts["machine"]:
output.append(hardware_platform())
if opts["os"]:
output.append(operating_system())
if len(output) < 1:
output.append(kernel_name())
self.write(" ".join(output) + "\n")
commands["/bin/uname"] = command_uname
commands["uname"] = command_uname
| 30.389474
| 86
| 0.537929
|
bdfe981baf3cf3b1b244f014b51d29757fa69945
| 40,038
|
py
|
Python
|
tensorflow/contrib/tensor_forest/python/tensor_forest.py
|
monokrome/tensorflow
|
2533ada7dd45b84d60677b8735e013d21044651a
|
[
"Apache-2.0"
] | 1
|
2019-06-12T09:23:29.000Z
|
2019-06-12T09:23:29.000Z
|
tensorflow/contrib/tensor_forest/python/tensor_forest.py
|
monokrome/tensorflow
|
2533ada7dd45b84d60677b8735e013d21044651a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/tensor_forest/python/tensor_forest.py
|
monokrome/tensorflow
|
2533ada7dd45b84d60677b8735e013d21044651a
|
[
"Apache-2.0"
] | 1
|
2019-11-05T19:10:32.000Z
|
2019-11-05T19:10:32.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import sys
from tensorflow.contrib.framework.python.ops import variables as framework_variables
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import data_ops
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self,
num_trees=100,
max_nodes=10000,
bagging_fraction=1.0,
num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0,
split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1,
dominate_method='bootstrap',
dominate_fraction=0.99,
**kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
self.dominate_method = dominate_method
self.dominate_fraction = dominate_fraction
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Our experiments have found that num_splits_to_consider = num_features
# gives good accuracy.
self.num_splits_to_consider = self.num_splits_to_consider or min(
self.num_features, 1000)
self.max_fertile_nodes = (self.max_fertile_nodes or
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
num_split_initializiations_per_input = max(1, int(math.floor(
self.num_splits_to_consider / self.split_after_samples)))
self.split_initializations_per_input = getattr(
self, 'split_initializations_per_input',
num_split_initializiations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
def get_epoch_variable():
"""Returns the epoch variable, or [0] if not defined."""
# Grab epoch variable defined in
# //third_party/tensorflow/python/training/input.py::limit_epochs
for v in tf_variables.local_variables():
if 'limit_epochs/epoch' in v.op.name:
return array_ops.reshape(v, [1])
# TODO(thomaswc): Access epoch from the data feeder.
return [0]
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = variable_scope.get_variable(
name=self.get_tree_name('start_epoch', tree_num),
dtype=dtypes.int32, shape=[params.max_nodes],
initializer=init_ops.constant_initializer(0))
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.accumulator_to_node_map = variable_scope.get_variable(
name=self.get_tree_name('accumulator_to_node_map', tree_num),
shape=[params.max_fertile_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
# Set up some scalar variables to run through the device assigner, then
# we can use those to colocate everything related to a tree.
self.device_dummies = []
with ops.device(device_assigner):
for i in range(params.num_trees):
self.device_dummies.append(variable_scope.get_variable(
name='device_dummy_%d' % i, shape=0))
for i in range(params.num_trees):
with ops.device(self.device_dummies[i].device):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self,
params,
device_assigner=None,
variables=None,
tree_variables_class=TreeTrainingVariables,
tree_graphs=None,
training=True):
self.params = params
self.device_assigner = (
device_assigner or framework_variables.VariableDeviceChooser())
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(self.variables[i], self.params, i)
for i in range(self.params.num_trees)
]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(
value=input_data, num_or_size_splits=self.params.num_features, axis=1)
return array_ops.concat(
[split_data[ind] for ind in self.params.bagged_features[tree_num]], 1)
def training_graph(self,
input_data,
input_labels,
num_trainers=1,
trainer_id=0,
**tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or dict of string->Tensor for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
num_trainers: Number of parallel trainers to split trees among.
trainer_id: Which trainer this instance is.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
Raises:
NotImplementedError: If trying to use bagging with sparse features.
"""
processed_dense_features, processed_sparse_features, data_spec = (
data_ops.ParseDataTensorOrDict(input_data))
if input_labels is not None:
labels = data_ops.ParseLabelTensorOrDict(input_labels)
data_spec = data_spec or self.get_default_data_spec(input_data)
tree_graphs = []
trees_per_trainer = self.params.num_trees / num_trainers
tree_start = int(trainer_id * trees_per_trainer)
tree_end = int((trainer_id + 1) * trees_per_trainer)
for i in range(tree_start, tree_end):
logging.info('training graph for tree: %d' % i)
with ops.device(self.variables.device_dummies[i].device):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = processed_dense_features
tree_labels = labels
if self.params.bagging_fraction < 1.0:
# TODO(gilberth): Support bagging for sparse features.
if processed_sparse_features is not None:
raise NotImplementedError(
'Bagging not supported with sparse features.')
          # TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.strided_slice(
array_ops.shape(processed_dense_features), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(processed_dense_features, gather_indices)
tree_labels = array_ops.gather(labels, gather_indices)
if self.params.bagged_features:
if processed_sparse_features is not None:
raise NotImplementedError(
'Feature bagging not supported with sparse features.')
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(self.trees[i].training_graph(
tree_data,
tree_labels,
seed,
data_spec=data_spec,
sparse_features=processed_sparse_features,
**tree_kwargs))
return control_flow_ops.group(*tree_graphs, name='train')
def inference_graph(self, input_data, **inference_args):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or dict of string->Tensor for input data.
**inference_args: Keyword arguments to pass through to each tree.
Returns:
The last op in the random forest inference graph.
Raises:
NotImplementedError: If trying to use feature bagging with sparse
features.
"""
processed_dense_features, processed_sparse_features, data_spec = (
data_ops.ParseDataTensorOrDict(input_data))
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
tree_data = processed_dense_features
if self.params.bagged_features:
if processed_sparse_features is not None:
raise NotImplementedError(
'Feature bagging not supported with sparse features.')
tree_data = self._bag_features(i, tree_data)
probabilities.append(self.trees[i].inference_graph(
tree_data,
data_spec,
sparse_features=processed_sparse_features,
**inference_args))
with ops.device(self.variables.device_dummies[0].device):
all_predict = array_ops.stack(probabilities)
return math_ops.div(
math_ops.reduce_sum(all_predict, 0), self.params.num_trees,
name='probabilities')
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(math_ops.to_float(array_ops.stack(sizes)))
# pylint: disable=unused-argument
def training_loss(self, features, labels, name='training_loss'):
return math_ops.negative(self.average_size(), name=name)
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.negative(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.stack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
def feature_importances(self):
tree_counts = [self.trees[i].feature_usage_counts()
for i in range(self.params.num_trees)]
total_counts = math_ops.reduce_sum(array_ops.stack(tree_counts, 0), 0)
return total_counts / math_ops.reduce_sum(total_counts)
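# Illustrative usage sketch, limited to the APIs defined in this file (the input
# tensors below are placeholders, not real data):
#
#   params = ForestHParams(num_classes=2, num_features=40).fill()
#   forest = RandomForestGraphs(params)
#   train_op = forest.training_graph(features_tensor, labels_tensor)
#   probabilities = forest.inference_graph(features_tensor)
#
# training_graph returns a grouped op that updates every tree in the forest;
# inference_graph averages the per-tree probability predictions.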
def one_hot_wrapper(num_classes, loss_fn):
"""Some loss functions take one-hot labels."""
def _loss(probs, targets):
if targets.get_shape().ndims > 1:
targets = array_ops.squeeze(targets, squeeze_dims=[1])
one_hot_labels = array_ops.one_hot(
math_ops.to_int32(targets),
num_classes,
on_value=1.,
off_value=0.,
dtype=dtypes.float32)
return loss_fn(probs, one_hot_labels)
return _loss
class TrainingLossForest(RandomForestGraphs):
"""Random Forest that uses training loss as the termination criteria."""
def __init__(self, params, loss_fn=None, **kwargs):
"""Initialize.
Args:
params: Like RandomForestGraphs, a ForestHParams object.
loss_fn: A function that takes probabilities and targets and returns
a loss for each example.
**kwargs: Keyword args to pass to superclass (RandomForestGraphs).
"""
self.loss_fn = loss_fn or one_hot_wrapper(params.num_classes,
loss_ops.log_loss)
self._loss = None
super(TrainingLossForest, self).__init__(params, **kwargs)
def _get_loss(self, features, labels):
"""Constructs, caches, and returns the inference-based loss."""
if self._loss is not None:
return self._loss
def _average_loss():
probs = self.inference_graph(features)
return math_ops.reduce_sum(self.loss_fn(
probs, labels)) / math_ops.to_float(array_ops.shape(labels)[0])
self._loss = control_flow_ops.cond(
self.average_size() > 0, _average_loss,
lambda: constant_op.constant(sys.maxsize, dtype=dtypes.float32))
return self._loss
def training_graph(self, input_data, input_labels, **kwargs):
loss = self._get_loss(input_data, input_labels)
with ops.control_dependencies([loss.op]):
return super(TrainingLossForest, self).training_graph(
input_data, input_labels, **kwargs)
def training_loss(self, features, labels, name='training_loss'):
return array_ops.identity(self._get_loss(features, labels), name=name)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, tree_num):
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(
array_ops.squeeze(
array_ops.strided_slice(self.variables.tree, [0, 0], [1, 1])),
-2), _init_tree, _nothing)
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
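  # Worked example (illustrative): a node row [4, 2, 2] (4 examples, 2 per class)
  # yields smoothed per-class counts [3, 3], so
  #   _gini          -> 1 - (9 + 9) / 36 = 0.5
  #   _weighted_gini -> 6 - (9 + 9) / 6  = 3.0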
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
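  # Worked example (illustrative): sums = [[4., 8.]] and squares = [[4., 20.]]
  # (column 0 holds the example count) give E[x] = 2 and E[x^2] = 5, so the
  # returned per-row variance is 5 - 2**2 = 1.0; the count column itself
  # contributes zero to the reduction.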
def training_graph(self,
input_data,
input_labels,
random_seed,
data_spec,
sparse_features=None,
input_weights=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A data_ops.TensorForestDataSpec object specifying the
original feature/columns of the data.
sparse_features: A tf.SparseTensor for sparse input data.
input_weights: A float tensor or placeholder holding per-input weights,
or None if all inputs are to be weighted equally.
Returns:
The last op in the random tree training graph.
"""
epoch = math_ops.to_int32(get_epoch_variable())
serialized_input_spec = data_spec.SerializeToString()
if input_weights is None:
input_weights = []
if input_data is None:
input_data = []
sparse_indices = []
sparse_values = []
sparse_shape = []
if sparse_features is not None:
sparse_indices = sparse_features.indices
sparse_values = sparse_features.values
sparse_shape = sparse_features.dense_shape
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums, splits_squares,
totals_indices, totals_sums, totals_squares,
input_leaves) = (tensor_forest_ops.count_extremely_random_stats(
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_labels,
input_weights,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch,
epoch,
input_spec=serialized_input_spec,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(
tensor_forest_ops.scatter_add_ndim(self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(
tensor_forest_ops.scatter_add_ndim(self.variables.accumulator_sums,
totals_indices, totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(
tensor_forest_ops.scatter_add_ndim(
self.variables.candidate_split_squares, splits_indices,
splits_squares))
splits_update_ops.append(
tensor_forest_ops.scatter_add_ndim(self.variables.accumulator_squares,
totals_indices, totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
tensor_forest_ops.sample_inputs(
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_weights,
self.variables.node_to_accumulator_map,
input_leaves,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
input_spec=serialized_input_spec,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
# Passing input_leaves to finished nodes here means that nodes that
# have become stale won't be deallocated until an input reaches them,
# because we're trying to avoid considering every fertile node for
# performance reasons.
finished, stale = tensor_forest_ops.finished_nodes(
input_leaves,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch,
epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples,
dominate_method=self.params.dominate_method,
dominate_fraction=self.params.dominate_fraction)
# Update leaf scores.
# TODO(thomaswc): Store the leaf scores in a TopN and only update the
# scores of the leaves that were touched by this batch of input.
children = array_ops.squeeze(
array_ops.slice(self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(
array_ops.squeeze(
array_ops.where(is_leaf), squeeze_dims=[1]))
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = tensor_forest_ops.best_splits(
finished,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op,
non_fertile_leaves.op]):
(tree_update_indices, tree_children_updates, tree_threshold_updates,
new_eot) = (tensor_forest_ops.grow_tree(
self.variables.end_of_tree, self.variables.node_to_accumulator_map,
finished, split_indices, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_threshold_updates,
dtype=dtypes.int32)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([tree_update_op]):
(n2a_map_updates, a2n_map_updates, accumulators_cleared,
accumulators_allocated) = (tensor_forest_ops.update_fertile_slots(
finished,
non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
self.variables.node_sums,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
with ops.control_dependencies([n2a_map_updates.op]):
eot_update_op = state_ops.assign(self.variables.end_of_tree, new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(
state_ops.scatter_update(self.variables.node_to_accumulator_map,
n2a_map_updates[0], n2a_map_updates[1]))
updates.append(
state_ops.scatter_update(self.variables.accumulator_to_node_map,
a2n_map_updates[0], a2n_map_updates[1]))
cleared_and_allocated_accumulators = array_ops.concat(
[accumulators_cleared, accumulators_allocated], 0)
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
    # reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.negative(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat([total_cleared, total_reset], 0)
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.negative(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec, sparse_features=None):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or placeholder for input data.
data_spec: A TensorForestDataSpec proto specifying the original
input columns.
sparse_features: A tf.SparseTensor for sparse input data.
Returns:
The last op in the random tree inference graph.
"""
if input_data is None:
input_data = []
sparse_indices = []
sparse_values = []
sparse_shape = []
if sparse_features is not None:
sparse_indices = sparse_features.indices
sparse_values = sparse_features.values
sparse_shape = sparse_features.dense_shape
return tensor_forest_ops.tree_predictions(
input_data,
sparse_indices,
sparse_values,
sparse_shape,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
input_spec=data_spec.SerializeToString(),
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
def feature_usage_counts(self):
features = array_ops.slice(self.variables.tree, [0, 1], [-1, 1])
# One hot ignores negative values, which is the default for unused nodes.
one_hots = array_ops.one_hot(
array_ops.squeeze(features), self.params.num_features)
return math_ops.reduce_sum(one_hots, 0)
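# --- Editor's note: illustrative sketch, not part of the original file. ------
# The leaf scoring above combines a weighted Gini impurity (classification)
# with a variance estimate (regression), both computed from running per-leaf
# sums and sums of squares.  The small NumPy sketch below shows the two
# quantities in isolation; the names and shapes here are assumptions for the
# demo and do not reproduce the exact tensor_forest_ops kernels.
import numpy as np

def _weighted_gini_sketch(class_counts):
    # weighted Gini: total - sum(count_i^2) / total
    total = float(class_counts.sum())
    return total - float(np.square(class_counts).sum()) / max(total, 1e-8)

def _variance_sketch(total_sum, total_square, count):
    # running-moment variance: E[x^2] - E[x]^2
    mean = total_sum / count
    return total_square / count - mean ** 2

# _weighted_gini_sketch(np.array([8.0, 2.0]))  -> 3.2 (impure leaf)
# _variance_sketch(10.0, 26.0, 5.0)            -> 1.2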
| 40.118236 | 84 | 0.690569 |
4eae4206d626433625f3b1fd6dcef542b9ecaf25 | 1,982 | py | Python | office365/runtime/client_query.py | xdanw/xlr-sharepoint-fileoperations | 636114462145e3d9e0425a7f2c5337ec5f90c363 | ["Apache-2.0"] | null | null | null | office365/runtime/client_query.py | xdanw/xlr-sharepoint-fileoperations | 636114462145e3d9e0425a7f2c5337ec5f90c363 | ["Apache-2.0"] | null | null | null | office365/runtime/client_query.py | xdanw/xlr-sharepoint-fileoperations | 636114462145e3d9e0425a7f2c5337ec5f90c363 | ["Apache-2.0"] | null | null | null |
from office365.runtime.action_type import ActionType
from office365.runtime.odata.odata_path_parser import ODataPathParser
# Fixes some issues with TLS
import os
os.environ['REQUESTS_CA_BUNDLE'] = 'ca.pem'
class ClientQuery(object):
"""Client query"""
def __init__(self, url, action_type=ActionType.ReadEntry, payload=None):
self.__url = url
self.__actionType = action_type
self.__payload = payload
@staticmethod
def read_entry_query(client_object):
qry = ClientQuery(client_object.url, ActionType.ReadEntry)
return qry
@staticmethod
def create_entry_query(parent_client_object, parameters):
qry = ClientQuery(parent_client_object.url, ActionType.CreateEntry, parameters)
return qry
@staticmethod
def update_entry_query(client_object):
qry = ClientQuery(client_object.url, ActionType.UpdateEntry, client_object.convert_to_payload())
return qry
@staticmethod
def delete_entry_query(client_object):
qry = ClientQuery(client_object.url, ActionType.DeleteEntry)
return qry
@staticmethod
def service_operation_query(client_object, action_type, method_name, method_params=None, payload=None):
url = client_object.url + "/" + ODataPathParser.from_method(method_name, method_params)
qry = ClientQuery(url, action_type, payload)
return qry
@property
def url(self):
return self.__url
@property
def action_type(self):
return self.__actionType
@property
def payload(self):
return self.__payload
@property
def id(self):
return id(self)
def execute(self, context, client_object=None):
from office365.runtime.client_request import ClientRequest
return ClientRequest(context).execute_single_query(self, client_object)
def __hash__(self):
return hash(self.url)
def __eq__(self, other):
return self.url == other.url
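# --- Editor's note: illustrative usage sketch, not part of the original file. -
# The static factories above only need an object exposing a `url` attribute
# (and `convert_to_payload()` for updates).  `_FakeItem` and its URL below are
# hypothetical stand-ins used purely for the demo.
class _FakeItem(object):
    url = "https://contoso.sharepoint.com/_api/web"

    def convert_to_payload(self):
        return {"Title": "Example"}

_read_qry = ClientQuery.read_entry_query(_FakeItem())
_update_qry = ClientQuery.update_entry_query(_FakeItem())
_svc_qry = ClientQuery.service_operation_query(
    _FakeItem(), ActionType.ReadEntry, "GetByTitle", ["Documents"])
# _read_qry.url       -> "https://contoso.sharepoint.com/_api/web"
# _update_qry.payload -> {"Title": "Example"}
# _svc_qry.url        -> the base url plus whatever ODataPathParser.from_method builds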
| 29.58209 | 107 | 0.705348 |
630cb3eaf62ad1e668afec938512f44d250a0c49 | 2,144 | py | Python | corehq/pillows/tasks.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | ["BSD-3-Clause"] | null | null | null | corehq/pillows/tasks.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | ["BSD-3-Clause"] | 1 | 2021-06-02T04:45:16.000Z | 2021-06-02T04:45:16.000Z | corehq/pillows/tasks.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | ["BSD-3-Clause"] | null | null | null |
from datetime import timedelta
from celery.schedules import crontab
from celery.task import periodic_task
from corehq.apps.es import FormES
from corehq.apps.es.aggregations import CardinalityAggregation
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from corehq.form_processor.utils.xform import resave_form
from corehq.pillows.utils import get_user_type_deep_cache_for_unknown_users, UNKNOWN_USER_TYPE
from corehq.util.decorators import serial_task
from corehq.util.metrics import metrics_gauge
from corehq.util.quickcache import quickcache
@periodic_task(run_every=timedelta(minutes=10))
@quickcache([], timeout=9 * 60) # Protect from many runs after recovering from a backlog
def send_unknown_user_type_stats():
metrics_gauge('commcare.fix_user_types.unknown_user_count',
_get_unknown_user_type_user_ids_approx_count())
metrics_gauge('commcare.fix_user_types.unknown_user_form_count',
FormES().user_type(UNKNOWN_USER_TYPE).count())
@periodic_task(run_every=crontab(minute=0, hour=0))
def fix_user_types():
unknown_user_ids = _get_unknown_user_type_user_ids()
for user_id in unknown_user_ids:
user_type = get_user_type_deep_cache_for_unknown_users(user_id)
if user_type != UNKNOWN_USER_TYPE:
resave_es_forms_with_unknown_user_type.delay(user_id)
@serial_task('{user_id}', queue='background_queue')
def resave_es_forms_with_unknown_user_type(user_id):
domain_form_id_list = (
FormES().user_type(UNKNOWN_USER_TYPE).user_id(user_id)
.values_list('domain', '_id', scroll=True)
)
for domain, form_id in domain_form_id_list:
form = FormAccessors(domain).get_form(form_id)
resave_form(domain, form)
def _get_unknown_user_type_user_ids():
return (FormES().user_type(UNKNOWN_USER_TYPE).user_aggregation().run()
.aggregations.user.keys)
def _get_unknown_user_type_user_ids_approx_count():
agg = CardinalityAggregation('users_count', 'form.meta.userID')
return (FormES().user_type(UNKNOWN_USER_TYPE).aggregation(agg).run()
.aggregations.users_count.value)
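# --- Editor's note: illustrative sketch, not part of the original file. ------
# The tasks above chain together: send_unknown_user_type_stats() reports gauge
# metrics, fix_user_types() walks the distinct "unknown" user ids, and
# resave_es_forms_with_unknown_user_type(user_id) re-saves each affected form
# so it is re-indexed with the resolved user type.  Outside of the celery beat
# schedule the same pass could be driven directly like this (assuming a fully
# configured CommCare/Django environment); it is only a sketch.
def _run_unknown_user_type_cleanup_once():
    for user_id in _get_unknown_user_type_user_ids():
        user_type = get_user_type_deep_cache_for_unknown_users(user_id)
        if user_type != UNKNOWN_USER_TYPE:
            # .delay() queues the serial task; the loop itself stays cheap.
            resave_es_forms_with_unknown_user_type.delay(user_id)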
| 39.703704 | 94 | 0.777052 |
c656d2ab53ed98f422ac218379a61a5468dd7dab | 47,068 | py | Python | python/ccxt/async_support/bitfinex2.py | z-brain/ccxt | dde32cfb5e0e2e2889ead60687d6fd0fdf5e3f02 | ["MIT"] | 4 | 2021-01-10T09:14:17.000Z | 2022-02-15T19:09:52.000Z | python/ccxt/async_support/bitfinex2.py | z-brain/ccxt | dde32cfb5e0e2e2889ead60687d6fd0fdf5e3f02 | ["MIT"] | null | null | null | python/ccxt/async_support/bitfinex2.py | z-brain/ccxt | dde32cfb5e0e2e2889ead60687d6fd0fdf5e3f02 | ["MIT"] | 4 | 2021-06-02T16:40:35.000Z | 2022-03-14T04:50:31.000Z |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.bitfinex import bitfinex
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
class bitfinex2(bitfinex):
def describe(self):
return self.deep_extend(super(bitfinex2, self).describe(), {
'id': 'bitfinex2',
'name': 'Bitfinex',
'countries': ['VG'],
'version': 'v2',
'certified': False,
'pro': False,
# new metainfo interface
'has': {
'CORS': False,
'cancelAllOrders': True,
'createDepositAddress': True,
'createLimitOrder': True,
'createMarketOrder': True,
'createOrder': True,
'cancelOrder': True,
'deposit': False,
'editOrder': False,
'fetchDepositAddress': True,
'fetchClosedOrders': False,
'fetchFundingFees': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': False,
'fetchOpenOrder': True,
'fetchClosedOrder': True,
'fetchOrderTrades': True,
'fetchStatus': True,
'fetchTickers': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchTransactions': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'3h': '3h',
'6h': '6h',
'12h': '12h',
'1d': '1D',
'1w': '7D',
'2w': '14D',
'1M': '1M',
},
'rateLimit': 1500,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766244-e328a50c-5ed2-11e7-947b-041416579bb3.jpg',
'api': {
'v1': 'https://api.bitfinex.com',
'public': 'https://api-pub.bitfinex.com',
'private': 'https://api.bitfinex.com',
},
'www': 'https://www.bitfinex.com',
'doc': [
'https://docs.bitfinex.com/v2/docs/',
'https://github.com/bitfinexcom/bitfinex-api-node',
],
'fees': 'https://www.bitfinex.com/fees',
},
'api': {
'v1': {
'get': [
'symbols',
'symbols_details',
],
},
'public': {
'get': [
'conf/{config}',
'conf/pub:{action}:{object}',
'conf/pub:{action}:{object}:{detail}',
'conf/pub:map:{object}',
'conf/pub:map:{object}:{detail}',
'conf/pub:map:currency:{detail}',
'conf/pub:map:currency:sym', # maps symbols to their API symbols, BAB > BCH
'conf/pub:map:currency:label', # verbose friendly names, BNT > Bancor
'conf/pub:map:currency:unit', # maps symbols to unit of measure where applicable
'conf/pub:map:currency:undl', # maps derivatives symbols to their underlying currency
'conf/pub:map:currency:pool', # maps symbols to underlying network/protocol they operate on
'conf/pub:map:currency:explorer', # maps symbols to their recognised block explorer URLs
'conf/pub:map:tx:method',
'conf/pub:list:{object}',
'conf/pub:list:{object}:{detail}',
'conf/pub:list:currency',
'conf/pub:list:pair:exchange',
'conf/pub:list:pair:margin',
'conf/pub:list:competitions',
'conf/pub:info:{object}',
'conf/pub:info:{object}:{detail}',
'conf/pub:info:pair',
'conf/pub:info:tx:status', # [deposit, withdrawal] statuses 1 = active, 0 = maintenance
'conf/pub:fees',
'platform/status',
'tickers',
'ticker/{symbol}',
'trades/{symbol}/hist',
'book/{symbol}/{precision}',
'book/{symbol}/P0',
'book/{symbol}/P1',
'book/{symbol}/P2',
'book/{symbol}/P3',
'book/{symbol}/R0',
'stats1/{key}:{size}:{symbol}:{side}/{section}',
'stats1/{key}:{size}:{symbol}:{side}/last',
'stats1/{key}:{size}:{symbol}:{side}/hist',
'stats1/{key}:{size}:{symbol}/{section}',
'stats1/{key}:{size}:{symbol}/last',
'stats1/{key}:{size}:{symbol}/hist',
'stats1/{key}:{size}:{symbol}:long/last',
'stats1/{key}:{size}:{symbol}:long/hist',
'stats1/{key}:{size}:{symbol}:short/last',
'stats1/{key}:{size}:{symbol}:short/hist',
'candles/trade:{timeframe}:{symbol}/{section}',
'candles/trade:{timeframe}:{symbol}/last',
'candles/trade:{timeframe}:{symbol}/hist',
'status/{type}',
'status/deriv',
'liquidations/hist',
'rankings/{key}:{timeframe}:{symbol}/{section}',
'rankings/{key}:{timeframe}:{symbol}/hist',
],
'post': [
'calc/trade/avg',
'calc/fx',
],
},
'private': {
'post': [
# 'auth/r/orders/{symbol}/new', # outdated
# 'auth/r/stats/perf:{timeframe}/hist', # outdated
'auth/r/wallets',
'auth/r/wallets/hist',
'auth/r/orders',
'auth/r/orders/{symbol}',
'auth/w/order/submit',
'auth/w/order/update',
'auth/w/order/cancel',
'auth/w/order/multi',
'auth/w/order/cancel/multi',
'auth/r/orders/{symbol}/hist',
'auth/r/orders/hist',
'auth/r/order/{symbol}:{id}/trades',
'auth/r/trades/{symbol}/hist',
'auth/r/trades/hist',
'auth/r/ledgers/{currency}/hist',
'auth/r/ledgers/hist',
'auth/r/info/margin/{key}',
'auth/r/info/margin/base',
'auth/r/info/margin/sym_all',
'auth/r/positions',
'auth/w/position/claim',
'auth/r/positions/hist',
'auth/r/positions/audit',
'auth/w/deriv/collateral/set',
'auth/r/funding/offers',
'auth/r/funding/offers/{symbol}',
'auth/w/funding/offer/submit',
'auth/w/funding/offer/cancel',
'auth/w/funding/offer/cancel/all',
'auth/w/funding/close',
'auth/w/funding/auto',
'auth/w/funding/keep',
'auth/r/funding/offers/{symbol}/hist',
'auth/r/funding/offers/hist',
'auth/r/funding/loans',
'auth/r/funding/loans/hist',
'auth/r/funding/loans/{symbol}',
'auth/r/funding/loans/{symbol}/hist',
'auth/r/funding/credits',
'auth/r/funding/credits/hist',
'auth/r/funding/credits/{symbol}',
'auth/r/funding/credits/{symbol}/hist',
'auth/r/funding/trades/{symbol}/hist',
'auth/r/funding/trades/hist',
'auth/r/info/funding/{key}',
'auth/r/info/user',
'auth/r/logins/hist',
'auth/w/transfer',
'auth/w/deposit/address',
'auth/w/deposit/invoice',
'auth/w/withdraw',
'auth/r/movements/{currency}/hist',
'auth/r/movements/hist',
'auth/r/alerts',
'auth/w/alert/set',
'auth/w/alert/price:{symbol}:{price}/del',
'auth/w/alert/{type}:{symbol}:{price}/del',
'auth/calc/order/avail',
'auth/w/settings/set',
'auth/r/settings',
'auth/w/settings/del',
],
},
},
'fees': {
'trading': {
'maker': 0.1 / 100,
'taker': 0.2 / 100,
},
'funding': {
'withdraw': {
'BTC': 0.0004,
'BCH': 0.0001,
'ETH': 0.00135,
'EOS': 0.0,
'LTC': 0.001,
'OMG': 0.15097,
'IOT': 0.0,
'NEO': 0.0,
'ETC': 0.01,
'XRP': 0.02,
'ETP': 0.01,
'ZEC': 0.001,
'BTG': 0.0,
'DASH': 0.01,
'XMR': 0.0001,
'QTM': 0.01,
'EDO': 0.23687,
'DAT': 9.8858,
'AVT': 1.1251,
'SAN': 0.35977,
'USDT': 5.0,
'SPK': 16.971,
'BAT': 1.1209,
'GNT': 2.8789,
'SNT': 9.0848,
'QASH': 1.726,
'YYW': 7.9464,
},
},
},
'options': {
'precision': 'R0', # P0, P1, P2, P3, P4, R0
# convert 'EXCHANGE MARKET' to lowercase 'market'
# convert 'EXCHANGE LIMIT' to lowercase 'limit'
# everything else remains uppercase
'exchangeTypes': {
# 'MARKET': None,
'EXCHANGE MARKET': 'market',
# 'LIMIT': None,
'EXCHANGE LIMIT': 'limit',
# 'STOP': None,
# 'EXCHANGE STOP': None,
# 'TRAILING STOP': None,
# 'EXCHANGE TRAILING STOP': None,
# 'FOK': None,
# 'EXCHANGE FOK': None,
# 'STOP LIMIT': None,
# 'EXCHANGE STOP LIMIT': None,
# 'IOC': None,
# 'EXCHANGE IOC': None,
},
# convert 'market' to 'EXCHANGE MARKET'
# convert 'limit' 'EXCHANGE LIMIT'
# everything else remains as is
'orderTypes': {
'market': 'EXCHANGE MARKET',
'limit': 'EXCHANGE LIMIT',
},
'fiat': {
'USD': 'USD',
'EUR': 'EUR',
'JPY': 'JPY',
'GBP': 'GBP',
},
},
'exceptions': {
'exact': {
'10020': BadRequest,
'10100': AuthenticationError,
'10114': InvalidNonce,
'20060': OnMaintenance,
},
'broad': {
'address': InvalidAddress,
'available balance is only': InsufficientFunds,
'not enough exchange balance': InsufficientFunds,
'Order not found': OrderNotFound,
'symbol: invalid': BadSymbol,
'Invalid order': InvalidOrder,
},
},
})
def is_fiat(self, code):
return(code in self.options['fiat'])
def get_currency_id(self, code):
return 'f' + code
async def fetch_status(self, params={}):
#
# [1] # operative
# [0] # maintenance
#
response = await self.publicGetPlatformStatus(params)
status = self.safe_value(response, 0)
formattedStatus = 'ok' if (status == 1) else 'maintenance'
self.status = self.extend(self.status, {
'status': formattedStatus,
'updated': self.milliseconds(),
})
return self.status
async def fetch_markets(self, params={}):
response = await self.v1GetSymbolsDetails(params)
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string_upper(market, 'pair')
baseId = None
quoteId = None
if id.find(':') >= 0:
parts = id.split(':')
baseId = parts[0]
quoteId = parts[1]
else:
baseId = id[0:3]
quoteId = id[3:6]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
id = 't' + id
baseId = self.get_currency_id(baseId)
quoteId = self.get_currency_id(quoteId)
precision = {
'price': self.safe_integer(market, 'price_precision'),
'amount': self.safe_integer(market, 'price_precision'),
}
limits = {
'amount': {
'min': self.safe_float(market, 'minimum_order_size'),
'max': self.safe_float(market, 'maximum_order_size'),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
}
limits['cost'] = {
'min': limits['amount']['min'] * limits['price']['min'],
'max': None,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': limits,
'info': market,
'swap': False,
'spot': False,
'futures': False,
})
return result
async def fetch_balance(self, params={}):
# self api call does not return the 'used' amount - use the v1 version instead(which also returns zero balances)
await self.load_markets()
response = await self.privatePostAuthRWallets(params)
balanceType = self.safe_string(params, 'type', 'exchange')
result = {'info': response}
for b in range(0, len(response)):
balance = response[b]
accountType = balance[0]
currency = balance[1]
total = balance[2]
available = balance[4]
if accountType == balanceType:
if currency[0] == 't':
currency = currency[1:]
code = self.safe_currency_code(currency)
account = self.account()
# do not fill in zeroes and missing values in the parser
# rewrite and unify the following to use the unified parseBalance
account['total'] = total
if not available:
if available == 0:
account['free'] = 0
account['used'] = total
else:
account['free'] = total
else:
account['free'] = available
account['used'] = account['total'] - account['free']
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
precision = self.safe_value(self.options, 'precision', 'R0')
request = {
'symbol': self.market_id(symbol),
'precision': precision,
}
if limit is not None:
request['len'] = limit # 25 or 100
fullRequest = self.extend(request, params)
orderbook = await self.publicGetBookSymbolPrecision(fullRequest)
timestamp = self.milliseconds()
result = {
'bids': [],
'asks': [],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
priceIndex = 1 if (fullRequest['precision'] == 'R0') else 0
for i in range(0, len(orderbook)):
order = orderbook[i]
price = order[priceIndex]
amount = abs(order[2])
side = 'bids' if (order[2] > 0) else 'asks'
result[side].append([price, amount])
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market is not None:
symbol = market['symbol']
length = len(ticker)
last = ticker[length - 4]
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': ticker[length - 2],
'low': ticker[length - 1],
'bid': ticker[length - 10],
'bidVolume': None,
'ask': ticker[length - 8],
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': ticker[length - 6],
'percentage': ticker[length - 5] * 100,
'average': None,
'baseVolume': ticker[length - 3],
'quoteVolume': None,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
request = {}
if symbols is not None:
ids = self.market_ids(symbols)
request['symbols'] = ','.join(ids)
else:
request['symbols'] = 'ALL'
tickers = await self.publicGetTickers(self.extend(request, params))
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
id = ticker[0]
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
ticker = await self.publicGetTickerSymbol(self.extend(request, params))
return self.parse_ticker(ticker, market)
def parse_symbol(self, marketId):
if marketId is None:
return marketId
marketId = marketId.replace('t', '')
baseId = None
quoteId = None
if marketId.find(':') >= 0:
parts = marketId.split(':')
baseId = parts[0]
quoteId = parts[1]
else:
baseId = marketId[0:3]
quoteId = marketId[3:6]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
return base + '/' + quote
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# [
# ID,
# MTS, # timestamp
# AMOUNT,
# PRICE
# ]
#
# fetchMyTrades(private)
#
# [
# ID,
# PAIR,
# MTS_CREATE,
# ORDER_ID,
# EXEC_AMOUNT,
# EXEC_PRICE,
# ORDER_TYPE,
# ORDER_PRICE,
# MAKER,
# FEE,
# FEE_CURRENCY,
# ...
# ]
#
tradeLength = len(trade)
isPrivate = (tradeLength > 5)
id = str(trade[0])
amountIndex = 4 if isPrivate else 2
amount = trade[amountIndex]
cost = None
priceIndex = 5 if isPrivate else 3
price = trade[priceIndex]
side = None
orderId = None
takerOrMaker = None
type = None
fee = None
symbol = None
timestampIndex = 2 if isPrivate else 1
timestamp = trade[timestampIndex]
if isPrivate:
marketId = trade[1]
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = self.parse_symbol(marketId)
orderId = str(trade[3])
takerOrMaker = 'maker' if (trade[8] == 1) else 'taker'
feeCost = trade[9]
feeCurrency = self.safe_currency_code(trade[10])
if feeCost is not None:
feeCost = -feeCost
if symbol in self.markets:
feeCost = self.fee_to_precision(symbol, feeCost)
else:
currencyId = 'f' + feeCurrency
if currencyId in self.currencies_by_id:
currency = self.currencies_by_id[currencyId]
feeCost = self.currency_to_precision(currency['code'], feeCost)
fee = {
'cost': float(feeCost),
'currency': feeCurrency,
}
orderType = trade[6]
type = self.safe_string(self.options['exchangeTypes'], orderType)
if symbol is None:
if market is not None:
symbol = market['symbol']
if amount is not None:
side = 'sell' if (amount < 0) else 'buy'
amount = abs(amount)
if cost is None:
if price is not None:
cost = amount * price
return {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'side': side,
'type': type,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
'info': trade,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
sort = '-1'
request = {
'symbol': market['id'],
}
if since is not None:
request['start'] = since
sort = '1'
if limit is not None:
request['limit'] = limit # default 120, max 5000
request['sort'] = sort
response = await self.publicGetTradesSymbolHist(self.extend(request, params))
#
# [
# [
# ID,
# MTS, # timestamp
# AMOUNT,
# PRICE
# ]
# ]
#
trades = self.sort_by(response, 1)
return self.parse_trades(trades, market, None, limit)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=100, params={}):
await self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 100 # default 100, max 5000
if since is None:
since = self.milliseconds() - self.parse_timeframe(timeframe) * limit * 1000
request = {
'symbol': market['id'],
'timeframe': self.timeframes[timeframe],
'sort': 1,
'start': since,
'limit': limit,
}
response = await self.publicGetCandlesTradeTimeframeSymbolHist(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_order_status(self, status):
if status is None:
return status
parts = status.split(' ')
state = self.safe_string(parts, 0)
statuses = {
'ACTIVE': 'open',
'PARTIALLY': 'open',
'EXECUTED': 'closed',
'CANCELED': 'canceled',
'INSUFFICIENT': 'canceled',
'RSN_DUST': 'rejected',
'RSN_PAUSE': 'rejected',
}
return self.safe_string(statuses, state, status)
def parse_order(self, order, market=None):
id = self.safe_string(order, 0)
symbol = None
marketId = self.safe_string(order, 3)
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
symbol = self.parse_symbol(marketId)
if (symbol is None) and (market is not None):
symbol = market['symbol']
# https://github.com/ccxt/ccxt/issues/6686
# timestamp = self.safe_timestamp(order, 5)
timestamp = self.safe_integer(order, 5)
remaining = abs(self.safe_float(order, 6))
amount = abs(self.safe_float(order, 7))
filled = amount - remaining
side = 'sell' if (order[7] < 0) else 'buy'
orderType = self.safe_string(order, 8)
type = self.safe_string(self.safe_value(self.options, 'exchangeTypes'), orderType)
status = None
statusString = self.safe_string(order, 13)
if statusString is not None:
parts = statusString.split(' @ ')
status = self.parse_order_status(self.safe_string(parts, 0))
price = self.safe_float(order, 16)
average = self.safe_float(order, 17)
cost = price * filled
clientOrderId = self.safe_string(order, 2)
return {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
'trades': None,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderTypes = self.safe_value(self.options, 'orderTypes', {})
orderType = self.safe_string(orderTypes, type, type)
amount = -amount if (side == 'sell') else amount
request = {
'symbol': market['id'],
'type': orderType,
'amount': self.number_to_string(amount),
}
if type != 'market':
request['price'] = self.number_to_string(price)
clientOrderId = self.safe_value_2(params, 'cid', 'clientOrderId')
if clientOrderId is not None:
request['cid'] = clientOrderId
params = self.omit(params, ['cid', 'clientOrderId'])
response = await self.privatePostAuthWOrderSubmit(self.extend(request, params))
#
# [
# 1578784364.748, # Millisecond Time Stamp of the update
# "on-req", # Purpose of notification('on-req', 'oc-req', 'uca', 'fon-req', 'foc-req')
# null, # Unique ID of the message
# null, # Ignore
# [
# [
# 37271830598, # Order ID
# null, # Group ID
# 1578784364748, # Client Order ID
# "tBTCUST", # Pair
# 1578784364748, # Millisecond timestamp of creation
# 1578784364748, # Millisecond timestamp of update
# -0.005, # Positive means buy, negative means sell
# -0.005, # Original amount
# "EXCHANGE LIMIT", # Order type(LIMIT, MARKET, STOP, TRAILING STOP, EXCHANGE MARKET, EXCHANGE LIMIT, EXCHANGE STOP, EXCHANGE TRAILING STOP, FOK, EXCHANGE FOK, IOC, EXCHANGE IOC)
# null, # Previous order type
# null, # Millisecond timestamp of Time-In-Force: automatic order cancellation
# null, # Ignore
# 0, # Flags(see https://docs.bitfinex.com/docs/flag-values)
# "ACTIVE", # Order Status
# null, # Ignore
# null, # Ignore
# 20000, # Price
# 0, # Average price
# 0, # The trailing price
# 0, # Auxiliary Limit price(for STOP LIMIT)
# null, # Ignore
# null, # Ignore
# null, # Ignore
# 0, # 1 - hidden order
# null, # If another order caused self order to be placed(OCO) self will be that other order's ID
# null, # Ignore
# null, # Ignore
# null, # Ignore
# "API>BFX", # Origin of action: BFX, ETHFX, API>BFX, API>ETHFX
# null, # Ignore
# null, # Ignore
# null # Meta
# ]
# ],
# null, # Error code
# "SUCCESS", # Status(SUCCESS, ERROR, FAILURE, ...)
# "Submitting 1 orders." # Text of the notification
# ]
#
status = self.safe_string(response, 6)
if status != 'SUCCESS':
errorCode = response[5]
errorText = response[7]
raise ExchangeError(self.id + ' ' + response[6] + ': ' + errorText + '(#' + errorCode + ')')
orders = self.safe_value(response, 4, [])
order = self.safe_value(orders, 0)
return self.parse_order(order, market)
async def cancel_all_orders(self, symbol=None, params={}):
request = {
'all': 1,
}
response = await self.privatePostAuthWOrderCancelMulti(self.extend(request, params))
orders = self.safe_value(response, 4, [])
return self.parse_orders(orders)
async def cancel_order(self, id, symbol=None, params={}):
cid = self.safe_value_2(params, 'cid', 'clientOrderId') # client order id
request = None
if cid is not None:
cidDate = self.safe_value(params, 'cidDate') # client order id date
if cidDate is None:
raise InvalidOrder(self.id + " canceling an order by clientOrderId('cid') requires both 'cid' and 'cid_date'('YYYY-MM-DD')")
request = {
'cid': cid,
'cid_date': cidDate,
}
params = self.omit(params, ['cid', 'clientOrderId'])
else:
request = {
'id': int(id),
}
response = await self.privatePostAuthWOrderCancel(self.extend(request, params))
order = self.safe_value(response, 4)
return self.parse_order(order)
async def fetch_open_order(self, id, symbol=None, params={}):
request = {
'id': [int(id)],
}
orders = await self.fetch_open_orders(symbol, None, None, self.extend(request, params))
order = self.safe_value(orders, 0)
if order is None:
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return order
async def fetch_closed_order(self, id, symbol=None, params={}):
request = {
'id': [int(id)],
}
orders = await self.fetch_closed_orders(symbol, None, None, self.extend(request, params))
order = self.safe_value(orders, 0)
if order is None:
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return order
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
market = None
response = None
if symbol is None:
response = await self.privatePostAuthROrders(self.extend(request, params))
else:
market = self.market(symbol)
request['symbol'] = market['id']
response = await self.privatePostAuthROrdersSymbol(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# returns the most recent closed or canceled orders up to circa two weeks ago
await self.load_markets()
request = {}
market = None
response = None
if symbol is None:
response = await self.privatePostAuthROrdersHist(self.extend(request, params))
else:
market = self.market(symbol)
request['symbol'] = market['id']
response = await self.privatePostAuthROrdersSymbolHist(self.extend(request, params))
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 25, max 2500
return self.parse_orders(response, market, since, limit)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrderTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
orderId = int(id)
request = {
'id': orderId,
'symbol': market['id'],
}
        # valid for trades up to 10 days old
response = await self.privatePostAuthROrderSymbolIdTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {
'end': self.milliseconds(),
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 25, max 1000
method = 'privatePostAuthRTradesHist'
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
method = 'privatePostAuthRTradesSymbolHist'
response = await getattr(self, method)(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def create_deposit_address(self, code, params={}):
await self.load_markets()
request = {
'op_renew': 1,
}
response = await self.fetch_deposit_address(code, self.extend(request, params))
return response
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
# todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
name = self.getCurrencyName(code)
request = {
'method': name,
'wallet': 'exchange', # 'exchange', 'margin', 'funding' and also old labels 'exchange', 'trading', 'deposit', respectively
'op_renew': 0, # a value of 1 will generate a new address
}
response = await self.privatePostAuthWDepositAddress(self.extend(request, params))
#
# [
# 1582269616687, # MTS Millisecond Time Stamp of the update
# 'acc_dep', # TYPE Purpose of notification 'acc_dep' for account deposit
# null, # MESSAGE_ID unique ID of the message
# null, # not documented
# [
# null, # PLACEHOLDER
# 'BITCOIN', # METHOD Method of deposit
# 'BTC', # CURRENCY_CODE Currency code of new address
# null, # PLACEHOLDER
# '1BC9PZqpUmjyEB54uggn8TFKj49zSDYzqG', # ADDRESS
# null, # POOL_ADDRESS
# ],
# null, # CODE null or integer work in progress
# 'SUCCESS', # STATUS Status of the notification, SUCCESS, ERROR, FAILURE
# 'success', # TEXT Text of the notification
# ]
#
result = self.safe_value(response, 4, [])
poolAddress = self.safe_string(result, 5)
address = self.safe_string(result, 4) if (poolAddress is None) else poolAddress
tag = None if (poolAddress is None) else self.safe_string(result, 4)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# [
# 1582271520931, # MTS Millisecond Time Stamp of the update
# "acc_wd-req", # TYPE Purpose of notification 'acc_wd-req' account withdrawal request
# null, # MESSAGE_ID unique ID of the message
# null, # not documented
# [
# 0, # WITHDRAWAL_ID Unique Withdrawal ID
# null, # PLACEHOLDER
# "bitcoin", # METHOD Method of withdrawal
# null, # PAYMENT_ID Payment ID if relevant
# "exchange", # WALLET Sending wallet
# 1, # AMOUNT Amount of Withdrawal less fee
# null, # PLACEHOLDER
# null, # PLACEHOLDER
# 0.0004, # WITHDRAWAL_FEE Fee on withdrawal
# ],
# null, # CODE null or integer Work in progress
# "SUCCESS", # STATUS Status of the notification, it may vary over time SUCCESS, ERROR, FAILURE
# "Invalid bitcoin address(abcdef)", # TEXT Text of the notification
# ]
#
# todo add support for all movements, deposits and withdrawals
#
data = self.safe_value(transaction, 4, [])
timestamp = self.safe_integer(transaction, 0)
code = None
if currency is not None:
code = currency['code']
feeCost = self.safe_float(data, 8)
if feeCost is not None:
feeCost = abs(feeCost)
amount = self.safe_float(data, 5)
id = self.safe_value(data, 0)
status = 'ok'
if id == 0:
id = None
status = 'failed'
tag = self.safe_string(data, 3)
return {
'info': transaction,
'id': id,
'txid': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': None, # self is actually the tag for XRP transfers(the address is missing)
'addressTo': None,
'tagFrom': None,
'tag': tag, # refix it properly for the tag from description
'tagTo': tag,
'type': 'withdrawal',
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': {
'currency': code,
'cost': feeCost,
'rate': None,
},
}
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
# todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
name = self.getCurrencyName(code)
request = {
'method': name,
'wallet': 'exchange', # 'exchange', 'margin', 'funding' and also old labels 'exchange', 'trading', 'deposit', respectively
'amount': self.number_to_string(amount),
'address': address,
}
if tag is not None:
request['payment_id'] = tag
response = await self.privatePostAuthWWithdraw(self.extend(request, params))
#
# [
# 1582271520931, # MTS Millisecond Time Stamp of the update
# "acc_wd-req", # TYPE Purpose of notification 'acc_wd-req' account withdrawal request
# null, # MESSAGE_ID unique ID of the message
# null, # not documented
# [
# 0, # WITHDRAWAL_ID Unique Withdrawal ID
# null, # PLACEHOLDER
# "bitcoin", # METHOD Method of withdrawal
# null, # PAYMENT_ID Payment ID if relevant
# "exchange", # WALLET Sending wallet
# 1, # AMOUNT Amount of Withdrawal less fee
# null, # PLACEHOLDER
# null, # PLACEHOLDER
# 0.0004, # WITHDRAWAL_FEE Fee on withdrawal
# ],
# null, # CODE null or integer Work in progress
# "SUCCESS", # STATUS Status of the notification, it may vary over time SUCCESS, ERROR, FAILURE
# "Invalid bitcoin address(abcdef)", # TEXT Text of the notification
# ]
#
text = self.safe_string(response, 7)
if text != 'success':
self.throw_broadly_matched_exception(self.exceptions['broad'], text, text)
transaction = self.parse_transaction(response, currency)
return self.extend(transaction, {
'address': address,
})
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'v1':
request = api + request
else:
request = self.version + request
url = self.urls['api'][api] + '/' + request
if api == 'public':
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
body = self.json(query)
auth = '/api/' + request + nonce + body
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha384)
headers = {
'bfx-nonce': nonce,
'bfx-apikey': self.apiKey,
'bfx-signature': signature,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if response:
if 'message' in response:
if response['message'].find('not enough exchange balance') >= 0:
raise InsufficientFunds(self.id + ' ' + self.json(response))
raise ExchangeError(self.id + ' ' + self.json(response))
return response
elif response == '':
raise ExchangeError(self.id + ' returned empty response')
return response
def handle_errors(self, statusCode, statusText, url, method, responseHeaders, responseBody, response, requestHeaders, requestBody):
if statusCode == 500:
# See https://docs.bitfinex.com/docs/abbreviations-glossary#section-errorinfo-codes
errorCode = self.number_to_string(response[1])
errorText = response[2]
feedback = self.id + ' ' + errorText
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorText, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], errorText, feedback)
raise ExchangeError(self.id + ' ' + errorText + '(#' + errorCode + ')')
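# --- Editor's note: illustrative usage sketch, not part of the generated file.
# Typical async usage of the class defined above; the API credentials and the
# symbol are placeholders, and close() follows the usual ccxt.async_support
# connection-cleanup pattern.
import asyncio

async def _bitfinex2_demo():
    exchange = bitfinex2({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
    try:
        ticker = await exchange.fetch_ticker('BTC/USD')         # public endpoint
        book = await exchange.fetch_order_book('BTC/USD', 25)   # top 25 levels
        print(ticker['last'], book['bids'][:3])
        # balance = await exchange.fetch_balance()              # needs valid keys
    finally:
        await exchange.close()

# asyncio.get_event_loop().run_until_complete(_bitfinex2_demo())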
| 41.801066 | 207 | 0.472954 |
7f3084213fc3d835ef0ef6e6cbf51a3d6ceea689 | 11,567 | py | Python | labyrinthe.py | erwanaubry/Labyrinthe_Python | 3a49c3ca9da3b8252d14bf72d6cf821c066e6b0a | ["Unlicense"] | 1 | 2020-03-15T11:11:43.000Z | 2020-03-15T11:11:43.000Z | labyrinthe.py | erwanaubry/Labyrinthe_Python | 3a49c3ca9da3b8252d14bf72d6cf821c066e6b0a | ["Unlicense"] | null | null | null | labyrinthe.py | erwanaubry/Labyrinthe_Python | 3a49c3ca9da3b8252d14bf72d6cf821c066e6b0a | ["Unlicense"] | null | null | null |
from listeJoueurs import *
from plateau import *
def Labyrinthe(nomsJoueurs=["joueur1","joueurs2"],nbTresors=24, nbTresorsMax=0):
"""
permet de créer un labyrinthe avec nbJoueurs joueurs, nbTresors trésors
chacun des joueurs aura au plus nbTresorMax à trouver
si ce dernier paramètre est à 0, on distribuera le maximum de trésors possible
à chaque joueur en restant équitable
un joueur courant est choisi et la phase est initialisée
paramètres: nomsJoueurs est la liste des noms des joueurs participant à la partie (entre 1 et 4)
nbTresors le nombre de trésors différents il en faut au moins 12 et au plus 49
nbTresorMax le nombre de trésors maximum distribué à chaque joueur
résultat: le labyrinthe crée
"""
labyrinthe={}
labyrinthe['joueurs']=[]
labyrinthe['phase']=1
labyrinthe['joueurs']=ListeJoueurs(nomsJoueurs)
initAleatoireJoueurCourant(labyrinthe['joueurs'])
distribuerTresors(labyrinthe['joueurs'],nbTresors,nbTresorsMax)
labyrinthe['plateau']=Plateau(len(nomsJoueurs),nbTresors)
return labyrinthe
def getPlateau(labyrinthe):
"""
retourne la matrice représentant le plateau de jeu
paramètre: labyrinthe le labyrinthe considéré
résultat: la matrice représentant le plateau de ce labyrinthe
"""
return labyrinthe['plateau']
def getNbParticipants(labyrinthe):
"""
retourne le nombre de joueurs engagés dans la partie
paramètre: labyrinthe le labyrinthe considéré
résultat: le nombre de joueurs de la partie
"""
return getNbJoueurs(labyrinthe['joueurs'])
def getNomJoueurCourant(labyrinthe):
"""
retourne le nom du joueur courant
paramètre: labyrinthe le labyrinthe considéré
résultat: le nom du joueurs courant
"""
return nomJoueurCourant(labyrinthe['joueurs'])
def getNumJoueurCourant(labyrinthe):
"""
retourne le numero du joueur courant
paramètre: labyrinthe le labyrinthe considéré
résultat: le numero du joueurs courant
"""
return numJoueurCourant(labyrinthe['joueurs'])
def getPhase(labyrinthe):
"""
retourne la phase du jeu courante
paramètre: labyrinthe le labyrinthe considéré
résultat: le numéro de la phase de jeu courante
"""
return labyrinthe['phase']
def changerPhase(labyrinthe):
"""
change de phase de jeu en passant la suivante
paramètre: labyrinthe le labyrinthe considéré
la fonction ne retourne rien mais modifie le labyrinthe
"""
if labyrinthe['phase']==1:
labyrinthe['phase']=2
elif labyrinthe['phase']==2:
labyrinthe['phase']=1
def getNbTresors(labyrinthe):
"""
retourne le nombre de trésors qu'il reste sur le labyrinthe
paramètre: labyrinthe le labyrinthe considéré
résultat: le nombre de trésors sur le plateau
"""
res=0
for i in range(len(labyrinthe['joueurs'])):
res+=nbTresorsRestantsJoueur(labyrinthe['joueurs'],i)
return res
def getListeJoueurs(labyrinthe):
"""
retourne la liste joueur structures qui gèrent les joueurs et leurs trésors
paramètre: labyrinthe le labyrinthe considéré
résultat: les joueurs sous la forme de la structure implémentée dans listeJoueurs.py
"""
return labyrinthe['joueurs']
def enleverTresor(labyrinthe,lin,col,numTresor):
"""
enleve le trésor numTresor du plateau du labyrinthe.
Si l'opération s'est bien passée le nombre total de trésors dans le labyrinthe
est diminué de 1
paramètres: labyrinthe: le labyrinthe considéré
lig: la ligne où se trouve la carte
col: la colonne où se trouve la carte
numTresor: le numéro du trésor à prendre sur la carte
la fonction ne retourne rien mais modifie le labyrinthe
"""
prendreTresor(getVal(labyrinthe['plateau'],lin,col))
def prendreJoueurCourant(labyrinthe,lin,col):
"""
enlève le joueur courant de la carte qui se trouve sur la case lin,col du plateau
si le joueur ne s'y trouve pas la fonction ne fait rien
paramètres: labyrinthe: le labyrinthe considéré
lig: la ligne où se trouve la carte
col: la colonne où se trouve la carte
la fonction ne retourne rien mais modifie le labyrinthe
"""
prendrePionPlateau(labyrinthe['plateau']['matrice'],lin,col,getNumJoueurCourant(labyrinthe['joueurs']))
def poserJoueurCourant(labyrinthe,lin,col):
"""
pose le joueur courant sur la case lin,col du plateau
paramètres: labyrinthe: le labyrinthe considéré
lig: la ligne où se trouve la carte
col: la colonne où se trouve la carte
la fonction ne retourne rien mais modifie le labyrinthe
"""
poserPionPlateau(labyrinthe['plateau']['matrice'], lin, col, getNumJoueurCourant(labyrinthe['joueurs']))
def getCarteAJouer(labyrinthe):
"""
donne la carte à jouer
paramètre: labyrinthe: le labyrinthe considéré
résultat: la carte à jouer
"""
return labyrinthe['plateau']['CA']
def coupInterdit(labyrinthe,direction,rangee):
"""
retourne True si le coup proposé correspond au coup interdit
elle retourne False sinon
paramètres: labyrinthe: le labyrinthe considéré
direction: un caractère qui indique la direction choisie ('N','S','E','O')
rangee: le numéro de la ligne ou de la colonne choisie
résultat: un booléen indiquant si le coup est interdit ou non
"""
    # Fixed rows/columns of the board (even indices) can never be shifted.
    return rangee in (0, 2, 4, 6)
def jouerCarte(labyrinthe,direction,rangee):
"""
fonction qui joue la carte amovible dans la direction et sur la rangée passées
en paramètres. Cette fonction
- met à jour le plateau du labyrinthe
- met à jour la carte à jouer
- met à jour la nouvelle direction interdite
paramètres: labyrinthe: le labyrinthe considéré
direction: un caractère qui indique la direction choisie ('N','S','E','O')
rangee: le numéro de la ligne ou de la colonne choisie
Cette fonction ne retourne pas de résultat mais mais à jour le labyrinthe
"""
    # The card to play is stored on the plateau under the 'CA' key
    # (see getCarteAJouer/tournerCarte), so it is read and written there.
    if direction == 'N':
        labyrinthe['plateau']['CA'] = decalageColonneEnHaut(labyrinthe, rangee, labyrinthe['plateau']['CA'])
    elif direction == 'E':
        labyrinthe['plateau']['CA'] = decalageLigneADroite(labyrinthe, rangee, labyrinthe['plateau']['CA'])
    elif direction == 'S':
        labyrinthe['plateau']['CA'] = decalageColonneEnBas(labyrinthe, rangee, labyrinthe['plateau']['CA'])
    elif direction == 'O':
        labyrinthe['plateau']['CA'] = decalageLigneAGauche(labyrinthe, rangee, labyrinthe['plateau']['CA'])
def tournerCarte(labyrinthe,sens='H'):
"""
tourne la carte à jouer dans le sens indiqué en paramètre (H horaire A antihoraire)
paramètres: labyritnthe: le labyrinthe considéré
sens: un caractère indiquant le sens dans lequel tourner la carte
Cette fonction ne retourne pas de résultat mais mais à jour le labyrinthe
"""
if sens=='H':
tournerHoraire(labyrinthe['plateau']['CA'])
elif sens=='A':
tournerAntiHoraire(labyrinthe['plateau']['CA'])
def getTresorCourant(labyrinthe):
"""
retourne le numéro du trésor que doit cherche le joueur courant
paramètre: labyritnthe: le labyrinthe considéré
resultat: le numéro du trésor recherché par le joueur courant
"""
prochainTresorJoueur(labyrinthe['joueurs'],getNumJoueurCourant(labyrinthe))
def getCoordonneesTresorCourant(labyrinthe):
"""
donne les coordonnées du trésor que le joueur courant doit trouver
paramètre: labyritnthe: le labyrinthe considéré
resultat: les coordonnées du trésor à chercher ou None si celui-ci
n'est pas sur le plateau
"""
getCoordonneesTresor(labyrinthe['plateau'], tresorCourant(labyrinthe['joueurs']))
def getCoordonneesJoueurCourant(labyrinthe):
"""
donne les coordonnées du joueur courant sur le plateau
paramètre: labyritnthe: le labyrinthe considéré
resultat: les coordonnées du joueur courant ou None si celui-ci
n'est pas sur le plateau
"""
return getCoordonneesJoueur(labyrinthe['plateau'],numJoueurCourant(labyrinthe['joueurs']))
def executerActionPhase1(labyrinthe,action,rangee):
"""
exécute une action de jeu de la phase 1
paramètres: labyrinthe: le labyrinthe considéré
action: un caractère indiquant l'action à effecter
si action vaut 'T' => faire tourner la carte à jouer
si action est une des lettres N E S O et rangee est un des chiffre 1,3,5
=> insèrer la carte à jouer à la direction action sur la rangée rangee
et faire le nécessaire pour passer en phase 2
résultat: un entier qui vaut
0 si l'action demandée était valide et demandait de tourner la carte
1 si l'action demandée était valide et demandait d'insérer la carte
2 si l'action est interdite car l'opposée de l'action précédente
3 si action et rangee sont des entiers positifs
4 dans tous les autres cas
"""
    if action == 'T':
        tournerCarte(labyrinthe, sens='H')
        return 0
    elif action in ('N', 'E', 'S', 'O') and not coupInterdit(labyrinthe, action, rangee):
        jouerCarte(labyrinthe, action, rangee)
        changerPhase(labyrinthe)
        return 1
    elif coupInterdit(labyrinthe, action, rangee):
        return 2
    elif isinstance(action, int) and isinstance(rangee, int) and action > 0 and rangee > 0:
        return 3
    else:
        return 4
def accessibleDistJoueurCourant(labyrinthe, ligA,colA):
"""
verifie si le joueur courant peut accéder la case ligA,colA
si c'est le cas la fonction retourne une liste représentant un chemin possible
sinon ce n'est pas le cas, la fonction retourne None
paramètres: labyrinthe le labyrinthe considéré
ligA la ligne de la case d'arrivée
colA la colonne de la case d'arrivée
résultat: une liste de couples d'entier représentant un chemin que le joueur
courant atteigne la case d'arrivée s'il existe None si pas de chemin
"""
x,y=getCoordonneesJoueur(labyrinthe['plateau'],numJoueurCourant(labyrinthe['joueurs']))
return accessibleDist(labyrinthe['plateau'],x,y,ligA,colA)
def finirTour(labyrinthe):
"""
vérifie si le joueur courant vient de trouver un trésor (si oui fait le nécessaire)
vérifie si la partie est terminée, si ce n'est pas le cas passe au joueur suivant
paramètre: labyrinthe le labyrinthe considéré
résultat: un entier qui vaut
0 si le joueur courant n'a pas trouvé de trésor
1 si le joueur courant a trouvé un trésor mais la partie n'est pas terminée
2 si le joueur courant a trouvé son dernier trésor (la partie est donc terminée)
"""
changerPhase(labyrinthe)
if getCoordonneesJoueur(labyrinthe['plateau'],numJoueurCourant(labyrinthe['joueurs']))!=getCoordonneesTresor(labyrinthe['plateau'],tresorCourant(labyrinthe['joueurs'])) and joueurCourantAFini(labyrinthe['joueurs'])==False:
return 0
if getCoordonneesJoueur(labyrinthe['plateau'],numJoueurCourant(labyrinthe['joueurs']))==getCoordonneesTresor(labyrinthe['plateau'],tresorCourant(labyrinthe['joueurs'])):
if joueurCourantAFini(labyrinthe['joueurs'])==False:
return 1
if joueurCourantAFini(labyrinthe['joueurs'])==True:
return 2
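# --- Editor's note: illustrative sketch, not part of the original file. ------
# Rough shape of one game turn using the helpers above; it assumes the
# listeJoueurs and plateau modules are importable and is only a sketch.
def _demo_turn():
    laby = Labyrinthe(["Alice", "Bob"], nbTresors=24)
    # Phase 1: rotate the spare card once, then push it in from the north on column 1.
    executerActionPhase1(laby, 'T', 0)
    executerActionPhase1(laby, 'N', 1)
    # Phase 2: walk to the current treasure if a path exists.
    cible = getCoordonneesTresorCourant(laby)
    if cible is not None:
        chemin = accessibleDistJoueurCourant(laby, cible[0], cible[1])
        if chemin is not None:
            depart = getCoordonneesJoueurCourant(laby)
            prendreJoueurCourant(laby, depart[0], depart[1])
            poserJoueurCourant(laby, cible[0], cible[1])
    return finirTour(laby)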
| 38.556667 | 226 | 0.689634 |
c13e614b5b7c8b191fe493ea9e416c10dcb77d67 | 2,001 | py | Python | IMLearn/metrics/loss_functions.py | OmriBenbenisty/IML.HUJI | 18ca1b8086b8aaa149dca95f96de26750ea0497f | ["MIT"] | null | null | null | IMLearn/metrics/loss_functions.py | OmriBenbenisty/IML.HUJI | 18ca1b8086b8aaa149dca95f96de26750ea0497f | ["MIT"] | null | null | null | IMLearn/metrics/loss_functions.py | OmriBenbenisty/IML.HUJI | 18ca1b8086b8aaa149dca95f96de26750ea0497f | ["MIT"] | null | null | null |
import numpy as np
def mean_square_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate MSE loss
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
Returns
-------
MSE of given predictions
"""
return np.square(y_true - y_pred).mean()
def misclassification_error(y_true: np.ndarray, y_pred: np.ndarray, normalize: bool = True) -> float:
"""
Calculate misclassification loss
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
normalize: bool, default = True
Normalize by number of samples or not
Returns
-------
Misclassification of given predictions
"""
prod = y_true * y_pred
return np.sum((prod < 0).astype(int)) / prod.shape[0] if normalize\
else np.sum((prod < 0).astype(int))
# return np.linalg.norm(y_true - y_pred) / y_true.shape[0] if normalize \
# else np.linalg.norm(y_true - y_pred)
def accuracy(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate accuracy of given predictions
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
Returns
-------
Accuracy of given predictions
"""
return ((y_true - y_pred) == 0).astype(int).mean()
def cross_entropy(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate the cross entropy of given predictions
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
Returns
-------
Cross entropy of given predictions
"""
raise NotImplementedError()
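# A minimal sketch of a possible implementation, kept as a comment because the exercise
# intentionally leaves this unimplemented; it assumes y_true holds one-hot/probability
# targets and y_pred holds predicted probabilities (my assumption, not the course spec):
#   probs = np.clip(y_pred, 1e-12, 1.0)
#   return float(-np.sum(y_true * np.log(probs)) / y_true.shape[0])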
| 24.703704
| 101
| 0.625187
|
e434ab5a365836201daf8d646868331ae66e721e
| 344
|
py
|
Python
|
modules/random_dog.py
|
ChaseBosman/chatbot
|
a39e655e6d586fa596471cd20617dff5f9795a96
|
[
"Unlicense"
] | 3
|
2019-10-19T12:07:06.000Z
|
2020-10-05T17:24:56.000Z
|
modules/random_dog.py
|
ChaseBosman/chatbot
|
a39e655e6d586fa596471cd20617dff5f9795a96
|
[
"Unlicense"
] | 17
|
2019-10-05T12:30:17.000Z
|
2021-07-25T20:06:33.000Z
|
modules/random_dog.py
|
ChaseBosman/chatbot
|
a39e655e6d586fa596471cd20617dff5f9795a96
|
[
"Unlicense"
] | 26
|
2018-10-19T05:43:12.000Z
|
2020-10-02T05:27:48.000Z
|
import requests
import json
def random_dog_pic():
try:
url = 'https://dog.ceo/api/breeds/image/random'
response = requests.get(url)
response_json = json.loads(response.text)
return "Here's a super cute doc pic: " + response_json.get('message')
except:
return "No dogs available today :/"
| 26.461538
| 77
| 0.627907
|
ec8d8fb43d9a801e49858c37f0222540c8c913f8
| 13,979
|
py
|
Python
|
ansible/modules/network/netvisor/pn_trunk.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
ansible/modules/network/netvisor/pn_trunk.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
ansible/modules/network/netvisor/pn_trunk.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
""" PN CLI trunk-create/trunk-delete/trunk-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_trunk
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to create/delete/modify a trunk.
description:
- Execute trunk-create or trunk-delete command.
- Trunks can be used to aggregate network links at Layer 2 on the local
switch. Use this command to create a new trunk.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
state:
description:
- State the action to perform. Use 'present' to create trunk,
'absent' to delete trunk and 'update' to modify trunk.
required: True
choices: ['present', 'absent', 'update']
pn_name:
description:
- Specify the name for the trunk configuration.
required: true
pn_ports:
description:
- Specify the port number(s) for the link(s) to aggregate into the trunk.
- Required for trunk-create.
pn_speed:
description:
- Specify the port speed or disable the port.
choices: ['disable', '10m', '100m', '1g', '2.5g', '10g', '40g']
pn_egress_rate_limit:
description:
- Specify an egress port data rate limit for the configuration.
pn_jumbo:
description:
- Specify if the port can receive jumbo frames.
pn_lacp_mode:
description:
- Specify the LACP mode for the configuration.
choices: ['off', 'passive', 'active']
pn_lacp_priority:
description:
- Specify the LACP priority. This is a number between 1 and 65535 with a
default value of 32768.
pn_lacp_timeout:
description:
- Specify the LACP timeout as slow (30 seconds) or fast (4 seconds).
The default value is slow.
choices: ['slow', 'fast']
pn_lacp_fallback:
description:
- Specify the LACP fallback mode as bundle or individual.
choices: ['bundle', 'individual']
pn_lacp_fallback_timeout:
description:
- Specify the LACP fallback timeout in seconds. The range is between 30
and 60 seconds with a default value of 50 seconds.
pn_edge_switch:
description:
- Specify if the switch is an edge switch.
pn_pause:
description:
- Specify if pause frames are sent.
pn_description:
description:
- Specify a description for the trunk configuration.
pn_loopback:
description:
- Specify loopback if you want to use loopback.
pn_mirror_receive:
description:
- Specify if the configuration receives mirrored traffic.
pn_unknown_ucast_level:
description:
- Specify an unknown unicast level in percent. The default value is 100%.
pn_unknown_mcast_level:
description:
- Specify an unknown multicast level in percent. The default value is 100%.
pn_broadcast_level:
description:
- Specify a broadcast level in percent. The default value is 100%.
pn_port_macaddr:
description:
- Specify the MAC address of the port.
pn_loopvlans:
description:
- Specify a list of looping vlans.
pn_routing:
description:
- Specify if the port participates in routing on the network.
pn_host:
description:
- Host facing port control setting.
"""
EXAMPLES = """
- name: create trunk
pn_trunk:
state: 'present'
pn_name: 'spine-to-leaf'
pn_ports: '11,12,13,14'
- name: delete trunk
pn_trunk:
state: 'absent'
pn_name: 'spine-to-leaf'
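# Illustrative only: a trunk-modify example for the 'update' state documented above
# (the chosen lacp mode value is an assumption, not taken from the module docs)
- name: modify trunk
pn_trunk:
state: 'update'
pn_name: 'spine-to-leaf'
pn_lacp_mode: 'active'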
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the trunk command.
returned: always
type: list
stderr:
description: The set of error responses from the trunk command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
TRUNK_EXISTS = None
def pn_cli(module):
"""
This method is to generate the cli portion to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
def check_cli(module, cli):
"""
This method checks for idempotency using the trunk-show command.
If a trunk with given name exists, return TRUNK_EXISTS as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: TRUNK_EXISTS
"""
name = module.params['pn_name']
show = cli + ' trunk-show format switch,name no-show-headers'
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
# Global flags
global TRUNK_EXISTS
if name in out:
TRUNK_EXISTS = True
else:
TRUNK_EXISTS = False
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'trunk-create'
if state == 'absent':
command = 'trunk-delete'
if state == 'update':
command = 'trunk-modify'
return command
def main():
""" This portion is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_name=dict(required=True, type='str'),
pn_ports=dict(type='str'),
pn_speed=dict(type='str',
choices=['disable', '10m', '100m', '1g', '2.5g',
'10g', '40g']),
pn_egress_rate_limit=dict(type='str'),
pn_jumbo=dict(type='bool'),
pn_lacp_mode=dict(type='str', choices=[
'off', 'passive', 'active']),
pn_lacp_priority=dict(type='int'),
pn_lacp_timeout=dict(type='str'),
pn_lacp_fallback=dict(type='str', choices=[
'bundle', 'individual']),
pn_lacp_fallback_timeout=dict(type='str'),
pn_edge_switch=dict(type='bool'),
pn_pause=dict(type='bool'),
pn_description=dict(type='str'),
pn_loopback=dict(type='bool'),
pn_mirror_receive=dict(type='bool'),
pn_unknown_ucast_level=dict(type='str'),
pn_unknown_mcast_level=dict(type='str'),
pn_broadcast_level=dict(type='str'),
pn_port_macaddr=dict(type='str'),
pn_loopvlans=dict(type='str'),
pn_routing=dict(type='bool'),
pn_host=dict(type='bool')
),
required_if=(
["state", "present", ["pn_name", "pn_ports"]],
["state", "absent", ["pn_name"]],
["state", "update", ["pn_name"]]
)
)
# Accessing the arguments
state = module.params['state']
name = module.params['pn_name']
ports = module.params['pn_ports']
speed = module.params['pn_speed']
egress_rate_limit = module.params['pn_egress_rate_limit']
jumbo = module.params['pn_jumbo']
lacp_mode = module.params['pn_lacp_mode']
lacp_priority = module.params['pn_lacp_priority']
lacp_timeout = module.params['pn_lacp_timeout']
lacp_fallback = module.params['pn_lacp_fallback']
lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout']
edge_switch = module.params['pn_edge_switch']
pause = module.params['pn_pause']
description = module.params['pn_description']
loopback = module.params['pn_loopback']
mirror_receive = module.params['pn_mirror_receive']
unknown_ucast_level = module.params['pn_unknown_ucast_level']
unknown_mcast_level = module.params['pn_unknown_mcast_level']
broadcast_level = module.params['pn_broadcast_level']
port_macaddr = module.params['pn_port_macaddr']
loopvlans = module.params['pn_loopvlans']
routing = module.params['pn_routing']
host = module.params['pn_host']
command = get_command_from_state(state)
# Building the CLI command string
cli = pn_cli(module)
if command == 'trunk-delete':
check_cli(module, cli)
if TRUNK_EXISTS is False:
module.exit_json(
skipped=True,
msg='Trunk with name %s does not exist' % name
)
cli += ' %s name %s ' % (command, name)
else:
if command == 'trunk-create':
check_cli(module, cli)
if TRUNK_EXISTS is True:
module.exit_json(
skipped=True,
msg='Trunk with name %s already exists' % name
)
cli += ' %s name %s ' % (command, name)
# Appending options
if ports:
cli += ' ports ' + ports
if speed:
cli += ' speed ' + speed
if egress_rate_limit:
cli += ' egress-rate-limit ' + egress_rate_limit
if jumbo is True:
cli += ' jumbo '
if jumbo is False:
cli += ' no-jumbo '
if lacp_mode:
cli += ' lacp-mode ' + lacp_mode
if lacp_priority:
cli += ' lacp-priority ' + str(lacp_priority)
if lacp_timeout:
cli += ' lacp-timeout ' + lacp_timeout
if lacp_fallback:
cli += ' lacp-fallback ' + lacp_fallback
if lacp_fallback_timeout:
cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout
if edge_switch is True:
cli += ' edge-switch '
if edge_switch is False:
cli += ' no-edge-switch '
if pause is True:
cli += ' pause '
if pause is False:
cli += ' no-pause '
if description:
cli += ' description ' + description
if loopback is True:
cli += ' loopback '
if loopback is False:
cli += ' no-loopback '
if mirror_receive is True:
cli += ' mirror-receive-only '
if mirror_receive is False:
cli += ' no-mirror-receive-only '
if unknown_ucast_level:
cli += ' unknown-ucast-level ' + unknown_ucast_level
if unknown_mcast_level:
cli += ' unknown-mcast-level ' + unknown_mcast_level
if broadcast_level:
cli += ' broadcast-level ' + broadcast_level
if port_macaddr:
cli += ' port-mac-address ' + port_macaddr
if loopvlans:
cli += ' loopvlans ' + loopvlans
if routing is True:
cli += ' routing '
if routing is False:
cli += ' no-routing '
if host is True:
cli += ' host-enable '
if host is False:
cli += ' host-disable '
run_cli(module, cli)
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| 30.85872
| 81
| 0.618141
|
adc72902e0bdd4ca39e68a4015381e449d728f74
| 334
|
py
|
Python
|
pset6/greedy.py
|
NelsonVides/CS50
|
132b4b60f8bfc6775a7075744975a86f3e53a5ce
|
[
"MIT"
] | null | null | null |
pset6/greedy.py
|
NelsonVides/CS50
|
132b4b60f8bfc6775a7075744975a86f3e53a5ce
|
[
"MIT"
] | null | null | null |
pset6/greedy.py
|
NelsonVides/CS50
|
132b4b60f8bfc6775a7075744975a86f3e53a5ce
|
[
"MIT"
] | null | null | null |
while True:
#print("Tell me how many rows you want")
dollars = float(input("How much do I owe you? "))
if dollars > 0:
break
cents = int(dollars * 100)
coins = 0;
coins += cents // 25
cents = cents % 25
coins += cents // 10
cents = cents % 10
coins += cents // 5
cents = cents % 5
coins += cents
print(coins)
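# Worked trace of the greedy breakdown above (illustrative only):
# owed $0.41 -> 41 cents -> 1 quarter (16 left) -> 1 dime (6 left)
#            -> 1 nickel (1 left) -> 1 penny, so 4 coins are printed.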
| 15.904762
| 53
| 0.598802
|
9756459115989f769f19a8a111663dcf9c8761b6
| 759
|
py
|
Python
|
ch3/test_autouse.py
|
ujuc/python_testing_with_python
|
645700582773532c3520f97a299e2944563b113d
|
[
"Beerware"
] | 1
|
2018-08-15T16:26:32.000Z
|
2018-08-15T16:26:32.000Z
|
ch3/test_autouse.py
|
ujuc/python_testing_with_pytest
|
645700582773532c3520f97a299e2944563b113d
|
[
"Beerware"
] | null | null | null |
ch3/test_autouse.py
|
ujuc/python_testing_with_pytest
|
645700582773532c3520f97a299e2944563b113d
|
[
"Beerware"
] | null | null | null |
"""Demonstrate autouse fixtures."""
import pytest
import time
@pytest.fixture(autouse=True, scope='session')
def footer_session_scope():
"""Report the time at the end of a session."""
yield
now = time.time()
print('--')
print('finished : {}'.format(time.strftime('%d %b %X', time.localtime(now))))
print('--------------')
@pytest.fixture(autouse=True)
def footer_function_scope():
"""Report test durations after each function."""
start = time.time()
yield
stop = time.time()
delta = stop - start
print('\ntest duration : {:0.3} seconds'.format(delta))
def test_1():
"""Simulate long-ish running test."""
time.sleep(1)
def test_2():
"""Simulate slightly longer test."""
time.sleep(1.23)
| 21.685714
| 81
| 0.621871
|
670d4154568911583c99386168b5c454456e559d
| 1,031
|
py
|
Python
|
configs/mirnet_train.py
|
soumik12345/enhance-me
|
c0f9bcb6d4eb46030e90d47e58059f8624f5cf7a
|
[
"MIT"
] | 1
|
2022-02-01T23:20:19.000Z
|
2022-02-01T23:20:19.000Z
|
configs/mirnet_train.py
|
soumik12345/enhance-me
|
c0f9bcb6d4eb46030e90d47e58059f8624f5cf7a
|
[
"MIT"
] | 2
|
2021-11-27T08:45:47.000Z
|
2021-11-28T08:45:59.000Z
|
configs/mirnet_train.py
|
soumik12345/enhance-me
|
c0f9bcb6d4eb46030e90d47e58059f8624f5cf7a
|
[
"MIT"
] | null | null | null |
import ml_collections
def get_config() -> ml_collections.ConfigDict:
config = ml_collections.ConfigDict()
config.experiment_name = "lol_dataset_256" # Experiment Name
config.image_size = 128 # Image Size
config.dataset_label = "lol" # Dataset Label
config.apply_random_horizontal_flip = True # Flag: Apply Random Horizontal Flip
config.apply_random_vertical_flip = True # Flag: Apply Random Vertical Flip
config.apply_random_rotation = True # Flag: Apply Random Rotation
config.use_mixed_precision = True # Flag: Use Mixed-precision
config.val_split = 0.1 # Validation Split
config.batch_size = 4 # Batch Size
config.num_recursive_residual_groups = 3 # Number of recursive residual groups in MIRNet
config.num_multi_scale_residual_blocks = 2 # Number of multi-scale residual blocks in MIRNet
config.learning_rate = 1e-4 # learning rate
config.epsilon = 1e-3 # Constant for Charbonnier Loss
config.epochs = 50 # Number of training epochs
return config
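# Example usage (illustrative; assumes this file is importable as configs.mirnet_train):
#   from configs.mirnet_train import get_config
#   config = get_config()
#   print(config.experiment_name, config.batch_size)  # -> lol_dataset_256 4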
| 49.095238
| 97
| 0.744908
|
72c238e688b079fce4ecfa3678350798aa83538e
| 2,211
|
py
|
Python
|
keepmealive/folders/tests.py
|
kymy86/keepmealive
|
fcca7cb825ae947978ca3251dc0331207cec2527
|
[
"Apache-2.0"
] | null | null | null |
keepmealive/folders/tests.py
|
kymy86/keepmealive
|
fcca7cb825ae947978ca3251dc0331207cec2527
|
[
"Apache-2.0"
] | null | null | null |
keepmealive/folders/tests.py
|
kymy86/keepmealive
|
fcca7cb825ae947978ca3251dc0331207cec2527
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework import status
from rest_framework.test import APITestCase
from folders.models import Folder
from django.contrib.auth.models import User
class FolderTests(APITestCase):
def setUp(self):
self.users = User.objects.create_user(
username='test',
email='test@test.com',
password='testpwd'
)
url = '/api/auth/token/'
data = {
'username': 'test',
'password': 'testpwd'
}
response = self.client.post(url, data, format='json')
self.token = response.data['token']
"""
Test folder creation
"""
def test_create_folder(self):
url = '/api/folders/folder/'
data = {
'name': 'test folder',
'idparent': 0,
}
header = {
'HTTP_AUTHORIZATION': 'JWT {}'.format(self.token)
}
response = self.client.post(url, data, format='json', **header)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Folder.objects.count(), 1)
self.assertEqual(Folder.objects.get().name, 'test folder')
"""
Test folder updating
"""
def test_update_folder(self):
folder = Folder.objects.create(name='test folder', idparent=0)
url = '/api/folders/folder/'+str(folder.id)+"/"
data = {
'name': 'Test folder updated',
'idparent': str(folder.idparent)
}
header = {
'HTTP_AUTHORIZATION': 'JWT {}'.format(self.token)
}
response = self.client.put(url, data, format='json', **header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Folder.objects.get().name, 'Test folder updated')
"""
Test folder deletion
"""
def test_delete_folder(self):
folder = Folder.objects.create(name='test folder', idparent=0)
url = '/api/folders/folder/'+str(folder.id)+"/"
header = {
'HTTP_AUTHORIZATION': 'JWT {}'.format(self.token)
}
response = self.client.delete(url, format='json', **header)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
| 32.514706
| 74
| 0.585708
|
daff3ef70ba198c2f8a4fa57f72129090a20def0
| 5,659
|
py
|
Python
|
test/functional/p2p-instantsend.py
|
INTICOIN/SolD
|
cfa2f3a96b0f8831fee63c70203af17732181fe5
|
[
"MIT"
] | null | null | null |
test/functional/p2p-instantsend.py
|
INTICOIN/SolD
|
cfa2f3a96b0f8831fee63c70203af17732181fe5
|
[
"MIT"
] | null | null | null |
test/functional/p2p-instantsend.py
|
INTICOIN/SolD
|
cfa2f3a96b0f8831fee63c70203af17732181fe5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The SolD Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import SolDTestFramework
from test_framework.util import isolate_node, sync_mempools, reconnect_isolated_node, assert_equal, \
assert_raises_rpc_error
'''
p2p-instantsend.py
Tests InstantSend functionality (prevent doublespend for unconfirmed transactions)
'''
class InstantSendTest(SolDTestFramework):
def set_test_params(self):
self.set_sold_test_params(7, 3, fast_dip3_enforcement=True)
# set sender, receiver, isolated nodes
self.isolated_idx = 1
self.receiver_idx = 2
self.sender_idx = 3
def run_test(self):
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.wait_for_sporks_same()
self.mine_quorum()
self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0)
self.nodes[0].spork("SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0)
self.wait_for_sporks_same()
self.test_mempool_doublespend()
self.test_block_doublespend()
def test_block_doublespend(self):
sender = self.nodes[self.sender_idx]
receiver = self.nodes[self.receiver_idx]
isolated = self.nodes[self.isolated_idx]
# feed the sender with some balance
sender_addr = sender.getnewaddress()
self.nodes[0].sendtoaddress(sender_addr, 1)
self.bump_mocktime(1)
self.nodes[0].generate(2)
self.sync_all()
# create doublespending transaction, but don't relay it
dblspnd_tx = self.create_raw_tx(sender, isolated, 0.5, 1, 100)
# isolate one node from network
isolate_node(isolated)
# instantsend to receiver
receiver_addr = receiver.getnewaddress()
is_id = sender.sendtoaddress(receiver_addr, 0.9)
# wait for the transaction to propagate
connected_nodes = self.nodes.copy()
del connected_nodes[self.isolated_idx]
self.sync_mempools(connected_nodes)
for node in connected_nodes:
self.wait_for_instantlock(is_id, node)
# send doublespend transaction to isolated node
isolated.sendrawtransaction(dblspnd_tx['hex'])
# generate block on isolated node with doublespend transaction
self.bump_mocktime(1)
isolated.generate(1)
wrong_block = isolated.getbestblockhash()
# connect isolated block to network
reconnect_isolated_node(isolated, 0)
# check doublespend block is rejected by other nodes
timeout = 10
for i in range(0, self.num_nodes):
if i == self.isolated_idx:
continue
res = self.nodes[i].waitforblock(wrong_block, timeout)
assert (res['hash'] != wrong_block)
# wait for long time only for first node
timeout = 1
# send coins back to the controller node without waiting for confirmations
receiver.sendtoaddress(self.nodes[0].getnewaddress(), 0.9, "", "", True)
assert_equal(receiver.getwalletinfo()["balance"], 0)
# mine more blocks
# TODO: mine these blocks on an isolated node
self.bump_mocktime(1)
# make sure the above TX is on node0
self.sync_mempools([n for n in self.nodes if n is not isolated])
self.nodes[0].generate(2)
self.sync_all()
def test_mempool_doublespend(self):
sender = self.nodes[self.sender_idx]
receiver = self.nodes[self.receiver_idx]
isolated = self.nodes[self.isolated_idx]
# feed the sender with some balance
sender_addr = sender.getnewaddress()
self.nodes[0].sendtoaddress(sender_addr, 1)
self.bump_mocktime(1)
self.nodes[0].generate(2)
self.sync_all()
# create doublespending transaction, but don't relay it
dblspnd_tx = self.create_raw_tx(sender, isolated, 0.5, 1, 100)
dblspnd_txid = bytes_to_hex_str(hash256(hex_str_to_bytes(dblspnd_tx['hex']))[::-1])
# isolate one node from network
isolate_node(isolated)
# send doublespend transaction to isolated node
isolated.sendrawtransaction(dblspnd_tx['hex'])
# let isolated node rejoin the network
# The previously isolated node should NOT relay the doublespending TX
reconnect_isolated_node(isolated, 0)
for node in self.nodes:
if node is not isolated:
assert_raises_rpc_error(-5, "No such mempool or blockchain transaction", node.getrawtransaction, dblspnd_txid)
# instantsend to receiver. The previously isolated node should prune the doublespend TX and request the correct
# TX from other nodes.
receiver_addr = receiver.getnewaddress()
is_id = sender.sendtoaddress(receiver_addr, 0.9)
# wait for the transaction to propagate
self.sync_mempools()
for node in self.nodes:
self.wait_for_instantlock(is_id, node)
assert_raises_rpc_error(-5, "No such mempool or blockchain transaction", isolated.getrawtransaction, dblspnd_txid)
# send coins back to the controller node without waiting for confirmations
receiver.sendtoaddress(self.nodes[0].getnewaddress(), 0.9, "", "", True)
assert_equal(receiver.getwalletinfo()["balance"], 0)
# mine more blocks
self.bump_mocktime(1)
self.nodes[0].generate(2)
self.sync_all()
if __name__ == '__main__':
InstantSendTest().main()
| 42.231343
| 126
| 0.675208
|
497f0a9332e116705731e2b8fc418c02b33d2026
| 2,223
|
py
|
Python
|
tensorflow/python/ops/ragged/ragged_eager_test.py
|
wainshine/tensorflow
|
dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d
|
[
"Apache-2.0"
] | 54
|
2017-06-17T14:07:48.000Z
|
2022-03-29T02:11:20.000Z
|
tensorflow/python/ops/ragged/ragged_eager_test.py
|
wainshine/tensorflow
|
dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d
|
[
"Apache-2.0"
] | 19
|
2021-12-28T12:44:55.000Z
|
2022-01-13T08:11:28.000Z
|
tensorflow/python/ops/ragged/ragged_eager_test.py
|
wainshine/tensorflow
|
dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d
|
[
"Apache-2.0"
] | 11
|
2018-04-19T22:36:01.000Z
|
2021-08-02T08:44:43.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.ragged in eager execution mode."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import googletest
class RaggedTensorTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
dict(pylist=[[b'a', b'b'], [b'c']]),
dict(pylist=[[[1, 2], [3]], [[4, 5, 6], [], [7]]]),
dict(pylist=[[[1, 2], [3, 4]], [[5, 6], [], [7, 8]]], ragged_rank=1),
])
def testRaggedTensorToList(self, pylist, ragged_rank=None):
rt = ragged_factory_ops.constant(pylist, ragged_rank)
self.assertAllEqual(rt, pylist)
@parameterized.parameters([
dict(pylist=[[b'a', b'b'], [b'c']],
expected_str="[[b'a', b'b'], [b'c']]"),
dict(pylist=[[[1, 2], [3]], [[4, 5, 6], [], [7]]],
expected_str='[[[1, 2], [3]], [[4, 5, 6], [], [7]]]'),
dict(pylist=[[0, 1], np.arange(2, 2000)],
expected_str='[[0, 1], [2, 3, 4, ..., 1997, 1998, 1999]]'),
dict(pylist=[[[0, 1]], [np.arange(2, 2000)]],
expected_str='[[[0, 1]],\n [[2, 3, 4, ..., 1997, 1998, 1999]]]'),
])
def testRaggedTensorStr(self, pylist, expected_str):
rt = ragged_factory_ops.constant(pylist)
self.assertEqual(str(rt), f'<tf.RaggedTensor {expected_str}>')
if __name__ == '__main__':
ops.enable_eager_execution()
googletest.main()
| 39.696429
| 80
| 0.625281
|
2c3394aa27d16be303570da47d86bc6fa7ce1642
| 1,146
|
py
|
Python
|
tests/test_utils.py
|
Badger-Finance/badger-utils
|
e6cc5cf22f24840486f4a762eab024f87da83c18
|
[
"MIT"
] | 1
|
2021-09-29T10:04:50.000Z
|
2021-09-29T10:04:50.000Z
|
tests/test_utils.py
|
Badger-Finance/badger-utils
|
e6cc5cf22f24840486f4a762eab024f87da83c18
|
[
"MIT"
] | 11
|
2021-10-17T15:52:22.000Z
|
2022-01-18T15:10:36.000Z
|
tests/test_utils.py
|
SHAKOTN/badger-utils
|
e6cc5cf22f24840486f4a762eab024f87da83c18
|
[
"MIT"
] | 1
|
2021-11-25T14:04:39.000Z
|
2021-11-25T14:04:39.000Z
|
import pytest
from brownie import accounts
from badger_utils.utils import approx
from badger_utils.utils import is_address_eoa
from badger_utils.utils import val
@pytest.mark.parametrize(
"actual, expected, threshold",
[
(1, 1, 1), (90, 99, 99), (75, 100, 75)
]
)
def test_approx_match(actual, expected, threshold):
assert approx(actual, expected, threshold)
@pytest.mark.parametrize(
"actual, expected, threshold",
[
(1, 2, 1), (90, 99, 1), (75, 100, 25)
]
)
def test_approx_no_match(actual, expected, threshold):
assert not approx(actual, expected, threshold)
@pytest.mark.parametrize(
"amount, decimals, expected",
[
(0, 18, "0.000000000000000000"),
(1000000000, 18, "0.000000001000000000"),
(1000000000000000000, 18, "1.000000000000000000"),
]
)
def test_val(amount, decimals, expected):
result = val(amount, decimals)
assert result == expected
def test_is_address_eoa_accounts():
for account in accounts:
assert is_address_eoa(account.address)
def test_is_address_eoa_token(token):
assert not is_address_eoa(token.address)
| 23.875
| 58
| 0.686736
|
cc62d90caf3e5a31e5d9185c3b32cce03f49dcc0
| 24,628
|
py
|
Python
|
bnlearn/structure_learning.py
|
khuyentran1401/bnlearn
|
ce2c856337ce058205b09ee36ae2b8741b93cb5d
|
[
"MIT"
] | 2
|
2022-01-03T21:01:27.000Z
|
2022-01-04T00:14:08.000Z
|
bnlearn/structure_learning.py
|
khuyentran1401/bnlearn
|
ce2c856337ce058205b09ee36ae2b8741b93cb5d
|
[
"MIT"
] | null | null | null |
bnlearn/structure_learning.py
|
khuyentran1401/bnlearn
|
ce2c856337ce058205b09ee36ae2b8741b93cb5d
|
[
"MIT"
] | null | null | null |
"""Structure learning. Given a set of data samples, estimate a DAG that captures the dependencies between the variables."""
# ------------------------------------
# Name : structure_learning.py
# Author : E.Taskesen
# Contact : erdogant@gmail.com
# Licence : See licences
# ------------------------------------
# %% Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pgmpy.estimators import BDeuScore, K2Score, BicScore
from pgmpy.estimators import ExhaustiveSearch, HillClimbSearch, TreeSearch
from pgmpy.models import NaiveBayes
import pgmpy
from packaging import version
if version.parse(pgmpy.__version__)>=version.parse("0.1.13"):
from pgmpy.estimators import PC as ConstraintBasedEstimator
else:
from pgmpy.estimators import ConstraintBasedEstimator
import bnlearn
# %% Structure Learning
def fit(df, methodtype='hc', scoretype='bic', black_list=None, white_list=None, bw_list_method=None, max_indegree=None, tabu_length=100, epsilon=1e-4, max_iter=1e6, root_node=None, class_node=None, fixed_edges=None, return_all_dags=False, verbose=3):
"""Structure learning fit model.
Description
-----------
Search strategies for structure learning
The search space of DAGs is super-exponential in the number of variables and the above scoring functions allow for local maxima.
To learn model structure (a DAG) from a data set, there are three broad techniques:
1. Score-based structure learning (BIC/BDeu/K2 score; exhaustive search, hill climb/tabu search)
* exhaustivesearch
* hillclimbsearch
* chow-liu
* Tree-augmented Naive Bayes (tan)
* NaiveBayesian
2. Constraint-based structure learning (PC)
a. chi-square test
3. Hybrid structure learning (The combination of both techniques) (MMHC)
Score-based Structure Learning.
This approach construes model selection as an optimization task. It has two building blocks:
A scoring function s_D : M -> R that maps each model in the search space M to a numerical score, based on how well it fits a given data set D.
A search strategy to traverse the search space of possible models M and select a model with optimal score.
Commonly used scoring functions to measure the fit between model and data are Bayesian Dirichlet scores such as BDeu or K2 and the Bayesian Information Criterion (BIC, also called MDL).
BDeu is dependent on an equivalent sample size.
Parameters
----------
df : pd.DataFrame()
Input dataframe.
methodtype : str, (default : 'hc')
String Search strategy for structure_learning.
'hc' or 'hillclimbsearch' (default)
'ex' or 'exhaustivesearch'
'cs' or 'constraintsearch'
'cl' or 'chow-liu' (requires setting root_node parameter)
'nb' or 'naivebayes' (requires <root_node>)
'tan' (requires <root_node> and <class_node> parameter)
scoretype : str, (default : 'bic')
Scoring function for the search spaces.
'bic', 'k2', 'bdeu'
black_list : List or None, (default : None)
List of edges are black listed.
In case of filtering on nodes, the black-listed nodes are removed from the dataframe. The resulting model will not contain any nodes that are in black_list.
white_list : List or None, (default : None)
List of edges are white listed.
In case of filtering on nodes, the search is limited to those edges. The resulting model will then only contain nodes that are in white_list.
Works only in case of methodtype='hc'. See also parameter: `bw_list_method`.
bw_list_method : list of str or tuple, (default : None)
A list of edges can be passed as `black_list` or `white_list` to exclude or to limit the search.
* 'edges' : [('A', 'B'), ('C','D'), (...)] This option is limited to only methodtype='hc'
* 'nodes' : ['A', 'B', ...] Filter the dataframe based on the nodes for `black_list` or `white_list`. Filtering can be done for every methodtype/scoretype.
max_indegree : int, (default : None)
If provided and not None, the procedure only searches among models where all nodes have at most max_indegree parents. (only in case of methodtype='hc')
epsilon: float (default: 1e-4)
Defines the exit condition. If the improvement in score is less than `epsilon`, the learned model is returned. (only in case of methodtype='hc')
max_iter: int (default: 1e6)
The maximum number of iterations allowed. Returns the learned model when the number of iterations is greater than `max_iter`. (only in case of methodtype='hc')
root_node: String. (only in case of chow-liu, Tree-augmented Naive Bayes (TAN))
The root node for treeSearch based methods.
class_node: String
The class node is required for Tree-augmented Naive Bayes (TAN)
fixed_edges: iterable, Only in case of HillClimbSearch.
A list of edges that will always be there in the final learned model. The algorithm will add these edges at the start of the algorithm and will never change it.
return_all_dags : Bool, (default: False)
Return all possible DAGs. Only in case methodtype='exhaustivesearch'
verbose : int, (default : 3)
0: None, 1: Error, 2: Warning, 3: Info (default), 4: Debug, 5: Trace
Returns
-------
dict with model.
Examples
--------
>>> # Import bnlearn
>>> import bnlearn as bn
>>>
>>> # Load DAG
>>> model = bn.import_DAG('asia')
>>>
>>> # plot ground truth
>>> G = bn.plot(model)
>>>
>>> # Sampling
>>> df = bn.sampling(model, n=10000)
>>>
>>> # Structure learning of sampled dataset
>>> model_sl = bn.structure_learning.fit(df, methodtype='hc', scoretype='bic')
>>>
>>> # Compute edge strength using chi-square independence test
>>> model_sl = bn.independence_test(model_sl, df)
>>>
>>> # Plot based on structure learning of sampled data
>>> bn.plot(model_sl, pos=G['pos'])
>>>
>>> # Compare networks and make plot
>>> bn.compare_networks(model, model_sl, pos=G['pos'])
"""
out = []
# Set config
config = {'method': methodtype, 'scoring': scoretype, 'black_list': black_list, 'white_list': white_list, 'bw_list_method': bw_list_method, 'max_indegree': max_indegree, 'tabu_length': tabu_length, 'epsilon': epsilon, 'max_iter': max_iter, 'root_node': root_node, 'class_node': class_node, 'fixed_edges': fixed_edges, 'return_all_dags': return_all_dags, 'verbose': verbose}
# Make some checks
config = _make_checks(df, config, verbose=verbose)
# Make sure columns are of type string
df.columns = df.columns.astype(str)
# Filter on white_list and black_list
df = _white_black_list_filter(df, white_list, black_list, bw_list_method=config['bw_list_method'], verbose=verbose)
# Lets go!
if config['verbose']>=3: print('[bnlearn] >Computing best DAG using [%s]' %(config['method']))
# ExhaustiveSearch can be used to compute the score for every DAG and returns the best-scoring one:
if config['method']=='nv' or config['method']=='naivebayes':
out = _naivebayes(df,
root_node=config['root_node'],
estimator_type=None,
feature_vars=None,
dependent_var=None,
verbose=3)
# ExhaustiveSearch can be used to compute the score for every DAG and returns the best-scoring one:
if config['method']=='ex' or config['method']=='exhaustivesearch':
out = _exhaustivesearch(df,
scoretype=config['scoring'],
return_all_dags=config['return_all_dags'],
verbose=config['verbose'])
# HillClimbSearch
if config['method']=='hc' or config['method']=='hillclimbsearch':
out = _hillclimbsearch(df,
scoretype=config['scoring'],
black_list=config['black_list'],
white_list=config['white_list'],
max_indegree=config['max_indegree'],
tabu_length=config['tabu_length'],
bw_list_method=bw_list_method,
epsilon=config['epsilon'],
max_iter=config['max_iter'],
fixed_edges=config['fixed_edges'],
verbose=config['verbose'],
)
# Constraint-based Structure Learning
if config['method']=='cs' or config['method']=='constraintsearch':
"""Constraint-based Structure Learning
A different, but quite straightforward approach to build a DAG from data is this:
Identify independencies in the data set using hypothesis tests
Construct DAG (pattern) according to identified independencies (Conditional) Independence Tests
Independencies in the data can be identified using chi2 conditional independence tests."""
out = _constraintsearch(df, verbose=config['verbose'])
# TreeSearch-based Structure Learning
if config['method']=='chow-liu' or config['method']=='tan':
"""TreeSearch based Structure Learning."""
out = _treesearch(df, config['method'], config['root_node'], class_node=config['class_node'], verbose=config['verbose'])
# Store
out['model_edges'] = list(out['model'].edges())
out['adjmat'] = bnlearn._dag2adjmat(out['model'])
out['config'] = config
# return
return(out)
# %% Make Checks
def _make_checks(df, config, verbose=3):
assert isinstance(pd.DataFrame(), type(df)), 'df must be of type pd.DataFrame()'
if not np.isin(config['scoring'], ['bic', 'k2', 'bdeu']): raise Exception('"scoretype=%s" is invalid.' %(config['scoring']))
if not np.isin(config['method'], ['naivebayes', 'nb', 'tan', 'cl', 'chow-liu', 'hc', 'ex', 'cs', 'exhaustivesearch', 'hillclimbsearch', 'constraintsearch']): raise Exception('"methodtype=%s" is invalid.' %(config['method']))
if isinstance(config['white_list'], str):
config['white_list'] = [config['white_list']]
if isinstance(config['black_list'], str):
config['black_list'] = [config['black_list']]
if (config['white_list'] is not None) and len(config['white_list'])==0:
config['white_list'] = None
if (config['black_list'] is not None) and len(config['black_list'])==0:
config['black_list'] = None
if (config['method']!='hc') and (config['bw_list_method']=='edges'): raise Exception('[bnlearn] >The "bw_list_method=%s" does not work with "methodtype=%s"' %(config['bw_list_method'], config['method']))
if (config['method']=='tan') and (config['class_node'] is None): raise Exception('[bnlearn] >The treeSearch method TAN requires setting the <class_node> parameter: "%s"' %(str(config['class_node'])))
if ((config['method']=='nb') | (config['method']=='naivebayes')) and (config['root_node'] is None): raise Exception('[bnlearn] >The <%s> method requires setting the "root_node" parameter: "%s"' %(config['method'], str(config['class_node'])))
if config['method']=='cl':
config['method'] = 'chow-liu'
if config['fixed_edges'] is None:
config['fixed_edges']=set()
# Remove this block in future (21-10-2021)
if config['bw_list_method']=='filter':
if verbose>=2: print('[bnlearn] >Warning: The parameter bw_list_method="filter" is changed into bw_list_method="nodes". The old naming will be removed in future releases.')
config['bw_list_method'] = "nodes"
if config['bw_list_method']=='enforce':
if verbose>=2: print('[bnlearn] >Warning: The parameter bw_list_method="enforce" is changed into bw_list_method="edges". The old naming will be removed in future releases.')
config['bw_list_method'] = "edges"
# End remove block
# Show warnings
if (config['bw_list_method'] is None) and ((config['black_list'] is not None) or (config['white_list'] is not None)):
raise Exception('[bnlearn] >Error: The use of black_list or white_list requires setting bw_list_method.')
if df.shape[1]>10 and df.shape[1]<15:
if verbose>=2: print('[bnlearn] >Warning: Computing DAG with %d nodes can take a very long time!' %(df.shape[1]))
if (config['max_indegree'] is not None) and config['method']!='hc':
if verbose>=2: print('[bnlearn] >Warning: max_indegree only works in case of methodtype="hc"')
if (config['class_node'] is not None) and config['method']!='tan':
if verbose>=2: print('[bnlearn] >Warning: class_node only works in case of methodtype="tan"')
return config
# %% TreeSearch methods
def _naivebayes(df, root_node, estimator_type=None, feature_vars=None, dependent_var=None, verbose=3):
"""Naive Bayesian model.
Description
-----------
Naive Bayes is a special case of Bayesian Model where the only edges in the
model are from the feature variables to the dependent variable.
Parameters
----------
df : pandas DataFrame object
A DataFrame object with column names same as the variable names of network.
root_node : str
Parent node of the model.
estimator_type : TYPE, optional
Any pgmpy estimator. If nothing is specified, the default ``MaximumLikelihoodEstimator`` would be used.
* 'MaximumLikelihoodEstimator' (default)
* 'BayesianEstimator'
feature_vars: list (array-like)
A list of variable predictor variables (i.e. the features) in the model.
dependent_var: hashable object
The dependent variable (i.e. the variable to be predicted) in the model.
verbose : int, (default : 3)
0:None, 1:Error, 2:Warning, 3:Info (default), 4:Debug, 5:Trace
Returns
-------
None.
References
----------
* https://pgmpy.org/models/naive.html
* https://pgmpy.org/_modules/pgmpy/models/NaiveBayes.html#NaiveBayes
"""
model = NaiveBayes(feature_vars=feature_vars, dependent_var=dependent_var)
model.fit(df, parent_node=root_node, estimator=estimator_type)
# Store
out={}
out['model']=model
# Return
return(out)
# %% white_list and black_list
def _white_black_list_filter(df, white_list, black_list, bw_list_method='edges', verbose=3):
# if bw_list_method=='edges':
# # Keep only edges that are in white_list.
# if white_list is not None:
# if verbose>=3: print('[bnlearn] >Filter variables on white_list..')
# parent = [ u for (u, v) in white_list]
# child = [ v for (u, v) in white_list]
# white_list_node = [x.lower() for x in set(parent+child)]
# Iloc = np.isin(df.columns.str.lower(), white_list_node)
# df = df.loc[:, Iloc]
if bw_list_method=='nodes':
# Keep only variables that are in white_list.
if white_list is not None:
if verbose>=3: print('[bnlearn] >Filter variables (nodes) on white_list..')
white_list = [x.lower() for x in white_list]
Iloc = np.isin(df.columns.str.lower(), white_list)
df = df.loc[:, Iloc]
# Exclude variables that are in black_list.
if black_list is not None:
if verbose>=3: print('[bnlearn] >Filter variables (nodes) on black_list..')
black_list = [x.lower() for x in black_list]
Iloc = ~np.isin(df.columns.str.lower(), black_list)
df = df.loc[:, Iloc]
if (white_list is not None) or (black_list is not None):
if verbose>=3: print('[bnlearn] >Number of features after white/black listing: %d' %(df.shape[1]))
if df.shape[1]<=1: raise Exception('[bnlearn] >Error: [%d] variables are remaining. A minimum of 2 would be nice.' %(df.shape[1]))
return df
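# Illustrative behaviour of the node filter above (column names are made up):
#   df with columns ['A', 'B', 'C'], white_list=['a', 'b'], bw_list_method='nodes'
#   -> only columns A and B are kept; matching is case-insensitive on column names.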
# %% TreeSearch methods
def _treesearch(df, estimator_type, root_node, class_node=None, verbose=3):
"""Tree search methods.
Description
-----------
The TreeSearch methods Chow-liu and TAN (Tree-augmented Naive Bayes)
searches for DAGs with attempts to find a model with optimal score.
"""
out={}
est = TreeSearch(df, root_node=root_node)
model = est.estimate(estimator_type=estimator_type, class_node=class_node)
# Store
out['model']=model
# Return
return(out)
# %% Constraint-based Structure Learning
def _constraintsearch(df, significance_level=0.05, verbose=3):
"""Contrain search.
PC PDAG construction is only guaranteed to work under the assumption that the
identified set of independencies is *faithful*, i.e. there exists a DAG that
exactly corresponds to it. Spurious dependencies in the data set can cause
the reported independencies to violate faithfulness. It can happen that the
estimated PDAG does not have any faithful completions (i.e. edge orientations
that do not introduce new v-structures). In that case a warning is issued.
test_conditional_independence() returns a triple (chi2, p_value, sufficient_data),
consisting of the computed chi2 test statistic, the p_value of the test, and a heuristic
flag that indicates whether the sample size was sufficient.
The p_value is the probability of observing the computed chi2 statistic (or an even higher chi2 value),
given the null hypothesis that X and Y are independent given Zs.
This can be used to make independence judgements, at a given level of significance.
DAG (pattern) construction
With a method for independence testing at hand, we can construct a DAG from the data set in three steps:
1. Construct an undirected skeleton - `estimate_skeleton()`
2. Orient compelled edges to obtain a partially directed acyclic graph (PDAG; I-equivalence class of DAGs) - `skeleton_to_pdag()`
3. Extend DAG pattern to a DAG by conservatively orienting the remaining edges in some way - `pdag_to_dag()`
The first two steps form the so-called PC algorithm, see [2], page 550. PDAGs are `DirectedGraph`s, that may contain both-way edges, to indicate that the orientation for the edge is not determined.
"""
out = {}
# Set search algorithm
model = ConstraintBasedEstimator(df)
# Some checks for dependency
# print(_is_independent(est, 'Sprinkler', 'Rain', significance_level=significance_level))
# print(_is_independent(est, 'Cloudy', 'Rain', significance_level=significance_level))
# print(_is_independent(est, 'Sprinkler', 'Rain', ['Wet_Grass'], significance_level=significance_level))
# Estimate using chi2
[skel, seperating_sets] = model.build_skeleton(significance_level=significance_level)
if verbose>=4: print("Undirected edges: ", skel.edges())
pdag = model.skeleton_to_pdag(skel, seperating_sets)
if verbose>=4: print("PDAG edges: ", pdag.edges())
dag = pdag.to_dag()
if verbose>=4: print("DAG edges: ", dag.edges())
out['undirected'] = skel
out['undirected_edges'] = skel.edges()
out['pdag'] = pdag
out['pdag_edges'] = pdag.edges()
out['dag'] = dag
out['dag_edges'] = dag.edges()
# Search using "estimate()" method provides a shorthand for the three steps above and directly returns a "BayesianModel"
best_model = model.estimate(significance_level=significance_level)
out['model'] = best_model
if verbose>=4: print(best_model.edges())
return(out)
# %% hillclimbsearch
def _hillclimbsearch(df, scoretype='bic', black_list=None, white_list=None, max_indegree=None, tabu_length=100, epsilon=1e-4, max_iter=1e6, bw_list_method='edges', fixed_edges=set(), verbose=3):
"""Heuristic hill climb searches for DAGs, to learn network structure from data. `estimate` attempts to find a model with optimal score.
Description
-----------
Performs local hill climb search to estimates the `DAG` structure
that has optimal score, according to the scoring method supplied in the constructor.
Starts at model `start` and proceeds by step-by-step network modifications
until a local maximum is reached. Only estimates network structure, no parametrization.
Once more nodes are involved, one needs to switch to heuristic search.
HillClimbSearch implements a greedy local search that starts from the DAG
"start" (default: disconnected DAG) and proceeds by iteratively performing
single-edge manipulations that maximally increase the score.
The search terminates once a local maximum is found.
For details on scoring see Koller & Friedman, Probabilistic Graphical Models, Section 18.4.3.3 (page 818).
If a number `max_indegree` is provided, only modifications that keep the number
of parents for each node below `max_indegree` are considered. A list of
edges can optionally be passed as `black_list` or `white_list` to exclude those
edges or to limit the search.
"""
out={}
# Set scoring type
scoring_method = _SetScoringType(df, scoretype, verbose=verbose)
# Set search algorithm
model = HillClimbSearch(df)
# Compute best DAG
if bw_list_method=='edges':
if (black_list is not None) or (white_list is not None):
if verbose>=3: print('[bnlearn] >Filter edges based on black_list/white_list')
# best_model = model.estimate()
best_model = model.estimate(scoring_method=scoring_method, max_indegree=max_indegree, tabu_length=tabu_length, epsilon=epsilon, max_iter=max_iter, black_list=black_list, white_list=white_list, fixed_edges=fixed_edges, show_progress=False)
else:
# At this point, variables are readily filtered based on bw_list_method or not (if nothing defined).
best_model = model.estimate(scoring_method=scoring_method, max_indegree=max_indegree, tabu_length=tabu_length, epsilon=epsilon, max_iter=max_iter, fixed_edges=fixed_edges, show_progress=False)
# Store
out['model']=best_model
# Return
return(out)
# %% ExhaustiveSearch
def _exhaustivesearch(df, scoretype='bic', return_all_dags=False, verbose=3):
"""Exhaustivesearch.
Description
------------
The first property makes exhaustive search intractable for all but very
small networks, the second prohibits efficient local optimization
algorithms from always finding the optimal structure. Thus, identifying the
ideal structure is often not tractable. Despite this bad news, heuristic
search strategies often yield good results if only a few nodes are involved
(read: fewer than 5).
Parameters
----------
df : pandas DataFrame object
A DataFrame object with column names same as the variable names of network.
scoretype : str, (default : 'bic')
Scoring function for the search spaces.
'bic', 'k2', 'bdeu'
return_all_dags : Bool, (default: False)
Return all possible DAGs.
verbose : int, (default : 3)
0:None, 1:Error, 2:Warning, 3:Info (default), 4:Debug, 5:Trace
Returns
-------
None.
"""
if df.shape[1]>15 and verbose>=3:
print('[bnlearn] >Warning: Structure learning with more than 15 nodes is computationally not feasible with exhaustivesearch. Use hillclimbsearch or constraintsearch instead!!') # noqa
out={}
# Set scoring type
scoring_method = _SetScoringType(df, scoretype, verbose=verbose)
# Exhaustive search across all dags
model = ExhaustiveSearch(df, scoring_method=scoring_method)
# Compute best DAG
best_model = model.estimate()
# Store
out['model']=best_model
# Compute all possible DAGs
if return_all_dags:
out['scores']=[]
out['dag']=[]
# print("\nAll DAGs by score:")
for [score, dag] in reversed(model.all_scores()):
out['scores'].append(score)
out['dag'].append(dag)
# print(score, dag.edges())
plt.plot(out['scores'])
plt.show()
return(out)
# %% Set scoring type
def _SetScoringType(df, scoretype, verbose=3):
if verbose>=3: print('[bnlearn] >Set scoring type at [%s]' %(scoretype))
if scoretype=='bic':
scoring_method = BicScore(df)
elif scoretype=='k2':
scoring_method = K2Score(df)
elif scoretype=='bdeu':
scoring_method = BDeuScore(df, equivalent_sample_size=5)
return(scoring_method)
# %%
def _is_independent(model, X, Y, Zs=[], significance_level=0.05):
return model.test_conditional_independence(X, Y, Zs)[1] >= significance_level
| 47
| 377
| 0.667289
|
2f72a8ab83efe475c38b562981e4f04f9ee82ac5
| 30,103
|
py
|
Python
|
client/python/lib/tests/test_tensors.py
|
Xaenalt/model_server
|
f977dbf1246ebf85e960ca058e814deac7c6a16c
|
[
"Apache-2.0"
] | 305
|
2018-10-01T12:41:28.000Z
|
2020-04-24T10:36:08.000Z
|
client/python/lib/tests/test_tensors.py
|
Xaenalt/model_server
|
f977dbf1246ebf85e960ca058e814deac7c6a16c
|
[
"Apache-2.0"
] | 199
|
2020-04-29T08:43:21.000Z
|
2022-03-29T09:05:52.000Z
|
client/python/lib/tests/test_tensors.py
|
Xaenalt/model_server
|
f977dbf1246ebf85e960ca058e814deac7c6a16c
|
[
"Apache-2.0"
] | 80
|
2020-04-29T14:54:41.000Z
|
2022-03-30T14:50:29.000Z
|
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from tensorflow.core.framework.tensor_shape_pb2 import TensorShapeProto
from tensorflow.core.framework.tensor_pb2 import TensorProto
from tensorflow.core.framework.types_pb2 import DataType
from ovmsclient.tfs_compat.grpc.tensors import TENSOR_TO_NP_MAP, make_ndarray, make_tensor_proto
import numpy as np
shape = TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)])
def test_make_ndarray_valid_int():
tensor_proto = TensorProto(tensor_shape=shape, dtype=DataType.DT_INT8,
tensor_content=bytes([1, 2, 3]))
array = make_ndarray(tensor_proto)
assert array.tolist() == [1, 2, 3]
assert array.dtype == np.int8
def test_make_ndarray_valid_float():
content = [1.0, 2.0, 3.0]
np_content = np.array(content, dtype=np.float32)
tensor_proto = TensorProto(tensor_shape=shape, dtype=DataType.DT_FLOAT,
tensor_content=np_content.tobytes())
array = make_ndarray(tensor_proto)
assert array.tolist() == [1.0, 2.0, 3.0]
assert array.dtype == np.float32
def test_make_ndarray_valid_3_dims_shape():
content = [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]
np_content = np.array(content, dtype=np.float32)
_shape = TensorShapeProto(dim=[TensorShapeProto.Dim(size=1), TensorShapeProto.Dim(size=2),
TensorShapeProto.Dim(size=3)])
tensor_proto = TensorProto(tensor_shape=_shape, dtype=DataType.DT_FLOAT,
tensor_content=np_content.tobytes())
array = make_ndarray(tensor_proto)
assert array.tolist() == [[[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]]
assert array.dtype == np.float32
dtype_4_bytes = 'S4'
def test_make_ndarray_valid_string():
hex_string = "11111111"
tensor_proto = TensorProto(tensor_shape=shape, dtype=DataType.DT_STRING,
string_val=[bytes.fromhex(hex_string)])
array = make_ndarray(tensor_proto)
assert array.tolist() == [b'\x11\x11\x11\x11', b'\x11\x11\x11\x11', b'\x11\x11\x11\x11']
assert array.dtype == dtype_4_bytes
def test_make_ndarray_valid_no_content():
tensor_proto = TensorProto(tensor_shape=shape, dtype=DataType.DT_INT8)
array = make_ndarray(tensor_proto)
assert array.tolist() == [0, 0, 0]
assert array.dtype == np.int8
dtype_1_byte = 'S1'
def test_make_ndarray_valid_no_content_string():
tensor_proto = TensorProto(tensor_shape=shape, dtype=DataType.DT_STRING)
array = make_ndarray(tensor_proto)
assert array.tolist() == [b'', b'', b'']
assert array.dtype == dtype_1_byte
def test_make_ndarray_valid_no_content_string_with_other_type_content():
tensor_proto = TensorProto(tensor_shape=shape, dtype=DataType.DT_STRING)
array = make_ndarray(tensor_proto)
assert array.tolist() == [b'', b'', b'']
assert array.dtype == dtype_1_byte
def test_make_ndarray_invalid_type():
tensor_proto = TensorProto(tensor_shape=shape)
tensor_proto.dtype = 0
with pytest.raises(TypeError) as exception_info:
make_ndarray(tensor_proto)
exception = exception_info.value
assert str(exception) == "Unsupported tensor type: 0"
def test_make_ndarray_invalid_no_shape():
tensor_proto = TensorProto(dtype=DataType.DT_INT8, tensor_content=bytes([1, 2, 3]))
with pytest.raises(ValueError) as exception_info:
make_ndarray(tensor_proto)
exception = exception_info.value
assert str(exception) == "cannot reshape array of size 3 into shape ()"
def test_make_ndarray_invalid_shape_does_not_match():
tensor_proto = TensorProto(tensor_shape=shape, dtype=DataType.DT_INT8,
tensor_content=bytes([1, 2, 3, 4]))
with pytest.raises(ValueError) as exception_info:
make_ndarray(tensor_proto)
exception = exception_info.value
assert str(exception) == "cannot reshape array of size 4 into shape (3,)"
@pytest.mark.parametrize("params, expected_shape, expected_dtype", [
({"values": [1, 2, 3]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_INT32
),
({"values": [1, 2, 3], "shape": [3]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_INT32
),
({"values": [1, 2, 3], "dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_INT8
),
({"values": [1, 2, 3], "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_FLOAT
),
({"values": [1, 2, 3], "shape": [3], "dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_INT8
),
({"values": np.array([1, 2, 3])},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_INT64
),
({"values": np.array([1, 2, 3]), "shape": [3]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_INT64
),
({"values": np.array([1, 2, 3]), "dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_INT8
),
({"values": np.array([1, 2, 3]), "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_FLOAT
),
({"values": np.array([1, 2, 3]), "shape": [3], "dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_INT8
),
({"values": np.array([1, 2, 3], dtype=np.int64), "shape": [3], "dtype": DataType.DT_INT16},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_INT16
),
({"values": np.array([1, 2, 3], dtype=np.int16), "shape": [3], "dtype": DataType.DT_INT64},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_INT64
),
])
def test_make_tensor_proto_valid_int(params, expected_shape, expected_dtype):
tensor_proto = make_tensor_proto(**params)
np_dtype = TENSOR_TO_NP_MAP.get(expected_dtype)
np_values = np.array(params["values"], dtype=np_dtype)
assert tensor_proto.tensor_content == np_values.tobytes()
assert tensor_proto.dtype == expected_dtype
assert tensor_proto.tensor_shape == expected_shape
@pytest.mark.parametrize("params, expected_shape, expected_dtype", [
({"values": []},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=0)]), DataType.DT_FLOAT
),
({"values": [], "shape": []},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=0)]), DataType.DT_FLOAT
),
({"values": [], "shape": [0]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=0)]), DataType.DT_FLOAT
),
({"values": [], "dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=0)]), DataType.DT_INT8
),
({"values": [], "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=0)]), DataType.DT_FLOAT
),
({"values": [], "shape": [], "dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=0)]), DataType.DT_INT8
),
({"values": [], "shape": [0], "dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=0)]), DataType.DT_INT8
),
])
def test_make_tensor_proto_valid_empty_list(params, expected_shape, expected_dtype):
tensor_proto = make_tensor_proto(**params)
np_dtype = TENSOR_TO_NP_MAP.get(expected_dtype)
np_values = np.array(params["values"], dtype=np_dtype)
assert tensor_proto.tensor_content == np_values.tobytes()
assert tensor_proto.dtype == expected_dtype
assert tensor_proto.tensor_shape == expected_shape
@pytest.mark.parametrize("params, expected_shape, expected_dtype", [
({"values": [[], [], []]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3), TensorShapeProto.Dim(size=0)]),
DataType.DT_FLOAT
),
({"values": [[], [], []], "shape": [3, 0]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3), TensorShapeProto.Dim(size=0)]),
DataType.DT_FLOAT
),
({"values": [[], [], []], "dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3), TensorShapeProto.Dim(size=0)]),
DataType.DT_INT8
),
({"values": [[], [], []], "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3), TensorShapeProto.Dim(size=0)]),
DataType.DT_FLOAT
),
({"values": [[], [], []], "shape": [3, 0], "dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3), TensorShapeProto.Dim(size=0)]),
DataType.DT_INT8
),
])
def test_make_tensor_proto_valid_empty_list_of_empty_lists(params, expected_shape, expected_dtype):
tensor_proto = make_tensor_proto(**params)
np_dtype = TENSOR_TO_NP_MAP.get(expected_dtype)
np_values = np.array(params["values"], dtype=np_dtype)
assert tensor_proto.tensor_content == np_values.tobytes()
assert tensor_proto.dtype == expected_dtype
assert tensor_proto.tensor_shape == expected_shape
@pytest.mark.parametrize("params, expected_shape, expected_dtype", [
({"values": [1.0, 2.0, 3.0]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_FLOAT
),
({"values": [1.0, 2.0, 3.0], "shape": [3]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_FLOAT
),
({"values": [1.0, 2.0, 3.0], "dtype": DataType.DT_INT16},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_INT16
),
({"values": [1.0, 2.0, 3.0], "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_FLOAT
),
({"values": [1.0, 2.0, 3.0], "shape": [3], "dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_INT8
),
({"values": [1.0, 2.0, 3.0], "shape": [3], "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]), DataType.DT_FLOAT
),
])
def test_make_tensor_proto_valid_float(params, expected_shape, expected_dtype):
tensor_proto = make_tensor_proto(**params)
np_dtype = TENSOR_TO_NP_MAP.get(expected_dtype)
np_values = np.array(params["values"], dtype=np_dtype)
assert tensor_proto.tensor_content == np_values.tobytes()
assert tensor_proto.dtype == expected_dtype
assert tensor_proto.tensor_shape == expected_shape
@pytest.mark.parametrize("params, expected_shape, expected_dtype, expected_field", [
({"values": 5.0},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_FLOAT,
"float_val"
),
({"values": 5.0, "shape": [1]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_FLOAT,
"float_val"
),
({"values": 5.0, "dtype": DataType.DT_INT32},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_INT32,
"int_val"
),
({"values": 5.0, "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_FLOAT,
"float_val"
),
({"values": 5.0, "shape": [1], "dtype": DataType.DT_INT32},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_INT32,
"int_val"
),
({"values": 5.0, "shape": [1], "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_FLOAT,
"float_val"
),
({"values": np.float32(5.0)},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_FLOAT,
"float_val"
),
({"values": np.float32(5.0), "shape": [1]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_FLOAT,
"float_val"
),
({"values": np.float32(5.0), "dtype": DataType.DT_INT32},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_INT32,
"int_val"
),
({"values": np.float32(5.0), "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_FLOAT,
"float_val"
),
({"values": np.float32(5.0), "shape": [1], "dtype": DataType.DT_INT32},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_INT32,
"int_val"
),
({"values": np.float32(5.0), "shape": [1], "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_FLOAT,
"float_val"
),
({"values": np.float32(5.0), "shape": [1], "dtype": DataType.DT_DOUBLE},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_DOUBLE,
"double_val"
),
])
def test_make_tensor_proto_valid_scalar(params, expected_shape, expected_dtype, expected_field):
tensor_proto = make_tensor_proto(**params)
np_dtype = TENSOR_TO_NP_MAP.get(expected_dtype)
np_values = np.array(params["values"], dtype=np_dtype)
assert tensor_proto.__getattribute__(expected_field) == np_values
assert tensor_proto.dtype == expected_dtype
assert tensor_proto.tensor_shape == expected_shape
@pytest.mark.parametrize("params, expected_shape, expected_dtype, expected_field", [
({"values": bytes([0x13, 0x00, 0x00, 0x00, 0x08])},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_STRING,
"string_val"
),
({"values": bytes([0x13, 0x00, 0x00, 0x00, 0x08]), "shape": [1]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_STRING,
"string_val"
),
({"values": bytes([0x13, 0x00, 0x00, 0x00, 0x08]), "dtype": DataType.DT_STRING},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_STRING,
"string_val"
),
({"values": bytes([0x13, 0x00, 0x00, 0x00, 0x08]), "dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=5)]), DataType.DT_INT8,
"int_val"
),
({"values": bytes([0x13, 0x00, 0x00, 0x00, 0x08]), "shape": [5], "dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=5)]), DataType.DT_INT8,
"int_val"
),
({"values": bytes([0x13, 0x00, 0x00, 0x00, 0x08]), "shape": [1], "dtype": DataType.DT_STRING},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]), DataType.DT_STRING,
"string_val"
),
])
def test_make_tensor_proto_valid_string(params, expected_shape, expected_dtype, expected_field):
tensor_proto = make_tensor_proto(**params)
if expected_field == "string_val":
assert tensor_proto.__getattribute__(expected_field) == [params["values"]]
else:
assert (tensor_proto.__getattribute__(expected_field)
== np.frombuffer(params["values"],
dtype=TENSOR_TO_NP_MAP.get(expected_dtype)).tolist())
assert tensor_proto.dtype == expected_dtype
assert tensor_proto.tensor_shape == expected_shape
def test_make_tensor_proto_valid_string_to_int_dtype():
values = bytes([0x13, 0x00, 0x00, 0x00, 0x08, 0x00])
tensor_proto = make_tensor_proto(values=values, shape=[3], dtype=DataType.DT_INT16)
np_values = np.frombuffer(np.array(values).tobytes(), dtype=np.int16).tolist()
assert tensor_proto.int_val == np_values
assert tensor_proto.dtype == DataType.DT_INT16
assert tensor_proto.tensor_shape == TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)])
@pytest.mark.parametrize("params, expected_shape, expected_dtype, expected_field", [
({"values": [bytes([0x13, 0x00, 0x00, 0x00, 0x08]), bytes([0x13, 0x00, 0x00, 0x00, 0x08])]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2)]), DataType.DT_STRING,
"string_val"
),
({"values": [bytes([0x13, 0x00, 0x00, 0x00, 0x08]), bytes([0x13, 0x00, 0x00, 0x00, 0x08])],
"shape": [2]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2)]), DataType.DT_STRING,
"string_val"
),
({"values": [bytes([0x13, 0x00, 0x00, 0x00, 0x08]), bytes([0x13, 0x00, 0x00, 0x00, 0x08])],
"dtype": DataType.DT_STRING},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2)]), DataType.DT_STRING,
"string_val"
),
({"values": [bytes([0x13, 0x00, 0x00, 0x00, 0x08]), bytes([0x13, 0x00, 0x00, 0x00, 0x08])],
"dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=10)]), DataType.DT_INT8,
"tensor_content"
),
({"values": [bytes([0x13, 0x00, 0x00, 0x00, 0x08]), bytes([0x13, 0x00, 0x00, 0x00, 0x08])],
"shape": [10], "dtype": DataType.DT_INT8},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=10)]), DataType.DT_INT8,
"tensor_content"
),
({"values": [bytes([0x13, 0x00, 0x00, 0x00, 0x08]), bytes([0x13, 0x00, 0x00, 0x00, 0x08])],
"shape": [2], "dtype": DataType.DT_STRING},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2)]), DataType.DT_STRING,
"string_val"
),
])
def test_make_tensor_proto_valid_string_batch_size_2(params, expected_shape, expected_dtype,
expected_field):
tensor_proto = make_tensor_proto(**params)
np_values = np.array(params["values"])
if expected_field == "string_val":
assert tensor_proto.__getattribute__(expected_field) == params["values"]
elif expected_field == "tensor_content":
assert tensor_proto.__getattribute__(expected_field) == np_values.tobytes()
assert tensor_proto.dtype == expected_dtype
assert tensor_proto.tensor_shape == expected_shape
@pytest.mark.parametrize("params, expected_shape, expected_dtype", [
({"values": [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_FLOAT,
),
({"values": [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]], "shape": [2, 3]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_FLOAT,
),
({"values": [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]], "shape": [6]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=6)]), DataType.DT_FLOAT,
),
({"values": [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]], "dtype": DataType.DT_INT32},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_INT32,
),
({"values": [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]], "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_FLOAT,
),
({"values": [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]], "shape": [2, 3], "dtype": DataType.DT_INT32},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_INT32,
),
({"values": [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]], "shape": [2, 3], "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_FLOAT,
),
({"values": [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]], "shape": [6], "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=6)]), DataType.DT_FLOAT,
),
({"values": np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]])},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_DOUBLE,
),
({"values": np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]), "shape": [2, 3]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_DOUBLE,
),
({"values": np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]), "shape": [6]},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=6)]), DataType.DT_DOUBLE,
),
({"values": np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]), "dtype": DataType.DT_INT32},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_INT32,
),
({"values": np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]), "dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_FLOAT,
),
({"values": np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]), "shape": [2, 3],
"dtype": DataType.DT_INT32},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_INT32,
),
({"values": np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]), "shape": [2, 3],
"dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_FLOAT,
),
({"values": np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]), "shape": [6],
"dtype": DataType.DT_FLOAT},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=6)]), DataType.DT_FLOAT,
),
({"values": np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]], dtype=np.float32),
"dtype": DataType.DT_HALF},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_HALF,
),
({"values": np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]], dtype=np.float16),
"dtype": DataType.DT_DOUBLE},
TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)]),
DataType.DT_DOUBLE,
),
])
def test_make_tensor_proto_valid_2_dims_shape(params, expected_shape, expected_dtype):
tensor_proto = make_tensor_proto(**params)
np_dtype = TENSOR_TO_NP_MAP.get(expected_dtype)
np_values = np.array(params["values"], dtype=np_dtype)
assert tensor_proto.tensor_content == np_values.tobytes()
assert tensor_proto.dtype == expected_dtype
assert tensor_proto.tensor_shape == expected_shape
def test_make_tensor_proto_valid_make_ndarray_valid():
values = [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]
np_values = np.array(values, dtype=np.float32)
_shape = TensorShapeProto(dim=[TensorShapeProto.Dim(size=2), TensorShapeProto.Dim(size=3)])
tensor_proto = TensorProto(tensor_shape=_shape, dtype=DataType.DT_FLOAT,
tensor_content=np_values.tobytes())
array = make_ndarray(tensor_proto)
tensor_proto = make_tensor_proto(values=array, dtype=DataType.DT_FLOAT)
assert tensor_proto.tensor_content == np_values.tobytes()
assert tensor_proto.dtype == DataType.DT_FLOAT
assert tensor_proto.tensor_shape == _shape
def test_make_tensor_proto_valid_string_reshape():
values = bytes([0x13, 0x00, 0x00, 0x00, 0x08, 0x00])
tensor_proto = make_tensor_proto(values=[[values, values], [values, values]],
shape=[4], dtype=DataType.DT_STRING)
assert all(val == np.array(values) for val in tensor_proto.string_val)
assert tensor_proto.dtype == DataType.DT_STRING
assert tensor_proto.tensor_shape == TensorShapeProto(dim=[TensorShapeProto.Dim(size=4)])
def test_make_tensor_proto_valid_2D_array_to_bytes():
values = [[1, 2, 3], [4, 5, 6]]
tensor_proto = make_tensor_proto(values=values, shape=[6], dtype=DataType.DT_STRING)
assert tensor_proto.string_val == np.array(values, dtype=np.bytes_).reshape([6]).tolist()
assert tensor_proto.dtype == DataType.DT_STRING
assert tensor_proto.tensor_shape == TensorShapeProto(dim=[TensorShapeProto.Dim(size=6)])
def test_make_tensor_proto_invalid_shape_element_values():
values = [1, 2, 3]
with pytest.raises(TypeError) as exception_info:
make_tensor_proto(values=values, shape=[-3], dtype=None)
exception = exception_info.value
assert str(exception) == "shape type should be list or tuple with unsigned integers"
@pytest.mark.causes_deprecation_warning
def test_make_tensor_proto_invalid_dimensions():
values = [[1.0, 2.0], [1.0, 2.0, 3.0]]
with pytest.raises(ValueError) as exception_info:
make_tensor_proto(values=values, shape=[2, 3], dtype=DataType.DT_FLOAT)
exception = exception_info.value
assert str(exception) == ("argument must be a dense tensor: [[1.0, 2.0], [1.0, 2.0, 3.0]] "
"- got shape [2], but wanted [2, 2]")
def test_make_tensor_proto_invalid_string_to_float_dtype():
values = bytes([0x13, 0x00, 0x00, 0x00, 0x08, 0x00])
with pytest.raises(ValueError) as exception_info:
make_tensor_proto(values=values, shape=None, dtype=DataType.DT_FLOAT)
exception = exception_info.value
assert str(exception) == ("could not cast bytes to <class 'numpy.float32'>. "
"buffer size must be a multiple of element size")
@pytest.mark.causes_deprecation_warning
def test_make_tensor_proto_invalid_string_dimensions():
values = bytes([0x13, 0x00])
with pytest.raises(ValueError) as exception_info:
make_tensor_proto(values=[[values, values, values], [values, values]],
shape=None, dtype=DataType.DT_STRING)
exception = exception_info.value
assert str(exception) == ("argument must be a dense tensor: "
f"{[[values, values, values], [values, values]]} "
"- got shape [2], but wanted [2, 3]")
@pytest.mark.causes_deprecation_warning
def test_make_tensor_proto_invalid_dimensions_2():
values = [[(1, 2, 3)], [(1, 2)], [(1, 2, 3)]]
with pytest.raises(ValueError) as exception_info:
make_tensor_proto(values=values, shape=[2, 3], dtype=DataType.DT_FLOAT)
exception = exception_info.value
assert str(exception) == ("could not cast values to <class 'numpy.float32'>. "
"setting an array element with a sequence.")
@pytest.mark.causes_deprecation_warning
def test_make_tensor_proto_invalid_dimensions_no_shape_provided():
values = [[1, 2, 3], [4, 5]]
with pytest.raises(ValueError) as exception_info:
make_tensor_proto(values=values, shape=None, dtype=DataType.DT_INT8)
exception = exception_info.value
assert str(exception) == ("argument must be a dense tensor: [[1, 2, 3], [4, 5]] "
"- got shape [2], but wanted [2, 3]")
def test_make_tensor_proto_invalid_shape_type():
values = 5.0
with pytest.raises(TypeError) as exception_info:
make_tensor_proto(values=values, shape=0, dtype=DataType.DT_FLOAT)
exception = exception_info.value
assert str(exception) == "shape type should be list or tuple with unsigned integers"
def test_make_tensor_proto_invalid_values_dtype():
values = [np.float128(2.5)]
with pytest.raises(TypeError) as exception_info:
make_tensor_proto(values=values, shape=None, dtype=None)
exception = exception_info.value
assert str(exception) == "provided values type is not valid"
def test_make_tensor_proto_invalid_dtype_value():
values = [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]
with pytest.raises(TypeError) as exception_info:
make_tensor_proto(values=values, shape=None, dtype=int)
exception = exception_info.value
assert str(exception) == "<class 'int'> is not valid dtype value"
def test_make_tensor_proto_invalid_values_type():
values = (1, 2, 3)
with pytest.raises(TypeError) as exception_info:
make_tensor_proto(values=values, shape=None, dtype=None)
exception = exception_info.value
assert str(exception) == "values type should be (list, np.ndarray, scalar), but is tuple"
def test_make_tensor_proto_invalid_string_2D_array():
values = bytes([0x13, 0x00, 0x00, 0x00, 0x08, 0x00])
with pytest.raises(ValueError) as exception_info:
make_tensor_proto(values=[[values, values], [values, values]],
shape=None, dtype=DataType.DT_STRING)
exception = exception_info.value
assert str(exception) == "bytes values with dtype DT_STRING must be in shape [N]"
def test_make_tensor_proto_invalid_string_reshape():
values = bytes([0x13, 0x00, 0x00, 0x00, 0x08, 0x00])
with pytest.raises(ValueError) as exception_info:
make_tensor_proto(values=values, shape=[6], dtype=DataType.DT_STRING)
exception = exception_info.value
assert str(exception) == "cannot reshape array of size 1 into shape (6,)"
def test_make_tensor_proto_invalid_string_reshape_2():
values = bytes([0x13, 0x00, 0x00, 0x00, 0x08, 0x00])
with pytest.raises(ValueError) as exception_info:
make_tensor_proto(values=values, shape=[2, 3], dtype=DataType.DT_STRING)
exception = exception_info.value
assert str(exception) == "bytes values with dtype DT_STRING must be in shape [N]"
def test_make_tensor_proto_invalid_string_2D_array_with_shape():
values = bytes([0x13, 0x00, 0x00, 0x00, 0x08, 0x00])
with pytest.raises(ValueError) as exception_info:
make_tensor_proto(values=[[values, values], [values, values]],
shape=[2, 2], dtype=DataType.DT_STRING)
exception = exception_info.value
assert str(exception) == "bytes values with dtype DT_STRING must be in shape [N]"
def test_make_tensor_proto_invalid_int_reshape():
values = [1, 2, 3]
with pytest.raises(ValueError) as exception_info:
make_tensor_proto(values=values, shape=[4], dtype=DataType.DT_INT8)
exception = exception_info.value
assert str(exception) == "cannot reshape array of size 3 into shape (4,)"
def test_make_tensor_proto_invalid_empty_list_of_empty_lists_reshape():
values = [[], [], []]
with pytest.raises(ValueError) as exception_info:
make_tensor_proto(values=values, shape=[4, 2], dtype=DataType.DT_INT8)
exception = exception_info.value
assert str(exception) == "cannot reshape array of size 0 into shape (4,2)"
def test_make_tensor_proto_invalid_dtype_provided():
values = [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]
with pytest.raises(ValueError) as exception_info:
make_tensor_proto(values=values, shape=None, dtype=DataType.DT_STRING)
exception = exception_info.value
assert str(exception) == "bytes values with dtype DT_STRING must be in shape [N]"
| 44.204112
| 99
| 0.67196
|
5603d47cbe5ce5946b8e055c4cc8ce051e7100ea
| 1,320
|
py
|
Python
|
app/core/models.py
|
aliplutus/recipe-app-api
|
0d5cd9263616152d7d5ec495c4e5bc31660b8383
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
aliplutus/recipe-app-api
|
0d5cd9263616152d7d5ec495c4e5bc31660b8383
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
aliplutus/recipe-app-api
|
0d5cd9263616152d7d5ec495c4e5bc31660b8383
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
# note: **extra_fields
# this is in order to make our function more flexible
        # so when we add more fields we don't need to explicitly write them.
"""create and save new user"""
if not email:
raise ValueError('users must have email.')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""create + save new superuser"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model (email instead of user name). """
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
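# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal example of how the custom manager above is typically exercised from a test
# or a Django shell. It assumes a configured Django project with a migrated database;
# the email/password literals are placeholders, not values taken from the source.
def _example_create_users():
    user = User.objects.create_user(email='test@example.com', password='testpass123')
    superuser = User.objects.create_superuser('admin@example.com', 'testpass123')
    assert user.check_password('testpass123')
    assert superuser.is_staff and superuser.is_superuser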
| 35.675676
| 90
| 0.674242
|
8f9fca02f76f3e64bcd613e7d0d57f79e3dacfc9
| 6,386
|
py
|
Python
|
seekr2/tests/test_matrix_stability.py
|
seekrcentral/seekr2
|
45154d477147f9278b97491a6270ff31435c837b
|
[
"MIT"
] | 1
|
2021-07-14T16:13:17.000Z
|
2021-07-14T16:13:17.000Z
|
seekr2/tests/test_matrix_stability.py
|
seekrcentral/seekr2
|
45154d477147f9278b97491a6270ff31435c837b
|
[
"MIT"
] | 10
|
2021-05-26T15:29:46.000Z
|
2021-09-20T18:23:20.000Z
|
seekr2/tests/test_matrix_stability.py
|
seekrcentral/seekr2
|
45154d477147f9278b97491a6270ff31435c837b
|
[
"MIT"
] | 1
|
2021-05-22T01:15:46.000Z
|
2021-05-22T01:15:46.000Z
|
"""
test_matrix_stability.py
For systems with very slow kinetics, the conventional matrix solvers seem
to have trouble computing first passage times due to poor matrix conditioning.
Recreates the problem and tests matrix solutions that remain stable for
milestoning matrices with long timescales.
"""
import os
import pytest
import numpy as np
import seekr2.prepare as prepare
import seekr2.toy.smoluchowski as smol
import seekr2.tests.create_model_input as create_model_input
import seekr2.tests.test_analyze as test_analyze
def make_unstable_matrix_smoluchowski_mmvt_model(rootdir, num_input_anchors):
"""
Create a milestoning model that will result in an unstable matrix
"""
smol_input = create_model_input.create_smoluchowski_mmvt_model_input(
rootdir, num_input_anchors+1)
os.chdir(rootdir)
smol_model, model_xml_path = prepare.prepare(
smol_input, force_overwrite=False)
model_dir = os.path.dirname(model_xml_path)
smol_model.anchor_rootdir = os.path.abspath(model_dir)
return smol_model
def make_mmvt_smol_model(rootdir, num_input_anchors):
"""
    Obtain a Smoluchowski MMVT milestoning model that will produce an
    unstable (poorly conditioned) matrix.
"""
model = make_unstable_matrix_smoluchowski_mmvt_model(rootdir, num_input_anchors)
return model
def make_unstable_matrix_smoluchowski_elber_model(rootdir, num_input_anchors):
"""
Create a milestoning model that will result in an unstable matrix
"""
smol_input = create_model_input.create_smoluchowski_elber_model_input(
rootdir, num_input_anchors+1)
os.chdir(rootdir)
smol_model, model_xml_path = prepare.prepare(
smol_input, force_overwrite=False)
model_dir = os.path.dirname(model_xml_path)
smol_model.anchor_rootdir = os.path.abspath(model_dir)
return smol_model
def make_elber_smol_model(rootdir, num_input_anchors):
"""
    Obtain a Smoluchowski Elber milestoning model that will produce an
    unstable (poorly conditioned) matrix.
"""
model = make_unstable_matrix_smoluchowski_elber_model(rootdir, num_input_anchors)
return model
def get_benchmark_time(potential_energy_function, num_input_anchors):
"""
Find the exact solution for a Smoluchowski system for comparison with
matrix results.
"""
milestones = [1.0]
absorbing_boundary = num_input_anchors
calc = smol.SmoluchowskiCalculation1d(
potential_energy_function, milestones=milestones,
absorbing_boundary=absorbing_boundary, n=401)
outer_surface_flux = 4.0 * np.pi * absorbing_boundary**2 \
* calc.regions[1].J_outward
w_a = calc.potential_energy_function.evaluate_energy(calc.regions[1].a)
inner_region_height = smol.expBetaW(w_a, calc.beta)
outer_region_height = calc.regions[1].u_r_outward[0]
inner_region_volume = calc.regions[0].partition_function \
* (outer_region_height / inner_region_height)
outer_region_volume = 1.0
time = (inner_region_volume + outer_region_volume) / outer_surface_flux
return time
def make_analytic_pi_alpha(num_regions, potential_energy_function):
milestones = list(range(1, num_regions))
absorbing_boundary = num_regions
calc = smol.SmoluchowskiCalculation1d(
potential_energy_function, milestones=milestones,
absorbing_boundary=absorbing_boundary)
pi_alpha = np.zeros((1, len(calc.regions)))
for i, region in enumerate(calc.regions):
pi_alpha[0,i] = region.partition_function / calc.partition_function
return pi_alpha
def compare_solvers_analytical_calculated(tmpdir, potential_energy_function):
lowest = 5 # 55
highest = 10 # 60
interval = 5
pre_equilibrium_approx = False
print("i benchmark_time mmvt_time")
for i in range(lowest, highest+interval, interval):
mmvt_rootdir = os.path.join(
tmpdir, "smol_mmvt_matrix_problem_{}".format(i))
os.mkdir(mmvt_rootdir)
elber_rootdir = os.path.join(
tmpdir, "smol_elber_matrix_problem_{}".format(i))
os.mkdir(elber_rootdir)
num_input_anchors = i
mmvt_model = make_mmvt_smol_model(mmvt_rootdir, num_input_anchors)
analytic_pi_alpha = make_analytic_pi_alpha(i, potential_energy_function)
elber_model = make_elber_smol_model(elber_rootdir, num_input_anchors)
benchmark_time = get_benchmark_time(
potential_energy_function, num_input_anchors)
mmvt_model.k_on_info = None
mmvt_time, dummy, mmvt_analysis = \
test_analyze.make_smoluchowski_mmvt_analysis(
mmvt_model, potential_energy_function,
pre_equilibrium_approx=pre_equilibrium_approx)
elber_model.k_on_info = None
#elber_time, dummy, elber_analysis = \
# test_analyze.make_smoluchowski_elber_analysis(
# elber_model, potential_energy_function)
elber_time = 0.0
#print(i, benchmark_time, elber_time, mmvt_time)
assert np.isclose(benchmark_time, mmvt_time, rtol=1e-2)
return
def test_make_method_comparison_data_for_plotting(tmpdir):
potential_energy_function = smol.LinearPotentialEnergyFunction(a=1.0)
compare_solvers_analytical_calculated(tmpdir, potential_energy_function)
@pytest.mark.skip()
def test_scipy_matrix_solver(tmpdir):
#potential_energy_function = smol.FlatPotentialEnergyFunction()
potential_energy_function = smol.LinearPotentialEnergyFunction(a=1.0)
rootdir = os.path.join(tmpdir, "smol_matrix_problem")
os.mkdir(rootdir)
num_input_anchors = 5
#model = make_mmvt_smol_model(rootdir, num_input_anchors)
model = make_elber_smol_model(rootdir, num_input_anchors)
benchmark_time = get_benchmark_time(
potential_energy_function, num_input_anchors)
#print("benchmark_time:", benchmark_time)
model.k_on_info = None
#mmvt_time, dummy1, my_analysis_mmvt = test_analyze.make_smoluchowski_mmvt_analysis(
# model, potential_energy_function)
elber_time, dummy2, my_analysis_elber = test_analyze.make_smoluchowski_elber_analysis(
model, potential_energy_function)
#print(benchmark_time, elber_time, mmvt_time)
assert np.isclose(benchmark_time, elber_time, rtol=1e-2)
| 40.675159
| 90
| 0.74632
|
161369473860afb8abed3dac64752fe56d260a8d
| 513
|
py
|
Python
|
Level1/Lessons12944/12944_njy.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | null | null | null |
Level1/Lessons12944/12944_njy.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | null | null | null |
Level1/Lessons12944/12944_njy.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | 1
|
2021-04-05T07:35:59.000Z
|
2021-04-05T07:35:59.000Z
|
# Compute the average
def solution(arr):
return sum(arr)/len(arr)
'''
Test 1 〉 Passed (0.00ms, 10.2MB)
Test 2 〉 Passed (0.01ms, 10.2MB)
Test 3 〉 Passed (0.01ms, 10.1MB)
Test 4 〉 Passed (0.00ms, 10.2MB)
Test 5 〉 Passed (0.01ms, 10.1MB)
Test 6 〉 Passed (0.00ms, 10.2MB)
Test 7 〉 Passed (0.00ms, 10.2MB)
Test 8 〉 Passed (0.00ms, 10.2MB)
Test 9 〉 Passed (0.00ms, 10MB)
Test 10 〉 Passed (0.00ms, 10.2MB)
Test 11 〉 Passed (0.00ms, 10.2MB)
Test 12 〉 Passed (0.01ms, 10MB)
Test 13 〉 Passed (0.00ms, 10.2MB)
Test 14 〉 Passed (0.00ms, 10.2MB)
Test 15 〉 Passed (0.00ms, 10.1MB)
Test 16 〉 Passed (0.01ms, 10.1MB)
'''
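# Usage example (added for clarity): solution([1, 2, 3, 4]) returns 2.5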
| 24.428571
| 28
| 0.580897
|
4af55c82e1e50ac6f4b642dd3db9faf2bf109e51
| 674
|
py
|
Python
|
project/forms/tFA.py
|
DenisZhmakin/VK-Music-Downloader
|
217d54f462b2da74776eec47bf1c355c54b017ab
|
[
"Unlicense"
] | null | null | null |
project/forms/tFA.py
|
DenisZhmakin/VK-Music-Downloader
|
217d54f462b2da74776eec47bf1c355c54b017ab
|
[
"Unlicense"
] | 1
|
2021-12-20T03:42:21.000Z
|
2021-12-20T09:57:57.000Z
|
project/forms/tFA.py
|
DenisZhmakin/VK-Music-Downloader
|
217d54f462b2da74776eec47bf1c355c54b017ab
|
[
"Unlicense"
] | null | null | null |
from PyQt5 import uic
from PyQt5.QtWidgets import QWidget
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QIntValidator
class TwoFactorAuth(QWidget):
code_received = pyqtSignal(str)
def __init__(self):
QWidget.__init__(self)
uic.loadUi("designs/tFA.ui", self)
self.ok_button.clicked.connect(self.ok_button_click)
self.code_lineedit.setValidator(
QIntValidator(100000, 999999, self)
)
def ok_button_click(self):
auth_code = self.code_lineedit.text()
if len(auth_code) == 6:
self.code_received.emit(auth_code)
self.close()
else:
pass
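# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal sketch of how the dialog above might be wired into an application; it assumes
# the "designs/tFA.ui" file referenced in __init__ is present in the working directory.
def _example_run_two_factor_dialog():
    import sys
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    dialog = TwoFactorAuth()
    # The six-digit code typed by the user is delivered through the code_received signal.
    dialog.code_received.connect(lambda code: print("2FA code entered:", code))
    dialog.show()
    sys.exit(app.exec_())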
| 24.962963
| 60
| 0.654303
|
960043c6cf866c107c8ab2b75473178741cc79ed
| 6,242
|
py
|
Python
|
pymc3_ext/smc/sample_smc.py
|
wlad111/pymc3
|
43432834be5bbca72caa32d40a848515eea554a8
|
[
"Apache-2.0"
] | null | null | null |
pymc3_ext/smc/sample_smc.py
|
wlad111/pymc3
|
43432834be5bbca72caa32d40a848515eea554a8
|
[
"Apache-2.0"
] | null | null | null |
pymc3_ext/smc/sample_smc.py
|
wlad111/pymc3
|
43432834be5bbca72caa32d40a848515eea554a8
|
[
"Apache-2.0"
] | null | null | null |
from .smc import SMC
import logging
def sample_smc(
draws=1000,
kernel="metropolis",
n_steps=25,
parallel=False,
start=None,
cores=None,
tune_steps=True,
p_acc_rate=0.99,
threshold=0.5,
epsilon=1.0,
dist_func="absolute_error",
sum_stat=False,
progressbar=False,
model=None,
random_seed=-1,
):
"""
Sequential Monte Carlo based sampling
Parameters
----------
draws : int
        The number of samples to draw from the posterior (i.e. the last stage), and also the number
        of independent chains. Defaults to 1000.
kernel : str
        Kernel method for the SMC sampler. Available options are ``metropolis`` (default) and ``ABC``.
        Use ``ABC`` for likelihood-free inference together with a ``pm.Simulator``.
n_steps : int
        The number of steps of each Markov Chain. If ``tune_steps == True``, ``n_steps`` will be used
        for the first stage, and for the others it will be determined automatically based on the
        acceptance rate and ``p_acc_rate``; the maximum number of steps is ``n_steps``.
parallel : bool
Distribute computations across cores if the number of cores is larger than 1.
Defaults to False.
start : dict, or array of dict
Starting point in parameter space. It should be a list of dict with length `chains`.
When None (default) the starting point is sampled from the prior distribution.
cores : int
The number of chains to run in parallel. If ``None`` (default), it will be automatically
set to the number of CPUs in the system.
tune_steps : bool
Whether to compute the number of steps automatically or not. Defaults to True
p_acc_rate : float
Used to compute ``n_steps`` when ``tune_steps == True``. The higher the value of
``p_acc_rate`` the higher the number of steps computed automatically. Defaults to 0.99.
It should be between 0 and 1.
threshold : float
        Determines the change of beta from stage to stage, i.e. indirectly the number of stages,
the higher the value of `threshold` the higher the number of stages. Defaults to 0.5.
It should be between 0 and 1.
epsilon : float
Standard deviation of the gaussian pseudo likelihood. Only works with `kernel = ABC`
dist_func : str
Distance function. Available options are ``absolute_error`` (default) and
``sum_of_squared_distance``. Only works with ``kernel = ABC``
sum_stat : bool
Whether to use or not a summary statistics. Defaults to False. Only works with
``kernel = ABC``
progressbar : bool
Flag for displaying a progress bar. Defaults to False.
model : Model (optional if in ``with`` context)).
random_seed : int
random seed
Notes
-----
SMC works by moving through successive stages. At each stage the inverse temperature
:math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta` = 0
    we have the prior distribution and when :math:`\beta` = 1 we have the posterior distribution.
So in more general terms we are always computing samples from a tempered posterior that we can
write as:
.. math::
p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta)
A summary of the algorithm is:
1. Initialize :math:`\beta` at zero and stage at zero.
    2. Generate N samples :math:`S_{\beta}` from the prior (because when :math:`\beta = 0` the
       tempered posterior is the prior).
3. Increase :math:`\beta` in order to make the effective sample size equals some predefined
value (we use :math:`Nt`, where :math:`t` is 0.5 by default).
4. Compute a set of N importance weights W. The weights are computed as the ratio of the
likelihoods of a sample at stage i+1 and stage i.
5. Obtain :math:`S_{w}` by re-sampling according to W.
6. Use W to compute the covariance for the proposal distribution.
7. For stages other than 0 use the acceptance rate from the previous stage to estimate the
scaling of the proposal distribution and `n_steps`.
8. Run N Metropolis chains (each one of length `n_steps`), starting each one from a different
sample in :math:`S_{w}`.
9. Repeat from step 3 until :math:`\beta \ge 1`.
10. The final result is a collection of N samples from the posterior.
References
----------
.. [Minson2013] Minson, S. E. and Simons, M. and Beck, J. L., (2013),
Bayesian inversion for finite fault earthquake source models I- Theory and algorithm.
Geophysical Journal International, 2013, 194(3), pp.1701-1726,
`link <https://gji.oxfordjournals.org/content/194/3/1701.full>`__
.. [Ching2007] Ching, J. and Chen, Y. (2007).
Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class
Selection, and Model Averaging. J. Eng. Mech., 10.1061/(ASCE)0733-9399(2007)133:7(816),
816-832. `link <http://ascelibrary.org/doi/abs/10.1061/%28ASCE%290733-9399
%282007%29133:7%28816%29>`__
"""
smc = SMC(
draws=draws,
kernel=kernel,
n_steps=n_steps,
parallel=parallel,
start=start,
cores=cores,
tune_steps=tune_steps,
p_acc_rate=p_acc_rate,
threshold=threshold,
epsilon=epsilon,
dist_func=dist_func,
sum_stat=sum_stat,
progressbar=progressbar,
model=model,
random_seed=random_seed,
)
_log = logging.getLogger("pymc3_ext")
_log.info("Sample initial stage: ...")
stage = 0
smc.initialize_population()
smc.setup_kernel()
smc.initialize_logp()
while smc.beta < 1:
smc.update_weights_beta()
_log.info(
"Stage: {:3d} Beta: {:.3f} Steps: {:3d} Acce: {:.3f}".format(
stage, smc.beta, smc.n_steps, smc.acc_rate
)
)
smc.resample()
smc.update_proposal()
if stage > 0:
smc.tune()
smc.mutate()
stage += 1
if smc.parallel and smc.cores > 1:
smc.pool.close()
smc.pool.join()
trace = smc.posterior_to_trace()
return trace
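# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal example of how sample_smc is typically called; the toy model below is an
# assumption for demonstration only and requires a working pymc3 installation.
def _example_sample_smc():
    import numpy as np
    import pymc3 as pm

    data = np.array([0.1, -0.3, 0.2, 0.5])
    with pm.Model() as model:
        mu = pm.Normal("mu", mu=0.0, sd=1.0)
        pm.Normal("obs", mu=mu, sd=1.0, observed=data)
        # sample_smc is defined above in this module; the model is passed explicitly.
        trace = sample_smc(draws=500, kernel="metropolis", model=model)
    return trace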
| 38.770186
| 100
| 0.644665
|
9a13b36b35c2aca975b304db4bb5cd1876e89eca
| 916
|
py
|
Python
|
src/piece.py
|
proceduraljigsaw/ImpactPuzzle
|
42a76d52056ed84d52a2e7eeb171c579671c9c0d
|
[
"MIT"
] | 3
|
2020-12-27T18:46:30.000Z
|
2021-04-13T11:17:48.000Z
|
src/piece.py
|
proceduraljigsaw/ImpactPuzzle
|
42a76d52056ed84d52a2e7eeb171c579671c9c0d
|
[
"MIT"
] | null | null | null |
src/piece.py
|
proceduraljigsaw/ImpactPuzzle
|
42a76d52056ed84d52a2e7eeb171c579671c9c0d
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 ProceduralJigsaw
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import numpy as np
from tab import Tab
from point import Point
class Piece():
def __init__(self, tabs: Tab):
self.tabs = [tab for tab in tabs if tab]
self.__calc_centroid()
self.neigbors = set([])
def __calc_centroid(self):
cx = [tab.centroid.x for tab in self.tabs]
cy = [tab.centroid.y for tab in self.tabs]
self.centroid = Point(np.mean(cx), np.mean(cy))
def border(self):
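        # For each neighboring piece, collect tabs that the neighbor shares with this piece,
        # or tabs that are non-radial and sit at rad_pos == 0; the set removes duplicates.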
return list({tab for tab in self.tabs for neighbor in self.neigbors if tab in neighbor.tabs or (not tab.radial and tab.rad_pos == 0)})
def addtab(self, tab):
if not tab in self.tabs:
self.tabs.append(tab)
self.__calc_centroid()
def addneighbor(self, neighbor):
self.neigbors.add(neighbor)
| 29.548387
| 142
| 0.646288
|
ab466b5c34a12a7b5caeca126eb9e29f9fc8a871
| 18,496
|
py
|
Python
|
module_sample_name.py
|
openmindednewby/Tool-for-creating-lip-recognition-datasets
|
d8bd97cfa112e8e2fb1f4cca20a7093493e3ed9d
|
[
"MIT"
] | 4
|
2020-07-22T16:10:55.000Z
|
2021-11-25T08:32:34.000Z
|
module_sample_name.py
|
openmindednewby/Tool-for-creating-lip-recognition-datasets
|
d8bd97cfa112e8e2fb1f4cca20a7093493e3ed9d
|
[
"MIT"
] | null | null | null |
module_sample_name.py
|
openmindednewby/Tool-for-creating-lip-recognition-datasets
|
d8bd97cfa112e8e2fb1f4cca20a7093493e3ed9d
|
[
"MIT"
] | 2
|
2020-12-30T22:07:54.000Z
|
2021-02-02T01:12:13.000Z
|
'''Helper functions for generating random passwords and structured sample file names.'''
#-------------------------
# Random password generator function
def passw_gen(MODE=3,LENGTH=10):
'''
passw_gen(MODE=3,LENGTH=10)
    The first argument MODE determines the characters which will be used in the random name generation
MODE=0 for letters: abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
MODE=1 for digits: 0123456789
MODE=2 for special characters: !"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~
    MODE=3 for letters and digits (default): abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
MODE=4 for letters and special characters: abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~
MODE=5 for digits and special characters: 0123456789!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~
MODE=6 for digits, letters and special characters: abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~
    The second argument LENGTH determines the length of the randomly generated password'''
import string
import random
# create a character list which contains all possible character generation modes
characters=['1','2','3','4','5','6','7']
characters[0] = string.ascii_letters
characters[1] = string.digits
characters[2] = string.punctuation
characters[3] = string.ascii_letters + string.digits
characters[4] = string.ascii_letters + string.punctuation
characters[5] = string.digits + string.punctuation
characters[6] = string.ascii_letters + string.digits + string.punctuation
MODE=int(MODE)# convert input into an int
LENGTH=int(LENGTH)# convert input into an int
password = "".join(random.choice(characters[MODE]) for x in range(LENGTH)) # The join() method takes all items in an iterable and joins them into one string.
return password
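# --- Illustrative examples (added for clarity; not part of the original module) ---
# Shows how the MODE argument selects the character pool; the generated strings are random,
# so only their length and character classes can be asserted.
def _example_passw_gen_modes():
    digits_only = passw_gen(MODE=1, LENGTH=6)   # six random digits, e.g. '493018'
    mixed = passw_gen(MODE=3, LENGTH=10)        # letters and digits (the default pool)
    assert len(digits_only) == 6 and digits_only.isdigit()
    assert len(mixed) == 10
    return digits_only, mixed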
#---------------------------
# name generator function
from datetime import date
def name_gen(TYPE=1,WORD_LENGTH=0,WORD=0,POSITION='X',YEAR=str(date.today())):
'''
name_gen(TYPE=1,WORD_LENGTH=0,WORD=0,POSITION='X',YEAR=str(date.today())):
    The TYPE argument accepts values of 0, 1 or 2 and is used to add 'A' for an audio file, 'V' for a video file or 'W' for the file containing the word in front of the randomly generated name. The default value is 1, which corresponds to the video format.
    The WORD_LENGTH argument accepts any integer between 0 and (length of word - 1). It determines how many letters, starting from the first letter, to include in the file name from the WORD argument. If the argument 'a' is passed, the whole word is added to the file name. If any other string is passed the value is set to 0 and only the first letter is used. If a float is passed and it is of the form 1.0, 2.0, 0.0 it is interpreted as an integer, otherwise WORD_LENGTH=0.
    The WORD argument accepts any str input; the selected leading letters of the word are assigned to INDEX_1.
    The POSITION argument is used to assign the position of the word in a sentence. For example, in the sentence 'Where is Bob', the word Bob is the 3rd word in the sentence, therefore its position is 3. If no argument is used, 'XX' is placed in the two position characters of the name.
    The YEAR argument should correspond to the year the file was created, as an integer in YYYY format; if no argument is passed, the current date of the machine is used.
'''
#TYPE
    # Check what argument is passed to TYPE and make sure the INDEX_0 variable is assigned the correct value
if TYPE == 0:
INDEX_0 ='A'
elif TYPE == 1:
INDEX_0 = 'V'
elif TYPE ==2:
INDEX_0 = 'W'
else:
INDEX_0 = 'V'
# WORD_LENGTH
    if (str(type(WORD_LENGTH))==str(str)) and (WORD_LENGTH!='a'):# if a string other than 'a' is passed
WORD_LENGTH=0
elif (str(type(WORD_LENGTH))==str(float)) and WORD_LENGTH-int(WORD_LENGTH)==0:
WORD_LENGTH=int(WORD_LENGTH)
elif (str(type(WORD_LENGTH))==str(float)) and WORD_LENGTH-int(WORD_LENGTH)!=0:
WORD_LENGTH=0
else:
pass
# WORD
    # Check what argument is passed to WORD and make sure the INDEX_1 variable is assigned the leading letters of the word
if str(type(WORD))==str(int):# if integer is input
WORD=0
INDEX_1='0'
elif str(type(WORD))==str(float):# if float input
WORD=0
INDEX_1='0'
elif (str(type(WORD))==str(str) and WORD_LENGTH=='a'):
INDEX_1=WORD
    elif (str(type(WORD))==str(str) and 0 <= int(WORD_LENGTH) <= 22):
        # equivalent to the original chain of elif branches for WORD_LENGTH 0..22:
        # keep the first WORD_LENGTH+1 letters of the word
        INDEX_1=WORD[0:int(WORD_LENGTH)+1]
else:
WORD='0'
INDEX_1='0'
#POSITION
    # If a string argument is passed instead of an integer, the value is ignored and the default value 'X' is used for both INDEX_15 and INDEX_16
if str(type(POSITION)) == str(str):
POSITION= 'X'
INDEX_15='X'
INDEX_16='X'
#if an integer value is passed
if str(type(POSITION))==str(int):
POSITION=str(POSITION) # convert int to str to allow indexing and len() function
        if len(POSITION)==2:# if its length is equal to 2, assign the first digit to INDEX_15 and the second digit to INDEX_16
INDEX_15=POSITION[0]
INDEX_16=POSITION[1]
        elif len(POSITION)==1:# if the length of the integer passed is equal to 1, assign '0' to INDEX_15 and the input value to INDEX_16
INDEX_15='0'
INDEX_16=POSITION[0]
# since no sentence can be longer than 100 words, the length passes should be 1 or 2
else:
POSITION= 'X'
INDEX_15='X'
INDEX_16='X'
# If a float argument is passed AND its value remains the same if it is converted into an integer. It is accepted if float is of the format 9.0 5.0 3.0 etc
if (str(type(POSITION)) == str(float) and POSITION-int(POSITION)==0):
POSITION=int(POSITION)
POSITION=str(POSITION) # convert to string
if len(POSITION)==2:
INDEX_15=POSITION[0]
INDEX_16=POSITION[1]
elif len(POSITION)==1:
INDEX_15='0'
INDEX_16=POSITION[0]
else:
POSITION='X'
INDEX_15='X'
INDEX_16='X'
elif (str(type(POSITION)) == str(float) and POSITION-int(POSITION)!=0):
POSITION='X'
INDEX_15='X'
INDEX_16='X'
# YEAR
if str(type(YEAR)) == str(int):
YEAR=str(YEAR)# converts int objects to a string to allow indexing
    if str(type(YEAR)) == str(float): # if a float object is passed the output will be the current date of the system, which is the default value
YEAR=str(date.today())
    if len(YEAR) != 4: # check whether a 4-digit value is passed as it should be; otherwise the argument is ignored and the default value, the current date of the system, is used
YEAR=str(date.today())
YEAR=YEAR[1:4] # selects the last 3 values of the expected YYYY format
name = INDEX_0+INDEX_1+YEAR+INDEX_15+INDEX_16
return name
#---------------------------
# Random name and password generator function
from datetime import date
def name_and_pass_gen(PASS_GEN_MODE=3,PASS_GEN_LENGTH=10,TYPE=1,WORD_LENGTH=0,WORD=0,POSITION='X',YEAR=str(date.today())):
'''
name_and_pass_gen(PASS_GEN_MODE=3,PASS_GEN_LENGTH=10,TYPE=1,WORD_LENGTH=0,WORD=0,POSITION='X',YEAR=str(date.today())):
PASS_GEN_MODE and PASS_GEN_LENGTH are arguments of the passw_gen function.
    The TYPE argument accepts values of 0, 1 or 2 and is used to add 'A' for an audio file, 'V' for a video file or 'W' for the file containing the word in front of the randomly generated name. The default value is 1, which corresponds to the video format.
    The WORD_LENGTH argument accepts any integer between 0 and (length of word - 1). It determines how many letters, starting from the first letter, to include in the file name from the WORD argument. If the argument 'a' is passed, the whole word is added to the file name. If any other string is passed the value is set to 0 and only the first letter is used. If a float is passed and it is of the form 1.0, 2.0, 0.0 it is interpreted as an integer, otherwise WORD_LENGTH=0.
    The WORD argument accepts any str input; the selected leading letters of the word are assigned to INDEX_1.
    The POSITION argument is used to assign the position of the word in a sentence. For example, in the sentence 'Where is Bob', the word Bob is the 3rd word in the sentence, therefore its position is 3. If no argument is used, 'XX' is placed in the two position characters of the name.
    The YEAR argument should correspond to the year the file was created, as an integer in YYYY format; if no argument is passed, the current date of the machine is used.
'''
# generate a random password
password=passw_gen(PASS_GEN_MODE,PASS_GEN_LENGTH)
#TYPE
    # Check what argument is passed to TYPE and make sure the INDEX_0 variable is assigned the correct value
if TYPE == 0:
INDEX_0 ='A'
elif TYPE == 1:
INDEX_0 = 'V'
elif TYPE ==2:
INDEX_0 = 'W'
else:
INDEX_0 = 'V'
# WORD_LENGTH
    if (str(type(WORD_LENGTH))==str(str)) and (WORD_LENGTH!='a'):# if a string other than 'a' is passed
WORD_LENGTH=0
elif (str(type(WORD_LENGTH))==str(float)) and WORD_LENGTH-int(WORD_LENGTH)==0:
WORD_LENGTH=int(WORD_LENGTH)
elif (str(type(WORD_LENGTH))==str(float)) and WORD_LENGTH-int(WORD_LENGTH)!=0:
WORD_LENGTH=0
else:
pass
# WORD
    # Check what argument is passed to WORD and make sure the INDEX_1 variable is assigned the leading letters of the word
if str(type(WORD))==str(int):# if integer is input
WORD=0
INDEX_1='0'
elif str(type(WORD))==str(float):# if float input
WORD=0
INDEX_1='0'
elif (str(type(WORD))==str(str) and WORD_LENGTH=='a'):
INDEX_1=WORD
    elif (str(type(WORD))==str(str) and 0 <= int(WORD_LENGTH) <= 22):
        # equivalent to the original chain of elif branches for WORD_LENGTH 0..22:
        # keep the first WORD_LENGTH+1 letters of the word
        INDEX_1=WORD[0:int(WORD_LENGTH)+1]
else:
WORD='0'
INDEX_1='0'
#POSITION
    # If a string argument is passed instead of an integer, the value is ignored and the default value 'X' is used for both INDEX_15 and INDEX_16
if str(type(POSITION)) == str(str):
POSITION= 'X'
INDEX_15='X'
INDEX_16='X'
#if an integer value is passed
if str(type(POSITION))==str(int):
POSITION=str(POSITION) # convert int to str to allow indexing and len() function
        if len(POSITION)==2:# if its length is equal to 2, assign the first digit to INDEX_15 and the second digit to INDEX_16
INDEX_15=POSITION[0]
INDEX_16=POSITION[1]
        elif len(POSITION)==1:# if the length of the integer passed is equal to 1, assign '0' to INDEX_15 and the input value to INDEX_16
INDEX_15='0'
INDEX_16=POSITION[0]
# since no sentence can be longer than 100 words, the length passes should be 1 or 2
else:
POSITION= 'X'
INDEX_15='X'
INDEX_16='X'
# If a float argument is passed AND its value remains the same if it is converted into an integer. It is accepted if float is of the format 9.0 5.0 3.0 etc
if (str(type(POSITION)) == str(float) and POSITION-int(POSITION)==0):
POSITION=int(POSITION)
POSITION=str(POSITION) # convert to string
if len(POSITION)==2:
INDEX_15=POSITION[0]
INDEX_16=POSITION[1]
elif len(POSITION)==1:
INDEX_15='0'
INDEX_16=POSITION[0]
else:
POSITION='X'
INDEX_15='X'
INDEX_16='X'
elif (str(type(POSITION)) == str(float) and POSITION-int(POSITION)!=0):
POSITION='X'
INDEX_15='X'
INDEX_16='X'
# YEAR
if str(type(YEAR)) == str(int):
YEAR=str(YEAR)# converts int objects to a string to allow indexing
    if str(type(YEAR)) == str(float): # if a float object is passed the output will be the current date of the system, which is the default value
YEAR=str(date.today())
    if len(YEAR) != 4: # check whether a 4-digit value is passed as it should be; otherwise the argument is ignored and the default value, the current date of the system, is used
YEAR=str(date.today())
YEAR=YEAR[1:4] # selects the last 3 values of the expected YYYY format
name = INDEX_0+INDEX_1+password+YEAR+INDEX_15+INDEX_16
return name
#---------------------------
# Random name and password generator function
from datetime import date
def add_name_and_pass(FILE_NAME='', PASSWORD=''):
'''
add_name_and_pass(FILE_NAME='', PASSWORD='')
FILE_NAME corresponds to the name of the file generated from the name_gen function
PASSWORD the random string of integers generated from the pass_gen function
    This function preserves the same format as name_and_pass_gen regardless of the FILE_NAME or PASSWORD
'''
if (str(type(FILE_NAME))==str(int)) or (str(type(FILE_NAME)) == str(float)) or (str(type(PASSWORD))==str(int)) or (str(type(PASSWORD)) == str(float)):
return print('Only string inputs are accepted for function add_name_and_pass')
LENGTH=len(FILE_NAME)
name = FILE_NAME[0:int(LENGTH-5)] + PASSWORD + FILE_NAME[int(LENGTH-5):]
return name
#---------------------
# Create a folder and assign with the input name
def folder_gen(RANDOM_STRING, FILE_PERMISION = '777'):
''' RANDOM_STRING ---> is the name of the folder to be generated
FILE_PERMISION ---> The rights to the folder
'''
import os
import subprocess
# create the folder
command = 'mkdir -m' + str(FILE_PERMISION) +' ' + str(RANDOM_STRING) # + ' >/dev/null 2>&1'
subprocess.call(command, shell=True)
# get the full path as an output
FOLDER_PATH = os.path.abspath(str(RANDOM_STRING))
FOLDER_PATH = str(FOLDER_PATH)
return FOLDER_PATH
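#---------------------
# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Shows how the helpers above are typically combined; the word, position and year values
# are placeholders. folder_gen is skipped here because it shells out to mkdir.
def _example_generate_sample_name():
    password = passw_gen(MODE=3, LENGTH=10)
    base_name = name_gen(TYPE=1, WORD_LENGTH=2, WORD='hello', POSITION=3, YEAR=2021)
    # For these inputs base_name is 'Vhel02103'; add_name_and_pass splices the password
    # in before the last five characters, giving 'Vhel' + <10 random characters> + '02103'.
    full_name = add_name_and_pass(base_name, password)
    return full_name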
| 42.324943
| 484
| 0.639598
|
199c0c07de1cf878f559d0710e901e7fc78988a4
| 5,806
|
py
|
Python
|
runtime/Python3/src/antlr4/Recognizer.py
|
charwliu/antlr4
|
1c987e77eaa2baf786023cfff437a409f4623221
|
[
"BSD-3-Clause"
] | null | null | null |
runtime/Python3/src/antlr4/Recognizer.py
|
charwliu/antlr4
|
1c987e77eaa2baf786023cfff437a409f4623221
|
[
"BSD-3-Clause"
] | null | null | null |
runtime/Python3/src/antlr4/Recognizer.py
|
charwliu/antlr4
|
1c987e77eaa2baf786023cfff437a409f4623221
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
from antlr4.RuleContext import RuleContext
from antlr4.Token import Token
from antlr4.error.ErrorListener import ProxyErrorListener, ConsoleErrorListener
# need forward declaration
RecognitionException = None
class Recognizer(object):
tokenTypeMapCache = dict()
ruleIndexMapCache = dict()
def __init__(self):
self._listeners = [ ConsoleErrorListener.INSTANCE ]
self._interp = None
self._stateNumber = -1
def extractVersion(self, version):
pos = version.find(".")
major = version[0:pos]
version = version[pos+1:]
pos = version.find(".")
if pos==-1:
pos = version.find("-")
if pos==-1:
pos = len(version)
minor = version[0:pos]
return major, minor
def checkVersion(self, toolVersion):
runtimeVersion = "4.6"
rvmajor, rvminor = self.extractVersion(runtimeVersion)
tvmajor, tvminor = self.extractVersion(toolVersion)
if rvmajor!=tvmajor or rvminor!=tvminor:
print("ANTLR runtime and generated code versions disagree: "+runtimeVersion+"!="+toolVersion)
def addErrorListener(self, listener):
self._listeners.append(listener)
def removeErrorListener(self, listener):
self._listeners.remove(listener)
def removeErrorListeners(self):
self._listeners = []
def getTokenTypeMap(self):
tokenNames = self.getTokenNames()
if tokenNames is None:
from antlr4.error.Errors import UnsupportedOperationException
raise UnsupportedOperationException("The current recognizer does not provide a list of token names.")
result = self.tokenTypeMapCache.get(tokenNames, None)
if result is None:
            result = dict(zip(tokenNames, range(0, len(tokenNames))))  # build a dict, not a zip iterator, so "EOF" can be added below
result["EOF"] = Token.EOF
self.tokenTypeMapCache[tokenNames] = result
return result
# Get a map from rule names to rule indexes.
#
# <p>Used for XPath and tree pattern compilation.</p>
#
def getRuleIndexMap(self):
ruleNames = self.getRuleNames()
if ruleNames is None:
from antlr4.error.Errors import UnsupportedOperationException
raise UnsupportedOperationException("The current recognizer does not provide a list of rule names.")
result = self.ruleIndexMapCache.get(ruleNames, None)
if result is None:
            result = dict(zip(ruleNames, range(0, len(ruleNames))))  # build a dict, not a zip iterator, so the map can be cached and reused
self.ruleIndexMapCache[ruleNames] = result
return result
def getTokenType(self, tokenName:str):
ttype = self.getTokenTypeMap().get(tokenName, None)
if ttype is not None:
return ttype
else:
return Token.INVALID_TYPE
# What is the error header, normally line/character position information?#
def getErrorHeader(self, e:RecognitionException):
line = e.getOffendingToken().line
column = e.getOffendingToken().column
return "line "+line+":"+column
# How should a token be displayed in an error message? The default
# is to display just the text, but during development you might
# want to have a lot of information spit out. Override in that case
# to use t.toString() (which, for CommonToken, dumps everything about
# the token). This is better than forcing you to override a method in
# your token objects because you don't have to go modify your lexer
# so that it creates a new Java type.
#
# @deprecated This method is not called by the ANTLR 4 Runtime. Specific
# implementations of {@link ANTLRErrorStrategy} may provide a similar
# feature when necessary. For example, see
# {@link DefaultErrorStrategy#getTokenErrorDisplay}.
#
def getTokenErrorDisplay(self, t:Token):
if t is None:
return "<no token>"
s = t.text
if s is None:
if t.type==Token.EOF:
s = "<EOF>"
else:
s = "<" + str(t.type) + ">"
s = s.replace("\n","\\n")
s = s.replace("\r","\\r")
s = s.replace("\t","\\t")
return "'" + s + "'"
def getErrorListenerDispatch(self):
return ProxyErrorListener(self._listeners)
# subclass needs to override these if there are sempreds or actions
# that the ATN interp needs to execute
def sempred(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):
return True
def precpred(self, localctx:RuleContext , precedence:int):
return True
@property
def state(self):
return self._stateNumber
# Indicate that the recognizer has changed internal state that is
# consistent with the ATN state passed in. This way we always know
# where we are in the ATN as the parser goes along. The rule
# context objects form a stack that lets us see the stack of
# invoking rules. Combine this and we have complete ATN
# configuration information.
@state.setter
def state(self, atnState:int):
self._stateNumber = atnState
del RecognitionException
import unittest
class Test(unittest.TestCase):
def testVersion(self):
major, minor = Recognizer().extractVersion("1.2")
self.assertEqual("1", major)
self.assertEqual("2", minor)
major, minor = Recognizer().extractVersion("1.2.3")
self.assertEqual("1", major)
self.assertEqual("2", minor)
major, minor = Recognizer().extractVersion("1.2-snapshot")
self.assertEqual("1", major)
self.assertEqual("2", minor)
| 36.062112
| 113
| 0.650017
|
0678fc0f3e9f5895976f569be0122508b5733884
| 2,062
|
py
|
Python
|
directkeys.py
|
ewertonhm/Pong-Py
|
214542d8509a6ea1bacea3f7b189e59063d40b84
|
[
"Unlicense"
] | null | null | null |
directkeys.py
|
ewertonhm/Pong-Py
|
214542d8509a6ea1bacea3f7b189e59063d40b84
|
[
"Unlicense"
] | null | null | null |
directkeys.py
|
ewertonhm/Pong-Py
|
214542d8509a6ea1bacea3f7b189e59063d40b84
|
[
"Unlicense"
] | null | null | null |
# direct inputs
# source to this solution and code:
# http://stackoverflow.com/questions/14489013/simulate-python-keypresses-for-controlling-a-game
# http://www.gamespp.com/directx/directInputKeyboardScanCodes.html
import ctypes
import time
SendInput = ctypes.windll.user32.SendInput
W = 0x11
A = 0x1E
S = 0x1F
D = 0x20
NP_2 = 0x50
NP_4 = 0x4B
NP_6 = 0x4D
NP_8 = 0x48
# C struct redefinitions
PUL = ctypes.POINTER(ctypes.c_ulong)
class KeyBdInput(ctypes.Structure):
_fields_ = [("wVk", ctypes.c_ushort),
("wScan", ctypes.c_ushort),
("dwFlags", ctypes.c_ulong),
("time", ctypes.c_ulong),
("dwExtraInfo", PUL)]
class HardwareInput(ctypes.Structure):
_fields_ = [("uMsg", ctypes.c_ulong),
("wParamL", ctypes.c_short),
("wParamH", ctypes.c_ushort)]
class MouseInput(ctypes.Structure):
_fields_ = [("dx", ctypes.c_long),
("dy", ctypes.c_long),
("mouseData", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("time",ctypes.c_ulong),
("dwExtraInfo", PUL)]
class Input_I(ctypes.Union):
_fields_ = [("ki", KeyBdInput),
("mi", MouseInput),
("hi", HardwareInput)]
class Input(ctypes.Structure):
_fields_ = [("type", ctypes.c_ulong),
("ii", Input_I)]
# Actual functions
def PressKey(hexKeyCode):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra) )
x = Input( ctypes.c_ulong(1), ii_ )
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
def ReleaseKey(hexKeyCode):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008 | 0x0002, 0, ctypes.pointer(extra) )
x = Input( ctypes.c_ulong(1), ii_ )
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
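# Hedged helper sketch (illustration only, not part of the original file):
# combines PressKey/ReleaseKey into a single key tap; the default duration
# below is an arbitrary choice for the example.
def TapKey(hexKeyCode, duration=0.05):
    PressKey(hexKeyCode)
    time.sleep(duration)
    ReleaseKey(hexKeyCode)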
if __name__ == '__main__':
PressKey(0x11)
time.sleep(1)
ReleaseKey(0x11)
time.sleep(1)
| 28.246575
| 95
| 0.616877
|
e9fa21e951fb4c10b2e69c60bc131020aa867f13
| 23
|
py
|
Python
|
src/postmarker/__init__.py
|
joveice/postmarker
|
e2c6f1ab4d5d3dd6fa3760959a626adb4a135199
|
[
"MIT"
] | 13
|
2016-07-08T14:52:16.000Z
|
2018-04-17T23:56:17.000Z
|
src/postmarker/__init__.py
|
joveice/postmarker
|
e2c6f1ab4d5d3dd6fa3760959a626adb4a135199
|
[
"MIT"
] | 22
|
2016-06-24T19:44:33.000Z
|
2018-09-27T13:43:04.000Z
|
src/postmarker/__init__.py
|
joveice/postmarker
|
e2c6f1ab4d5d3dd6fa3760959a626adb4a135199
|
[
"MIT"
] | 4
|
2017-04-12T08:01:56.000Z
|
2018-06-28T15:37:31.000Z
|
__version__ = "0.18.1"
| 11.5
| 22
| 0.652174
|
984c95789a5636f19492385c293e3b27e560ad9e
| 551
|
py
|
Python
|
cogs/roleAssign.py
|
hexoserver1/discord_bot
|
a6d5b9da017803063eac61f2e20b246f4d96ba39
|
[
"MIT"
] | 122
|
2016-08-05T02:27:31.000Z
|
2022-03-21T07:53:10.000Z
|
cogs/roleAssign.py
|
hexoserver1/discord_bot
|
a6d5b9da017803063eac61f2e20b246f4d96ba39
|
[
"MIT"
] | 15
|
2017-12-07T14:28:20.000Z
|
2021-11-19T13:03:37.000Z
|
cogs/roleAssign.py
|
hexoserver1/discord_bot
|
a6d5b9da017803063eac61f2e20b246f4d96ba39
|
[
"MIT"
] | 100
|
2016-08-21T18:12:29.000Z
|
2022-02-19T11:21:23.000Z
|
import discord
from discord.ext import commands
import loadconfig
class roleAssign(discord.Client):
    '''Adds a role to new users when they join the server'''
def __init__(self, bot):
self.bot = bot
async def on_member_join(self, member):
if member.server.id == loadconfig.__botserverid__ or True:
role = discord.utils.get(member.server.roles, name=loadconfig.__selfassignrole__)
await self.bot.add_roles(member, role)
def setup(bot):
bot.add_cog(roleAssign(bot))
| 29
| 94
| 0.682396
|
817010b677a0d7e0e4ab7529cca369bafc559e87
| 808
|
py
|
Python
|
cpu.py
|
Terraminator/Rasperry_Cooldown
|
6edff7fefc469d051da8d77359441dffbc6f4849
|
[
"BSD-2-Clause"
] | 1
|
2021-08-31T19:17:01.000Z
|
2021-08-31T19:17:01.000Z
|
cpu.py
|
Terraminator/Rasperry_Cooldown
|
6edff7fefc469d051da8d77359441dffbc6f4849
|
[
"BSD-2-Clause"
] | null | null | null |
cpu.py
|
Terraminator/Rasperry_Cooldown
|
6edff7fefc469d051da8d77359441dffbc6f4849
|
[
"BSD-2-Clause"
] | null | null | null |
from gpiozero import CPUTemperature
import RPi.GPIO as GPIO
import time
import os
normal = 30
warm = 35
high = 40
veryhigh = 45
cpu = CPUTemperature()
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.OUT)
GPIO.output(17, GPIO.LOW)
while True:
cpu = CPUTemperature()
    if cpu.temperature >= veryhigh:
        GPIO.output(17, GPIO.HIGH)
        os.system("sudo shutdown -h 0")
    elif cpu.temperature >= high:
        GPIO.output(17, GPIO.HIGH)
    elif cpu.temperature <= high and cpu.temperature >= warm:  # 'hoch' was undefined; 'high' is the threshold meant here
        GPIO.output(17, GPIO.HIGH)
    elif cpu.temperature <= normal:
        GPIO.output(17, GPIO.LOW)
    elif cpu.temperature <= warm:
        GPIO.output(17, GPIO.LOW)
time.sleep(1)
GPIO.cleanup()
| 23.764706
| 64
| 0.691832
|
8a7f1a8c9f7c7f0225b06e12d80e1703b6af7a63
| 1,293
|
py
|
Python
|
docker_ws/src/calibrate/input_fn.py
|
Pomiculture/GAN-Vitis-AI
|
148da346c3ec882f24a98b8231800a94c54cc709
|
[
"Apache-2.0"
] | null | null | null |
docker_ws/src/calibrate/input_fn.py
|
Pomiculture/GAN-Vitis-AI
|
148da346c3ec882f24a98b8231800a94c54cc709
|
[
"Apache-2.0"
] | null | null | null |
docker_ws/src/calibrate/input_fn.py
|
Pomiculture/GAN-Vitis-AI
|
148da346c3ec882f24a98b8231800a94c54cc709
|
[
"Apache-2.0"
] | null | null | null |
###############################################################################################################
# Calibrate graph for quantization step
###############################################################################################################
import os
import tensorflow as tf
import numpy as np
###############################################################################################################
# Get the environment variables (number of images in the batch, input resolution)
num_batch_images = int(os.environ['CALIB_BATCH_SIZE'])
codings_size = int(os.environ['CODINGS_SIZE'])
num_iter = int(os.environ['NB_ITER'])
input_tensor = os.environ['INPUT_NODE_NAME']
# Log total number of images to process
print("Processing {} images...".format(num_iter * num_batch_images))
def calib_input(iter):
'''
Input of the GAN generator algorithm for calibration during the quantization process.
'''
# Set seed for random values generation
tf.compat.v1.set_random_seed(iter)
# Generate noisy input of size 'num_batch_images' images
noise = tf.random.normal([num_batch_images, codings_size, 1, 1])
# Convert tensor to numpy array
noise = noise.eval(session=tf.compat.v1.Session())
# Link input noise to input node name
return {input_tensor : noise}
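# Hedged sanity-check sketch (illustration only, not part of the original
# file): the quantizer is expected to call calib_input(iter) once per
# calibration iteration; looping manually here just confirms the batch shape.
if __name__ == '__main__':
    for i in range(num_iter):
        batch = calib_input(i)[input_tensor]
        print(batch.shape)   # expected: (num_batch_images, codings_size, 1, 1)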
| 38.029412
| 111
| 0.563032
|
fb0dcbc07101b58c69af6ad966999c2000c06f62
| 2,650
|
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/onyx/test_onyx_wjh.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/onyx/test_onyx_wjh.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/onyx/test_onyx_wjh.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.plugins.modules.network.onyx import onyx_wjh
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
from ..onyx_module import TestOnyxModule, load_fixture
class TestOnyxWJHModule(TestOnyxModule):
module = onyx_wjh
def setUp(self):
self.enabled = False
super(TestOnyxWJHModule, self).setUp()
self.mock_get_config = patch.object(
onyx_wjh.OnyxWJHModule, "_get_wjh_config")
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch(
'ansible_collections.community.general.plugins.module_utils.network.onyx.onyx.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestOnyxWJHModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
config_file = 'onyx_wjh_show.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_wjh_no_change(self):
set_module_args(dict(group='forwarding', enabled=False))
self.execute_module(changed=False)
def test_wjh_enable(self):
set_module_args(dict(group='forwarding', enabled=True))
commands = ['what-just-happened forwarding enable']
self.execute_module(changed=True, commands=commands)
def test_wjh_export_no_change(self):
set_module_args(dict(export_group='forwarding', auto_export=False))
self.execute_module(changed=False)
def test_wjh_export_enable(self):
set_module_args(dict(export_group='forwarding', auto_export=True))
commands = ['what-just-happened auto-export forwarding enable']
self.execute_module(changed=True, commands=commands)
def test_wjh_export_disable(self):
set_module_args(dict(export_group='all', auto_export=False))
commands = ['no what-just-happened auto-export all enable']
self.execute_module(changed=True, commands=commands)
def test_wjh_clear(self):
set_module_args(dict(clear_group='all'))
commands = ['clear what-just-happened pcap-files all']
self.execute_module(changed=True, commands=commands)
| 39.552239
| 103
| 0.728679
|
1522a36ed5cb39591bd497ba958bc407c24f78da
| 541
|
py
|
Python
|
products/migrations/0001_initial.py
|
OjureFred/BlazeMarketplace
|
e207d538e25dfa8866b01e74886d8e91a54c53fe
|
[
"MIT"
] | null | null | null |
products/migrations/0001_initial.py
|
OjureFred/BlazeMarketplace
|
e207d538e25dfa8866b01e74886d8e91a54c53fe
|
[
"MIT"
] | null | null | null |
products/migrations/0001_initial.py
|
OjureFred/BlazeMarketplace
|
e207d538e25dfa8866b01e74886d8e91a54c53fe
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.4 on 2020-12-09 05:40
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=120)),
('description', models.TextField()),
],
),
]
| 23.521739
| 114
| 0.567468
|
6aae89b56bc757f98c51d3d3665f0f64e58fb84a
| 35
|
py
|
Python
|
discord/types/widget.py
|
Harukomaze/disnake
|
541f5c9623a02be894cd1015dbb344070700cb87
|
[
"MIT"
] | null | null | null |
discord/types/widget.py
|
Harukomaze/disnake
|
541f5c9623a02be894cd1015dbb344070700cb87
|
[
"MIT"
] | null | null | null |
discord/types/widget.py
|
Harukomaze/disnake
|
541f5c9623a02be894cd1015dbb344070700cb87
|
[
"MIT"
] | null | null | null |
from disnake.types.widget import *
| 17.5
| 34
| 0.8
|
37d896b1ce285001c2362b684c811ebd33ff8790
| 22,111
|
py
|
Python
|
tests/test_path_operations.py
|
Darkheir/s3path
|
238f6ff0abf1a3199c8f17d58c778d72b03f10a2
|
[
"Apache-2.0"
] | null | null | null |
tests/test_path_operations.py
|
Darkheir/s3path
|
238f6ff0abf1a3199c8f17d58c778d72b03f10a2
|
[
"Apache-2.0"
] | null | null | null |
tests/test_path_operations.py
|
Darkheir/s3path
|
238f6ff0abf1a3199c8f17d58c778d72b03f10a2
|
[
"Apache-2.0"
] | null | null | null |
import sys
from pathlib import Path
from io import UnsupportedOperation
from tempfile import NamedTemporaryFile
import boto3
from botocore.exceptions import ClientError
import pytest
from s3path import PureS3Path, S3Path, StatResult
# todo: test samefile/touch method
# todo: test security and boto config changes
# todo: test open method check R/W bytes/unicode
def test_path_support():
assert PureS3Path in S3Path.mro()
assert Path in S3Path.mro()
def test_stat(s3_mock):
path = S3Path('fake-bucket/fake-key')
with pytest.raises(ValueError):
path.stat()
path = S3Path('/fake-bucket/fake-key')
with pytest.raises(ClientError):
path.stat()
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'Test.test')
object_summary.put(Body=b'test data')
path = S3Path('/test-bucket/Test.test')
stat = path.stat()
assert isinstance(stat, StatResult)
assert stat == StatResult(
size=object_summary.size,
last_modified=object_summary.last_modified,
)
with NamedTemporaryFile() as local_file:
local_file.write(path.read_bytes())
local_file.flush()
local_path = Path(local_file.name)
local_stat = local_path.stat()
s3_stat = path.stat()
assert s3_stat.st_size == local_stat.st_size == s3_stat.size
assert s3_stat.last_modified.timestamp() == s3_stat.st_mtime
assert s3_stat.st_mtime < local_stat.st_mtime
with pytest.raises(UnsupportedOperation):
path.stat().st_atime
path = S3Path('/test-bucket')
assert path.stat() is None
def test_exists(s3_mock):
path = S3Path('./fake-key')
with pytest.raises(ValueError):
path.exists()
path = S3Path('/fake-bucket/fake-key')
with pytest.raises(ClientError):
path.exists()
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data')
assert not S3Path('/test-bucket/Test.test').exists()
path = S3Path('/test-bucket/directory/Test.test')
assert path.exists()
for parent in path.parents:
assert parent.exists()
def test_glob(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data')
assert list(S3Path('/test-bucket/').glob('*.test')) == []
assert list(S3Path('/test-bucket/directory/').glob('*.test')) == [S3Path('/test-bucket/directory/Test.test')]
assert list(S3Path('/test-bucket/').glob('**/*.test')) == [S3Path('/test-bucket/directory/Test.test')]
object_summary = s3.ObjectSummary('test-bucket', 'pathlib.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'setup.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'test_pathlib.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'build/lib/pathlib.py')
object_summary.put(Body=b'test data')
assert sorted(S3Path.from_uri('s3://test-bucket/').glob('*.py')) == [
S3Path('/test-bucket/pathlib.py'),
S3Path('/test-bucket/setup.py'),
S3Path('/test-bucket/test_pathlib.py')]
assert sorted(S3Path.from_uri('s3://test-bucket/').glob('*/*.py')) == [S3Path('/test-bucket/docs/conf.py')]
assert sorted(S3Path.from_uri('s3://test-bucket/').glob('**/*.py')) == [
S3Path('/test-bucket/build/lib/pathlib.py'),
S3Path('/test-bucket/docs/conf.py'),
S3Path('/test-bucket/pathlib.py'),
S3Path('/test-bucket/setup.py'),
S3Path('/test-bucket/test_pathlib.py')]
assert sorted(S3Path.from_uri('s3://test-bucket/').glob('*cs')) == [
S3Path('/test-bucket/docs/'),
]
def test_rglob(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data')
assert list(S3Path('/test-bucket/').rglob('*.test')) == [S3Path('/test-bucket/directory/Test.test')]
assert list(S3Path('/test-bucket/').rglob('**/*.test')) == [S3Path('/test-bucket/directory/Test.test')]
object_summary = s3.ObjectSummary('test-bucket', 'pathlib.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'setup.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'test_pathlib.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'build/lib/pathlib.py')
object_summary.put(Body=b'test data')
assert sorted(S3Path.from_uri('s3://test-bucket/').rglob('*.py')) == [
S3Path('/test-bucket/build/lib/pathlib.py'),
S3Path('/test-bucket/docs/conf.py'),
S3Path('/test-bucket/pathlib.py'),
S3Path('/test-bucket/setup.py'),
S3Path('/test-bucket/test_pathlib.py')]
def test_is_dir(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'pathlib.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'setup.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'test_pathlib.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'build/lib/pathlib.py')
object_summary.put(Body=b'test data')
assert not S3Path('/test-bucket/fake.test').is_dir()
assert not S3Path('/test-bucket/fake/').is_dir()
assert S3Path('/test-bucket/directory').is_dir()
assert not S3Path('/test-bucket/directory/Test.test').is_dir()
assert not S3Path('/test-bucket/pathlib.py').is_dir()
assert not S3Path('/test-bucket/docs/conf.py').is_dir()
assert S3Path('/test-bucket/docs/').is_dir()
assert S3Path('/test-bucket/build/').is_dir()
assert S3Path('/test-bucket/build/lib').is_dir()
assert not S3Path('/test-bucket/build/lib/pathlib.py').is_dir()
def test_is_file(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'pathlib.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'setup.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'test_pathlib.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'build/lib/pathlib.py')
object_summary.put(Body=b'test data')
assert not S3Path('/test-bucket/fake.test').is_file()
assert not S3Path('/test-bucket/fake/').is_file()
assert not S3Path('/test-bucket/directory').is_file()
assert S3Path('/test-bucket/directory/Test.test').is_file()
assert S3Path('/test-bucket/pathlib.py').is_file()
assert S3Path('/test-bucket/docs/conf.py').is_file()
assert not S3Path('/test-bucket/docs/').is_file()
assert not S3Path('/test-bucket/build/').is_file()
assert not S3Path('/test-bucket/build/lib').is_file()
assert S3Path('/test-bucket/build/lib/pathlib.py').is_file()
def test_read_line(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data\ntest data')
with S3Path('/test-bucket/directory/Test.test').open("r") as fp:
assert fp.readline() == "test data"
assert fp.readline() == "test data"
assert fp.readline() == ""
def test_read_lines(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data\ntest data')
with S3Path('/test-bucket/directory/Test.test').open("r") as fp:
assert len(fp.readlines()) == 2
def test_read_lines_hint(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data\ntest data')
with S3Path('/test-bucket/directory/Test.test').open("r") as fp:
assert len(fp.readlines(1)) == 1
def test_iter_lines(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data\ntest data')
with S3Path('/test-bucket/directory/Test.test').open("r") as fp:
for line in fp:
assert line == "test data"
def test_write_lines(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
path = S3Path('/test-bucket/directory/Test.test')
with path.open("w") as fp:
fp.writelines(["line 1\n", "line 2\n"])
res = path.read_text().splitlines()
assert len(res) == 2
def test_iterdir(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'pathlib.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'setup.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'test_pathlib.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'build/lib/pathlib.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/make.bat')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/index.rst')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/Makefile')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/_templates/11conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/_build/22conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/_static/conf.py')
object_summary.put(Body=b'test data')
s3_path = S3Path('/test-bucket/docs')
assert sorted(s3_path.iterdir()) == [
S3Path('/test-bucket/docs/Makefile'),
S3Path('/test-bucket/docs/_build'),
S3Path('/test-bucket/docs/_static'),
S3Path('/test-bucket/docs/_templates'),
S3Path('/test-bucket/docs/conf.py'),
S3Path('/test-bucket/docs/index.rst'),
S3Path('/test-bucket/docs/make.bat'),
]
def test_iterdir_on_buckets(s3_mock):
s3 = boto3.resource('s3')
for index in range(4):
s3.create_bucket(Bucket='test-bucket{}'.format(index))
s3_root_path = S3Path('/')
assert sorted(s3_root_path.iterdir()) == [
S3Path('/test-bucket{}'.format(index))
for index in range(4)
]
def test_open_for_reading(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data')
path = S3Path('/test-bucket/directory/Test.test')
file_obj = path.open()
assert file_obj.read() == 'test data'
def test_open_for_write(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
bucket = s3.Bucket('test-bucket')
assert sum(1 for _ in bucket.objects.all()) == 0
path = S3Path('/test-bucket/directory/Test.test')
file_obj = path.open(mode='bw')
assert file_obj.writable()
file_obj.write(b'test data\n')
file_obj.writelines([b'test data'])
assert sum(1 for _ in bucket.objects.all()) == 1
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
streaming_body = object_summary.get()['Body']
assert list(streaming_body.iter_lines()) == [
b'test data',
b'test data'
]
def test_open_binary_read(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data')
path = S3Path('/test-bucket/directory/Test.test')
with path.open(mode='br') as file_obj:
assert file_obj.readlines() == [b'test data']
with path.open(mode='rb') as file_obj:
assert file_obj.readline() == b'test data'
assert file_obj.readline() == b''
assert file_obj.readline() == b''
@pytest.mark.skipif(sys.version_info < (3, 5), reason="requires python3.5 or higher")
def test_read_bytes(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data')
path = S3Path('/test-bucket/directory/Test.test')
assert path.read_bytes() == b'test data'
def test_open_text_read(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data')
path = S3Path('/test-bucket/directory/Test.test')
with path.open(mode='r') as file_obj:
assert file_obj.readlines() == ['test data']
with path.open(mode='rt') as file_obj:
assert file_obj.readline() == 'test data'
assert file_obj.readline() == ''
assert file_obj.readline() == ''
@pytest.mark.skipif(sys.version_info < (3, 5), reason="requires python3.5 or higher")
def test_read_text(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data')
path = S3Path('/test-bucket/directory/Test.test')
assert path.read_text() == 'test data'
def test_owner(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'directory/Test.test')
object_summary.put(Body=b'test data')
path = S3Path('/test-bucket/directory/Test.test')
assert path.owner() == 'webfile'
def test_rename_s3_to_s3(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'docs/conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/make.bat')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/index.rst')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/Makefile')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/_templates/11conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/_build/22conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/_static/conf.py')
object_summary.put(Body=b'test data')
s3.create_bucket(Bucket='target-bucket')
S3Path('/test-bucket/docs/conf.py').rename('/test-bucket/docs/conf1.py')
assert not S3Path('/test-bucket/docs/conf.py').exists()
assert S3Path('/test-bucket/docs/conf1.py').is_file()
path = S3Path('/test-bucket/docs/')
path.rename(S3Path('/target-bucket') / S3Path('folder'))
assert not path.exists()
assert S3Path('/target-bucket/folder/conf1.py').is_file()
assert S3Path('/target-bucket/folder/make.bat').is_file()
assert S3Path('/target-bucket/folder/index.rst').is_file()
assert S3Path('/target-bucket/folder/Makefile').is_file()
assert S3Path('/target-bucket/folder/_templates/11conf.py').is_file()
assert S3Path('/target-bucket/folder/_build/22conf.py').is_file()
assert S3Path('/target-bucket/folder/_static/conf.py').is_file()
def test_replace_s3_to_s3(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'docs/conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/make.bat')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/index.rst')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/Makefile')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/_templates/11conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/_build/22conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/_static/conf.py')
object_summary.put(Body=b'test data')
s3.create_bucket(Bucket='target-bucket')
S3Path('/test-bucket/docs/conf.py').replace('/test-bucket/docs/conf1.py')
assert not S3Path('/test-bucket/docs/conf.py').exists()
assert S3Path('/test-bucket/docs/conf1.py').is_file()
path = S3Path('/test-bucket/docs/')
path.replace(S3Path('/target-bucket') / S3Path('folder'))
assert not path.exists()
assert S3Path('/target-bucket/folder/conf1.py').is_file()
assert S3Path('/target-bucket/folder/make.bat').is_file()
assert S3Path('/target-bucket/folder/index.rst').is_file()
assert S3Path('/target-bucket/folder/Makefile').is_file()
assert S3Path('/target-bucket/folder/_templates/11conf.py').is_file()
assert S3Path('/target-bucket/folder/_build/22conf.py').is_file()
assert S3Path('/target-bucket/folder/_static/conf.py').is_file()
def test_rmdir(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'docs/conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/make.bat')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/index.rst')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/Makefile')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/_templates/11conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/_build/22conf.py')
object_summary.put(Body=b'test data')
object_summary = s3.ObjectSummary('test-bucket', 'docs/_static/conf.py')
object_summary.put(Body=b'test data')
conf_path = S3Path('/test-bucket/docs/_templates')
assert conf_path.is_dir()
conf_path.rmdir()
assert not conf_path.exists()
path = S3Path('/test-bucket/docs/')
path.rmdir()
assert not path.exists()
def test_mkdir(s3_mock):
s3 = boto3.resource('s3')
S3Path('/test-bucket/').mkdir()
assert s3.Bucket('test-bucket') in s3.buckets.all()
S3Path('/test-bucket/').mkdir(exist_ok=True)
with pytest.raises(FileExistsError):
S3Path('/test-bucket/').mkdir(exist_ok=False)
with pytest.raises(FileNotFoundError):
S3Path('/test-second-bucket/test-directory/file.name').mkdir()
S3Path('/test-second-bucket/test-directory/file.name').mkdir(parents=True)
assert s3.Bucket('test-second-bucket') in s3.buckets.all()
def test_write_text(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'temp_key')
object_summary.put(Body=b'test data')
path = S3Path('/test-bucket/temp_key')
data = path.read_text()
assert isinstance(data, str)
path.write_text(data)
assert path.read_text() == data
def test_write_bytes(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'temp_key')
object_summary.put(Body=b'test data')
path = S3Path('/test-bucket/temp_key')
data = path.read_bytes()
assert isinstance(data, bytes)
path.write_bytes(data)
assert path.read_bytes() == data
def test_unlink(s3_mock):
s3 = boto3.resource('s3')
s3.create_bucket(Bucket='test-bucket')
object_summary = s3.ObjectSummary('test-bucket', 'temp_key')
object_summary.put(Body=b'test data')
path = S3Path('/test-bucket/temp_key')
subdir_key = S3Path('/test-bucket/fake_folder/some_key')
subdir_key.write_text("some text")
assert path.exists() is True
assert subdir_key.exists() is True
path.unlink()
assert path.exists() is False
with pytest.raises(FileNotFoundError):
S3Path("/test-bucket/fake_subfolder/fake_subkey").unlink()
with pytest.raises(IsADirectoryError):
S3Path("/test-bucket/fake_folder").unlink()
with pytest.raises(IsADirectoryError):
S3Path("/fake-bucket/").unlink()
| 38.122414
| 113
| 0.684727
|
1635935aee5fd2cb6b374335bc705e887135a440
| 12,648
|
py
|
Python
|
src/opserver/overlay_to_underlay_mapper.py
|
biswajit-mandal/contrail-controller
|
80c4a7e8515f7296b18ba4c21a439bd3daefcc4a
|
[
"Apache-2.0"
] | 3
|
2019-01-11T06:16:40.000Z
|
2021-02-24T23:48:21.000Z
|
src/opserver/overlay_to_underlay_mapper.py
|
biswajit-mandal/contrail-controller
|
80c4a7e8515f7296b18ba4c21a439bd3daefcc4a
|
[
"Apache-2.0"
] | null | null | null |
src/opserver/overlay_to_underlay_mapper.py
|
biswajit-mandal/contrail-controller
|
80c4a7e8515f7296b18ba4c21a439bd3daefcc4a
|
[
"Apache-2.0"
] | 18
|
2017-01-12T09:28:44.000Z
|
2019-04-18T20:47:42.000Z
|
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
#
# Overlay To Underlay Mapper
#
# Utility to get the Underlay information for the Overlay flow(s).
#
import json
from sandesh.viz.constants import *
from opserver_util import OpServerUtils
class OverlayToUnderlayMapperError(Exception):
"""Base Exception class for this module.
All the Exceptions defined in this module should be derived from
this class. The application/module that calls any method in the
OverlayToUnderlayMapper class should catch this base Exception.
"""
pass
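# Hedged usage sketch (illustration only, not part of the original module):
# as the docstring above states, callers of OverlayToUnderlayMapper methods
# should catch this base exception, e.g.:
#
#   mapper = OverlayToUnderlayMapper(query_json, analytics_ip, analytics_port, logger)
#   try:
#       response = mapper.process_query()
#   except OverlayToUnderlayMapperError as err:
#       logger.error('OverlayToUnderlay query failed: %s' % err)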
class OverlayToUnderlayMapper(object):
def __init__(self, query_json, analytics_api_ip,
analytics_api_port, logger):
self.query_json = query_json
self._analytics_api_ip = analytics_api_ip
self._analytics_api_port = analytics_api_port
self._logger = logger
if self.query_json is not None:
self._start_time = self.query_json['start_time']
self._end_time = self.query_json['end_time']
# If the start_time/end_time in the query is specified as
# relative time, then the actual start_time/end_time for the
# FlowRecordTable query and UFlowData query would be different.
# Since the FlowRecordTable is queried first and the result of
# which is used to query the UFlowData table, the result may
# not be correct if the start_time/end_time is different for
# FlowRecord and UFlowData queries. Therefore, convert the
# relative start/end time to absolute time.
if not str(self._start_time).isdigit():
self._start_time = \
OpServerUtils.convert_to_utc_timestamp_usec(self._start_time)
if not str(self._end_time).isdigit():
self._end_time = \
OpServerUtils.convert_to_utc_timestamp_usec(self._end_time)
# end __init__
def process_query(self):
"""Process the OverlayToUnderlay Flow query and returns
the response."""
flow_record_data = self._get_overlay_flow_data()
uflow_data = self._get_underlay_flow_data(flow_record_data)
return self._send_response(uflow_data)
# end process_query
def _overlay_to_flowrecord_name(self, oname):
try:
fname = OverlayToFlowRecordFields[oname]
except KeyError:
raise _OverlayToFlowRecordFieldsNameError(oname)
return fname
# end _overlay_to_flowrecord_name
def _flowrecord_to_uflowdata_name(self, fname):
try:
ufname = FlowRecordToUFlowDataFields[fname]
except KeyError:
raise _FlowRecordToUFlowDataFieldsNameError(fname)
return ufname
# end _flowrecord_to_uflowdata_name
def _underlay_to_uflowdata_name(self, uname):
try:
ufname = UnderlayToUFlowDataFields[uname]
except KeyError:
raise _UnderlayToUFlowDataFieldsNameError(uname)
return ufname
# end _underlay_to_uflowdata_name
def _get_overlay_flow_data(self):
"""Fetch the overlay flow data from the FlowRecord Table.
Convert the where clause in the OverlayToUnderlay query according
to the schema defined for the FlowRecord Table. Get the overlay
flow data [source vrouter, destination vrouter, flowtuple hash,
encapsulation] from the FlowRecord Table required to query the
underlay data.
"""
# process where clause
try:
where_or_list = self.query_json['where']
except KeyError:
where_or_list = []
flow_record_where = []
for where_and_list in where_or_list:
flow_record_where_and_list = []
for match_term in where_and_list:
fname = self._overlay_to_flowrecord_name(match_term['name'])
match = OpServerUtils.Match(name=fname,
value=match_term['value'],
op=match_term['op'],
value2=match_term.get('value2'))
flow_record_where_and_list.append(match.__dict__)
if match_term.get('suffix') is not None:
fname = self._overlay_to_flowrecord_name(
match_term['suffix']['name'])
match = OpServerUtils.Match(name=fname,
value=match_term['suffix']['value'],
op=match_term['suffix']['op'],
value2=match_term['suffix'].get('value2'))
flow_record_where_and_list.append(match.__dict__)
flow_record_where.append(flow_record_where_and_list)
# populate the select list
flow_record_select = [
FlowRecordNames[FlowRecordFields.FLOWREC_VROUTER_IP],
FlowRecordNames[FlowRecordFields.FLOWREC_OTHER_VROUTER_IP],
FlowRecordNames[FlowRecordFields.FLOWREC_UNDERLAY_SPORT],
FlowRecordNames[FlowRecordFields.FLOWREC_UNDERLAY_PROTO]
]
flow_record_query = OpServerUtils.Query(table=FLOW_TABLE,
start_time=self._start_time,
end_time=self._end_time,
select_fields=flow_record_select,
where=flow_record_where,
dir=1)
return self._send_query(json.dumps(flow_record_query.__dict__))
# end _get_overlay_flow_data
def _get_underlay_flow_data(self, flow_record_data):
"""Fetch the underlay data from the UFlowData table.
Construct the Where clause for the UFlowData query from the
FlowRecord query response. Convert the select clause, sort_fields,
filter clause in the OverlayToUnderlay query according to the schema
defined for the UFlowData table.
"""
if not len(flow_record_data):
return []
# populate where clause for Underlay Flow query
uflow_data_where = []
for row in flow_record_data:
            # if any of the column values is None, skip the row
            if any(col is None for col in row.values()):
continue
uflow_data_where_and_list = []
ufname = self._flowrecord_to_uflowdata_name(
FlowRecordNames[FlowRecordFields.FLOWREC_VROUTER_IP])
val = row[FlowRecordNames[FlowRecordFields.FLOWREC_VROUTER_IP]]
sip = OpServerUtils.Match(name=ufname, value=val,
op=OpServerUtils.MatchOp.EQUAL)
uflow_data_where_and_list.append(sip.__dict__)
ufname = self._flowrecord_to_uflowdata_name(
FlowRecordNames[FlowRecordFields.FLOWREC_OTHER_VROUTER_IP])
val = \
row[FlowRecordNames[FlowRecordFields.FLOWREC_OTHER_VROUTER_IP]]
dip = OpServerUtils.Match(name=ufname, value=val,
op=OpServerUtils.MatchOp.EQUAL)
uflow_data_where_and_list.append(dip.__dict__)
ufname = self._flowrecord_to_uflowdata_name(
FlowRecordNames[FlowRecordFields.FLOWREC_UNDERLAY_SPORT])
val = row[FlowRecordNames[FlowRecordFields.FLOWREC_UNDERLAY_SPORT]]
sport = OpServerUtils.Match(name=ufname, value=val,
op=OpServerUtils.MatchOp.EQUAL)
ufname = self._flowrecord_to_uflowdata_name(
FlowRecordNames[FlowRecordFields.FLOWREC_UNDERLAY_PROTO])
val = row[FlowRecordNames[FlowRecordFields.FLOWREC_UNDERLAY_PROTO]]
# get the protocol from tunnel_type
val = OpServerUtils.tunnel_type_to_protocol(val)
protocol = OpServerUtils.Match(name=ufname, value=val,
op=OpServerUtils.MatchOp.EQUAL, suffix=sport)
uflow_data_where_and_list.append(protocol.__dict__)
uflow_data_where.append(uflow_data_where_and_list)
# if the where clause is empty, then no need to send
# the UFlowData query
if not len(uflow_data_where):
return []
# populate UFlowData select
uflow_data_select = []
for select in self.query_json['select_fields']:
uflow_data_select.append(self._underlay_to_uflowdata_name(select))
# sort_fields specified in the query?
uflow_data_sort_fields = None
if self.query_json.get('sort_fields'):
uflow_data_sort_fields = []
for field in self.query_json['sort_fields']:
uflow_data_sort_fields.append(
self._underlay_to_uflowdata_name(field))
uflow_data_sort_type = self.query_json.get('sort')
# does the query contain limit attribute?
uflow_data_limit = self.query_json.get('limit')
# add filter if specified
uflow_data_filter = None
if self.query_json.get('filter') is not None:
uflow_data_filter = list(self.query_json['filter'])
if len(uflow_data_filter):
if not isinstance(uflow_data_filter[0], list):
uflow_data_filter = [uflow_data_filter]
for filter_and in uflow_data_filter:
for match_term in filter_and:
match_term['name'] = self._underlay_to_uflowdata_name(
match_term['name'])
uflow_data_query = OpServerUtils.Query(
table='StatTable.UFlowData.flow',
start_time=self._start_time,
end_time=self._end_time,
select_fields=uflow_data_select,
where=uflow_data_where,
sort=uflow_data_sort_type,
sort_fields=uflow_data_sort_fields,
limit=uflow_data_limit,
filter=uflow_data_filter)
return self._send_query(json.dumps(uflow_data_query.__dict__))
# end _get_underlay_flow_data
def _send_query(self, query):
"""Post the query to the analytics-api server and returns the
response."""
self._logger.debug('Sending query: %s' % (query))
opserver_url = OpServerUtils.opserver_query_url(self._analytics_api_ip,
str(self._analytics_api_port))
resp = OpServerUtils.post_url_http(opserver_url, query, True)
try:
resp = json.loads(resp)
value = resp['value']
except (TypeError, ValueError, KeyError):
raise _QueryError(query)
self._logger.debug('Query response: %s' % str(value))
return value
# end _send_query
def _send_response(self, uflow_data):
"""Converts the UFlowData query response according to the
schema defined for the OverlayToUnderlayFlowMap table."""
underlay_response = {}
underlay_data = []
for row in uflow_data:
underlay_row = {}
for field in self.query_json['select_fields']:
name = self._underlay_to_uflowdata_name(field)
underlay_row[field] = row[name]
underlay_data.append(underlay_row)
underlay_response['value'] = underlay_data
return json.dumps(underlay_response)
# end _send_response
# end class OverlayToUnderlayMapper
class _OverlayToFlowRecordFieldsNameError(OverlayToUnderlayMapperError):
def __init__(self, field):
self.field = field
def __str__(self):
return 'No mapping for <%s> in "OverlayToFlowRecordFields"' \
% (self.field)
# end class _OverlayToFlowRecordFieldsNameError
class _FlowRecordToUFlowDataFieldsNameError(OverlayToUnderlayMapperError):
def __init__(self, field):
self.field = field
def __str__(self):
return 'No mapping for <%s> in "FlowRecordToUFlowDataFields"' \
% (self.field)
# end class _FlowRecordToUFlowDataFieldsNameError
class _UnderlayToUFlowDataFieldsNameError(OverlayToUnderlayMapperError):
def __init__(self, field):
self.field = field
def __str__(self):
return 'No mapping for <%s> in "UnderlayToUFlowDataFields"' \
% (self.field)
# end class _UnderlayToUFlowDataFieldsNameError
class _QueryError(OverlayToUnderlayMapperError):
def __init__(self, query):
self.query = query
def __str__(self):
return 'Error in query processing: %s' % (self.query)
# end class _QueryError
| 42.019934
| 81
| 0.633381
|
b6f6c2406563bc3349ab47fc254cffeef441f7a3
| 26,266
|
py
|
Python
|
tagging/models.py
|
weijia/django-tagging-ng
|
e60116896f090b0c32d6f372839a7933dd76fcfe
|
[
"BSD-3-Clause"
] | null | null | null |
tagging/models.py
|
weijia/django-tagging-ng
|
e60116896f090b0c32d6f372839a7933dd76fcfe
|
[
"BSD-3-Clause"
] | null | null | null |
tagging/models.py
|
weijia/django-tagging-ng
|
e60116896f090b0c32d6f372839a7933dd76fcfe
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Models and managers for generic tagging.
"""
# Python 2.3 compatibility
try:
set
except NameError:
from sets import Set as set
import logging
logger = logging.getLogger('tagging.models')
# from django.contrib.contenttypes import generic
from compat import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import connection, models, IntegrityError
from django.db.models.query import QuerySet
from django.utils.translation import ugettext_lazy as _
from tagging import settings
from tagging.utils import calculate_cloud, get_tag_list, get_queryset_and_model, parse_tag_input
from tagging.utils import LOGARITHMIC
qn = connection.ops.quote_name
if settings.MULTILINGUAL_TAGS:
import multilingual
BaseManager = multilingual.Manager
else:
BaseManager = models.Manager
############
# Managers #
############
class TagManager(BaseManager):
def update_tags(self, obj, tag_names, tag_app=None):
"""
        Update tags associated with an object. Old tags that are not in tag_names will be removed.
"""
ctype = ContentType.objects.get_for_model(obj)
current_tags = list(self.filter(items__content_type__pk=ctype.pk,
items__object_id=obj.pk))
updated_tag_names = parse_tag_input(tag_names)
if settings.FORCE_LOWERCASE_TAGS:
updated_tag_names = [t.lower() for t in updated_tag_names]
# Remove tags which no longer apply
tags_for_removal = [tag for tag in current_tags \
if tag.name not in updated_tag_names]
if len(tags_for_removal):
TaggedItem._default_manager.filter(content_type__pk=ctype.pk,
object_id=obj.pk,
tag__in=tags_for_removal).delete()
# Add new tags
current_tag_names = [tag.name or tag.name_any for tag in current_tags]
for tag_name in updated_tag_names:
if tag_name not in current_tag_names:
tag, created = self.get_or_create(name=tag_name)
TaggedItem._default_manager.create(tag=tag, object=obj, tag_app=tag_app)
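    # Hedged usage sketch (illustration only, not part of the original module),
    # assuming TagManager is installed as Tag.objects as in django-tagging:
    # replace an instance's tag set with the tags parsed from one input string.
    #
    #   Tag.objects.update_tags(entry, 'django tagging "generic relations"')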
def add_tag(self, obj, tag_name, tag_app=None):
"""
Associates the given object with a tag.
"""
tag_names = parse_tag_input(tag_name)
if not len(tag_names):
raise AttributeError(_('No tags were given: "%s".') % tag_name)
if len(tag_names) > 1:
raise AttributeError(_('Multiple tags were given: "%s".') % tag_name)
tag_name = tag_names[0]
if settings.FORCE_LOWERCASE_TAGS:
tag_name = tag_name.lower()
tag, created = self.get_or_create(name=tag_name)
ctype = ContentType.objects.get_for_model(obj)
TaggedItem._default_manager.get_or_create(
tag=tag, content_type=ctype, object_id=obj.pk, tag_app=tag_app)
def get_for_object(self, obj):
"""
Create a queryset matching all tags associated with the given
object.
"""
ctype = ContentType.objects.get_for_model(obj)
return self.filter(items__content_type__pk=ctype.pk,
items__object_id=obj.pk)
def _get_usage(self, model, counts=False, min_count=None, extra_joins=None, extra_criteria=None, params=None):
"""
Perform the custom SQL query for ``usage_for_model`` and
``usage_for_queryset``.
"""
if min_count is not None: counts = True
model_table = qn(model._meta.db_table)
model_pk = '%s.%s' % (model_table, qn(model._meta.pk.column))
query = """
SELECT DISTINCT %(tag)s.id%(count_sql)s
FROM
%(tag)s
INNER JOIN %(tagged_item)s
ON %(tag)s.id = %(tagged_item)s.tag_id
INNER JOIN %(model)s
ON %(tagged_item)s.object_id = %(model_pk)s
%%s
WHERE %(tagged_item)s.content_type_id = %(content_type_id)s
%%s
GROUP BY %(tag)s.id
%%s""" % {
'tag': qn(self.model._meta.db_table),
'count_sql': counts and (', COUNT(%s)' % model_pk) or '',
'tagged_item': qn(TaggedItem._meta.db_table),
'model': model_table,
'model_pk': model_pk,
'content_type_id': ContentType.objects.get_for_model(model).pk,
}
min_count_sql = ''
if min_count is not None:
min_count_sql = 'HAVING COUNT(%s) >= %%s' % model_pk
params.append(min_count)
cursor = connection.cursor()
cursor.execute(query % (extra_joins, extra_criteria, min_count_sql), params)
tags = []
# TODO add ordering by name right here
for row in cursor.fetchall():
t = self.model.objects.get(pk=row[0])
if counts:
t.count = row[1]
tags.append(t)
tags.sort()
return tags
def usage_for_model(self, model, counts=False, min_count=None, filters=None):
"""
Obtain a list of tags associated with instances of the given
Model class.
If ``counts`` is True, a ``count`` attribute will be added to
each tag, indicating how many times it has been used against
the Model class in question.
If ``min_count`` is given, only tags which have a ``count``
greater than or equal to ``min_count`` will be returned.
Passing a value for ``min_count`` implies ``counts=True``.
To limit the tags (and counts, if specified) returned to those
used by a subset of the Model's instances, pass a dictionary
of field lookups to be applied to the given Model as the
``filters`` argument.
"""
if filters is None: filters = {}
queryset = model._default_manager.filter()
for f in filters.items():
queryset.query.add_filter(f)
usage = self.usage_for_queryset(queryset, counts, min_count)
return usage
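    # Hedged usage sketch (illustration only, not part of the original module);
    # Article and its status field are placeholders:
    #
    #   tags = Tag.objects.usage_for_model(Article, counts=True, min_count=2,
    #                                      filters=dict(status='published'))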
def usage_for_queryset(self, queryset, counts=False, min_count=None):
"""
Obtain a list of tags associated with instances of a model
contained in the given queryset.
If ``counts`` is True, a ``count`` attribute will be added to
each tag, indicating how many times it has been used against
the Model class in question.
If ``min_count`` is given, only tags which have a ``count``
greater than or equal to ``min_count`` will be returned.
Passing a value for ``min_count`` implies ``counts=True``.
"""
if getattr(queryset.query, 'get_compiler', None):
# Django 1.2 and up compatible (multiple databases)
compiler = queryset.query.get_compiler(using='default')
extra_joins = ' '.join(compiler.get_from_clause()[0][1:])
where, params = queryset.query.where.as_sql(compiler.quote_name_unless_alias, compiler.connection)
else:
# Django 1.1 and down compatible (single database)
extra_joins = ' '.join(queryset.query.get_from_clause()[0][1:])
where, params = queryset.query.where.as_sql()
if where:
extra_criteria = 'AND %s' % where
else:
extra_criteria = ''
return self._get_usage(queryset.model, counts, min_count, extra_joins, extra_criteria, params)
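    # Hedged usage sketch (illustration only, not part of the original module);
    # Article is a placeholder model:
    #
    #   qs = Article.objects.filter(author__username='alice')
    #   tags = Tag.objects.usage_for_queryset(qs, counts=True)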
def related_for_model(self, tags, model, counts=False, min_count=None):
"""
Obtain a list of tags related to a given list of tags - that
is, other tags used by items which have all the given tags.
If ``counts`` is True, a ``count`` attribute will be added to
each tag, indicating the number of items which have it in
addition to the given list of tags.
If ``min_count`` is given, only tags which have a ``count``
greater than or equal to ``min_count`` will be returned.
Passing a value for ``min_count`` implies ``counts=True``.
"""
if min_count is not None: counts = True
tags = get_tag_list(tags)
tag_count = len(tags)
tagged_item_table = qn(TaggedItem._meta.db_table)
query = """
SELECT %(tag)s.id%(count_sql)s
FROM %(tagged_item)s INNER JOIN %(tag)s ON %(tagged_item)s.tag_id = %(tag)s.id
WHERE %(tagged_item)s.content_type_id = %(content_type_id)s
AND %(tagged_item)s.object_id IN
(
SELECT %(tagged_item)s.object_id
FROM %(tagged_item)s, %(tag)s
WHERE %(tagged_item)s.content_type_id = %(content_type_id)s
AND %(tag)s.id = %(tagged_item)s.tag_id
AND %(tag)s.id IN (%(tag_id_placeholders)s)
GROUP BY %(tagged_item)s.object_id
HAVING COUNT(%(tagged_item)s.object_id) = %(tag_count)s
)
AND %(tag)s.id NOT IN (%(tag_id_placeholders)s)
GROUP BY %(tag)s.id
%(min_count_sql)s""" % {
'tag': qn(self.model._meta.db_table),
'count_sql': counts and ', COUNT(%s.object_id)' % tagged_item_table or '',
'tagged_item': tagged_item_table,
'content_type_id': ContentType.objects.get_for_model(model).pk,
'tag_id_placeholders': ','.join(['%s'] * tag_count),
'tag_count': tag_count,
'min_count_sql': min_count is not None and ('HAVING COUNT(%s.object_id) >= %%s' % tagged_item_table) or '',
}
params = [tag.pk for tag in tags] * 2
if min_count is not None:
params.append(min_count)
cursor = connection.cursor()
cursor.execute(query, params)
related = []
for row in cursor.fetchall():
tag = self.model.objects.get(pk=row[0])
if counts is True:
tag.count = row[1]
related.append(tag)
related.sort()
return related
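    # Hedged usage sketch (illustration only, not part of the original module);
    # Article is a placeholder model: tags that co-occur with both 'python'
    # and 'django' on tagged items, with counts of the additional items.
    #
    #   related = Tag.objects.related_for_model(['python', 'django'], Article,
    #                                           counts=True)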
def cloud_for_model(self, model, steps=4, distribution=LOGARITHMIC,
filters=None, min_count=None):
"""
Obtain a list of tags associated with instances of the given
Model, giving each tag a ``count`` attribute indicating how
many times it has been used and a ``font_size`` attribute for
use in displaying a tag cloud.
``steps`` defines the range of font sizes - ``font_size`` will
be an integer between 1 and ``steps`` (inclusive).
``distribution`` defines the type of font size distribution
algorithm which will be used - logarithmic or linear. It must
be either ``tagging.utils.LOGARITHMIC`` or
``tagging.utils.LINEAR``.
To limit the tags displayed in the cloud to those associated
with a subset of the Model's instances, pass a dictionary of
field lookups to be applied to the given Model as the
``filters`` argument.
To limit the tags displayed in the cloud to those with a
``count`` greater than or equal to ``min_count``, pass a value
for the ``min_count`` argument.
"""
tags = list(self.usage_for_model(model, counts=True, filters=filters,
min_count=min_count))
return calculate_cloud(tags, steps, distribution)
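    # Illustrative usage sketch: ``Article`` and the ``published`` filter are
    # hypothetical names, used only to show the call signature documented above.
    #
    #   tags = Tag.objects.cloud_for_model(Article, steps=6,
    #                                      filters={'published': True},
    #                                      min_count=2)
    #   for tag in tags:
    #       print(tag.name, tag.count, tag.font_size)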
def process_rules(self, rules):
for line in rules.split('\n'):
self._process_line(line)
return True
def _process_line(self, line):
logger.debug('processing line "%s"' % line)
def join(tags):
self.join([tag[0] for tag in tags if tag])
if '==' in line:
names = [name.strip() for name in line.split('==')]
try:
tag = self.get(name=names[0])
except Tag.DoesNotExist:
return
for syn_name in names[1:]:
try:
syn = Synonym(name=syn_name, tag=tag)
syn.save()
except IntegrityError:
pass
join([self.filter(name=name)[:1] for name in names])
elif '=' in line:
join([self.filter(name=name.strip())[:1] \
for name in line.split('=')])
elif ':' in line:
parts = line.split(';')
if len(parts) > 0:
changed = False
head = [p.strip() for p in parts[0].split(':')][:2]
tag_from = head[0]
tag_to = (len(head) == 2) and head[1] or head[0]
try:
tag = self.get(name=tag_from)
except Tag.DoesNotExist:
return
if tag.name != tag_to:
tag.name = tag_to
changed = True
names = [tuple(i.strip() for i in p.split(':')) for p in parts[1:]]
for name in names:
if len(name) == 2 and getattr(tag, 'name_%s' % name[0], None) != name[1]:
setattr(tag, 'name_%s' % name[0], name[1])
changed = True
if changed:
tag.save()
def dumpAsText(self):
tags = self.all()
return '\n'.join(filter(lambda x: x, \
[self.dumpSynonymsAsText(t) for t in tags] + \
[self.dumpTagAsText(t) for t in tags]))
def dumpTagAsText(self, tag):
parts = [tag.name, ]
for id, code in multilingual.languages.get_language_choices():
name = tag.get_translation(id, 'name').name
if name:
parts.append('%s: %s' % (code, name))
return '; '.join(parts)
def dumpSynonymsAsText(self, tag):
synonyms = tag.synonyms.all()
if len(synonyms) > 0:
return ' == '.join([tag.name, ] + [s.name for s in synonyms])
return ''
def join(self, query):
"""This method joins multiple tags together."""
from tagging.utils import merge
logger.info('Joining %s' % ','.join([unicode(obj) for obj in query]))
tags = list(query)
if len(tags) < 2:
return
first = tags[0]
tags = tags[1:]
for t in tags:
merge(first, t)
class TaggedItemManager(models.Manager):
"""
FIXME There's currently no way to get the ``GROUP BY`` and ``HAVING``
SQL clauses required by many of this manager's methods into
Django's ORM.
For now, we manually execute a query to retrieve the PKs of
objects we're interested in, then use the ORM's ``__in``
lookup to return a ``QuerySet``.
Now that the queryset-refactor branch is in the trunk, this can be
tidied up significantly.
"""
def get_by_model(self, queryset_or_model, tags):
"""
Create a ``QuerySet`` containing instances of the specified
model associated with a given tag or list of tags.
"""
tags = get_tag_list(tags)
tag_count = len(tags)
if tag_count == 0:
# No existing tags were given
queryset, model = get_queryset_and_model(queryset_or_model)
return model._default_manager.none()
elif tag_count == 1:
# Optimisation for single tag - fall through to the simpler
# query below.
tag = tags[0]
else:
return self.get_intersection_by_model(queryset_or_model, tags)
queryset, model = get_queryset_and_model(queryset_or_model)
content_type = ContentType.objects.get_for_model(model)
opts = self.model._meta
tagged_item_table = qn(opts.db_table)
return queryset.extra(
tables=[opts.db_table],
where=[
'%s.content_type_id = %%s' % tagged_item_table,
'%s.tag_id = %%s' % tagged_item_table,
'%s.%s = %s.object_id' % (qn(model._meta.db_table),
qn(model._meta.pk.column),
tagged_item_table)
],
params=[content_type.pk, tag.pk],
)
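    # Illustrative usage sketch: ``Article`` is a hypothetical tagged model; the
    # call returns a ``QuerySet`` of articles carrying the given tag(s), as
    # described in the docstring above.
    #
    #   articles = TaggedItem.objects.get_by_model(Article, 'django')
    #   multi = TaggedItem.objects.get_by_model(Article, ['django', 'python'])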
def get_intersection_by_model(self, queryset_or_model, tags):
"""
Create a ``QuerySet`` containing instances of the specified
model associated with *all* of the given list of tags.
"""
tags = get_tag_list(tags)
tag_count = len(tags)
queryset, model = get_queryset_and_model(queryset_or_model)
if not tag_count:
return model._default_manager.none()
model_table = qn(model._meta.db_table)
# This query selects the ids of all objects which have all the
# given tags.
query = """
SELECT %(model_pk)s
FROM %(model)s, %(tagged_item)s
WHERE %(tagged_item)s.content_type_id = %(content_type_id)s
AND %(tagged_item)s.tag_id IN (%(tag_id_placeholders)s)
AND %(model_pk)s = %(tagged_item)s.object_id
GROUP BY %(model_pk)s
HAVING COUNT(%(model_pk)s) = %(tag_count)s""" % {
'model_pk': '%s.%s' % (model_table, qn(model._meta.pk.column)),
'model': model_table,
'tagged_item': qn(self.model._meta.db_table),
'content_type_id': ContentType.objects.get_for_model(model).pk,
'tag_id_placeholders': ','.join(['%s'] * tag_count),
'tag_count': tag_count,
}
cursor = connection.cursor()
cursor.execute(query, [tag.pk for tag in tags])
object_ids = [row[0] for row in cursor.fetchall()]
if len(object_ids) > 0:
return queryset.filter(pk__in=object_ids)
else:
return model._default_manager.none()
def get_union_by_model(self, queryset_or_model, tags):
"""
Create a ``QuerySet`` containing instances of the specified
model associated with *any* of the given list of tags.
"""
tags = get_tag_list(tags)
tag_count = len(tags)
queryset, model = get_queryset_and_model(queryset_or_model)
if not tag_count:
return model._default_manager.none()
model_table = qn(model._meta.db_table)
# This query selects the ids of all objects which have any of
# the given tags.
query = """
SELECT %(model_pk)s
FROM %(model)s, %(tagged_item)s
WHERE %(tagged_item)s.content_type_id = %(content_type_id)s
AND %(tagged_item)s.tag_id IN (%(tag_id_placeholders)s)
AND %(model_pk)s = %(tagged_item)s.object_id
GROUP BY %(model_pk)s""" % {
'model_pk': '%s.%s' % (model_table, qn(model._meta.pk.column)),
'model': model_table,
'tagged_item': qn(self.model._meta.db_table),
'content_type_id': ContentType.objects.get_for_model(model).pk,
'tag_id_placeholders': ','.join(['%s'] * tag_count),
}
cursor = connection.cursor()
cursor.execute(query, [tag.pk for tag in tags])
object_ids = [row[0] for row in cursor.fetchall()]
if len(object_ids) > 0:
return queryset.filter(pk__in=object_ids)
else:
return model._default_manager.none()
def get_related(self, obj, queryset_or_model, num=None):
"""
Retrieve a list of instances of the specified model which share
tags with the model instance ``obj``, ordered by the number of
shared tags in descending order.
If ``num`` is given, a maximum of ``num`` instances will be
returned.
"""
queryset, model = get_queryset_and_model(queryset_or_model)
model_table = qn(model._meta.db_table)
content_type = ContentType.objects.get_for_model(obj)
related_content_type = ContentType.objects.get_for_model(model)
query = """
SELECT %(model_pk)s, COUNT(related_tagged_item.object_id) AS %(count)s
FROM %(model)s, %(tagged_item)s, %(tag)s, %(tagged_item)s related_tagged_item
WHERE %(tagged_item)s.object_id = %%s
AND %(tagged_item)s.content_type_id = %(content_type_id)s
AND %(tag)s.id = %(tagged_item)s.tag_id
AND related_tagged_item.content_type_id = %(related_content_type_id)s
AND related_tagged_item.tag_id = %(tagged_item)s.tag_id
AND %(model_pk)s = related_tagged_item.object_id"""
if content_type.pk == related_content_type.pk:
# Exclude the given instance itself if determining related
# instances for the same model.
query += """
AND related_tagged_item.object_id != %(tagged_item)s.object_id"""
query += """
GROUP BY %(model_pk)s
ORDER BY %(count)s DESC
%(limit_offset)s"""
query = query % {
'model_pk': '%s.%s' % (model_table, qn(model._meta.pk.column)),
'count': qn('count'),
'model': model_table,
'tagged_item': qn(self.model._meta.db_table),
'tag': qn(self.model._meta.get_field('tag').rel.to._meta.db_table),
'content_type_id': content_type.pk,
'related_content_type_id': related_content_type.pk,
# Hardcoding this for now just to get tests working again - this
# should now be handled by the query object.
'limit_offset': num is not None and 'LIMIT %s' or '',
}
cursor = connection.cursor()
params = [obj.pk]
if num is not None:
params.append(num)
cursor.execute(query, params)
object_ids = [row[0] for row in cursor.fetchall()]
if len(object_ids) > 0:
# Use in_bulk here instead of an id__in lookup, because id__in would
# clobber the ordering.
object_dict = queryset.in_bulk(object_ids)
return [object_dict[object_id] for object_id in object_ids \
if object_id in object_dict]
else:
return []
##########
# Models #
##########
class Tag(models.Model):
"""
A tag.
"""
if settings.MULTILINGUAL_TAGS:
class Translation(multilingual.Translation):
name = models.CharField(_('name'), max_length=50, unique=True, db_index=True)
else:
name = models.CharField(_('name'), max_length=50, unique=True, db_index=True)
objects = TagManager()
class Meta:
if not settings.MULTILINGUAL_TAGS:
ordering = ('name',)
verbose_name = _('tag')
verbose_name_plural = _('tags')
app_label = 'tagging'
def __unicode__(self):
return self.name or 'tag-with-id: %d' % self.id
def __lt__(self, other):
return self.name < other.name
def delete(self, update=True):
if update:
self._updateLinkedObjects(remove_this=True)
return super(Tag, self).delete()
def save(self, *args, **kwargs):
result = super(Tag, self).save(*args, **kwargs)
self._updateLinkedObjects()
return result
def _updateLinkedObjects(self, remove_this=False):
"""Updates TagField's for all objects with this tag."""
for item in TaggedItem.objects.filter(tag=self):
item._updateLinkedObjects(remove_this=remove_this)
if settings.MULTILINGUAL_TAGS:
"""Monkey-patching for translation getter,
    to fall back to another translation."""
from multilingual.translation import getter_generator
_orig_name_getter = Tag.get_name
def _my_get_name(self, language_id=None, fallback=False):
value = _orig_name_getter(self, language_id, fallback)
if value is None and language_id is None:
# print 'BLAH BLAH for lang_id: %s' % language_id
value = _orig_name_getter(self, settings.FALLBACK_LANGUAGE, fallback)
# print 'New value for lang_id=%s is %s' % (settings.FALLBACK_LANGUAGE, value)
return value
_my_get_name.short_description = getattr(Tag.name, 'verbose_name', 'name')
setattr(Tag, 'get_name', _my_get_name)
class TaggedItem(models.Model):
"""
Holds the relationship between a tag and the item being tagged.
"""
tag = models.ForeignKey(Tag, verbose_name=_('tag'), related_name='items')
content_type = models.ForeignKey(ContentType, verbose_name=_('content type'))
object_id = models.PositiveIntegerField(_('object id'), db_index=True)
object = GenericForeignKey('content_type', 'object_id')
timestamp = models.DateTimeField(_('date published'), auto_now_add=True)
tag_app = models.CharField(_(u"Tag creator"), help_text=_(u"Tag creator"), max_length=50, null=True, blank=True)
objects = TaggedItemManager()
class Meta:
# Enforce unique tag association per object
unique_together = (('tag', 'content_type', 'object_id'),)
verbose_name = _('tagged item')
verbose_name_plural = _('tagged items')
app_label = 'tagging'
def __unicode__(self):
return u'%s [%s]' % (self.object, self.tag)
def _updateLinkedObjects(self, remove_this=False):
from tagging.fields import TagField
object_tags = [tag.name or tag.name_any \
for tag in Tag.objects.get_for_object(self.object) \
if not remove_this or tag.id != self.tag_id]
tags_as_string = ', '.join(object_tags)
for field in self.object._meta.fields:
if isinstance(field, TagField):
setattr(self.object, field.attname, tags_as_string)
self.object.save()
break
def delete(self, update=True):
if update:
self._updateLinkedObjects(remove_this=True)
return super(TaggedItem, self).delete()
class Synonym(models.Model):
name = models.CharField(max_length=50, unique=True, db_index=True)
tag = models.ForeignKey(Tag, related_name='synonyms')
def __unicode__(self):
return u'%s, synonym for %s' % (self.name, self.tag)
class Meta:
verbose_name = _("Tag's synonym")
verbose_name_plural = _("Tags' synonyms")
ordering = ('name',)
app_label = 'tagging'
| 38.56975
| 119
| 0.594799
|
158d508ef2cca90b4e6a3130faaa7ff632dce1c3
| 489
|
py
|
Python
|
examples/idioms/programs/179.2689-get-center-of-a-rectangle.py
|
laowantong/paroxython
|
4626798a60eeaa765dbfab9e63e04030c9fcb1d0
|
[
"MIT"
] | 31
|
2020-05-02T13:34:26.000Z
|
2021-06-06T17:25:52.000Z
|
examples/idioms/programs/179.2689-get-center-of-a-rectangle.py
|
laowantong/paroxython
|
4626798a60eeaa765dbfab9e63e04030c9fcb1d0
|
[
"MIT"
] | 108
|
2019-11-18T19:41:52.000Z
|
2022-03-18T13:58:17.000Z
|
examples/idioms/programs/179.2689-get-center-of-a-rectangle.py
|
laowantong/paroxython
|
4626798a60eeaa765dbfab9e63e04030c9fcb1d0
|
[
"MIT"
] | 4
|
2020-05-19T08:57:44.000Z
|
2020-09-21T08:53:46.000Z
|
"""Get center of a rectangle.
Return the center _c of the rectangle with coördinates (_x1, _y1, _x2, _y2)
Source: Bart
"""
# Implementation author: random
# Created on 2019-09-26T13:37:07.573535Z
# Last modified on 2019-09-26T13:37:07.573535Z
# Version 1
# center is a namedtuple, that can be accessed either using x and y or an index (0,1)
#
# e.g. center.x or center[0]
from collections import namedtuple
Point = namedtuple("Point", "x y")
center = Point((x1 + x2) / 2, (y1 + y2) / 2)
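# Worked example (hypothetical values): with x1, y1, x2, y2 = 0, 0, 4, 2 the
# snippet above yields Point(x=2.0, y=1.0), which can be read either as
# center.x / center.y or as center[0] / center[1].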
| 23.285714
| 85
| 0.705521
|
ee6e99bf0fca4b072e82f77b45a29419986844b5
| 344
|
py
|
Python
|
docs/cours/developpement/programmation-objet/code/ui/dice.py
|
FrancoisBrucker/cours_informatique
|
8e693adefa84770850291fe4562c66d36d77cefa
|
[
"MIT"
] | 3
|
2021-09-15T12:34:56.000Z
|
2021-12-14T09:29:17.000Z
|
docs/cours/developpement/programmation-objet/code/ui/dice.py
|
FrancoisBrucker/cours_informatique
|
8e693adefa84770850291fe4562c66d36d77cefa
|
[
"MIT"
] | 1
|
2020-09-29T14:44:09.000Z
|
2020-09-29T15:39:01.000Z
|
docs/cours/developpement/programmation-objet/code/ui/dice.py
|
FrancoisBrucker/cours_informatique
|
8e693adefa84770850291fe4562c66d36d77cefa
|
[
"MIT"
] | 4
|
2020-10-01T09:12:32.000Z
|
2021-12-14T09:29:09.000Z
|
import random
class Dice:
NUMBER_FACES = 6
def __init__(self, position=1):
self._position = position
def get_position(self):
return self._position
def set_position(self, new_position):
self._position = new_position
def roll(self):
self.set_position(random.randint(1, self.NUMBER_FACES))
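# Minimal usage sketch, assuming only the Dice class defined above: roll the
# die a few times and read the face back through its accessor.
if __name__ == "__main__":
    dice = Dice()
    for _ in range(3):
        dice.roll()
        print(dice.get_position())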
| 20.235294
| 63
| 0.668605
|
a8c477819732ba1d9ba54aba2f19b3657b32f95f
| 13,524
|
py
|
Python
|
tools/Polygraphy/polygraphy/comparator/struct.py
|
borisfom/TensorRT
|
42805f078052daad1a98bc5965974fcffaad0960
|
[
"Apache-2.0"
] | 1
|
2022-03-05T08:46:19.000Z
|
2022-03-05T08:46:19.000Z
|
tools/Polygraphy/polygraphy/comparator/struct.py
|
maxpark/TensorRT
|
46253b644142a1d9632ba463422abfc5dcefc371
|
[
"Apache-2.0"
] | null | null | null |
tools/Polygraphy/polygraphy/comparator/struct.py
|
maxpark/TensorRT
|
46253b644142a1d9632ba463422abfc5dcefc371
|
[
"Apache-2.0"
] | 1
|
2022-03-19T16:03:30.000Z
|
2022-03-19T16:03:30.000Z
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from collections import OrderedDict
from polygraphy import config, mod, util
from polygraphy.common.interface import TypedDict, TypedList
from polygraphy.json import Decoder, Encoder, add_json_methods, load_json, save_json
from polygraphy.logger import G_LOGGER
np = mod.lazy_import("numpy")
class LazyNumpyArray(object):
"""
Represents a lazily loaded NumPy array.
For example, large NumPy arrays may be serialized to temporary files on the disk
to save memory.
"""
def __init__(self, arr):
"""
Args:
arr (np.ndarray): The NumPy array.
"""
self.arr = None
self.tmpfile = None
if config.ARRAY_SWAP_THRESHOLD_MB >= 0 and arr.nbytes > (config.ARRAY_SWAP_THRESHOLD_MB << 20):
self.tmpfile = util.NamedTemporaryFile(suffix=".json")
G_LOGGER.extra_verbose(
"Evicting large array ({:.3f} MiB) from memory and saving to {:}".format(
arr.nbytes / (1024.0 ** 2), self.tmpfile.name
)
)
save_json(arr, self.tmpfile.name)
else:
self.arr = arr
def numpy(self):
"""
Get the NumPy array, deserializing from the disk if it was stored earlier.
Returns:
np.ndarray: The NumPy array
"""
if self.arr is not None:
return self.arr
assert self.tmpfile is not None, "Path and NumPy array cannot both be None!"
return load_json(self.tmpfile.name)
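# Illustrative sketch of the lazy-loading behaviour implemented above: whether
# the wrapped array is evicted to a temporary JSON file depends on
# config.ARRAY_SWAP_THRESHOLD_MB.
#
#   lazy = LazyNumpyArray(np.zeros((4, 4), dtype=np.float32))
#   arr = lazy.numpy()  # reloads from disk if the array was swapped out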
@Encoder.register(LazyNumpyArray)
def encode(lazy_arr):
return {
"values": lazy_arr.numpy(),
}
@Decoder.register(LazyNumpyArray)
def decode(dct):
return LazyNumpyArray(dct["values"])
@mod.export()
class IterationResult(TypedDict(lambda: str, lambda: LazyNumpyArray)):
"""
An ordered dictionary containing the result of a running a single iteration of a runner.
This maps output names to NumPy arrays, and preserves the output ordering from the runner.
NOTE: The ``POLYGRAPHY_ARRAY_SWAP_THRESHOLD_MB`` environment variable can be set to enable
the arrays to be swapped to the disk.
Also includes additional fields indicating the name of the runner which produced the
outputs, and the time required to do so.
"""
@staticmethod
def _to_lazy(nparray):
if isinstance(nparray, LazyNumpyArray):
return nparray
return LazyNumpyArray(nparray)
@staticmethod
def _to_lazy_dict(nparray_dict):
if nparray_dict is None:
return None
# Converts a Dict[str, np.ndarray] to a Dict[str, LazyNumpyArray]
lazy = OrderedDict()
for name, out in nparray_dict.items():
lazy[name] = IterationResult._to_lazy(out)
return lazy
def __init__(self, outputs=None, runtime=None, runner_name=None):
"""
Args:
outputs (Dict[str, np.array]): The outputs of this iteration, mapped to their names.
runtime (float):
The time required for this iteration, in seconds.
Only used for logging purposes.
runner_name (str):
The name of the runner that produced this output.
If this is omitted, a default name is generated.
"""
if outputs and config.ARRAY_SWAP_THRESHOLD_MB < 0:
total_size_gb = sum(arr.nbytes for arr in outputs.values() if isinstance(arr, np.ndarray)) / (1024.0 ** 3)
if total_size_gb >= 1:
G_LOGGER.warning(
"It looks like the outputs of this network are very large ({:.3f} GiB).\n"
"To reduce memory usage, you may want to allow Polygraphy to swap these arrays to the disk using "
"the POLYGRAPHY_ARRAY_SWAP_THRESHOLD_MB environment variable.".format(total_size_gb)
)
super().__init__(IterationResult._to_lazy_dict(outputs))
self.runtime = runtime
self.runner_name = util.default(runner_name, "custom_runner")
# Convenience methods to preserve np.ndarray in the interface.
def update(self, other):
return super().update(IterationResult._to_lazy_dict(other))
def __setitem__(self, name, arr):
return super().__setitem__(name, IterationResult._to_lazy(arr))
def values(self):
for arr in super().values():
yield arr.numpy()
def items(self):
for name, arr in super().items():
yield name, arr.numpy()
def __getitem__(self, name):
return super().__getitem__(name).numpy()
def __eq__(self, other):
if self.runtime != other.runtime or self.runner_name != other.runner_name:
return False
for key, val in self.items():
if key not in other:
return False
if not np.array_equal(val, other[key]):
return False
return True
@Encoder.register(IterationResult)
def encode(iter_result):
return {
"outputs": iter_result.dct,
"runtime": iter_result.runtime,
"runner_name": iter_result.runner_name,
}
@Decoder.register(IterationResult)
def decode(dct):
return IterationResult(outputs=dct["outputs"], runtime=dct["runtime"], runner_name=dct["runner_name"])
@mod.export()
@add_json_methods("inference results")
class RunResults(TypedList(lambda: tuple)):
"""
Maps runners to per-iteration outputs (in the form of a ``List[IterationResult]``).
For example, if ``results`` is an instance of ``RunResults()``, then
to access the outputs of the first iteration from a specified runner, do:
::
iteration = 0
runner_name = "trt-runner"
outputs = results[runner_name][iteration]
# `outputs` is a `Dict[str, np.ndarray]`
Note: Technically, this is a ``List[Tuple[str, List[IterationResult]]]``, but includes
helpers that make it behave like an OrderedDict that can contain duplicates.
"""
def items(self):
"""
Creates a generator that yields ``Tuple[str, List[IterationResult]]`` - runner names
and corresponding outputs.
"""
for name, iteration_results in self.lst:
yield name, iteration_results
def keys(self):
"""
Creates a generator that yields runner names (str).
"""
for name, _ in self.lst:
yield name
def values(self):
"""
Creates a generator that yields runner outputs (List[IterationResult]).
"""
for _, iteration_results in self.lst:
yield iteration_results
def update(self, other):
"""
Updates the results stored in this instance.
Args:
other (Union[Dict[str, List[IterationResult]], RunResults]):
A dictionary or RunResults instance from which to update this one.
"""
for name, iteration_results in other.items():
self.lst[name] = iteration_results
return self
def add(self, out_list, runtime=None, runner_name=None):
"""
A helper to create a ``List[IterationResult]`` and map it to the specified runner_name.
This method cannot be used to modify an existing entry.
Calling this method is equivalent to:
::
results[runner_name] = []
for out in out_list:
results[runner_name].append(IterationResult(out, runtime, runner_name))
Args:
out_list (List[Dict[str, np.array]]):
One or more set of outputs where each output is a dictionary
of output names mapped to NumPy arrays.
runtime (float):
The time required for this iteration, in seconds.
Only used for logging purposes.
runner_name (str):
The name of the runner that produced this output.
If this is omitted, a default name is generated.
"""
runner_name = util.default(runner_name, "custom_runner")
iter_results = [IterationResult(out, runtime, runner_name) for out in out_list]
self[runner_name] = iter_results
def __getitem__(self, key):
if isinstance(key, int):
return self.lst[key]
for name, iteration_results in self.lst:
if name == key:
return iteration_results
G_LOGGER.critical(
"{:35} does not exist in this RunResults instance. Note: Available runners: {:}".format(
key, list(self.keys())
)
)
def __setitem__(self, key, value):
if isinstance(key, int):
self.lst[key] = value
return
for index, name in enumerate(self.keys()):
if name == key:
self.lst[index] = (key, value)
break
else:
self.append((key, value))
def __contains__(self, val):
if isinstance(val, str) or isinstance(val, bytes):
return val in list(self.keys())
return val in self.lst
def __eq__(self, other):
for (r0, its0), (r1, its1) in zip(self.lst, other.lst):
if r0 != r1:
return False
if its0 != its1:
return False
return True
@Encoder.register(RunResults)
def encode(results):
return {"lst": results.lst}
@Decoder.register(RunResults)
def decode(dct):
return RunResults(list(map(tuple, dct["lst"])))
@mod.export()
class AccuracyResult(TypedDict(lambda: tuple, lambda: list)):
"""
An ordered dictionary including details about the result of ``Comparator.compare_accuracy``.
More specifically, it is an ``OrderedDict[Tuple[str, str], List[OrderedDict[str, bool]]]`` which maps a runner
pair (a tuple containing both runner names) to a list of dictionaries of booleans (or anything that can be
converted into a boolean, such as an ``OutputCompareResult``), indicating whether there was a match in the outputs of
the corresponding iteration. The ``List[OrderedDict[str, bool]]`` is constructed from the dictionaries returned
by ``compare_func`` in ``compare_accuracy``.
For example, to see if there's a match between ``runner0`` and
``runner1`` during the 1st iteration for an output called ``output0``:
::
runner_pair = ("runner0", "runner1")
iteration = 0
output_name = "output0"
match = bool(accuracy_result[runner_pair][iteration][output_name])
If there's a mismatch, you can inspect the outputs from
the results of ``Comparator.run()``, assumed here to be called ``run_results``:
::
runner0_output = run_results["runner0"][iteration][output_name]
runner1_output = run_results["runner1"][iteration][output_name]
"""
def __bool__(self):
"""
Whether all outputs matched for every iteration.
You can use this function to avoid manually checking each output. For example:
::
if accuracy_result:
print("All matched!")
Returns:
bool
"""
return all([bool(match) for outs in self.values() for out in outs for match in out.values()])
def _get_runner_pair(self, runner_pair):
return util.default(runner_pair, list(self.keys())[0])
def percentage(self, runner_pair=None):
"""
Returns the percentage of iterations that matched for the given pair of runners,
expressed as a decimal between 0.0 and 1.0.
Always returns 1.0 when the number of iterations is 0, or when there are no runner comparisons.
Args:
runner_pair (Tuple[str, str]):
A pair of runner names describing which runners to check.
Defaults to the first pair in the dictionary.
"""
if not list(self.keys()):
return 1.0 # No data in this result.
matched, _, total = self.stats(runner_pair)
if not total:
return 1.0 # No iterations
return float(matched) / float(total)
def stats(self, runner_pair=None):
"""
Returns the number of iterations that matched, mismatched, and the total number of iterations.
Args:
runner_pair (Tuple[str, str]):
A pair of runner names describing which runners to check.
Defaults to the first pair in the dictionary.
Returns:
Tuple[int, int, int]: Number of iterations that matched, mismatched, and total respectively.
"""
runner_pair = self._get_runner_pair(runner_pair)
outs = self[runner_pair]
matched = sum([all([match for match in out.values()]) for out in outs])
total = len(outs)
return matched, total - matched, total
| 33.979899
| 121
| 0.622375
|
4cd4eec79ee21ee313cbc8002f6576c87147d7ca
| 6,136
|
py
|
Python
|
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslpolicy_csvserver_binding.py
|
culbertm/NSttyPython
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
[
"Apache-2.0"
] | null | null | null |
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslpolicy_csvserver_binding.py
|
culbertm/NSttyPython
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
[
"Apache-2.0"
] | null | null | null |
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslpolicy_csvserver_binding.py
|
culbertm/NSttyPython
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class sslpolicy_csvserver_binding(base_resource) :
""" Binding class showing the csvserver that can be bound to sslpolicy.
"""
def __init__(self) :
self._boundto = None
self._priority = None
self._activepolicy = None
self._gotopriorityexpression = None
self._labeltype = None
self._labelname = None
self._name = None
self.___count = None
@property
def name(self) :
r"""Name of the SSL policy for which to display detailed information.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name of the SSL policy for which to display detailed information.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def boundto(self) :
r"""The entity name to which policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
r"""The entity name to which policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
r"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@property
def labeltype(self) :
r"""Type of policy label invocation.<br/>Possible values = vserver, service, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@property
def activepolicy(self) :
try :
return self._activepolicy
except Exception as e:
raise e
@property
def labelname(self) :
r"""Name of the label to invoke if the current policy rule evaluates to TRUE.
"""
try :
return self._labelname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(sslpolicy_csvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.sslpolicy_csvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
r""" Use this API to fetch sslpolicy_csvserver_binding resources.
"""
try :
if not name :
obj = sslpolicy_csvserver_binding()
response = obj.get_resources(service, option_)
else :
obj = sslpolicy_csvserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
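    # Illustrative usage sketch: ``client`` stands for an already established
    # nitro_service session and ``ssl_pol_1`` is a hypothetical policy name.
    #
    #   bindings = sslpolicy_csvserver_binding.get(client, name="ssl_pol_1")
    #   total = sslpolicy_csvserver_binding.count(client, name="ssl_pol_1")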
@classmethod
def get_filtered(cls, service, name, filter_) :
r""" Use this API to fetch filtered set of sslpolicy_csvserver_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = sslpolicy_csvserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
r""" Use this API to count sslpolicy_csvserver_binding resources configued on NetScaler.
"""
try :
obj = sslpolicy_csvserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
r""" Use this API to count the filtered set of sslpolicy_csvserver_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = sslpolicy_csvserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Labeltype:
vserver = "vserver"
service = "service"
policylabel = "policylabel"
class sslpolicy_csvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.sslpolicy_csvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.sslpolicy_csvserver_binding = [sslpolicy_csvserver_binding() for _ in range(length)]
| 27.63964
| 130
| 0.718546
|
23ed45e8337c8f85c89f513117e00ebd19372d5c
| 279
|
py
|
Python
|
appyter/profiles/default/fields/__init__.py
|
MaayanLab/jupyter-template
|
dd05bfcb95c9eafb1a9df845b5d8fecae1d6b9d5
|
[
"Apache-2.0"
] | null | null | null |
appyter/profiles/default/fields/__init__.py
|
MaayanLab/jupyter-template
|
dd05bfcb95c9eafb1a9df845b5d8fecae1d6b9d5
|
[
"Apache-2.0"
] | 24
|
2020-04-07T17:04:47.000Z
|
2020-05-27T00:51:25.000Z
|
appyter/profiles/default/fields/__init__.py
|
MaayanLab/jupyter-template
|
dd05bfcb95c9eafb1a9df845b5d8fecae1d6b9d5
|
[
"Apache-2.0"
] | null | null | null |
''' ```eval_rst
Filters represent classes derived from :class:`appyters.fields.Field`
implementing more specific field types and are rendered in
:mod:`appyters.profiles.default.templates`. The various other profiles
can be used to extend or override those definitions.
``` '''
| 46.5
| 71
| 0.777778
|
8034d6a3b540bf81e1aae129f54d2687d3b5d773
| 985
|
py
|
Python
|
flask_app/schemas.py
|
ahtesham11/flask-sentimental-deploy
|
a4ebb1214e4cb8aa85f99960739ebf1e49a74adc
|
[
"MIT"
] | 1
|
2021-01-03T11:30:35.000Z
|
2021-01-03T11:30:35.000Z
|
flask_app/schemas.py
|
ahtesham11/flask-sentimental-deploy
|
a4ebb1214e4cb8aa85f99960739ebf1e49a74adc
|
[
"MIT"
] | 3
|
2021-03-31T19:44:40.000Z
|
2021-09-08T01:46:44.000Z
|
flask_app/schemas.py
|
nbroad1881/sentimentr
|
cb4f57ce48a43a104d2e56f792a04e667b5e7c88
|
[
"MIT"
] | null | null | null |
from flask_app.db_models import DBArticle, DBScore, Weekly, Tabulator
from flask_app import ma
from marshmallow import fields
class ScoreSchema(ma.SQLAlchemySchema):
class Meta:
model = DBScore
bert = fields.Float()
lstm = fields.Float()
vader = fields.Float()
textblob = fields.Float()
class WeeklySchema(ma.SQLAlchemySchema):
class Meta:
model = Weekly
index = ma.auto_field()
candidate = ma.auto_field()
news_co = ma.auto_field()
datetime = ma.auto_field()
bert = ma.auto_field()
lstm = ma.auto_field()
vader = ma.auto_field()
textblob = ma.auto_field()
class TabulatorSchema(ma.SQLAlchemySchema):
class Meta:
model = Tabulator
title = ma.auto_field()
url = ma.auto_field()
candidate = ma.auto_field()
news_co = ma.auto_field()
datetime = ma.auto_field()
bert = ma.auto_field()
lstm = ma.auto_field()
vader = ma.auto_field()
textblob = ma.auto_field()
| 22.386364
| 69
| 0.659898
|
021da90bf48b40b7dfea764f112310c133801988
| 341
|
py
|
Python
|
Libraries/AnimatSimPy/Scripts/ErrorTesting2.py
|
NeuroRoboticTech/AnimatLabPublicSource
|
c5b23f8898513582afb7891eb994a7bd40a89f08
|
[
"BSD-3-Clause"
] | 8
|
2015-01-09T21:59:50.000Z
|
2021-04-14T14:08:47.000Z
|
Libraries/AnimatSimPy/Scripts/ErrorTesting2.py
|
NeuroRoboticTech/AnimatLabPublicSource
|
c5b23f8898513582afb7891eb994a7bd40a89f08
|
[
"BSD-3-Clause"
] | null | null | null |
Libraries/AnimatSimPy/Scripts/ErrorTesting2.py
|
NeuroRoboticTech/AnimatLabPublicSource
|
c5b23f8898513582afb7891eb994a7bd40a89f08
|
[
"BSD-3-Clause"
] | 2
|
2018-12-21T02:58:30.000Z
|
2020-08-12T11:44:39.000Z
|
import sys
import traceback
def my_excepthook(type, value, tb):
    print(type.__name__)
    print(value)
    print("".join(traceback.format_exception(type, value, tb)))
sys.excepthook = my_excepthook # see http://docs.python.org/library/sys.html#sys.excepthook
# some code to generate a naturalistic exception
a = "text"
b = 5
error = a + b
| 24.357143
| 91
| 0.733138
|
36d59991f833e606843060cc8e6f945ccb30801c
| 6,126
|
py
|
Python
|
python/todoapp/rptodo/cli.py
|
imjoseangel/sandbox
|
bc4ff74981faf91eb1a1f777d01fcfd13d6f5147
|
[
"MIT"
] | null | null | null |
python/todoapp/rptodo/cli.py
|
imjoseangel/sandbox
|
bc4ff74981faf91eb1a1f777d01fcfd13d6f5147
|
[
"MIT"
] | null | null | null |
python/todoapp/rptodo/cli.py
|
imjoseangel/sandbox
|
bc4ff74981faf91eb1a1f777d01fcfd13d6f5147
|
[
"MIT"
] | null | null | null |
"""This module provides the RP To-Do CLI."""
# rptodo/cli.py
# -*- coding: utf-8 -*-
from pathlib import Path
from typing import List, Optional
import typer
from rptodo import (
ERRORS, __app_name__, __version__, config, database, rptodo
)
app = typer.Typer()
@app.command()
def init(
db_path: str = typer.Option(
str(database.DEFAULT_DB_FILE_PATH),
"--db-path",
"-db",
prompt="to-do database location?",
),
) -> None:
"""Initialize the to-do database."""
app_init_error = config.init_app(db_path)
if app_init_error:
typer.secho(
f'Creating config file failed with "{ERRORS[app_init_error]}"',
fg=typer.colors.RED,
)
raise typer.Exit(1)
db_init_error = database.init_database(Path(db_path))
if db_init_error:
typer.secho(
f'Creating database failed with "{ERRORS[db_init_error]}"',
fg=typer.colors.RED,
)
raise typer.Exit(1)
else:
typer.secho(f"The to-do database is {db_path}", fg=typer.colors.GREEN)
def get_todoer() -> rptodo.Todoer:
if config.CONFIG_FILE_PATH.exists():
db_path = database.get_database_path(config.CONFIG_FILE_PATH)
else:
typer.secho(
'Config file not found. Please, run "rptodo init"',
fg=typer.colors.RED,
)
raise typer.Exit(1)
if db_path.exists():
return rptodo.Todoer(db_path)
else:
typer.secho(
'Database not found. Please, run "rptodo init"',
fg=typer.colors.RED,
)
raise typer.Exit(1)
@app.command()
def add(
description: List[str] = typer.Argument(...),
priority: int = typer.Option(2, "--priority", "-p", min=1, max=3),
) -> None:
"""Add a new to-do with a DESCRIPTION."""
todoer = get_todoer()
todo, error = todoer.add(description, priority)
if error:
typer.secho(
f'Adding to-do failed with "{ERRORS[error]}"', fg=typer.colors.RED
)
raise typer.Exit(1)
else:
typer.secho(
f"""to-do: "{todo['Description']}" was added """
f"""with priority: {priority}""",
fg=typer.colors.GREEN,
)
@app.command(name="list")
def list_all() -> None:
"""List all to-dos."""
todoer = get_todoer()
todo_list = todoer.get_todo_list()
if len(todo_list) == 0:
typer.secho(
"There are no tasks in the to-do list yet", fg=typer.colors.RED
)
raise typer.Exit()
typer.secho("\nto-do list:\n", fg=typer.colors.BLUE, bold=True)
columns = (
"ID. ",
"| Priority ",
"| Done ",
"| Description ",
)
headers = "".join(columns)
typer.secho(headers, fg=typer.colors.BLUE, bold=True)
typer.secho("-" * len(headers), fg=typer.colors.BLUE)
for id, todo in enumerate(todo_list, 1):
desc, priority, done = todo.values()
typer.secho(
f"{id}{(len(columns[0]) - len(str(id))) * ' '}"
f"| ({priority}){(len(columns[1]) - len(str(priority)) - 4) * ' '}"
f"| {done}{(len(columns[2]) - len(str(done)) - 2) * ' '}"
f"| {desc}",
fg=typer.colors.BLUE,
)
typer.secho("-" * len(headers) + "\n", fg=typer.colors.BLUE)
@app.command(name="complete")
def set_done(todo_id: int = typer.Argument(...)) -> None:
"""Complete a to-do by setting it as done using its TODO_ID."""
todoer = get_todoer()
todo, error = todoer.set_done(todo_id)
if error:
typer.secho(
f'Completing to-do # "{todo_id}" failed with "{ERRORS[error]}"',
fg=typer.colors.RED,
)
raise typer.Exit(1)
else:
typer.secho(
f"""to-do # {todo_id} "{todo['Description']}" completed!""",
fg=typer.colors.GREEN,
)
@app.command()
def remove(
todo_id: int = typer.Argument(...),
force: bool = typer.Option(
False,
"--force",
"-f",
help="Force deletion without confirmation.",
),
) -> None:
"""Remove a to-do using its TODO_ID."""
todoer = get_todoer()
def _remove():
todo, error = todoer.remove(todo_id)
if error:
typer.secho(
f'Removing to-do # {todo_id} failed with "{ERRORS[error]}"',
fg=typer.colors.RED,
)
raise typer.Exit(1)
else:
typer.secho(
f"""to-do # {todo_id}: '{todo["Description"]}' was removed""",
fg=typer.colors.GREEN,
)
if force:
_remove()
else:
todo_list = todoer.get_todo_list()
try:
todo = todo_list[todo_id - 1]
except IndexError:
typer.secho("Invalid TODO_ID", fg=typer.colors.RED)
raise typer.Exit(1)
delete = typer.confirm(
f"Delete to-do # {todo_id}: {todo['Description']}?"
)
if delete:
_remove()
else:
typer.echo("Operation canceled")
@app.command(name="clear")
def remove_all(
force: bool = typer.Option(
...,
prompt="Delete all to-dos?",
help="Force deletion without confirmation.",
),
) -> None:
"""Remove all to-dos."""
todoer = get_todoer()
if force:
error = todoer.remove_all().error
if error:
typer.secho(
f'Removing to-dos failed with "{ERRORS[error]}"',
fg=typer.colors.RED,
)
raise typer.Exit(1)
else:
typer.secho("All to-dos were removed", fg=typer.colors.GREEN)
else:
typer.echo("Operation canceled")
def _version_callback(value: bool) -> None:
if value:
typer.echo(f"{__app_name__} v{__version__}")
raise typer.Exit()
@app.callback()
def main(
version: Optional[bool] = typer.Option(
None,
"--version",
"-v",
help="Show the application's version and exit.",
callback=_version_callback,
is_eager=True,
)
) -> None:
return
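# Illustrative command-line sketch, assuming the Typer app above is exposed as
# an ``rptodo`` console script:
#
#   rptodo init
#   rptodo add Buy groceries -p 1
#   rptodo list
#   rptodo complete 1
#   rptodo remove 1 --force
#   rptodo clear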
| 27.594595
| 79
| 0.544727
|
ea71a3408fa25c39c74fab894f08d7cb1119d3cd
| 1,924
|
py
|
Python
|
authors/apps/authentication/backends.py
|
dev-jey/ah-the-phoenix
|
985d216210c0c81ec06e223c6952b0c69fabdcfa
|
[
"BSD-3-Clause"
] | 1
|
2019-04-04T23:49:42.000Z
|
2019-04-04T23:49:42.000Z
|
authors/apps/authentication/backends.py
|
dev-jey/ah-the-phoenix
|
985d216210c0c81ec06e223c6952b0c69fabdcfa
|
[
"BSD-3-Clause"
] | 21
|
2019-01-29T17:41:36.000Z
|
2022-03-11T23:43:20.000Z
|
authors/apps/authentication/backends.py
|
dev-jey/ah-the-phoenix
|
985d216210c0c81ec06e223c6952b0c69fabdcfa
|
[
"BSD-3-Clause"
] | 1
|
2019-11-23T18:27:55.000Z
|
2019-11-23T18:27:55.000Z
|
import jwt
from django.conf import settings
from rest_framework import authentication, exceptions
from .models import User
class JWTAuthentication(authentication.BaseAuthentication):
"""Token authentication using JWT."""
authentication_header_prefix = 'token'
def authenticate(self, request):
"""Checks authorization on every request."""
request.user = None
auth_header = authentication.get_authorization_header(request).split()
if not auth_header:
return None
if len(auth_header) == 1:
message = "Invalid token header. No credentials provided."
raise exceptions.AuthenticationFailed(message)
if len(auth_header) > 2:
message = "Invalid token header. "\
"Token should not contain whitespaces."
raise exceptions.AuthenticationFailed(message)
prefix = auth_header[0].decode('utf-8')
token = auth_header[1].decode('utf-8')
if prefix.lower() != self.authentication_header_prefix:
message = "Invalid token header. Token header should" \
" include the word `token` followed by a whitespace"
raise exceptions.AuthenticationFailed(message)
return self.authenticate_credentials(request, token)
def authenticate_credentials(self, request, token):
"""Authenticate the provided credentials."""
try:
payload = jwt.decode(token, settings.SECRET_KEY)
        except jwt.InvalidTokenError:
message = "Could not decode token"
raise exceptions.AuthenticationFailed(message)
try:
user = User.objects.get(username=payload['username'])
        except User.DoesNotExist:
message = "No user matching this token was found"
raise exceptions.AuthenticationFailed(message)
return (user, token)
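    # Descriptive note: the header format enforced by authenticate() above is
    #
    #   Authorization: token <jwt>
    #
    # i.e. the literal prefix ``token``, a single whitespace, and the encoded
    # JWT with no embedded whitespace.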
| 33.172414
| 78
| 0.659563
|
307a40c8083bc57bc5b8de098510032da7b286a8
| 159
|
py
|
Python
|
tests/dummy_django_project/urls.py
|
grejppi/django-wsgi
|
7880d800718d608f7d14f8d2c838f146ed1bc59d
|
[
"BSD-3-Clause"
] | 7
|
2015-12-04T14:15:02.000Z
|
2020-08-04T05:18:40.000Z
|
tests/dummy_django_project/urls.py
|
grejppi/django-wsgi
|
7880d800718d608f7d14f8d2c838f146ed1bc59d
|
[
"BSD-3-Clause"
] | 8
|
2015-12-01T11:16:32.000Z
|
2021-01-29T16:30:59.000Z
|
tests/dummy_django_project/urls.py
|
grejppi/django-wsgi
|
7880d800718d608f7d14f8d2c838f146ed1bc59d
|
[
"BSD-3-Clause"
] | 4
|
2018-04-18T17:00:31.000Z
|
2021-01-29T16:30:20.000Z
|
from django.conf.urls import patterns
from django.http import HttpResponse
urlpatterns = patterns(
'',
('^$', lambda request: HttpResponse()),
)
| 17.666667
| 43
| 0.679245
|
bccab0b4d5b5b439da4d8bfe755097b4a88a5569
| 847
|
py
|
Python
|
lib3to2/fixes/fix_division.py
|
hajs/lib3to2_fork
|
4a2c734398493c5ff72857f3b849895aecdfc3f7
|
[
"Apache-2.0"
] | 3
|
2021-03-29T19:21:08.000Z
|
2021-12-31T09:30:11.000Z
|
VisionAPI/lib/python3.8/site-packages/lib3to2/fixes/fix_division.py
|
aniruddhakj/AnswerScriptEvaluation
|
7b039b84355ecda1d55dc037ccfc4a4d661ad5e3
|
[
"BSD-3-Clause"
] | 1
|
2019-05-07T11:15:34.000Z
|
2019-05-07T11:15:34.000Z
|
env/lib/python2.7/site-packages/lib3to2/fixes/fix_division.py
|
Eric-Muthemba/qontroverse
|
1f12d0e3bbdee628a88bac77dc53426ded220755
|
[
"MIT"
] | 2
|
2019-01-22T01:05:22.000Z
|
2019-09-27T12:32:22.000Z
|
"""
Fixer for division: from __future__ import division if needed
"""
from lib2to3 import fixer_base
from lib3to2.fixer_util import token, future_import
def match_division(node):
"""
__future__.division redefines the meaning of a single slash for division,
so we match that and only that.
"""
slash = token.SLASH
return node.type == slash and not node.next_sibling.type == slash and \
not node.prev_sibling.type == slash
class FixDivision(fixer_base.BaseFix):
def match(self, node):
"""
Since the tree needs to be fixed once and only once if and only if it
matches, then we can start discarding matches after we make the first.
"""
return match_division(node)
def transform(self, node, results):
future_import("division", node)
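    # Descriptive note: only a bare ``/`` matches, so ``a / b`` triggers the
    # future import while ``a // b`` (floor division) is left untouched by
    # match_division above.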
| 30.25
| 78
| 0.664699
|
d7585fba08c2c20e30436a235bcee7a90a834b42
| 210
|
py
|
Python
|
wiki/wiki/doctype/wiki_sidebar/test_wiki_sidebar.py
|
fproldan/wiki
|
f384223ab2e3551339894c2b727e0e6e68953405
|
[
"MIT"
] | 60
|
2021-05-23T08:33:20.000Z
|
2022-03-28T11:54:58.000Z
|
wiki/wiki/doctype/wiki_sidebar/test_wiki_sidebar.py
|
fproldan/wiki
|
f384223ab2e3551339894c2b727e0e6e68953405
|
[
"MIT"
] | 49
|
2021-05-21T10:00:38.000Z
|
2022-03-31T08:22:23.000Z
|
wiki/wiki/doctype/wiki_sidebar/test_wiki_sidebar.py
|
fproldan/wiki
|
f384223ab2e3551339894c2b727e0e6e68953405
|
[
"MIT"
] | 46
|
2021-05-23T08:33:21.000Z
|
2022-03-31T15:36:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Frappe and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestWikiSidebar(unittest.TestCase):
pass
| 19.090909
| 45
| 0.761905
|
518251b0503a2f77f38c16d4c908fb7dcb1c98a5
| 20,708
|
py
|
Python
|
transformers/data/processors/glue.py
|
mandubian/transformers
|
0cb163865a4c761c226b151283309eedb2b1ca4d
|
[
"Apache-2.0"
] | 3,255
|
2016-08-18T17:53:27.000Z
|
2022-03-29T19:53:43.000Z
|
transformers/data/processors/glue.py
|
mandubian/transformers
|
0cb163865a4c761c226b151283309eedb2b1ca4d
|
[
"Apache-2.0"
] | 141
|
2017-07-17T09:14:37.000Z
|
2022-03-14T00:00:19.000Z
|
transformers/data/processors/glue.py
|
mandubian/transformers
|
0cb163865a4c761c226b151283309eedb2b1ca4d
|
[
"Apache-2.0"
] | 2,580
|
2017-05-14T14:33:41.000Z
|
2022-03-31T15:04:14.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GLUE processors and helpers """
import logging
import os
from .utils import DataProcessor, InputExample, InputFeatures
from ...file_utils import is_tf_available
if is_tf_available():
import tensorflow as tf
logger = logging.getLogger(__name__)
def glue_convert_examples_to_features(examples, tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
task: GLUE task
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
output_mode: String indicating the output mode. Either ``regression`` or ``classification``
pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
actual values)
Returns:
If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
containing the task-specific features. If the input is a list of ``InputExamples``, will return
a list of task-specific ``InputFeatures`` which can be fed to the model.
"""
is_tf_dataset = False
if is_tf_available() and isinstance(examples, tf.data.Dataset):
is_tf_dataset = True
if task is not None:
processor = glue_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = glue_output_modes[task]
logger.info("Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % (ex_index))
if is_tf_dataset:
example = processor.get_example_from_tensor_dict(example)
example = processor.tfds_map(example)
inputs = tokenizer.encode_plus(
example.text_a,
example.text_b,
add_special_tokens=True,
max_length=max_length,
)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(len(attention_mask), max_length)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(len(token_type_ids), max_length)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
InputFeatures(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label))
if is_tf_available() and is_tf_dataset:
def gen():
for ex in features:
yield ({'input_ids': ex.input_ids,
'attention_mask': ex.attention_mask,
'token_type_ids': ex.token_type_ids},
ex.label)
return tf.data.Dataset.from_generator(gen,
({'input_ids': tf.int32,
'attention_mask': tf.int32,
'token_type_ids': tf.int32},
tf.int64),
({'input_ids': tf.TensorShape([None]),
'attention_mask': tf.TensorShape([None]),
'token_type_ids': tf.TensorShape([None])},
tf.TensorShape([])))
return features
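# Illustrative usage sketch; the tokenizer instance and the data directory are
# assumptions for this example:
#
#   processor = MrpcProcessor()
#   examples = processor.get_dev_examples("/path/to/glue/MRPC")
#   features = glue_convert_examples_to_features(
#       examples, tokenizer, max_length=128, task="mrpc")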
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['premise'].numpy().decode('utf-8'),
tensor_dict['hypothesis'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_matched")
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence'].numpy().decode('utf-8'),
None,
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence'].numpy().decode('utf-8'),
None,
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[7]
text_b = line[8]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['question1'].numpy().decode('utf-8'),
tensor_dict['question2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
try:
text_a = line[3]
text_b = line[4]
label = line[5]
except IndexError:
continue
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['question'].numpy().decode('utf-8'),
tensor_dict['sentence'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
glue_tasks_num_labels = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
glue_processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor,
}
glue_output_modes = {
"cola": "classification",
"mnli": "classification",
"mnli-mm": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification",
}
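# --- Editor's note: illustrative sketch, not part of the original transformers code. ---
# A minimal, hedged example of how the registries above might be used together;
# the GLUE data directory path is hypothetical and `task_name` is an example value.
if __name__ == "__main__":
    task_name = "sst-2"
    processor = glue_processors[task_name]()
    print(glue_output_modes[task_name], glue_tasks_num_labels[task_name])
    print(processor.get_labels())
    # Reading examples requires a local GLUE download (the path is an assumption):
    # examples = processor.get_train_examples("/data/glue/SST-2")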
| 37.446655 | 130 | 0.571953 |
c3bf06adf56465c5650d1f09e7e685c2c81a42c4 | 1,221 | py | Python | tools/create_xml_annotations.py | krohitm/Faster-RCNN_TF-1 | fb5edac39a85b8c2b805319a739bf2375847abb2 | ["MIT"] | null | null | null | tools/create_xml_annotations.py | krohitm/Faster-RCNN_TF-1 | fb5edac39a85b8c2b805319a739bf2375847abb2 | ["MIT"] | null | null | null | tools/create_xml_annotations.py | krohitm/Faster-RCNN_TF-1 | fb5edac39a85b8c2b805319a739bf2375847abb2 | ["MIT"] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue May 16 14:48:19 2017
@author: krohitm
"""
import numpy as np
from xml.etree import ElementTree as ET
if __name__=='__main__':
tree = ET.parse('000007.xml')
root = tree.getroot()
bbox = np.array(np.loadtxt(
'/data0/krohitm/object_boundaries/person_extrapol.csv', dtype=str,
delimiter=',', skiprows=1, ))
data_dict = {}
data_dict['flickrid'] = 'krm'
data_dict['name'] = 'krohitm'
data_dict['pose'] = 'unknown'
data_dict['width'] = '1280'
data_dict['height'] = '720'
data_dict['depth'] = '3'
num_imgs = len(bbox[:,0])
for i in range(num_imgs):
flag = 0
data_dict['filename'] = '{0}.jpg'.format(str(i+1).zfill(7))
data_dict['xmin'] = bbox[i,1]
data_dict['ymin'] = bbox[i,2]
data_dict['xmax'] = bbox[i,3]
data_dict['ymax'] = bbox[i,4]
for node in root.iter():
if node.tag in data_dict:
node.text = data_dict[node.tag]
root[6][0].text = 'person'
tree.write('/home/krohitm/code/Faster-RCNN_TF/data/VOCdevkit2007/VOC2007/Annotations/{0}.xml'.format(
str(i+1).zfill(7)))
| 28.395349 | 109 | 0.591319 |
a54f7fa58c4f9e59a1f859803fe2780ad4bf886d | 2,869 | py | Python | unit-tests/test_subfolder.py | Keeper-Security/commander | 93fee5d2ba56f2288e00ab33003597d00a302b5c | ["MIT"] | 151 | 2015-11-02T02:04:46.000Z | 2022-01-20T00:07:01.000Z | unit-tests/test_subfolder.py | Keeper-Security/commander | 93fee5d2ba56f2288e00ab33003597d00a302b5c | ["MIT"] | 145 | 2015-12-31T00:11:35.000Z | 2022-03-31T19:13:54.000Z | unit-tests/test_subfolder.py | Keeper-Security/commander | 93fee5d2ba56f2288e00ab33003597d00a302b5c | ["MIT"] | 73 | 2015-10-30T00:53:10.000Z | 2022-03-30T03:50:53.000Z |
#!/usr/bin/env python3
"""Test keepercommander.subfolder."""
from unittest.mock import Mock
import pytest
import keepercommander.subfolder as subfolder
def BFN(*, type, uid, parent_uid, name, subfolders):
"""Build a mock BaseFolderNode."""
result = Mock(name=name)
result.type = type
result.uid = uid
result.parent_uid = parent_uid
result.name = name
# A list of UID's, not folders.
assert all(isinstance(subfolder, str) for subfolder in subfolders)
result.subfolders = subfolders
return result
def folder_cache():
"""Build a two-node folder_cache. Return it and the root uid."""
cd_tests_uid = 'b' * 22
cd_tests_bfn = BFN(type='user_folder', uid=cd_tests_uid, parent_uid=None, name='cd-tests', subfolders=[])
root_uid = 'a' * 22
root_bfn = BFN(type='/', uid=None, parent_uid=None, name='root', subfolders=[cd_tests_uid])
dict_ = {
root_uid: root_bfn,
cd_tests_uid: cd_tests_bfn,
}
return dict_, root_bfn, cd_tests_bfn
def create_fake_params():
"""Create a fake params instance for testing."""
params = Mock()
(params.folder_cache, root_bfn, cd_tests_bfn) = folder_cache()
params.current_folder = ''
params.root_folder = root_bfn
return params, root_bfn, cd_tests_bfn
(global_params, global_root_bfn, global_cd_tests_bfn) = create_fake_params()
global_test_params = (
('a', global_root_bfn, 'a'),
('/a', global_root_bfn, 'a'),
('/a/b', global_root_bfn, 'a/b'),
('/cd-tests/a', global_cd_tests_bfn, 'a'),
('/cd-tests', global_cd_tests_bfn, ''),
('a//b', global_root_bfn, 'a//b'),
('//a', global_root_bfn, '//a'),
('//a//b', global_root_bfn, '//a//b'),
('/cd-tests/a//b//c', global_cd_tests_bfn, 'a//b//c'),
('/cd-tests/..', global_root_bfn, ''),
('/cd-tests/.', global_cd_tests_bfn, ''),
('/..', global_root_bfn, ''),
('/.', global_root_bfn, ''),
('/./cd-tests', global_cd_tests_bfn, ''),
('/./cd-tests/nonexistent', global_cd_tests_bfn, 'nonexistent'),
('/./cd-tests/./nonexistent', global_cd_tests_bfn, 'nonexistent'),
# The next three were complicating tab completion, so they no longer work.
# ('/./cd-tests/ ', global_cd_tests_bfn, ''),
# ('/ cd-tests', global_cd_tests_bfn, ''),
# ('/ cd-tests ', global_cd_tests_bfn, ''),
# This is a corner case we are willing to ignore
# ('/ /a', global_root_bfn, '//a'),
('/', global_root_bfn, ''),
('//', global_root_bfn, '//'),
)
@pytest.mark.parametrize('input_, expected_folder, expected_final', global_test_params)
def test_subfolder_try_resolve_path(input_, expected_folder, expected_final):
"""Test try_resolve_path."""
actual_folder, actual_final = subfolder.try_resolve_path(global_params, input_)
assert actual_folder is expected_folder
assert actual_final == expected_final
| 33.752941 | 109 | 0.655978 |
d810f87ca800fa805ffbaead4783dd581c546dcd | 700 | py | Python | common/src/stack/command/stack/commands/sync/dhcpd/__init__.py | khanfluence/stacki-cumulus-switch | df54afb20f6ea6a3a136b3c09b30df54ea79ffcc | ["BSD-3-Clause"] | null | null | null | common/src/stack/command/stack/commands/sync/dhcpd/__init__.py | khanfluence/stacki-cumulus-switch | df54afb20f6ea6a3a136b3c09b30df54ea79ffcc | ["BSD-3-Clause"] | null | null | null | common/src/stack/command/stack/commands/sync/dhcpd/__init__.py | khanfluence/stacki-cumulus-switch | df54afb20f6ea6a3a136b3c09b30df54ea79ffcc | ["BSD-3-Clause"] | null | null | null |
#
# @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
#
import stack.commands
import subprocess
class Command(stack.commands.sync.command):
"""
Rebuild the DHCPD configuration files on the frontend and restart the
DHCPD service
<example cmd='sync dhcpd'>
Rebuild the DHCPD configuration files on the frontend and restart the
DHCPD service
</example>
"""
def run(self, params, args):
self.notify('Sync DHCP\n')
self.report('report.dhcpd')
subprocess.call(['/sbin/service', 'dhcpd', 'restart'],
        stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
| 20.588235 | 70 | 0.711429 |
5ee7aee767417c33cd57bd00abdbb8045a173bae | 4,563 | py | Python | scout/parse/variant/clnsig.py | eriksjolund/scout | b2723735916e0052d4ef092c9de6c2493b932018 | ["BSD-3-Clause"] | null | null | null | scout/parse/variant/clnsig.py | eriksjolund/scout | b2723735916e0052d4ef092c9de6c2493b932018 | ["BSD-3-Clause"] | null | null | null | scout/parse/variant/clnsig.py | eriksjolund/scout | b2723735916e0052d4ef092c9de6c2493b932018 | ["BSD-3-Clause"] | null | null | null |
import logging
LOG = logging.getLogger(__name__)
def parse_clnsig(variant, transcripts=None):
"""Get the clnsig information
The clinvar format has changed several times and this function will try to parse all of them.
The first format represented the clinical significance terms with numbers. This was then
replaced by strings and the separator changed. At this stage the possibility to connect review
stats to a certain significance term was taken away. So now we can only annotate each
significance term with all review stats.
Also the clinvar accession numbers are in some cases annotated with the info key CLNACC and
sometimes with CLNVID.
This function also parses Clinvar annotations produced by VEP (CSQ field; parsed transcripts are required)
Args:
variant(cyvcf2.Variant)
transcripts(iterable(dict))
Returns:
clnsig_accessions(list(dict)): A list with clnsig accessions
"""
transcripts = transcripts or []
acc = variant.INFO.get("CLNACC", variant.INFO.get("CLNVID", ""))
sig = variant.INFO.get("CLNSIG", "").lower()
revstat = variant.INFO.get("CLNREVSTAT", "").lower()
clnsig_accessions = []
if acc == "" and transcripts:
if transcripts[0].get("clnsig"):
clnsig = set()
for transcript in transcripts:
for annotation in transcript.get("clnsig", []):
clnsig.add(annotation)
for annotation in clnsig:
clnsig_accessions.append({"value": annotation})
return clnsig_accessions
# VEP 97+ annotated clinvar info:
if transcripts[0].get("clinvar_clnvid"):
acc = transcripts[0]["clinvar_clnvid"]
sig = transcripts[0].get("clinvar_clnsig")
revstat = transcripts[0].get("clinvar_revstat")
# There are some versions where clinvar uses integers to represent terms
if isinstance(acc, int) or acc.isdigit():
revstat_groups = []
if revstat:
revstat_groups = [rev.lstrip("_") for rev in revstat.replace("&", ",").split(",")]
sig_groups = []
for significance in sig.split(","):
for term in significance.lstrip("_").split("/"):
sig_groups.append("_".join(term.split(" ")))
for sig_term in sig_groups:
clnsig_accession = {
"value": sig_term,
"accession": int(acc),
"revstat": ",".join(revstat_groups),
}
clnsig_accessions.append(clnsig_accession)
# Test to parse the older format
if acc and not clnsig_accessions:
acc_groups = acc.split("|")
sig_groups = sig.split("|")
revstat_groups = revstat.split("|")
for acc_group, sig_group, revstat_group in zip(acc_groups, sig_groups, revstat_groups):
accessions = acc_group.split(",")
significances = sig_group.split(",")
revstats = revstat_group.split(",")
for accession, significance, revstat in zip(accessions, significances, revstats):
clnsig_accessions.append(
{
"value": int(significance),
"accession": accession,
"revstat": revstat,
}
)
return clnsig_accessions
def is_pathogenic(variant):
"""Check if a variant has the clinical significance to be loaded
We want to load all variants that are in any of the predefined categories regardless of rank
scores etc.
Args:
variant(cyvcf2.Variant)
Returns:
bool: If variant should be loaded based on clinvar or not
"""
load_categories = set(
[
"pathogenic",
"likely_pathogenic",
"conflicting_interpretations_of_pathogenicity",
"conflicting_interpretations",
]
)
# check if VEP-annotated field contains clinvar pathogenicity info
vep_info = variant.INFO.get("CSQ")
if vep_info:
for category in load_categories:
if category in vep_info.lower():
return True
# Otherwise check if clinvar pathogenicity status is in INFO field
clnsig_accessions = parse_clnsig(variant)
for annotation in clnsig_accessions:
clnsig = annotation["value"]
if clnsig in load_categories:
return True
if isinstance(clnsig, int):
if clnsig == 4 or clnsig == 5:
return True
return False
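# --- Editor's note: illustrative sketch, not part of the original scout module. ---
# A hedged example of exercising parse_clnsig() and is_pathogenic() with a
# stand-in variant object; only the INFO.get interface used above is mocked,
# and every field value is made up for illustration.
class _FakeVariant:
    def __init__(self, info):
        self.INFO = info  # parse_clnsig/is_pathogenic only call INFO.get()

if __name__ == "__main__":
    v = _FakeVariant({
        "CLNVID": "12345",
        "CLNSIG": "Pathogenic/Likely_pathogenic",
        "CLNREVSTAT": "criteria_provided,_multiple_submitters",
    })
    print(parse_clnsig(v))
    # Expected: two dicts ('pathogenic' and 'likely_pathogenic', lower-cased),
    # each carrying accession 12345 and the review status string.
    print(is_pathogenic(v))  # True, since 'pathogenic' is a load category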
| 34.832061 | 107 | 0.61407 |
b819ae9a8acf0c2158c54fed2da4b7a9b01e9e9a | 35 | py | Python | docpub/api/yuque/__init__.py | altairwei/WizPublishTool | fe67523266ef10719af5f836c17821dc61ecc40a | ["MIT"] | null | null | null | docpub/api/yuque/__init__.py | altairwei/WizPublishTool | fe67523266ef10719af5f836c17821dc61ecc40a | ["MIT"] | null | null | null | docpub/api/yuque/__init__.py | altairwei/WizPublishTool | fe67523266ef10719af5f836c17821dc61ecc40a | ["MIT"] | null | null | null |
LARK_HOST = 'https://www.yuque.com'
| 35 | 35 | 0.714286 |
cd6237510fb3cc6657ed522cb1fe131854c75b9b | 2,580 | py | Python | hma_slave.py | Lord-of-the-Galaxy/heroku-multi-account | 9f2d8f7455ed954cc25ed905a966a6326b4d2967 | ["MIT"] | 1 | 2020-06-02T10:42:23.000Z | 2020-06-02T10:42:23.000Z | hma_slave.py | Lord-of-the-Galaxy/heroku-multi-account | 9f2d8f7455ed954cc25ed905a966a6326b4d2967 | ["MIT"] | null | null | null | hma_slave.py | Lord-of-the-Galaxy/heroku-multi-account | 9f2d8f7455ed954cc25ed905a966a6326b4d2967 | ["MIT"] | null | null | null |
import os, os.path
import psycopg2
from flask import Flask, send_file, request
import hma_conf
# You shouldn't need to modify anything here
app = Flask(__name__)
DB_URL = os.environ['DATABASE_URL']
HMA_KEY = os.environ['HMA_SHARED_KEY']
TABLES = hma_conf.PG_TABLES
conn = psycopg2.connect(DB_URL)
@app.route('/')
def index():
return f"Index, tables: {TABLES}"
@app.route('/pull_db/<tname>', methods=['GET', 'POST'])
def pull_db(tname):
if request.method == 'POST' and 'key' in request.form and request.form['key'] == HMA_KEY:
if tname in TABLES:
if os.path.isfile(f'{tname}.db'):
return send_file(f'{tname}.db', as_attachment=True)
else:
return "Prepare first", 409
else:
return "No such table", 404
elif request.method == 'POST':
if 'key' in request.form:
print("Incorrect key:", request.form['key'])
return "Incorrect Key!", 403
else:
return "Supply shared key!", 403
else:
return "Only POST!", 405
@app.route('/prepare_pull')
def prepare_pull():
cur = conn.cursor()
try:
for tname in TABLES:
with open(f'{tname}.db', 'w') as f:
print(f"Copying {tname}")
cur.copy_to(f, f'"{tname}"')
return "Success"
except IOError:
print("IO ERROR")
return "IO ERROR", 500
finally:
cur.close()
@app.route('/push_db/<tname>', methods=['GET', 'POST'])
def push_db(tname):
if request.method == 'POST' and 'key' in request.form and request.form['key'] == HMA_KEY:
if tname not in TABLES:
return "No such table", 404
if 'file' not in request.files:
return "Upload a DB file", 400
f = request.files['file']
if f.filename == '':
return "Upload non-empty file", 400
if f and f.filename == f'{tname}.db':
print(f"got new DB: {tname}")
cur = conn.cursor()
cur.execute(f'DELETE FROM {tname}')
cur.copy_from(f, f'"{tname}"')
conn.commit()
cur.close()
return "Success!"
else:
return "Use correct name", 400
elif request.method == 'POST':
if 'key' in request.form:
print("Incorrect key:", request.form['key'])
return "Incorrect Key!", 403
else:
return "Supply shared key!", 403
else:
return "Only POST!", 405
if __name__=='__main__':
app.run('0.0.0.0', port=8080, debug=True)
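# --- Editor's note: illustrative sketch, not part of the original project. ---
# A hedged example of how a master app might drive the endpoints above with the
# `requests` library (an extra dependency); the slave URL and table name are
# assumptions, while HMA_KEY reuses the shared key defined at module level.
def _example_client():
    import requests
    slave = "https://my-hma-slave.example.com"   # hypothetical slave app URL
    table = "mytable"                            # must appear in hma_conf.PG_TABLES
    requests.get(f"{slave}/prepare_pull")        # ask the slave to dump its tables
    resp = requests.post(f"{slave}/pull_db/{table}", data={"key": HMA_KEY})
    with open(f"{table}.db", "wb") as fh:
        fh.write(resp.content)
    # Push a (possibly modified) table dump back to the slave:
    with open(f"{table}.db", "rb") as fh:
        requests.post(f"{slave}/push_db/{table}", data={"key": HMA_KEY},
                      files={"file": (f"{table}.db", fh)})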
| 28.351648 | 93 | 0.555814 |
efe343ac07c05d238b64d12c94d873affa314689 | 449 | py | Python | opencvpydemo/target_tracking/hist.py | SosonHuang/opencvPy | 5bddb9e6d9e2e4aff4e5afbe720fbe9389e1d919 | ["MIT"] | 1 | 2018-11-14T02:54:24.000Z | 2018-11-14T02:54:24.000Z | opencvpydemo/target_tracking/hist.py | SosonHuang/opencvPy | 5bddb9e6d9e2e4aff4e5afbe720fbe9389e1d919 | ["MIT"] | null | null | null | opencvpydemo/target_tracking/hist.py | SosonHuang/opencvPy | 5bddb9e6d9e2e4aff4e5afbe720fbe9389e1d919 | ["MIT"] | 1 | 2018-04-16T13:57:14.000Z | 2018-04-16T13:57:14.000Z |
import cv2
import numpy as np
from matplotlib import pyplot as plt
camera = cv2.VideoCapture(0)
while True:
ret, img = camera.read()
color = ('b','g','r')
for i,col in enumerate(color):
histr = cv2.calcHist([img],[i],None,[256],[0,256])
plt.plot(histr,color = col)
plt.xlim([0,256])
plt.show()
#cv2.imshow("frame", img)
# k = cv2.waitKey(30) & 0xff
# if k == 27:
# break
camera.release()
cv2.destroyAllWindows()
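# --- Editor's note: illustrative sketch, not part of the original demo. ---
# A hedged single-image variant of the camera loop above, useful when no camera
# is attached; the file name is an assumption.
def _plot_histogram_for_file(path='sample.jpg'):
    img = cv2.imread(path)
    if img is None:
        return
    for i, col in enumerate(('b', 'g', 'r')):
        histr = cv2.calcHist([img], [i], None, [256], [0, 256])
        plt.plot(histr, color=col)
    plt.xlim([0, 256])
    plt.show()  # blocks until the histogram window is closed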
| 20.409091 | 54 | 0.621381 |
ef5288dd7d7c3e62adf6d0b574ed5353905779a5 | 412 | py | Python | Section_10_Quotes_Scraping/pages/quotes_page.py | rodrigoarenas456/CompletePythonGuide | 505ac20c33856a53f1ffe12809ad78161c78a325 | ["MIT"] | null | null | null | Section_10_Quotes_Scraping/pages/quotes_page.py | rodrigoarenas456/CompletePythonGuide | 505ac20c33856a53f1ffe12809ad78161c78a325 | ["MIT"] | null | null | null | Section_10_Quotes_Scraping/pages/quotes_page.py | rodrigoarenas456/CompletePythonGuide | 505ac20c33856a53f1ffe12809ad78161c78a325 | ["MIT"] | null | null | null |
from bs4 import BeautifulSoup
from Section_10_Quotes_Scraping.locators.quotes_page_locators import QuotesPageLocators
from Section_10_Quotes_Scraping.parsers.quote import QuoteParser
class QuotesPage:
def __init__(self, page):
self.soup = BeautifulSoup(page, 'html.parser')
@property
def quotes(self):
return [QuoteParser(e) for e in self.soup.select(QuotesPageLocators.QUOTE)]
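# --- Editor's note: illustrative sketch, not part of the original course code. ---
# A hedged usage example; the target URL is an assumption and `requests` is an
# extra dependency not imported by the original module.
def _example_scrape():
    import requests
    page_html = requests.get("http://quotes.toscrape.com").text  # hypothetical target page
    page = QuotesPage(page_html)
    for quote in page.quotes:
        print(quote)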
| 25.75 | 87 | 0.771845 |
75989dae85ade9cd2552f33e5f2ec728be072874 | 6,214 | py | Python | 00_data_import.py | koenlab/template_eeg_pipeline | 67bb8bdb52dad0ecb1559e4ec617b31b7b8cdf42 | ["BSD-3-Clause"] | null | null | null | 00_data_import.py | koenlab/template_eeg_pipeline | 67bb8bdb52dad0ecb1559e4ec617b31b7b8cdf42 | ["BSD-3-Clause"] | null | null | null | 00_data_import.py | koenlab/template_eeg_pipeline | 67bb8bdb52dad0ecb1559e4ec617b31b7b8cdf42 | ["BSD-3-Clause"] | null | null | null |
"""
Script: 00_data_import.py
Creator: Joshua D. Koen
Description: This script imports data from sourcedata to bids format.
"""
# Import Libraries
import os
os.chdir(os.path.split(__file__)[0])
import numpy as np
import pandas as pd
import shutil
import json
from random import (randrange, seed)
from mne.io import read_raw_brainvision
from mne import events_from_annotations
import mne
from mne_bids import (BIDSPath, write_raw_bids, mark_bad_channels)
from config import (bids_dir, source_dir, deriv_dir, task,
cols_to_keep, cols_to_add, cols_to_rename,
rename_events, unchanged_markers, bad_chans,
preprocess_opts)
from functions import get_sub_list
# Get subject list to process
sub_list = get_sub_list(source_dir, allow_all=True)
study_seed = int(input('Enter digits for study/project id: '))
# Get Subject List
for sub in sub_list:
# STEP 1: SUBJECT INFORMATION DEFINITION
# Define the Subject and Source Path
sub_id = sub.replace('sub-', '')
source_sub_dir = source_dir / sub
# Handle Bids Path and ID for EEG data
bids_id = sub[-3:]
bids_sub_dir = BIDSPath(subject=bids_id, task=task,
datatype='eeg', root=bids_dir)
# Derivative Paths
deriv_sub_dir = deriv_dir / f'sub-{bids_id}'
deriv_sub_dir.mkdir(parents=True, exist_ok=True)
# Print Info to Screen
print(f'Making BIDS data for sub-{bids_id} ({sub_id}) for task-{task}')
print(f' Source Path: {source_sub_dir}')
print(f' BIDS Path: {bids_sub_dir.directory}')
print(f' Derivative Path: {deriv_sub_dir}')
# Ask for info to specify subject_info
age = int(input('Enter age: '))
sex = int(input('Enter sex/gender (0=unknown, 1=male, 2=female): '))
hand = int(input('Enter handedness (1=right, 2=left, 3=ambidextrous): '))
# STEP 2: BIDS-FY EEG DATA
# Define the source data file
source_vhdr = source_sub_dir / f'{sub_id}_1back.vhdr'
# Read in raw bv from source and anonymize
raw = read_raw_brainvision(
source_vhdr, misc=[preprocess_opts['photosensor_chan']],
eog=['VEOG', 'HEOG'])
# Set the power line frequency to 60 Hz
raw.info['line_freq'] = 60.0
# Anonymize
seed(a=study_seed)
raw.anonymize(
daysback=(365 * randrange(100, 110)) + (randrange(-120, 120)))
# Update subject_info
bdate = raw.info['meas_date'].date()
bdate = bdate.replace(year=bdate.year-age)
subject_info = {
'id': int(bids_id),
'his_id': f'sub-{bids_id}',
'birthday': (bdate.year, bdate.month, bdate.day),
'sex': sex,
'hand': hand,
'last_name': 'mne_anonymize',
'first_name': 'mne_anonymize',
'middle_name': 'mne_anonymize',
}
raw.info['subject_info'] = subject_info
# Extract events from the annotations
events, event_id = events_from_annotations(raw)
# Write BIDS Output
if bids_sub_dir.directory.is_dir():
shutil.rmtree(bids_sub_dir.directory)
bids_sub_dir = write_raw_bids(
raw, bids_path=bids_sub_dir,
overwrite=True, verbose=False)
# UPDATE CHANNELS.TSV
# Get bad channels and update
sub_bad_chans = bad_chans.get(bids_id)
if sub_bad_chans is not None:
print(f'{bids_id} has bad channels.')
mark_bad_channels(
sub_bad_chans['channels'], sub_bad_chans['reason'],
bids_path=bids_sub_dir)
# Load *channels.tsv file
bids_sub_dir.update(suffix='channels', extension='.tsv')
chans_data = pd.read_csv(bids_sub_dir.fpath, sep='\t')
# Add EEG Reference
chans_data['reference'] = 'FCz'
# Remove online reference from auxillary channels
for chan in ['VEOG', 'HEOG', 'Photosensor']:
chans_data.loc[chans_data['name'] == chan, ['reference']] = 'n/a'
# Overwrite file
chans_data.to_csv(bids_sub_dir.fpath, sep='\t', index=False)
# STEP 3: PROCESS BEHAVIORAL DATA FILE
# Read in the *beh.tsv behavioral file
beh_source_file = source_sub_dir / f'{sub_id}_1back_beh.tsv'
beh_data = pd.read_csv(beh_source_file, sep='\t')[cols_to_keep]
beh_data.rename(columns=cols_to_rename, inplace=True)
# Replace NaN and -99 with 'n/a' for resp and rt, respectively
beh_data['resp'].fillna('n/a', inplace=True)
beh_data['rt'].replace(-99.0, 'n/a', inplace=True)
# Replace subject id and select needed data columns
beh_data['id'] = bids_id
# Fill in remaining missing values
beh_data.replace(['None', '', '--'], 'n/a', inplace=True)
# Save behavioral data
bids_sub_dir.update(datatype='beh')
bids_sub_dir.directory.mkdir(parents=True, exist_ok=True)
beh_save_file = bids_sub_dir.directory / \
f'sub-{bids_id}_task-{task}_beh.tsv'
beh_data.to_csv(beh_save_file, sep='\t', index=False)
# STEP 4: UPDATE *_EVENTS.TSV WITH BEHAVIORAL DATA
# Load *events.tsv
bids_sub_dir.update(datatype='eeg', suffix='events')
events_data = pd.read_csv(bids_sub_dir.fpath, sep='\t')
# Add new columns filled with "n/a" values
events_data[cols_to_add] = 'n/a'
# Update with values
counter = 0 # Keep track of current row in beh_data
for index, row in events_data.iterrows():
trial_type = rename_events[row['trial_type']]
events_data.at[index, 'trial_type'] = trial_type
if trial_type not in unchanged_markers:
this_trial = beh_data.iloc[counter]
for col in cols_to_add:
events_data.at[index, col] = this_trial[col]
counter += 1
# Overwrite *events.tsv
events_data.to_csv(bids_sub_dir.fpath, sep='\t', index=False)
# STEP 5: UPDATE *eeg_json
# Load JSON
bids_sub_dir.update(suffix='eeg', extension='json')
with open(bids_sub_dir.fpath, 'r') as file:
eeg_json = json.load(file)
# Update keys
eeg_json['EEGReference'] = 'FCz'
eeg_json['EEGGround'] = 'Fpz'
eeg_json['EEGPlacementScheme'] = \
[x for x in raw.ch_names if x not in
['VEOG', 'HEOG', 'Photosensor']]
# Save EEG JSON
with open(bids_sub_dir.fpath, 'w') as file:
json.dump(eeg_json, file)
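# --- Editor's note: illustrative sketch, not part of the original pipeline. ---
# A hedged example of the shape config.py is expected to provide, inferred only
# from how the names are used above; every concrete value below is an
# assumption, not the lab's real configuration.
from pathlib import Path

example_config = dict(
    bids_dir=Path('data/bids'),
    source_dir=Path('data/sourcedata'),
    deriv_dir=Path('data/derivatives'),
    task='1back',
    cols_to_keep=['id', 'stim', 'resp', 'rt'],        # behavioral columns to retain
    cols_to_rename={'response_time': 'rt'},           # source column -> BIDS column
    cols_to_add=['stim', 'resp', 'rt'],               # columns copied into *_events.tsv rows
    rename_events={'Stimulus/S  1': 'stim/onset'},    # raw marker -> trial_type
    unchanged_markers=['boundary'],                   # markers with no behavioral row
    bad_chans={'001': {'channels': ['TP9'], 'reason': ['noisy']}},
    preprocess_opts={'photosensor_chan': 'Photosensor'},
)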
| 33.229947 | 77 | 0.660766 |