hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73f42c1536b7cbae9884bce03cfe3067637e0ad1 | 3,681 | py | Python | get_stock_data.py | jeremychonggg/Alpaca-Trading-Bot | 82df00e327e2e55f5a0cdf85cd950c49c59bf669 | [
"MIT"
] | null | null | null | get_stock_data.py | jeremychonggg/Alpaca-Trading-Bot | 82df00e327e2e55f5a0cdf85cd950c49c59bf669 | [
"MIT"
] | null | null | null | get_stock_data.py | jeremychonggg/Alpaca-Trading-Bot | 82df00e327e2e55f5a0cdf85cd950c49c59bf669 | [
"MIT"
] | null | null | null | import json
import requests
import pandas as pd
import websocket
# Get Alpaca API Credential
# Alpaca market-data v2 REST root and the authentication headers, loaded
# from a local key.txt that must contain a JSON object of header name/value
# pairs (API key id and secret).
# NOTE(review): the file handle is never closed; acceptable for a one-shot
# script, but use a `with` block if this module grows.
endpoint = "https://data.alpaca.markets/v2"
headers = json.loads(open("key.txt", 'r').read())
def hist_data(symbols, start="2021-01-01", timeframe="1Hour", limit=50, end=""):
    """Return historical bar data for the given symbols.

    :param symbols: an iterable of ticker symbols, or a single
        comma-separated string e.g. "MSFT,AMZN,GOOG"
    :param start: ISO-8601 start date for the query
    :param timeframe: bar aggregation period (e.g. "1Min", "1Hour", "1Day")
    :param limit: maximum number of bars per request page
    :param end: optional ISO-8601 end date; empty string means "no bound"
    :return: dict mapping each symbol to a DataFrame of bars indexed by time
    """
    if isinstance(symbols, str):
        # Bug fix: iterating a plain string would previously loop over its
        # individual characters, not the ticker symbols.
        symbols = [s.strip() for s in symbols.split(",") if s.strip()]
    df_data_tickers = {}
    for symbol in symbols:
        bar_url = endpoint + "/stocks/{}/bars".format(symbol)
        params = {"start": start, "limit": limit, "timeframe": timeframe}
        if end:
            # Bug fix: the `end` argument was previously accepted but ignored.
            params["end"] = end
        data = {"bars": [], "next_page_token": "", "symbol": symbol}
        # Page through the API until no continuation token is returned.
        while True:
            r = requests.get(bar_url, headers=headers, params=params).json()
            data["bars"] += r["bars"]
            if r["next_page_token"] is None:
                break
            params["page_token"] = r["next_page_token"]
            data["next_page_token"] = r["next_page_token"]
        df_data = pd.DataFrame(data["bars"])
        df_data.rename({"t": "time", "o": "open", "h": "high", "l": "low",
                        "c": "close", "v": "volume"}, axis=1, inplace=True)
        df_data["time"] = pd.to_datetime(df_data["time"])
        df_data.set_index("time", inplace=True)
        df_data.index = df_data.index.tz_convert("America/Indiana/Petersburg")
        df_data_tickers[symbol] = df_data
    return df_data_tickers
def get_historical_data(ticker_list, start_date, end_date=None, limit=10000, timeframe="1Day"):
    """Return historical bar data for each ticker in ticker_list.

    :param ticker_list: iterable of ticker symbols, e.g. ["MSFT", "AMZN"]
    :param start_date: ISO-8601 start date for the query
    :param end_date: optional ISO-8601 end date
    :param limit: maximum number of bars per request page
    :param timeframe: aggregation period. Available values: `1Min`, `1Hour`, `1Day`
        https://alpaca.markets/docs/api-documentation/api-v2/market-data/alpaca-data-api-v2/historical/#bars
    :return: dict mapping symbol -> DataFrame of bars indexed by
        America/New_York-localised time; symbols with no data are skipped
    """
    df_data_tickers = {}
    for symbol in ticker_list:
        bar_url = endpoint + "/stocks/{}/bars".format(symbol)
        params = {"start": start_date, "end": end_date, "limit": limit, "timeframe": timeframe}
        data = {"bars": [], "next_page_token": "", "symbol": symbol}
        # Page through the API until no continuation token is returned.
        while True:
            r = requests.get(bar_url, headers=headers, params=params).json()
            try:
                token = r["next_page_token"]
            except KeyError:
                # Error payloads (bad symbol, rate limit, ...) carry no token;
                # previously a bare `except:` swallowed every error here.
                break
            data["bars"] += r["bars"]
            if token is None:
                break
            params["page_token"] = token
            data["next_page_token"] = token
        df_data = pd.DataFrame(data["bars"])
        df_data.rename({"t": "time", "o": "open", "h": "high", "l": "low",
                        "c": "close", "v": "volume"}, axis=1, inplace=True)
        if "time" in df_data.columns:
            # An empty result set produces a DataFrame with no columns; skip
            # it explicitly instead of relying on a blanket try/except.
            df_data["time"] = pd.to_datetime(df_data["time"])
            df_data.set_index("time", inplace=True)
            df_data.index = df_data.index.tz_convert("America/New_York")
            df_data_tickers[symbol] = df_data
        print("---- Created for [{}]".format(symbol))
    return df_data_tickers
| 39.159574 | 115 | 0.551481 |
73f83bfdf1bdf3cc0ae0369940411280ceef339a | 4,420 | py | Python | yaps/server/subscription.py | victorhook/vqtt | f79f9826ce91bf77a75047c22d7a729d539f83f9 | [
"MIT"
] | null | null | null | yaps/server/subscription.py | victorhook/vqtt | f79f9826ce91bf77a75047c22d7a729d539f83f9 | [
"MIT"
] | null | null | null | yaps/server/subscription.py | victorhook/vqtt | f79f9826ce91bf77a75047c22d7a729d539f83f9 | [
"MIT"
] | 1 | 2021-03-02T19:18:30.000Z | 2021-03-02T19:18:30.000Z | import asyncio
from yaps.api import protocol
from yaps.utils.log import Log
SLEEP_SLOT_TIME = 1 # In seconds.
def is_dead(self) -> bool:
    """Return True once this subscription has been marked dead (see die())."""
    return not self._alive
def die(self) -> None:
    """Mark this subscription as dead; idempotent (repeat calls are no-ops)."""
    if not self._alive:
        return
    self._alive = False
    Log.debug(f'Subscription died {self}')
def _set_identifier(self, topic: str) -> None:
    """ Sets the identification of the subscription.
        This consists of:
            1. Topic
            2. File descriptor number from the reader/writer stream.
        On failure, self.fd is set to None and the error is logged.
    """
    self.topic = topic
    try:
        # get_extra_info returns None for non-socket transports, in which
        # case .fileno() raises AttributeError below.
        self.fd = self._writer.get_extra_info('socket').fileno()
    except AttributeError:
        # Streams are incorrect
        Log.err(f'Incorrect streams to subscription to {self.topic}')
        self.fd = None
def __repr__(self):
    """Debug representation showing the file descriptor and the topic."""
    return f'| ID:{self.fd} Topic: {self.topic} |'
| 31.126761 | 78 | 0.563801 |
73fb16f86099e7cc34882ec8e6eb8ce6cb617a74 | 487 | py | Python | processing/1_comset.py | acleclair/ICPC2020_GNN | a8b03de597e8f25c17503c3834c7956ecc8f2247 | [
"MIT"
] | 58 | 2020-04-09T20:29:34.000Z | 2022-03-28T11:38:40.000Z | processing/1_comset.py | acleclair/ICPC2020_GNN | a8b03de597e8f25c17503c3834c7956ecc8f2247 | [
"MIT"
] | 11 | 2020-04-11T14:19:01.000Z | 2021-11-27T07:38:41.000Z | processing/1_comset.py | acleclair/ICPC2020_GNN | a8b03de597e8f25c17503c3834c7956ecc8f2247 | [
"MIT"
] | 14 | 2020-06-15T14:32:03.000Z | 2022-01-23T10:33:15.000Z | import pickle
# Map of function IDs flagged as auto-generated; those comments are excluded.
with open('autogenfid.pkl', 'rb') as fin:
    bad_fid = pickle.load(fin)

comdata = 'com_pp.txt'
good_fid = []  # retained from the original script; never populated
outfile = './output/dataset.coms'

# Keep only human-written comments of moderate length (3..13 tokens) and
# write them out wrapped in <s> ... </s> sentence markers.
# Both files are now opened via `with` so the handles are always closed
# (the input file was previously leaked).
with open(outfile, 'w') as fo, open(comdata) as fin:
    for line in fin:
        tmp = line.split(',')
        fid = int(tmp[0].strip())
        if bad_fid[fid]:
            continue
        com = tmp[1].strip().split()
        if len(com) > 13 or len(com) < 3:
            continue
        fo.write('{}, <s> {} </s>\n'.format(fid, ' '.join(com)))
| 20.291667 | 51 | 0.546201 |
73fb988d86cc41ea1f693ac556af57905fca2bc3 | 22,588 | py | Python | shibuya/cubesym.py | Parcly-Taxel/Shibuya | aa79b47d2a5fc859acb9645ebd635578fe2f145b | [
"MIT"
] | null | null | null | shibuya/cubesym.py | Parcly-Taxel/Shibuya | aa79b47d2a5fc859acb9645ebd635578fe2f145b | [
"MIT"
] | null | null | null | shibuya/cubesym.py | Parcly-Taxel/Shibuya | aa79b47d2a5fc859acb9645ebd635578fe2f145b | [
"MIT"
] | null | null | null | """
Cubic symmetric graphs. Most of the embeddings realised here were taken from MathWorld.
"""
from mpmath import *
from functools import reduce
from shibuya.generators import cu, star_radius, ring_edges, lcf_edges
from shibuya.generators import all_unit_distances, circumcentre
from shibuya.generators import fixparams_unitdist, symmetrise
# F4A = tetrahedron() or complete(4) (not unit-distance)
# F6A = circulant(6, (1, 3)) or mobiusladder(3) (not unit-distance)
# F8A = genpetersen("cube")
# F10A = genpetersen("petersen")
def heawood():
    """Return the symmetric unit-distance embedding of the Heawood graph (F14A)
    tucked away in Mathematica's GraphData."""
    # Polynomial (in v) whose root near 0.275 is the imaginary part of the
    # first placed vertex; coefficients transcribed verbatim — do not edit.
    P = [10485760, 78643200, 263192576, 543686656, 812777472, 942080000, 843317248, 552468480, 208879616, -31170560, -99213312, -76779520, -32795648, 7878144, 17269760, 16256512, 11392032, 4836080, 3014064, 361320, 69498, -165789]
    v0 = findroot(lambda v: polyval(P, v), 0.275)
    p0 = mpc(0.5, v0)
    p1 = mpc(sqrt(1-(v0+0.5)**2)-0.5, -0.5)
    # cu(a, b) constructs a point at unit distance from both arguments.
    p2 = cu(p0, -p0)
    p3 = cu(p1, -p1)
    p4 = cu(p2, p3)
    # Mirror across the imaginary axis to obtain all 14 vertices.
    vertices = [mpc(s*re(v), im(v)) for s in (1, -1) for v in (p0, -p0, p1, -p1, p2, p3, p4)]
    return all_unit_distances(vertices)
# F16A = genpetersen("mobiuskantor")
def pappus():
    """Return a unit-distance embedding of the Pappus graph (F18A),
    built from three concentric rings of six points each."""
    sixth_roots = unitroots(6)
    outer = [0.5j * w for w in sixth_roots]
    joint = cu(outer[2], outer[0])
    middle = [joint * w for w in sixth_roots]
    inner_seed = cu(0, joint)
    inner = [inner_seed * w for w in sixth_roots]
    spec = ((0, 0, 3), (0, 1, 0), (0, 1, -2), (2, 2, 1), (1, 2, 0))
    return (outer + middle + inner, ring_edges(6, spec))
# F20A = genpetersen("dodecahedron")
# F20B = genpetersen("desargues")
# F24A = genpetersen("nauru")
def f26a():
    """Return a unit-distance embedding of the F26A graph."""
    # f26a_vertices (defined elsewhere in the module) presumably returns
    # (vertices, residual); solve the residual to zero starting near 0.2.
    t0 = findroot(lambda t: f26a_vertices(t)[1], 0.2)
    return all_unit_distances(f26a_vertices(t0)[0])
def coxeter():
    """Return a unit-distance embedding of the Coxeter graph (F28A)."""
    u7 = unitroots(7)
    # star_radius(n, k) gives the circumradius of the {n/k} unit-edge star.
    s1 = star_radius(7)
    s2 = star_radius(7, 2)
    s3 = star_radius(7, 3)
    r0 = [-s2*u for u in u7]
    r1 = [s3*u for u in u7]
    # cu(a, b) constructs a point at unit distance from both arguments.
    z2 = cu(r0[0], r1[3])
    r2 = [z2*u for u in u7]
    z3 = cu(0, z2, s1, 1)
    r3 = [z3*u for u in u7]
    vertices = r0 + r1 + r2 + r3
    edges = ring_edges(7, ((0, 0, 2), (1, 1, 3), (3, 3, 1), (0, 2, 0), (1, 2, -3), (2, 3, 0)))
    return (vertices, edges)
def dyck():
    """Return a unit-distance embedding of the Dyck graph (F32A),
    assembled from four rings of eight points each."""
    octants = unitroots(8)
    ring0 = list(octants)
    ring1 = [sqrt(2) * w for w in octants]
    seed2 = cu(octants[1], 0, 1, star_radius(8))
    ring2 = [seed2 * w for w in octants]
    seed3 = cu(0, ring1[0], star_radius(8, 3), 1)
    ring3 = [seed3 * w for w in octants]
    spec = ((0, 1, 1), (0, 1, -1), (0, 2, -1), (2, 2, 1), (1, 3, 0), (3, 3, 3))
    return (ring0 + ring1 + ring2 + ring3, ring_edges(8, spec))
def dyck2():
    """Return a unit-distance embedding of the Dyck graph with D4 symmetry."""
    # dyck2_vertices (defined elsewhere in the module) presumably returns
    # (vertices, residual); solve the residual to zero starting near 0.1.
    t0 = findroot(lambda t: dyck2_vertices(t)[1], 0.1)
    return all_unit_distances(dyck2_vertices(t0)[0])
def f38a():
    """Return a unit-distance embedding of the F38A graph."""
    # f38a_vertices (defined elsewhere in the module) presumably returns
    # (vertices, residual); solve the residual to zero starting near 0.29.
    t0 = findroot(lambda t: f38a_vertices(t)[1], 0.29)
    return all_unit_distances(f38a_vertices(t0)[0])
def f40a(x=0.75):
    """Return a unit-distance embedding of F40A (bipartite double cover of F20A).
    x can be anything between (sqrt(5)-1)/2 and 1."""
    decagon = unitroots(10)
    base = star_radius(10)
    ring0 = [base * w for w in decagon]
    seed1 = cu(ring0[1], 0, 1, x)
    ring1 = [seed1 * w for w in decagon]
    seed2 = cu(ring1[2], ring1[-2])
    ring2 = [seed2 * w for w in decagon]
    seed3 = cu(0, seed2, base, 1)
    ring3 = [seed3 * w for w in decagon]
    return all_unit_distances(ring0 + ring1 + ring2 + ring3)
def f42a(mode=0):
    """Return a unit-distance embedding of the F42A graph.
    mode (0 or 1) selects between two algebraically related forms."""
    # Different starting points steer findroot to the two distinct solutions.
    x0 = (0.27, 1.36, 0.52) if mode == 0 else (1.24, 0.18, -0.53)
    t0 = findroot(lambda *t: f42a_vertices(*t)[1], x0)
    return all_unit_distances(f42a_vertices(*t0)[0])
# F48A = genpetersen("f48a") but the resulting embedding is vertex-edge-degenerate, so...
def f48a():
    """Return a non-degenerate unit-distance embedding of the F48A graph."""
    # Closed-form radii of the two outer rings; the inner rings sit one
    # unit closer to the origin.
    R = (2 + 3*sqrt(2) + sqrt(12*sqrt(6)-26)) / 4
    r = (2 + 3*sqrt(2) - sqrt(12*sqrt(6)-26)) / 4
    L = R-1
    l = r-1
    u24 = unitroots(24)
    # Even 24th roots carry the R/L rings, odd ones the r/l rings.
    ring_R = [u*R for u in u24[::2]]
    ring_r = [u*r for u in u24[1::2]]
    ring_L = [u*L for u in u24[::2]]
    ring_l = [u*l for u in u24[1::2]]
    vertices = ring_R + ring_r + ring_L + ring_l
    edges = ring_edges(12, ((0, 1, 0), (0, 1, -1), (0, 2, 0), (1, 3, 0), (2, 3, 2), (2, 3, -3)))
    return (vertices, edges)
def f50a():
    """Return a unit-distance embedding of the F50A graph, an embedding
    found by the computer (specifically the embedding_run() function in embeddingsearch)."""
    # f50a_vertices (defined elsewhere in the module) presumably returns
    # (vertices, residual); the edges come directly from the LCF notation.
    t0 = findroot(lambda t: f50a_vertices(t)[1], 2)
    return (f50a_vertices(t0)[0], lcf_edges(50, [21, -21, -19, 19, -19, 19, -19, 19, 21, -21]))
def f54a(i=2):
    """Return one of three (depending on i in {0, 1, 2}) algebraically related
    unit-distance embeddings of the F54A graph."""
    # Polynomial coefficient tables in the algebraic number 2*cos(pi/9);
    # their roots parametrise the three conjugate embeddings.
    px = [[3], [-10, -12], [13, 6, 34], [-17, -5, -14]] # x = a(1-c)
    py = [[3], [2, -2, -10], [1, -6, 9], [-19, 41, -10]] # y = c(1-a)
    pz = [[3], [5, -8, 2], [11, -14, -13], [-19, 41, -10]] # z = b(1-b)
    x = polyroots([polyval(l, 2*cos(pi/9)) for l in px])[i]
    y = polyroots([polyval(l, 2*cos(pi/9)) for l in py])[i]
    sxy = sqrt((1+x-y)**2 - 4*x)
    # Recover a and c from their sum/product relations with x and y.
    a = (1+x-y+sxy) / 2
    c = (1-x+y+sxy) / 2
    z = polyroots([polyval(l, 2*cos(pi/9)) for l in pz])[(1-i)%3]
    b = (1 + (-1 if i else 1)*sqrt(1-4*z)) / 2
    triple = [a, b, c]
    # Pair each point with its unit translate, then apply 9-fold symmetry.
    line = [p-d for p in triple for d in (0, 1)]
    return all_unit_distances(symmetrise(line, "C9"))
def f56a():
    """Return a unit-distance embedding of the F56A graph.
    Note that MathWorld's LCF notation for this is incorrect;
    it should be [11, 13, -13, -11]^14."""
    t = tan(pi/14)
    u = sqrt(polyval([-21, 98, 71], t*t))
    # Closed-form expressions for the real parts of the two base points.
    z1 = 2*sqrt(14*polyval([31*u, -20, -154*u, 104, 87*u, -68], t))
    z2 = 7*t*(t*t-3)**2 - 4*u
    a = (z1 + z2) / 64
    b = (z1 - z2) / 64
    u14 = unitroots(14)
    pa = mpc(a, 0.5)
    pb = mpc(b, 0.5)
    pac, pbc = conj(pa), conj(pb)
    # The previous revision also computed two distance residuals (d1, d2)
    # that were never used; they have been removed as dead code.
    # Replicate the four base points under the 14-fold rotational symmetry.
    vertices = [w*p for w in u14 for p in (pa, pb, pac, pbc)]
    return all_unit_distances(vertices)
def klein(a1=4.47, a2=2.42, a3=0.7, s1=1, s2=-1):
    """Return a unit-distance embedding of the cubic Klein graph (F56B).

    a1..a3 are free angle parameters of the chained unit edges; s1 and s2
    (each +1 or -1) pick which side cu() places the derived joints on."""
    u7 = unitroots(7)
    z0 = star_radius(7)
    r0 = [z0*u for u in u7]
    # Chain three unit edges off the base ring at the given angles.
    z1 = z0 + expj(a1)
    z2 = z1 + expj(a2)
    z3 = z1 + expj(a3)
    r1 = [z1*u for u in u7]
    r2 = [z2*u for u in u7]
    r3 = [z3*u for u in u7]
    # [::s] swaps the argument order of cu, flipping the constructed side.
    z4 = cu(*(r2[2], r3[0])[::s1])
    z5 = cu(*(r2[0], r3[1])[::s2])
    r4 = [z4*u for u in u7]
    r5 = [z5*u for u in u7]
    z6 = cu(0, r4[0], star_radius(7, 2), 1)
    z7 = cu(0, r5[0], star_radius(7, 3), 1)
    r6 = [z6*u for u in u7]
    r7 = [z7*u for u in u7]
    vertices = r0 + r1 + r2 + r3 + r4 + r5 + r6 + r7
    edges = ring_edges(7, ((0, 0, 1), (0, 1, 0), (1, 2, 0), (1, 3, 0),
                           (2, 4, -2), (3, 4, 0), (2, 5, 0), (3, 5, -1),
                           (4, 6, 0), (5, 7, 0), (6, 6, 2), (7, 7, 3)))
    return (vertices, edges)
def f56c():
    """Return a unit-distance embedding of the F56C graph,
    the bipartite double cover of the Coxeter graph."""
    roots = unitroots(14)
    seed0 = star_radius(14, 5)
    seed1 = star_radius(14, 3)
    ring0 = [seed0 * w for w in roots]
    ring1 = [seed1 * w for w in roots]
    seed2 = cu(ring1[4], ring0[0])
    ring2 = [seed2 * w for w in roots]
    seed3 = cu(0, seed2, star_radius(14), 1)
    ring3 = [seed3 * w for w in roots]
    spec = ((0, 0, 5), (1, 1, 3), (2, 1, 4), (2, 0, 0), (2, 3, 0), (3, 3, 1))
    return (ring0 + ring1 + ring2 + ring3, ring_edges(14, spec))
def f60a(t=-0.35):
    """Return a unit-distance embedding of the F60A graph.

    t is a free angle parameter for the unit edge hung off the base star."""
    u15 = unitroots(15)
    z0 = star_radius(15, 7)
    r0 = [z0*u for u in u15]
    z1 = z0 + expj(t)
    r1 = [z1*u for u in u15]
    # cu(a, b) constructs a point at unit distance from both arguments.
    z2 = cu(r1[3], r1[0])
    r2 = [z2*u for u in u15]
    z3 = cu(0, z2, star_radius(15, 2), 1)
    r3 = [z3*u for u in u15]
    vertices = r0 + r1 + r2 + r3
    edges = ring_edges(15, ((0, 0, 7), (0, 1, 0), (2, 1, 0), (2, 1, 3), (2, 3, 0), (3, 3, 2)))
    return (vertices, edges)
def f62a():
    """Return a unit-distance embedding of the F62A graph."""
    # Seven-parameter root-find; f62a_vertices (defined elsewhere in the
    # module) presumably returns (vertices, residuals).
    t0 = [-1.017, -0.819, 2.96, -0.282, -1.091, -0.624, 0.354]
    t0 = findroot(lambda *t: f62a_vertices(*t)[1], t0)
    return all_unit_distances(f62a_vertices(*t0)[0])
def f64a():
    """Return a unit-distance embedding of the F64A graph."""
    # f64a_vertices (defined elsewhere in the module) presumably returns
    # (vertices, residuals); solve the two-parameter residual system.
    t0 = findroot(lambda *t: f64a_vertices(*t)[1], (0.53, 1.6))
    return all_unit_distances(f64a_vertices(*t0)[0])
def f72a():
    """Return a unit-distance embedding of the F72A graph."""
    # f72a_vertices (defined elsewhere in the module) presumably returns
    # (vertices, residual); solve the residual to zero starting near 2.2.
    t0 = findroot(lambda t: f72a_vertices(t)[1], 2.2)
    return all_unit_distances(f72a_vertices(t0)[0])
def f74a():
    """Return a unit-distance embedding of the F74A graph."""
    # Seven-parameter root-find; f74a_vertices (defined elsewhere in the
    # module) presumably returns (vertices, residuals).
    t0 = [2.91, 4.74, 5.5, 4.88, 5, -0.05, 0.07]
    t0 = findroot(lambda *t: f74a_vertices(*t)[1], t0)
    return all_unit_distances(f74a_vertices(*t0)[0])
def f80a(t=1.39):
    """Return a unit-distance embedding of the F80A graph.

    t is a free angle parameter for the unit edge hung off the base star."""
    roots = unitroots(20)
    base = star_radius(20, 7)
    ring0 = [base * w for w in roots]
    seed1 = base + expj(t)
    ring1 = [seed1 * w for w in roots]
    seed2 = cu(ring1[2], ring1[0])
    ring2 = [seed2 * w for w in roots]
    seed3 = cu(0, seed2, star_radius(20, 3), 1)
    ring3 = [seed3 * w for w in roots]
    spec = ((0, 0, 7), (0, 1, 0), (2, 1, 0), (2, 1, 2), (2, 3, 0), (3, 3, 3))
    return (ring0 + ring1 + ring2 + ring3, ring_edges(20, spec))
def f84a():
    """Return a unit-distance embedding of the F84A graph - not degenerate
    despite its looks. The graph is notable in having the simple PSL(2,8)
    as its automorphism group."""
    # f84a_vertices (defined elsewhere in the module) presumably returns
    # (vertices, residuals); solve the four-parameter residual system.
    t0 = findroot(lambda *t: f84a_vertices(*t)[1], (-0.46, -1.44, 0.25, 0.75))
    return all_unit_distances(f84a_vertices(*t0)[0])
def f86a():
    """Return a unit-distance embedding of the F86A graph."""
    # Eight-parameter root-find; f86a_vertices (defined elsewhere in the
    # module) presumably returns (vertices, residuals).
    t0 = [3.60383, 3.44007, 4.34048, 5.63174, 3.26345, 0.488743, 0.113378, 0.236693]
    t0 = findroot(lambda *t: f86a_vertices(*t)[1], t0)
    return all_unit_distances(f86a_vertices(*t0)[0])
def foster(i=5):
    """Return any one of six (depending on 0 <= i <= 5) unit-distance
    embeddings of the Foster graph (F90A)."""
    # Each entry pairs a branch selector n with a findroot starting guess;
    # foster_vertices (defined elsewhere) presumably returns (vertices, residual).
    n, t0 = [(0, 0.38), (1, 1.35), (2, 0.15), (2, 1.18), (2, 4.68), (3, [1.5, 1.6])][i]
    tstar = findroot(lambda t: foster_vertices(n, t)[1], t0)
    return (foster_vertices(n, tstar)[0], lcf_edges(90, (17, -9, 37, -37, 9, -17)))
def foster_old():
    """Return the unit-distance embedding of the Foster graph (F90A)
    originally in Dounreay."""
    # foster_old_vertices (defined elsewhere) presumably returns
    # (vertices, residual); solve the residual to zero starting near 0.35.
    r0 = findroot(lambda r: foster_old_vertices(r)[1], 0.35)
    vertices = foster_old_vertices(r0)[0]
    edges = ring_edges(15, ((0, 1, 0), (1, 2, 0), (2, 3, 0), (3, 4, 0), (4, 5, 0), (5, 0, -1),
                            (0, 5, -2), (2, 3, -6), (4, 1, -2)))
    return (vertices, edges)
def f96a():
    """Return a unit-distance embedding of the F96A graph."""
    # f96a_vertices (defined elsewhere in the module) presumably returns
    # (vertices, residual); solve the residual to zero starting near 0.7.
    a0 = findroot(lambda a: f96a_vertices(a)[1], 0.7)
    return all_unit_distances(f96a_vertices(a0)[0])
def f96b(a=2.32, b=1.92, c=-0.26, s1=-1, s2=1, s3=1, s4=-1):
    """Return a unit-distance embedding of the F96B graph.

    a, b, c are free angle parameters of chained unit edges; s1..s4
    (each +1 or -1) select the side on which each derived joint is built."""
    u12 = unitroots(12)
    z2 = star_radius(12, 5)
    r2 = [u*z2 for u in u12]
    z3 = z2 + expj(a)
    r3 = [u*z3 for u in u12]
    z1 = z3 + expj(b)
    r1 = [u*z1 for u in u12]
    # [::s] swaps the argument order of cu, flipping the constructed side.
    z4 = cu(*(u12[3]*z1, z3)[::s1])
    r4 = [u*z4 for u in u12]
    z5 = z1 + expj(c)
    r5 = [u*z5 for u in u12]
    z6 = cu(*(u12[-5]*z5, z4)[::s2])
    r6 = [u*z6 for u in u12]
    z8 = cu(*(u12[-4]*z6, z5)[::s3])
    r8 = [u*z8 for u in u12]
    z7 = cu(z8, 0, 1, star_radius(12)) if s4 == 1 else cu(0, z8, star_radius(12), 1)
    r7 = [u*z7 for u in u12]
    vertices = r1 + r2 + r3 + r4 + r5 + r6 + r7 + r8
    edges = ring_edges(12, ((1, 1, 5), (1, 2, 0), (2, 0, 0), (3, 0, 3),
                            (3, 2, 0), (4, 0, 0), (5, 4, -5), (5, 3, 0),
                            (7, 5, -4), (7, 4, 0), (6, 7, 0), (6, 6, 1)))
    return (vertices, edges)
def biggssmith():
    """Return a unit-distance embedding of the BiggsSmith graph (F102A)."""
    # star_radius(17, k) gives the circumradius of the {17/k} unit-edge star.
    s1 = star_radius(17)
    s2 = star_radius(17, 2)
    s4 = star_radius(17, 4)
    s8 = star_radius(17, 8)
    u17 = unitroots(17)
    r1 = [s1*u*1j for u in u17]
    r4 = [s4*u*1j for u in u17]
    r8 = [s8*u*-1j for u in u17]
    sh1 = cu(r1[0], r4[0])
    rh1 = [sh1*u for u in u17]
    sh2 = cu(sh1, r8[7])
    rh2 = [sh2*u for u in u17]
    # NOTE: s2 (a radius above) is deliberately rebound here to the seed
    # point of the final ring.
    s2 = cu(sh2, 0, 1, s2)
    r2 = [s2*u for u in u17]
    vertices = r1 + r4 + rh1 + r8 + rh2 + r2
    edges = ring_edges(17, ((0, 0, 1), (1, 1, 4), (3, 3, 8), (5, 5, 2),
                            (0, 2, 0), (1, 2, 0), (2, 4, 0), (4, 5, 0), (4, 3, 7)))
    return (vertices, edges)
| 35.91097 | 230 | 0.527625 |
73fbbcb7d6e336ad39011f035279ed591c9a4ab4 | 937 | py | Python | src/create_scatterplot.py | djparente/coevol-utils | 966a1f16872d72886b92cc3fa51f803412acc481 | [
"BSD-3-Clause"
] | 1 | 2016-03-13T05:26:40.000Z | 2016-03-13T05:26:40.000Z | src/create_scatterplot.py | djparente/coevol-utils | 966a1f16872d72886b92cc3fa51f803412acc481 | [
"BSD-3-Clause"
] | null | null | null | src/create_scatterplot.py | djparente/coevol-utils | 966a1f16872d72886b92cc3fa51f803412acc481 | [
"BSD-3-Clause"
] | null | null | null | #!/cygdrive/c/Python27/python.exe
# Daniel J. Parente, Ph.D.
# Swint-Kruse Laboratory
# Physician Scientist Training Program
# University of Kansas Medical Center
# This code is adapted from the example available at
# http://pandasplotting.blogspot.com/2012/04/added-kde-to-scatter-matrix-diagonals.html
# Creates a scatterplot matrix (off-diagonals) with a kernal density estimate (KDE)
# of the distribution of (univariate) data on the diagonal
import numpy as np
import matplotlib.pyplot as plt
import pandas
import sys
# Input TSV of (univariate) columns and the output image path.
infile = sys.argv[1]
outfile = sys.argv[2]

maindata = pandas.read_csv(infile, sep="\t")

plt.rcParams['patch.facecolor'] = 'k'  # Make the markers black

# Plot: scatterplot matrix (off-diagonals) with a kernel density estimate
# of each variable on the diagonal.
# Fix: pandas.tools.plotting was deprecated and then removed (pandas >= 0.25);
# the function now lives at pandas.plotting.scatter_matrix.
# NOTE(review): marker='k.' mixes a colour code into the marker string;
# a plain '.' may be intended — confirm against the matplotlib version used.
ax = pandas.plotting.scatter_matrix(maindata, alpha=0.1, marker='k.', figsize=(8,8), diagonal='kde', range_padding=0.1)

# Give a small inter-plot spacing
plt.subplots_adjust(wspace=.05, hspace=.05)

#Save the figure
plt.savefig(outfile, dpi=600) | 28.393939 | 125 | 0.760939 |
73fdabf0cf89f2998b7ab3d1732e81dfc49cf70e | 4,143 | py | Python | core/perspective_projection.py | sam-lb/python-grapher | 657c423fa6e1f2260988749807db9a5beaf1fef2 | [
"MIT"
] | 2 | 2019-08-21T15:02:51.000Z | 2019-09-03T00:26:48.000Z | core/perspective_projection.py | sam-lb/mathgraph3d | 657c423fa6e1f2260988749807db9a5beaf1fef2 | [
"MIT"
] | 6 | 2019-07-28T21:28:11.000Z | 2019-11-05T12:08:23.000Z | core/perspective_projection.py | sam-lb/mathgraph3d | 657c423fa6e1f2260988749807db9a5beaf1fef2 | [
"MIT"
] | null | null | null | import pygame;
import numpy as np;
from math import sin, cos;
pygame.init();
# Window size in pixels and the camera depth (distance of the eye from the
# projection plane, also in pixels).
width, height, depth = 640, 480, 800;
camera = [width // 2, height // 2, depth];
# World-units visible along each axis, and the resulting pixels-per-unit.
units_x, units_y, units_z = 8, 8, 8;
scale_x, scale_y, scale_z = width / units_x, height / units_y, depth / units_z;
screen = pygame.display.set_mode((width, height));
pygame.display.set_caption("3D perspective projection test");
# Held keys repeat: 100 ms initial delay, then every 50 ms.
pygame.key.set_repeat(100, 50);
def scale(p):
    """ scale a point by the number of pixels per unit in each direction """
    factors = (scale_x, scale_y, scale_z)
    return tuple(coord * f for coord, f in zip(p, factors))
def translate_to_screen(p):
    """ convert from projected cartesian coordinates to canvas coordinates """
    centre_x, centre_y = width // 2, height // 2
    return (p[0] + centre_x, centre_y - p[1])
def project(p):
    """ project a point onto the 2D plane """
    # Perspective divide toward the camera point: each coordinate is pulled
    # toward the camera centre by camera_depth / (camera_depth + point_depth).
    # NOTE(review): divides by zero when p[2] == -camera[2] — points must stay
    # in front of the eye.
    proj_x = (camera[2] * (p[0] - camera[0])) / (camera[2] + p[2]) + camera[0];
    proj_y = (camera[2] * (p[1] - camera[1])) / (camera[2] + p[2]) + camera[1];
    return proj_x, proj_y;
def screen_point(p):
    """ convert a point in 3D cartesian space to a point in 2D canvas space """
    scaled = scale(p)
    projected = project(scaled)
    return translate_to_screen(projected)
def project_triangle(tri):
    """ return the screen coordinates of a triangle """
    # Bug fix: the previous revision called rproj(tri[i], tx, ty, tz), but no
    # rproj function exists anywhere in this module, so every call raised
    # NameError. Restore the working screen_point path that had been
    # commented out.
    return screen_point(tri[0]), screen_point(tri[1]), screen_point(tri[2])
def project_line(line):
    """ return the screen coordinates of a line """
    start, end = line
    return screen_point(start), screen_point(end)
# Scene geometry: one triangle plus three axis segments (world units).
triangle = ((1, 1, 1), (2, 2, 2), (1, 2, 1));
x_axis = ((-2, 0, 0), (2, 0, 0));
y_axis = ((0, -2, 0), (0, 2, 0));
z_axis = ((0, 0, -2), (0, 0, 2));
# Rotation angles about the x, y, z axes, adjusted by the arrow keys below.
tx, ty, tz = 0, 0, 0;
clock = pygame.time.Clock();
running = True;
# Main event/draw loop: repaint the projected triangle each frame at 30 FPS.
while running:
    screen.fill((255, 255, 200));
    proj_triangle = project_triangle(triangle);
    pygame.draw.polygon(screen, (255, 0, 200), proj_triangle);
    pygame.draw.polygon(screen, (0, 0, 0), proj_triangle, 1);
    # Mark each projected corner with a coloured 10x10 square.
    pygame.draw.rect(screen, (255, 0, 0), (*proj_triangle[0], 10, 10));
    pygame.draw.rect(screen, (0, 255, 0), (*proj_triangle[1], 10, 10));
    pygame.draw.rect(screen, (0, 0, 255), (*proj_triangle[2], 10, 10));
    ## proj_ax, proj_ay, proj_az = project_line(x_axis), project_line(y_axis), project_line(z_axis);
    ## pygame.draw.line(screen, (255, 0, 0), proj_ax[0], proj_ax[1], 1);
    ## pygame.draw.line(screen, (0, 255, 0), proj_ay[0], proj_ay[1], 1);
    ## pygame.draw.line(screen, (0, 0, 255), proj_az[0], proj_az[1], 1);
    pygame.display.flip();
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False;
            break;
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                #camera[0] -= 25;
                ## camera = list(np.array([camera]).dot(rot_mat_y(0.2).dot(rot_mat_z(0.1)))[0]);
                tx += 0.1;
            elif event.key == pygame.K_RIGHT:
                #camera[0] += 25;
                ## camera = list(np.array([camera]).dot(rot_mat_z(-0.1))[0]);
                tx -= 0.1;
            elif event.key == pygame.K_UP:
                ty += 0.1;
            elif event.key == pygame.K_DOWN:
                ty -= 0.1;
            elif event.key == pygame.K_SPACE:
                print(camera);
            elif event.key == pygame.K_ESCAPE:
                running = False;
                break;
    clock.tick(30);
pygame.quit();
| 34.525 | 99 | 0.565291 |
73fea2fbc1c54c3ba581a8b82427643b53be014d | 1,444 | py | Python | manu_sawyer/src/tensorflow_model_is_gripping/grasp_example.py | robertocalandra/the-feeling-of-success | 7bb895897e369ae9f5fcaeed61d401e019a9cdf1 | [
"MIT"
] | 10 | 2018-05-31T04:57:25.000Z | 2021-05-28T11:22:29.000Z | manu_sawyer/src/tensorflow_model_is_gripping/grasp_example.py | robertocalandra/the-feeling-of-success | 7bb895897e369ae9f5fcaeed61d401e019a9cdf1 | [
"MIT"
] | null | null | null | manu_sawyer/src/tensorflow_model_is_gripping/grasp_example.py | robertocalandra/the-feeling-of-success | 7bb895897e369ae9f5fcaeed61d401e019a9cdf1 | [
"MIT"
] | 3 | 2018-05-31T05:00:08.000Z | 2019-02-25T06:32:45.000Z | import grasp_net, grasp_params, h5py, aolib.img as ig, os, numpy as np, aolib.util as ut
net_pr = grasp_params.im_fulldata_v5()
net_pr = grasp_params.gel_im_fulldata_v5()
checkpoint_file = '/home/manu/ros_ws/src/manu_research/manu_sawyer/src/tensorflow_model_is_gripping/training/net.tf-6499'
gpu = '/gpu:0'
db_file = '/media/backup_disk/dataset_manu/ver2/2017-06-22/2017-06-22_212702.hdf5'
with h5py.File(db_file, 'r') as db:
pre, mid, _ = grasp_net.milestone_frames(db)
# sc = lambda x : ig.scale(x, (224, 224))
def sc(x):
""" do a center crop (helps with gelsight) """
x = ig.scale(x, (256, 256))
return ut.crop_center(x, 224)
u = ig.uncompress
crop = grasp_net.crop_kinect
inputs = dict(
gel0_pre=sc(u(db['GelSightA_image'].value[pre])),
gel1_pre=sc(u(db['GelSightB_image'].value[pre])),
gel0_post=sc(u(db['GelSightA_image'].value[mid])),
gel1_post=sc(u(db['GelSightB_image'].value[mid])),
im0_pre=sc(crop(u(db['color_image_KinectA'].value[pre]))),
im0_post=sc(crop(u(db['color_image_KinectA'].value[mid]))),
# these are probably unnecessary
depth0_pre=sc(crop(db['depth_image_KinectA'].value[pre].astype('float32'))),
depth0_post=sc(crop(db['depth_image_KinectA'].value[mid].astype('float32'))))
net = grasp_net.NetClf(net_pr, checkpoint_file, gpu)
prob = net.predict(**inputs)
print 'prob = ', prob
| 40.111111 | 121 | 0.668975 |
73febbee1eb35f8161409705d1117c1808557690 | 3,683 | py | Python | API/models/models.py | Rkanehisa/Stone_Projeto | b022cc7031ba2c3b29181df2720197ca9edc1ab3 | [
"MIT"
] | null | null | null | API/models/models.py | Rkanehisa/Stone_Projeto | b022cc7031ba2c3b29181df2720197ca9edc1ab3 | [
"MIT"
] | null | null | null | API/models/models.py | Rkanehisa/Stone_Projeto | b022cc7031ba2c3b29181df2720197ca9edc1ab3 | [
"MIT"
] | null | null | null | from API.db import db
from datetime import datetime
from passlib.apps import custom_app_context as pwd_context
class Card(db.Model):
    """SQLAlchemy model for a payment card belonging to a User."""
    __tablename__ = "card"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Cardholder name as printed on the card.
    name = db.Column(db.String(255), nullable=False)
    # NOTE(review): card number and CCV are stored in plaintext — confirm
    # whether they should be encrypted/tokenised before production use.
    number = db.Column(db.String(16), nullable=False)
    ccv = db.Column(db.String(3), nullable=False)
    # NOTE(review): both dates default to creation time; an expiration_date
    # equal to "now" looks unintended — verify against business rules.
    due_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    expiration_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    limit = db.Column(db.Float, nullable=False)  # total credit limit
    spent_limit = db.Column(db.Float, nullable=False)  # presumably the spent portion of the limit — TODO confirm
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
    user = db.relationship("User", back_populates="cards")
| 31.211864 | 114 | 0.65653 |
73fecce467712a52b8aaf68f72a88091d6f9da83 | 418 | py | Python | src/TestDice.py | Yamanama/CodeMonkeyApplication | 4dc24016b96dbed5b8e833d5248dd76d1f3dfc08 | [
"MIT"
] | null | null | null | src/TestDice.py | Yamanama/CodeMonkeyApplication | 4dc24016b96dbed5b8e833d5248dd76d1f3dfc08 | [
"MIT"
] | null | null | null | src/TestDice.py | Yamanama/CodeMonkeyApplication | 4dc24016b96dbed5b8e833d5248dd76d1f3dfc08 | [
"MIT"
] | null | null | null | import unittest
from Dice import Dice
# When run directly, delegate to unittest's CLI test runner.
if __name__ == '__main__': # pragma no cover
    unittest.main()
| 19.904762 | 62 | 0.636364 |
73fef45c289e3867a6d35ff55ed2c6e15b25c65c | 16,694 | py | Python | accProcess.py | CASSON-LAB/BiobankActivityCSF | d3e6d7283aed72fa329da1e045fa49cc7e6b2e9c | [
"BSD-2-Clause"
] | 3 | 2020-08-03T12:08:34.000Z | 2021-03-16T11:31:01.000Z | accProcess.py | CASSON-LAB/BiobankActivityCSF | d3e6d7283aed72fa329da1e045fa49cc7e6b2e9c | [
"BSD-2-Clause"
] | null | null | null | accProcess.py | CASSON-LAB/BiobankActivityCSF | d3e6d7283aed72fa329da1e045fa49cc7e6b2e9c | [
"BSD-2-Clause"
] | 1 | 2020-08-05T16:13:02.000Z | 2020-08-05T16:13:02.000Z | """Command line tool to extract meaningful health info from accelerometer data."""
import accelerometer.accUtils
import argparse
import collections
import datetime
import accelerometer.device
import json
import os
import accelerometer.summariseEpoch
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from filter_data import data_filter
from import_npy import import_npy
def main():
"""
Application entry point responsible for parsing command line requests
"""
parser = argparse.ArgumentParser(
description="""A tool to extract physical activity information from
raw accelerometer files.""", add_help=True
)
# required
parser.add_argument('rawFile', metavar='input file', type=str,
help="""the <.cwa/.cwa.gz> file to process
(e.g. sample.cwa.gz). If the file path contains
spaces,it must be enclosed in quote marks
(e.g. \"../My Documents/sample.cwa\")
""")
#optional inputs
parser.add_argument('--startTime',
metavar='e.g. 1991-01-01T23:59', default=None,
type=str2date, help="""removes data before this
time in the final analysis
(default : %(default)s)""")
parser.add_argument('--endTime',
metavar='e.g 1991-01-01T23:59', default=None,
type=str2date, help="""removes data after this
time in the final analysis
(default : %(default)s)""")
parser.add_argument('--timeSeriesDateColumn',
metavar='True/False', default=False, type=str2bool,
help="""adds a date/time column to the timeSeries
file, so acceleration and imputation values can be
compared easily. This increases output filesize
(default : %(default)s)""")
parser.add_argument('--processRawFile',
metavar='True/False', default=True, type=str2bool,
help="""False will skip processing of the .cwa file
(the epoch.csv file must already exist for this to
work) (default : %(default)s)""")
parser.add_argument('--epochPeriod',
metavar='length', default=30, type=int,
help="""length in seconds of a single epoch (default
: %(default)ss, must be an integer)""")
parser.add_argument('--sampleRate',
metavar='Hz, or samples/second', default=100,
type=int, help="""resample data to n Hz (default
: %(default)ss, must be an integer)""")
parser.add_argument('--useAbs',
metavar='useAbs', default=False, type=str2bool,
help="""use abs(VM) instead of trunc(VM)
(default : %(default)s)""")
parser.add_argument('--skipFiltering',
metavar='True/False', default=False, type=str2bool,
help="""Skip filtering stage
(default : %(default)s)""")
# calibration parameters
parser.add_argument('--skipCalibration',
metavar='True/False', default=False, type=str2bool,
help="""skip calibration? (default : %(default)s)""")
parser.add_argument('--calOffset',
metavar=('x', 'y', 'z'),default=[0.0, 0.0, 0.0],
type=float, nargs=3,
help="""accelerometer calibration offset (default :
%(default)s)""")
parser.add_argument('--calSlope',
metavar=('x', 'y', 'z'), default=[1.0, 1.0, 1.0],
type=float, nargs=3,
help="""accelerometer calibration slope linking
offset to temperature (default : %(default)s)""")
parser.add_argument('--calTemp',
metavar=('x', 'y', 'z'), default=[0.0, 0.0, 0.0],
type=float, nargs=3,
help="""mean temperature in degrees Celsius of
stationary data for calibration
(default : %(default)s)""")
parser.add_argument('--meanTemp',
metavar="temp", default=20.0, type=float,
help="""mean calibration temperature in degrees
Celsius (default : %(default)s)""")
parser.add_argument('--stationaryStd',
metavar='mg', default=13, type=int,
help="""stationary mg threshold (default
: %(default)s mg))""")
parser.add_argument('--calibrationSphereCriteria',
metavar='mg', default=0.3, type=float,
help="""calibration sphere threshold (default
: %(default)s mg))""")
# activity parameters
parser.add_argument('--mgMVPA',
metavar="mg", default=100, type=int,
help="""MVPA threshold (default : %(default)s)""")
parser.add_argument('--mgVPA',
metavar="mg", default=425, type=int,
help="""VPA threshold (default : %(default)s)""")
# calling helper processess and conducting multi-threadings
parser.add_argument('--rawDataParser',
metavar="rawDataParser", default="AxivityAx3Epochs",
type=str,
help="""file containing a java program to process
raw .cwa binary file, must end with .class (omitted)
(default : %(default)s)""")
parser.add_argument('--javaHeapSpace',
metavar="amount in MB", default="", type=str,
help="""amount of heap space allocated to the java
subprocesses,useful for limiting RAM usage (default
: unlimited)""")
# activity classification arguments
parser.add_argument('--activityClassification',
metavar='True/False', default=True, type=str2bool,
help="""Use pre-trained random forest to predict
activity type
(default : %(default)s)""")
parser.add_argument('--activityModel', type=str,
default="activityModels/doherty2018.tar",
help="""trained activity model .tar file""")
parser.add_argument('--rawOutput',
metavar='True/False', default=False, type=str2bool,
help="""output calibrated and resampled raw data to
a .csv.gz file? NOTE: requires ~50MB per day.
(default : %(default)s)""")
parser.add_argument('--npyOutput',
metavar='True/False', default=True, type=str2bool,
help="""output calibrated and resampled raw data to
.npy file? NOTE: requires ~60MB per day.
(default : %(default)s)""")
parser.add_argument('--fftOutput',
metavar='True/False', default=False, type=str2bool,
help="""output FFT epochs to a .csv file? NOTE:
requires ~0.1GB per day. (default : %(default)s)""")
# optional outputs
parser.add_argument('--outputFolder', metavar='filename',default="",
help="""folder for all of the output files, \
unless specified using other options""")
parser.add_argument('--summaryFolder', metavar='filename',default="",
help="folder for -summary.json summary stats")
parser.add_argument('--epochFolder', metavar='filename', default="",
help="""folder -epoch.csv.gz - must be an existing
file if "-processRawFile" is set to False""")
parser.add_argument('--timeSeriesFolder', metavar='filename', default="",
help="folder for -timeSeries.csv.gz file")
parser.add_argument('--nonWearFolder', metavar='filename',default="",
help="folder for -nonWearBouts.csv.gz file")
parser.add_argument('--stationaryFolder', metavar='filename', default="",
help="folder -stationaryPoints.csv.gz file")
parser.add_argument('--rawFolder', metavar='filename', default="",
help="folder for raw .csv.gz file")
parser.add_argument('--verbose',
metavar='True/False', default=False, type=str2bool,
help="""enable verbose logging? (default :
%(default)s)""")
parser.add_argument('--deleteIntermediateFiles',
metavar='True/False', default=True, type=str2bool,
help="""True will remove extra "helper" files created
by the program (default : %(default)s)""")
parser.add_argument('--intensityDistribution',
metavar='True/False', default=False, type=str2bool,
help="""Save intensity distribution
(default : %(default)s)""")
#
# check that enough command line arguments are entered
#
if len(sys.argv) < 2:
msg = "\nInvalid input, please enter at least 1 parameter, e.g."
msg += "\npython accProcess.py data/sample.cwa.gz \n"
accelerometer.accUtils.toScreen(msg)
parser.print_help()
sys.exit(-1)
processingStartTime = datetime.datetime.now()
args = parser.parse_args()
##########################
# check input/output files/dirs exist and validate input args
##########################
if args.processRawFile is False:
#! TODO: this breaks for .cwa.gz files
if len(args.rawFile.split('.')) < 2:
args.rawFile += ".cwa" # TODO edge case since we still need a name?
elif not os.path.isfile(args.rawFile):
if args.rawFile:
print("error: specified file " + args.rawFile + " does not exist. Exiting..")
else:
print("error: no file specified. Exiting..")
sys.exit(-2)
# get file extension
rawFilePath, rawFileName = os.path.split(args.rawFile)
rawFileName = rawFileName.split('.')[0] # remove any extension
# check target output folders exist
for path in [args.summaryFolder, args.nonWearFolder, args.epochFolder,
args.stationaryFolder, args.timeSeriesFolder, args.outputFolder]:
if len(path) > 0 and not os.access(path, os.F_OK):
print("error: " + path + " is not a valid directory")
sys.exit(-3)
# assign output file names
if args.outputFolder == "" and rawFilePath != "":
args.outputFolder = rawFilePath + '/'
if args.summaryFolder == "":
args.summaryFolder = args.outputFolder
if args.nonWearFolder == "":
args.nonWearFolder = args.outputFolder
if args.epochFolder == "":
args.epochFolder = args.outputFolder
if args.stationaryFolder == "":
args.stationaryFolder = args.outputFolder
if args.timeSeriesFolder == "":
args.timeSeriesFolder = args.outputFolder
if args.rawFolder == "":
args.rawFolder = args.outputFolder
args.summaryFile = args.summaryFolder + rawFileName + "-summary.json"
args.nonWearFile = args.nonWearFolder + rawFileName + "-nonWearBouts.csv.gz"
args.epochFile = args.epochFolder + rawFileName + "-epoch.csv.gz"
args.stationaryFile = args.stationaryFolder + rawFileName + "-stationaryPoints.csv"
args.tsFile = args.timeSeriesFolder + rawFileName + "-timeSeries.csv.gz"
args.rawOutputFile = args.rawFolder + rawFileName + ".csv.gz"
args.npyOutputFile = args.rawFolder + rawFileName + ".npy"
# check user specified end time is not before start time
if args.startTime and args.endTime:
if args.startTime >= args.endTime:
print("start and end time arguments are invalid!")
print("startTime:", args.startTime.strftime("%Y-%m-%dT%H:%M"))
print("endTime:", args.endTime.strftime("%Y-%m-%dT%H:%M"))
sys.exit(-4)
# print processing options to screen
print("processing file " + args.rawFile + "' with these arguments:\n")
for key, value in sorted(vars(args).items()):
if not (isinstance(value, str) and len(value)==0):
print(key.ljust(15), ':', value)
print("\n")
##########################
# start processing file
##########################
summary = {}
# now process the .CWA file
if args.processRawFile:
summary['file-name'] = args.rawFile
accelerometer.device.processRawFileToEpoch(args.rawFile, args.epochFile,
args.stationaryFile, summary, skipCalibration=args.skipCalibration,
stationaryStd=args.stationaryStd, xIntercept=args.calOffset[0],
yIntercept=args.calOffset[1], zIntercept=args.calOffset[2],
xSlope=args.calSlope[0], ySlope=args.calSlope[1],
zSlope=args.calSlope[2], xTemp=args.calTemp[0],
yTemp=args.calTemp[1], zTemp=args.calTemp[2],
meanTemp=args.meanTemp, rawDataParser=args.rawDataParser,
javaHeapSpace=args.javaHeapSpace, skipFiltering=args.skipFiltering,
sampleRate=args.sampleRate, epochPeriod=args.epochPeriod,
useAbs=args.useAbs, activityClassification=args.activityClassification,
rawOutput=args.rawOutput, rawOutputFile=args.rawOutputFile,
npyOutput=args.npyOutput, npyOutputFile=args.npyOutputFile,
fftOutput=args.fftOutput, startTime=args.startTime,
endTime=args.endTime, verbose=args.verbose)
print(args.rawFile)
else:
summary['file-name'] = args.epochFile
data, time = import_npy(args.rawFile)
# Place your code here
##########################
# remove helper files and close program
##########################
if args.deleteIntermediateFiles:
try:
os.remove(args.stationaryFile)
os.remove(args.epochFile)
os.remove(args.rawFile[:-4] + '.npy')
except:
accelerometer.accUtils.toScreen('could not delete helper file')
# finally, print out processing summary message
processingEndTime = datetime.datetime.now()
processingTime = (processingEndTime - processingStartTime).total_seconds()
accelerometer.accUtils.toScreen("in total, processing took " + \
str(processingTime) + " seconds")
def str2bool(v):
"""
Used to parse true/false values from the command line. E.g. "True" -> True
"""
return v.lower() in ("yes", "true", "t", "1")
def str2date(v):
"""
Used to parse date values from the command line. E.g. "1994-11-30T12:00" -> time.datetime
"""
eg = "1994-11-30T12:00" # example date
if v.count("-")!=eg.count("-"):
print("ERROR: not enough dashes in date")
elif v.count("T")!=eg.count("T"):
print("ERROR: no T seperator in date")
elif v.count(":")!=eg.count(":"):
print("ERROR: no ':' seperator in date")
elif len(v.split("-")[0])!=4:
print("ERROR: year in date must be 4 numbers")
elif len(v.split("-")[1])!=2 and len(v.split("-")[1])!=1:
print("ERROR: month in date must be 1-2 numbers")
elif len(v.split("-")[2].split("T")[0])!=2 and len(v.split("-")[2].split("T")[0])!=1:
print("ERROR: day in date must be 1-2 numbers")
else:
return pd.datetime.strptime(v, "%Y-%m-%dT%H:%M")
print("please change your input date:")
print('"'+v+'"')
print("to match the example date format:")
print('"'+eg+'"')
raise ValueError("date in incorrect format")
if __name__ == '__main__':
main() # Standard boilerplate to call the main() function to begin the program.
| 49.684524 | 93 | 0.547622 |
fb3f354629384e19edefd222de21d0f75d624bfc | 382 | py | Python | VetsApp/views.py | Sabrinax3/Pet-Clinic-1 | 776955d118a46c8d4eaa74de22ea0280b82debc9 | [
"MIT"
] | 2 | 2020-04-13T14:26:54.000Z | 2022-01-19T01:30:25.000Z | VetsApp/views.py | Sabrinax3/Pet-Clinic-1 | 776955d118a46c8d4eaa74de22ea0280b82debc9 | [
"MIT"
] | 2 | 2020-05-29T18:52:55.000Z | 2020-05-30T02:06:28.000Z | VetsApp/views.py | Sabrinax3/Pet-Clinic-1 | 776955d118a46c8d4eaa74de22ea0280b82debc9 | [
"MIT"
] | 8 | 2020-04-11T08:30:44.000Z | 2020-05-30T03:26:13.000Z | from django.shortcuts import render
from .models import VetsInfoTable
# Create your views here.
| 15.916667 | 53 | 0.641361 |
fb40883451f136e23adf11a2d1d1d175606ca586 | 542 | py | Python | generate.py | IsaacPeters/Closest-Pair-of-Points | 3c71efcbeae12b0b187117a671b782e392ea71b2 | [
"MIT"
] | 1 | 2021-07-18T03:59:55.000Z | 2021-07-18T03:59:55.000Z | generate.py | IsaacPeters/Closest-Pair-of-Points | 3c71efcbeae12b0b187117a671b782e392ea71b2 | [
"MIT"
] | null | null | null | generate.py | IsaacPeters/Closest-Pair-of-Points | 3c71efcbeae12b0b187117a671b782e392ea71b2 | [
"MIT"
] | null | null | null | import sys
import math
import random
# Figure out what we should name our output file, and how big it should be
if len(sys.argv) != 3: # Make sure we get a file argument, and only that
print("Incorrect number of arguments found, should be \"generate <file> 10^<x>\"")
for i in range(10):
with open("./gen/%s%d" % (sys.argv[1], i), "w") as file:
for x in range(pow(10, int(sys.argv[2]))):
xNum = random.randint(1, 10000)
yNum = random.randint(1, 10000)
file.write("%d %d\n" % (xNum, yNum)) | 38.714286 | 86 | 0.612546 |
fb40d608ac0b102b80003c2f549912877d9e3d53 | 963 | py | Python | wagtail/snippets/urls.py | brownaa/wagtail | c97bc56c6822eb1b6589d5c33e07f71acfc48845 | [
"BSD-3-Clause"
] | 2 | 2021-03-18T21:41:05.000Z | 2021-03-18T21:41:08.000Z | wagtail/snippets/urls.py | brownaa/wagtail | c97bc56c6822eb1b6589d5c33e07f71acfc48845 | [
"BSD-3-Clause"
] | 13 | 2015-05-08T12:27:10.000Z | 2020-01-23T14:45:57.000Z | wagtail/snippets/urls.py | brownaa/wagtail | c97bc56c6822eb1b6589d5c33e07f71acfc48845 | [
"BSD-3-Clause"
] | 2 | 2020-09-03T20:12:32.000Z | 2021-03-29T08:29:23.000Z | from django.urls import path
from wagtail.snippets.views import chooser, snippets
app_name = 'wagtailsnippets'
urlpatterns = [
path('', snippets.index, name='index'),
path('choose/', chooser.choose, name='choose_generic'),
path('choose/<slug:app_label>/<slug:model_name>/', chooser.choose, name='choose'),
path('choose/<slug:app_label>/<slug:model_name>/<str:pk>/', chooser.chosen, name='chosen'),
path('<slug:app_label>/<slug:model_name>/', snippets.list, name='list'),
path('<slug:app_label>/<slug:model_name>/add/', snippets.create, name='add'),
path('<slug:app_label>/<slug:model_name>/<str:pk>/', snippets.edit, name='edit'),
path('<slug:app_label>/<slug:model_name>/multiple/delete/', snippets.delete, name='delete-multiple'),
path('<slug:app_label>/<slug:model_name>/<str:pk>/delete/', snippets.delete, name='delete'),
path('<slug:app_label>/<slug:model_name>/<str:pk>/usage/', snippets.usage, name='usage'),
]
| 45.857143 | 105 | 0.688474 |
fb413304ce562fca6f9892396c8901821a208e1e | 1,974 | py | Python | ding/envs/env/tests/test_env_implementation_check.py | jayyoung0802/DI-engine | efbb35ddaf184d1009291e6842fbbae09f193492 | [
"Apache-2.0"
] | 1 | 2022-03-21T16:15:39.000Z | 2022-03-21T16:15:39.000Z | ding/envs/env/tests/test_env_implementation_check.py | jayyoung0802/DI-engine | efbb35ddaf184d1009291e6842fbbae09f193492 | [
"Apache-2.0"
] | null | null | null | ding/envs/env/tests/test_env_implementation_check.py | jayyoung0802/DI-engine | efbb35ddaf184d1009291e6842fbbae09f193492 | [
"Apache-2.0"
] | null | null | null | import pytest
from easydict import EasyDict
import numpy as np
import gym
from copy import deepcopy
from ding.envs.env import check_array_space, check_different_memory, check_all, demonstrate_correct_procedure
from ding.envs.env.tests import DemoEnv
| 37.961538 | 109 | 0.691489 |
fb421f779844bb484b1c9c0a35a1b99901994f6f | 18,474 | py | Python | tests/evergreen/metrics/test_buildmetrics.py | jamesbroadhead/evergreen.py | 08418bf53bb7cd8de8a68aa6ae0847f28e9e0a30 | [
"Apache-2.0"
] | null | null | null | tests/evergreen/metrics/test_buildmetrics.py | jamesbroadhead/evergreen.py | 08418bf53bb7cd8de8a68aa6ae0847f28e9e0a30 | [
"Apache-2.0"
] | null | null | null | tests/evergreen/metrics/test_buildmetrics.py | jamesbroadhead/evergreen.py | 08418bf53bb7cd8de8a68aa6ae0847f28e9e0a30 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
"""Unit tests for src/evergreen/metrics/buildmetrics.py."""
from __future__ import absolute_import
from unittest.mock import MagicMock
import pytest
import evergreen.metrics.buildmetrics as under_test
from evergreen.errors.exceptions import ActiveTaskMetricsException
from evergreen.task import Task
| 41.053333 | 81 | 0.691783 |
fb427d81000b8506419aa7780e97ffc579670c50 | 813 | py | Python | tools_d2/convert-pretrain-model-to-d2.py | nguyentritai2906/panoptic-deeplab | 6bbe17801488a417ed9586acab285ee6a05d68cb | [
"Apache-2.0"
] | 506 | 2020-06-12T01:07:56.000Z | 2022-03-26T00:56:52.000Z | tools_d2/convert-pretrain-model-to-d2.py | MrMa-T/panoptic-deeplab | cf8e20bbbf1cf540c7593434b965a93c4a889890 | [
"Apache-2.0"
] | 85 | 2020-06-12T04:51:31.000Z | 2022-03-23T16:19:44.000Z | tools_d2/convert-pretrain-model-to-d2.py | MrMa-T/panoptic-deeplab | cf8e20bbbf1cf540c7593434b965a93c4a889890 | [
"Apache-2.0"
] | 102 | 2020-06-12T06:45:44.000Z | 2022-03-22T14:03:04.000Z | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pickle as pkl
import sys
import torch
"""
Usage:
# download your pretrained model:
wget https://github.com/LikeLy-Journey/SegmenTron/releases/download/v0.1.0/tf-xception65-270e81cf.pth -O x65.pth
# run the conversion
./convert-pretrained-model-to-d2.py x65.pth x65.pkl
# Then, use x65.pkl with the following changes in config:
MODEL:
WEIGHTS: "/path/to/x65.pkl"
PIXEL_MEAN: [128, 128, 128]
PIXEL_STD: [128, 128, 128]
INPUT:
FORMAT: "RGB"
"""
if __name__ == "__main__":
input = sys.argv[1]
obj = torch.load(input, map_location="cpu")
res = {"model": obj, "__author__": "third_party", "matching_heuristics": True}
with open(sys.argv[2], "wb") as f:
pkl.dump(res, f)
| 24.636364 | 114 | 0.681427 |
fb4386dbb22354c808375368f8d1474f3605a181 | 2,953 | py | Python | test_geo.py | OrrEos/IA-Flood-Warning-Project | af485560050c6e387aabf0bd7500b13de62f810f | [
"MIT"
] | null | null | null | test_geo.py | OrrEos/IA-Flood-Warning-Project | af485560050c6e387aabf0bd7500b13de62f810f | [
"MIT"
] | null | null | null | test_geo.py | OrrEos/IA-Flood-Warning-Project | af485560050c6e387aabf0bd7500b13de62f810f | [
"MIT"
] | 1 | 2022-01-24T09:57:24.000Z | 2022-01-24T09:57:24.000Z | import random
from floodsystem.utils import sorted_by_key # noqa
from floodsystem.geo import stations_by_distance, stations_within_radius, rivers_with_station, stations_by_river,rivers_by_station_number
from floodsystem.stationdata import build_station_list
'''def test_geo():
#Task 1A
#does the function give an output & if it's a list:
out = build_station_list()
assert type(out) == list
#checking that list is a reasonable length
assert len(out) >1700
assert len(out) <2500'''
#Task 1B
#Task 1D
#Task1E
| 29.828283 | 137 | 0.679986 |
fb43e8774473bcb7c7cfe41180999e085bda6d33 | 724 | py | Python | app.py | aws-samples/aws-cdk-service-catalog-pipeline | e6e3eab0dec3fc41e7621971453131fd0d5b6e32 | [
"MIT-0"
] | null | null | null | app.py | aws-samples/aws-cdk-service-catalog-pipeline | e6e3eab0dec3fc41e7621971453131fd0d5b6e32 | [
"MIT-0"
] | null | null | null | app.py | aws-samples/aws-cdk-service-catalog-pipeline | e6e3eab0dec3fc41e7621971453131fd0d5b6e32 | [
"MIT-0"
] | null | null | null | #!/usr/bin/env python3
import os
import aws_cdk as cdk
# For consistency with TypeScript code, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from cdk_pipelines.cdk_pipelines import CdkPipelineStack
app = cdk.App()
CdkPipelineStack(app, "AWSomeServiceCatalogPipeline",
description="CI/CD CDK Pipelines for Service Catalog Example",
env={
'region': app.node.try_get_context("region"),
'account': app.node.try_get_context("pipeline_account")
}
)
app.synth()
| 31.478261 | 79 | 0.727901 |
fb4452119142d00f8ea5508e610548a9fa55bde5 | 1,565 | py | Python | Generator/Sheet3/PDF.py | trngb/watools | 57b9074d59d856886675aa26014bfd6673d5da76 | [
"Apache-2.0"
] | 11 | 2018-09-25T08:58:26.000Z | 2021-02-13T18:58:05.000Z | Generator/Sheet3/PDF.py | trngbich/watools | 57b9074d59d856886675aa26014bfd6673d5da76 | [
"Apache-2.0"
] | 1 | 2020-07-03T02:36:41.000Z | 2021-03-21T22:20:47.000Z | Generator/Sheet3/PDF.py | trngbich/watools | 57b9074d59d856886675aa26014bfd6673d5da76 | [
"Apache-2.0"
] | 16 | 2018-09-28T22:55:11.000Z | 2021-02-22T13:03:56.000Z | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Generator/Sheet3
"""
import os
def Create(Dir_Basin, Basin, Simulation, Dir_Basin_CSV_a, Dir_Basin_CSV_b):
"""
This functions create the monthly and yearly sheet 3 in pdf format, based on the csv files.
Parameters
----------
Dir_Basin : str
Path to all the output data of the Basin
Basin : str
Name of the basin
Simulation : int
Defines the simulation
Dir_Basin_CSV_a : str
Data path pointing to the CSV output files for sheet a
Dir_Basin_CSV_b : str
Data path pointing to the CSV output files for sheet b
"""
# import wa module
from watools.Sheets import create_sheet3
# Create output folder for PDF files
Dir_Basin_PDF = os.path.join(Dir_Basin, "Simulations", "Simulation_%d" %Simulation, "PDF")
if not os.path.exists(Dir_Basin_PDF):
os.mkdir(Dir_Basin_PDF)
# Create output filename for PDFs
FileName_Splitted = Dir_Basin_CSV_a.split('_')
Year = str(FileName_Splitted[-1].split('.')[0])
outFile_a = os.path.join(Dir_Basin_PDF,'Sheet3a_Sim%s_%s_%s.pdf' %(Simulation, Basin, Year))
outFile_b = os.path.join(Dir_Basin_PDF,'Sheet3b_Sim%s_%s_%s.pdf' %(Simulation, Basin, Year))
# Create PDFs
sheet3a_fh, sheet3b_fh = create_sheet3(Basin, str(Year), ['km3/year', 'kg/ha/year', 'kg/m3'], [Dir_Basin_CSV_a, Dir_Basin_CSV_b], [outFile_a, outFile_b])
return()
| 33.297872 | 157 | 0.681789 |
fb446c8a40864dcf38289e8b379abbb25374263e | 2,481 | py | Python | bonita/commands/user.py | dantebarba/bonita-cli | f750a6a1ff802e5197644b2363aea406bf29b6bf | [
"WTFPL"
] | 2 | 2017-09-02T08:05:03.000Z | 2018-09-17T13:48:03.000Z | bonita/commands/user.py | dantebarba/bonita-cli | f750a6a1ff802e5197644b2363aea406bf29b6bf | [
"WTFPL"
] | null | null | null | bonita/commands/user.py | dantebarba/bonita-cli | f750a6a1ff802e5197644b2363aea406bf29b6bf | [
"WTFPL"
] | null | null | null | """The user command."""
from json import dumps
from .base import Base
from bonita.api.bonita_client import BonitaClient
| 33.986301 | 112 | 0.563886 |
fb4520cbe2999728bc66894639cfa8a36d53fb16 | 1,809 | py | Python | pyActionRec/action_flow.py | Xiatian-Zhu/anet2016_cuhk | a0df08cbbe65013e9a259d5412c33a99c2c84127 | [
"BSD-2-Clause"
] | 253 | 2016-07-01T22:57:55.000Z | 2022-03-01T10:59:31.000Z | pyActionRec/action_flow.py | Xiatian-Zhu/anet2016_cuhk | a0df08cbbe65013e9a259d5412c33a99c2c84127 | [
"BSD-2-Clause"
] | 39 | 2016-08-31T08:42:24.000Z | 2021-12-11T06:56:47.000Z | pyActionRec/action_flow.py | Xiatian-Zhu/anet2016_cuhk | a0df08cbbe65013e9a259d5412c33a99c2c84127 | [
"BSD-2-Clause"
] | 101 | 2016-07-01T22:57:57.000Z | 2022-03-08T07:26:53.000Z | from config import ANET_CFG
import sys
sys.path.append(ANET_CFG.DENSE_FLOW_ROOT+'/build')
from libpydenseflow import TVL1FlowExtractor
import action_caffe
import numpy as np
if __name__ == "__main__":
import cv2
im1 = cv2.imread('../data/img_1.jpg')
im2 = cv2.imread('../data/img_2.jpg')
f = FlowExtractor(0)
flow_frames = f.extract_flow([im1, im2])
from pylab import *
plt.figure()
plt.imshow(flow_frames[0])
plt.figure()
plt.imshow(flow_frames[1])
plt.figure()
plt.imshow(im1)
plt.show()
print flow_frames
| 30.15 | 113 | 0.611388 |
fb4743a6ee0568d1f98e6dec89a2138670b26a6f | 9,921 | py | Python | aqme/qdesc.py | patonlab/aqme | 080d8e85ee905718ddf78f7fdee2ee308a293ad1 | [
"MIT"
] | null | null | null | aqme/qdesc.py | patonlab/aqme | 080d8e85ee905718ddf78f7fdee2ee308a293ad1 | [
"MIT"
] | null | null | null | aqme/qdesc.py | patonlab/aqme | 080d8e85ee905718ddf78f7fdee2ee308a293ad1 | [
"MIT"
] | null | null | null | #####################################################.
# This file stores all the functions #
# used for genrating all parameters #
#####################################################.
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import rdMolTransforms
import os
import pandas as pd
from aqme.csearch import getDihedralMatches
| 65.269737 | 238 | 0.603467 |
fb49836ffdfff81dfa3d877748d3c2b47f98f7fb | 3,446 | py | Python | googlecode-issues-exporter/generate_user_map.py | ballschin52/support-tools | 85be996e89d292c7f20031dde88198acc63d5e6c | [
"Apache-2.0"
] | 41 | 2016-05-03T02:27:07.000Z | 2021-10-14T13:54:16.000Z | googlecode-issues-exporter/generate_user_map.py | ballschin52/support-tools | 85be996e89d292c7f20031dde88198acc63d5e6c | [
"Apache-2.0"
] | 7 | 2016-05-05T13:53:37.000Z | 2021-06-27T20:25:13.000Z | googlecode-issues-exporter/generate_user_map.py | ballschin52/support-tools | 85be996e89d292c7f20031dde88198acc63d5e6c | [
"Apache-2.0"
] | 30 | 2016-05-05T13:26:21.000Z | 2021-10-13T09:39:21.000Z | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for generating a user mapping from Google Code user to BitBucket user.
"""
import argparse
import json
import sys
import issues
def addIfNotPresent(users, user):
"""Adds a user if it is not already set."""
if user not in users:
users[user] = user
def _CreateUsersDict(issue_data, project_name):
"""Extract users from list of issues into a dict.
Args:
issue_data: Issue data
project_name: The name of the project being exported.
Returns:
Dict of users associated with a list of issues
"""
users = {}
for issue in issue_data:
googlecode_issue = issues.GoogleCodeIssue(
issue, project_name, OptionalMap())
reporting_user = googlecode_issue.GetAuthor()
addIfNotPresent(users, reporting_user)
assignee_user = googlecode_issue.GetOwner()
addIfNotPresent(users, assignee_user)
googlecode_comments = googlecode_issue.GetComments()
for comment in googlecode_comments:
googlecode_comment = issues.GoogleCodeComment(googlecode_issue, comment)
commenting_user = googlecode_comment.GetAuthor()
addIfNotPresent(users, commenting_user)
return {
"users": users
}
def Generate(issue_file_path, project_name):
"""Generates a user map for the specified issues. """
issue_data = None
user_file = open(issue_file_path)
user_data = json.load(user_file)
user_projects = user_data["projects"]
for project in user_projects:
if project_name in project["name"]:
issue_data = project["issues"]["items"]
break
if issue_data is None:
raise issues.ProjectNotFoundError(
"Project %s not found" % project_name)
users = _CreateUsersDict(issue_data, project_name)
with open("users.json", "w") as users_file:
user_json = json.dumps(users, sort_keys=True, indent=4,
separators=(",", ": "), ensure_ascii=False)
users_file.write(unicode(user_json))
print "\nCreated file users.json.\n"
def main(args):
"""The main function.
Args:
args: The command line arguments.
Raises:
issues.ProjectNotFoundError: The user passed in an invalid project name.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--issue_file_path", required=True,
help="The path to the file containing the issues from"
"Google Code.")
parser.add_argument("--project_name", required=True,
help="The name of the Google Code project you wish to"
"export")
parsed_args, _ = parser.parse_known_args(args)
Generate(parsed_args.issue_file_path, parsed_args.project_name)
if __name__ == "__main__":
main(sys.argv)
| 28.716667 | 78 | 0.702844 |
fb4998788840ae0b088496c0b1aec6536f521b03 | 952 | py | Python | apps/auth/views/wxlogin.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 349 | 2020-08-04T10:21:01.000Z | 2022-03-23T08:31:29.000Z | apps/auth/views/wxlogin.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 2 | 2021-01-07T06:17:05.000Z | 2021-04-01T06:01:30.000Z | apps/auth/views/wxlogin.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 70 | 2020-08-24T06:46:14.000Z | 2022-03-25T13:23:27.000Z | from flask import Blueprint
from apps.auth.business.wxlogin import WxLoginBusiness
from apps.auth.extentions import validation, parse_json_form
from library.api.render import json_detail_render
wxlogin = Blueprint("wxlogin", __name__)
| 25.052632 | 65 | 0.668067 |
fb4ad13207c5ca10ca59d1294d3d67f91a07e8bb | 4,374 | py | Python | servidor/jornada_teorica.py | angeloide78/wShifts | d88a3284c8a3829a7fbda127eb23c4d5392033f3 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2019-12-21T22:07:11.000Z | 2021-09-24T15:08:45.000Z | servidor/jornada_teorica.py | angeloide78/wShifts | d88a3284c8a3829a7fbda127eb23c4d5392033f3 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2018-03-24T23:10:40.000Z | 2018-03-24T23:10:40.000Z | servidor/jornada_teorica.py | angeloide78/wShifts | d88a3284c8a3829a7fbda127eb23c4d5392033f3 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-01-26T21:47:10.000Z | 2022-01-26T21:47:10.000Z | # -*- coding: utf-8 -*
# ALGG 03-01-2017 Creacin de mdulo jornada_teorica.
| 32.887218 | 79 | 0.486968 |
fb4e27ed9f0165f7474ca1a89bce202114f9a019 | 3,645 | py | Python | app.py | jasoncordis/spotify-flask | 1e7f2955ab9d825a5f32f494b2966f18c460f311 | [
"Apache-2.0"
] | 58 | 2017-04-20T17:25:25.000Z | 2021-02-05T21:41:25.000Z | app.py | jasoncordis/spotify-flask | 1e7f2955ab9d825a5f32f494b2966f18c460f311 | [
"Apache-2.0"
] | 3 | 2018-04-08T22:03:25.000Z | 2020-05-07T06:03:21.000Z | app.py | jasoncordis/spotify-flask | 1e7f2955ab9d825a5f32f494b2966f18c460f311 | [
"Apache-2.0"
] | 17 | 2017-12-03T04:26:48.000Z | 2021-01-26T21:18:27.000Z | '''
This code was based on these repositories,
so special thanks to:
https://github.com/datademofun/spotify-flask
https://github.com/drshrey/spotify-flask-auth-example
'''
from flask import Flask, request, redirect, g, render_template, session
from spotify_requests import spotify
app = Flask(__name__)
app.secret_key = 'some key for session'
# ----------------------- AUTH API PROCEDURE -------------------------
def valid_token(resp):
return resp is not None and not 'error' in resp
# -------------------------- API REQUESTS ----------------------------
def make_search(search_type, name):
if search_type not in ['artist', 'album', 'playlist', 'track']:
return render_template('index.html')
data = spotify.search(search_type, name)
api_url = data[search_type + 's']['href']
items = data[search_type + 's']['items']
return render_template('search.html',
name=name,
results=items,
api_url=api_url,
search_type=search_type)
if __name__ == "__main__":
app.run(debug=True, port=spotify.PORT)
| 27.613636 | 72 | 0.609877 |
fb4e569bf8fd09a1c7d6371a76f1b851a6a2772b | 7,275 | py | Python | ckanext-datagathering/ckanext/datagathering/commands/migrate.py | smallmedia/iod-ckan | dfd85b41286fe86924ec16b0a88efc7292848ceb | [
"Apache-2.0"
] | 4 | 2017-06-12T15:18:30.000Z | 2019-10-11T15:12:43.000Z | ckanext-datagathering/ckanext/datagathering/commands/migrate.py | smallmedia/iod-ckan | dfd85b41286fe86924ec16b0a88efc7292848ceb | [
"Apache-2.0"
] | 64 | 2017-05-14T22:15:53.000Z | 2020-03-08T15:26:49.000Z | ckanext-datagathering/ckanext/datagathering/commands/migrate.py | smallmedia/iod-ckan | dfd85b41286fe86924ec16b0a88efc7292848ceb | [
"Apache-2.0"
] | 2 | 2018-09-08T08:02:25.000Z | 2020-04-24T13:02:06.000Z | from ckan import model
from ckan.lib.cli import CkanCommand
from ckan.lib.munge import munge_title_to_name, substitute_ascii_equivalents
from ckan.logic import get_action
from ckan.lib.helpers import render_markdown
from ckan.plugins import toolkit
import logging
log = logging.getLogger(__name__)
| 39.538043 | 121 | 0.576082 |
fb4fa127bfbce18cd4fdeeaf2d1ebf19b58badc3 | 13,826 | py | Python | run.py | openmg/mg-phm | e3bb05d6352f90ee40fdc4415ad8e1ed5857196f | [
"Apache-2.0"
] | null | null | null | run.py | openmg/mg-phm | e3bb05d6352f90ee40fdc4415ad8e1ed5857196f | [
"Apache-2.0"
] | null | null | null | run.py | openmg/mg-phm | e3bb05d6352f90ee40fdc4415ad8e1ed5857196f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import cv2
from scipy.misc import imsave
import matplotlib.pyplot as plt
import analysis
import imageprocess
import datainterface
import imagemosaicking
# Town names for the per-polygon masking step (content stripped here);
# converted to unicode below because this is Python 2 code.
town_names = ['', '', '', '', '', \
'', '', '', '']
for index in range(len(town_names)):
    town_names[index] = unicode(town_names[index], 'utf8')
geojson_path = 'testdata/'
# Landsat 8 scene codes, grouped by month. Names encode path/row
# (129/033, 128/033, 128/034) — one triple of scenes per case.
#12
winter12933 = 'LC81290332016343LGN00_MTL'
winter12833 = 'LC81280332016336LGN00_MTL'
winter12834 = 'LC81280342016336LGN00_MTL'
winter12 = (winter12933, winter12833, winter12834)
#01
winter12933 = 'LC81290332017025LGN00_MTL'
winter12833 = 'LC81280332017002LGN00_MTL'
winter12834 = 'LC81280342017002LGN00_MTL'
winter01 = (winter12933, winter12833, winter12834)
#02
winter12933 = 'LC81290332017089LGN00_MTL'#32
winter12833 = 'LC81280332017034LGN00_MTL'
winter12834 = 'LC81280342017034LGN00_MTL'
winter02 = (winter12933, winter12833, winter12834)
#06
summer12933 = 'LC81290332016151LGN00_MTL'
summer12833 = 'LC81280332016176LGN00_MTL'
summer12834 = 'LC81280342016176LGN00_MTL'
summer06 = (summer12933, summer12833, summer12834)
#07
summer12933 = 'LC81290332016183LGN00_MTL'
summer12833 = 'LC81280332016208LGN00_MTL'
summer12834 = 'LC81280342016208LGN00_MTL'
summer07 = (summer12933, summer12833, summer12834)
#08
summer12933 = 'LC81290332016247LGN00_MTL'
summer12833 = 'LC81280332016240LGN00_MTL'
summer12834 = 'LC81280342016240LGN00_MTL'
summer08 = (summer12933, summer12833, summer12834)
# Only August is processed in this run; the commented lines below show how
# to process all six months at once.
cases = (summer08,)
case_name = ('Aug',)
#cases = (winter12, winter01, winter02, summer06, summer07, summer08)
#case_name = ('Nov','Jan','Feb','Jun','Jul','Aug',)
#cases = (wintercode,)
#case_name = ('winter')
# Main processing loop: for each monthly case, load the three Landsat
# scenes, derive NDVI / vegetation cover / surface temperature / TVDI
# drought index, mosaic the three scenes, filter, and save PNG products.
for ii in range(len(cases)):
    case = cases[ii]
    # image load
    imgcode1 = case[0]
    imgcode2 = case[1]
    imgcode3 = case[2]
    path1 = 'testdata/1-12933/'
    path2 = 'testdata/2-12833/'
    path3 = 'testdata/3-12834/'
    corner1 = datainterface.get_corner(imgcode1, path1)
    corner2 = datainterface.get_corner(imgcode2, path2)
    corner3 = datainterface.get_corner(imgcode3, path3)
    img1 = datainterface.get_band(imgcode1, 4, path1)
    img2 = datainterface.get_band(imgcode2, 4, path2)
    img3 = datainterface.get_band(imgcode3, 4, path3)
    bqa1 = datainterface.get_bqa(imgcode1, path1)
    bqa2 = datainterface.get_bqa(imgcode2, path2)
    bqa3 = datainterface.get_bqa(imgcode3, path3)
    file_date1 = datainterface.get_date(imgcode1, path1)
    file_date2 = datainterface.get_date(imgcode2, path2)
    file_date3 = datainterface.get_date(imgcode3, path3)
    # image analysis
    ndvi1, vfc1 = analysis.get_plant(imgcode1, path1)
    ndvi2, vfc2 = analysis.get_plant(imgcode2, path2)
    ndvi3, vfc3 = analysis.get_plant(imgcode3, path3)
    print 'complete ndvi calculation...'
    Ts1 = analysis.get_temperature(imgcode1, path1)
    Ts2 = analysis.get_temperature(imgcode2, path2)
    Ts3 = analysis.get_temperature(imgcode3, path3)
    print 'complete Ts calculation...'
    tvdi1, cover1 = analysis.get_drought(ndvi1, Ts1, bqa1)
    tvdi2, cover2 = analysis.get_drought(ndvi2, Ts2, bqa2)
    tvdi3, cover3 = analysis.get_drought(ndvi3, Ts3, bqa3)
    print 'complete tvdi calculation...'
    # 10% downsampled copies of every per-scene product, for quick looks.
    ndvi1_d = cv2.resize(ndvi1,None,fx=0.1,fy=0.1)
    ndvi2_d = cv2.resize(ndvi2,None,fx=0.1,fy=0.1)
    ndvi3_d = cv2.resize(ndvi3,None,fx=0.1,fy=0.1)
    vfc1_d = cv2.resize(vfc1,None,fx=0.1,fy=0.1)
    vfc2_d = cv2.resize(vfc2,None,fx=0.1,fy=0.1)
    vfc3_d = cv2.resize(vfc3,None,fx=0.1,fy=0.1)
    Ts1_d = cv2.resize(Ts1,None,fx=0.1,fy=0.1)
    Ts2_d = cv2.resize(Ts2,None,fx=0.1,fy=0.1)
    Ts3_d = cv2.resize(Ts3,None,fx=0.1,fy=0.1)
    tvdi1_d = cv2.resize(tvdi1,None,fx=0.1,fy=0.1)
    tvdi2_d = cv2.resize(tvdi2,None,fx=0.1,fy=0.1)
    tvdi3_d = cv2.resize(tvdi3,None,fx=0.1,fy=0.1)
    print 'complete image analyzing...'
    # Save the full-resolution per-scene products.
    save_filename = 'output/' + case_name[ii] + '_' + 'ndvi1' + '.png'
    imsave(save_filename, ndvi1)
    save_filename = 'output/' + case_name[ii] + '_' + 'vfc1' + '.png'
    imsave(save_filename, vfc1)
    save_filename = 'output/' + case_name[ii] + '_' + 'ndvi2' + '.png'
    imsave(save_filename, ndvi2)
    save_filename = 'output/' + case_name[ii] + '_' + 'vfc2' + '.png'
    imsave(save_filename, vfc2)
    save_filename = 'output/' + case_name[ii] + '_' + 'ndvi3' + '.png'
    imsave(save_filename, ndvi3)
    save_filename = 'output/' + case_name[ii] + '_' + 'vfc3' + '.png'
    imsave(save_filename, vfc3)
    save_filename = 'output/' + case_name[ii] + '_' + 'Ts1' + '.png'
    imsave(save_filename, Ts1)
    save_filename = 'output/' + case_name[ii] + '_' + 'Ts2' + '.png'
    imsave(save_filename, Ts2)
    save_filename = 'output/' + case_name[ii] + '_' + 'Ts3' + '.png'
    imsave(save_filename, Ts3)
    save_filename = 'output/' + case_name[ii] + '_' + 'tvdi1' + '.png'
    imsave(save_filename, tvdi1)
    save_filename = 'output/' + case_name[ii] + '_' + 'tvdi2' + '.png'
    imsave(save_filename, tvdi2)
    save_filename = 'output/' + case_name[ii] + '_' + 'tvdi3' + '.png'
    imsave(save_filename, tvdi3)
    # Save the downsampled quick-look products (note the 'output/d' prefix).
    save_filename = 'output/d' + case_name[ii] + '_' + 'ndvi1_d' + '.png'
    imsave(save_filename, ndvi1_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'vfc1_d' + '.png'
    imsave(save_filename, vfc1_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'ndvi2_d' + '.png'
    imsave(save_filename, ndvi2_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'vfc2_d' + '.png'
    imsave(save_filename, vfc2_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'ndvi3_d' + '.png'
    imsave(save_filename, ndvi3_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'vfc3_d' + '.png'
    imsave(save_filename, vfc3_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'Ts1_d' + '.png'
    imsave(save_filename, Ts1_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'Ts2_d' + '.png'
    imsave(save_filename, Ts2_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'Ts3_d' + '.png'
    imsave(save_filename, Ts3_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'tvdi1_d' + '.png'
    imsave(save_filename, tvdi1_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'tvdi2_d' + '.png'
    imsave(save_filename, tvdi2_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'tvdi3_d' + '.png'
    imsave(save_filename, tvdi3_d)
    # image mosaicking
    imgall_origin, corner_origin = imagemosaicking.cut_img_easy(img1, img2, img3, corner1, corner2, corner3)
    imgall_ndvi, corner_ndvi = imagemosaicking.cut_img_easy(ndvi1, ndvi2, ndvi3, corner1, corner2, corner3)
    imgall_vfc, corner_vfc = imagemosaicking.cut_img_easy(vfc1, vfc2, vfc3, corner1, corner2, corner3)
    imgall_Ts, corner_Ts = imagemosaicking.cut_img_easy(Ts1, Ts2, Ts3, corner1, corner2, corner3)
    imgall_tvdi, corner_tvdi = imagemosaicking.cut_img_easy(tvdi1, tvdi2, tvdi3, corner1, corner2, corner3)
    imgall_tvdi_cover, corner_cover = imagemosaicking.cut_img_easy(cover1, cover2, cover3, corner1, corner2, corner3)
    save_filename = 'output/' + case_name[ii] + '_' + 'imgall_origin' + '.png'
    imsave(save_filename, imgall_origin)
    save_filename = 'output/' + case_name[ii] + '_' + 'imgall_ndvi' + '.png'
    imsave(save_filename, imgall_ndvi)
    save_filename = 'output/' + case_name[ii] + '_' + 'imgall_vfc' + '.png'
    imsave(save_filename, imgall_vfc)
    save_filename = 'output/' + case_name[ii] + '_' + 'imgall_Ts' + '.png'
    imsave(save_filename, imgall_Ts)
    save_filename = 'output/' + case_name[ii] + '_' + 'imgall_tvdi' + '.png'
    imsave(save_filename, imgall_tvdi)
    # 20% downsampled copies of the mosaics.
    imgall_origin_d = cv2.resize(imgall_origin, None, fx=0.2, fy=0.2)
    imgall_ndvi_d = cv2.resize(imgall_ndvi, None, fx=0.2, fy=0.2)
    imgall_vfc_d = cv2.resize(imgall_vfc, None, fx=0.2, fy=0.2)
    imgall_Ts_d = cv2.resize(imgall_Ts, None, fx=0.2, fy=0.2)
    imgall_tvdi_d = cv2.resize(imgall_tvdi, None, fx=0.2, fy=0.2)
    imgall_tvdi_cover_d = cv2.resize(imgall_tvdi_cover, None, fx=0.2, fy=0.2)
    save_filename = 'output/d' + case_name[ii] + '_' + 'imgall_origin_d' + '.png'
    imsave(save_filename, imgall_origin_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'imgall_ndvi_d' + '.png'
    imsave(save_filename, imgall_ndvi_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'imgall_vfc_d' + '.png'
    imsave(save_filename, imgall_vfc_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'imgall_Ts_d' + '.png'
    imsave(save_filename, imgall_Ts_d)
    save_filename = 'output/d' + case_name[ii] + '_' + 'imgall_tvdi_d' + '.png'
    imsave(save_filename, imgall_tvdi_d)
    print 'complete image mosaicking...'
    # image filtering
    # NOTE(review): the box-20 filtered results below are only used by the
    # disabled save block that follows — dead work unless re-enabled.
    filter_box = 20
    imgall_origin_filtered = imageprocess.mean_filter(imgall_origin_d, filter_box)
    imgall_ndvi_filtered = imageprocess.mean_filter(imgall_ndvi_d, filter_box)
    imgall_vfc_filtered = imageprocess.mean_filter(imgall_vfc_d, filter_box)
    imgall_Ts_filtered = imageprocess.mean_filter(imgall_Ts_d, filter_box)
    imgall_tvdi_filtered = imageprocess.mean_filter(imgall_tvdi_d, filter_box)
    print 'complete image filtering...'
    """
    save_filename = 'output/' + case_name[ii] + '_' + 'imgall_origin_filtered' + '.png'
    imsave(save_filename, imgall_origin_filtered)
    save_filename = 'output/' + case_name[ii] + '_' + 'imgall_ndvi_filtered' + '.png'
    imsave(save_filename, imgall_ndvi_filtered)
    save_filename = 'output/' + case_name[ii] + '_' + 'imgall_vfc_filtered' + '.png'
    imsave(save_filename, imgall_vfc_filtered)
    save_filename = 'output/' + case_name[ii] + '_' + 'imgall_Ts_filtered' + '.png'
    imsave(save_filename, imgall_Ts_filtered)
    save_filename = 'output/' + case_name[ii] + '_' + 'imgall_tvdi_filtered' + '.png'
    imsave(save_filename, imgall_tvdi_filtered)
    """
    # NOTE(review): the imgall_* names are rebound here to box-5 filtered,
    # downsampled versions — the full-resolution mosaics are discarded.
    filter_box = 5
    imgall_origin = imageprocess.mean_filter(imgall_origin_d, filter_box)
    imgall_ndvi = imageprocess.mean_filter(imgall_ndvi_d, filter_box)
    imgall_vfc = imageprocess.mean_filter(imgall_vfc_d, filter_box)
    imgall_Ts = imageprocess.mean_filter(imgall_Ts_d, filter_box)
    imgall_tvdi = imageprocess.mean_filter(imgall_tvdi_d, filter_box)
    print 'complete image filtering...'
    # density divide
    vfc_3d = analysis.vfc_divide(imgall_vfc, imgall_ndvi)
    tvdi_3d = analysis.tvdi_divide(imgall_tvdi, imgall_ndvi, imgall_tvdi_cover_d)
    print 'complete density divide...'
    save_filename = 'output/' + case_name[ii] + '_' + 'vfc_3d' + '.png'
    imsave(save_filename, vfc_3d)
    save_filename = 'output/' + case_name[ii] + '_' + 'tvdi_3d' + '.png'
    imsave(save_filename, tvdi_3d)
    # Disabled per-town / per-county polygon masking and export step.
    """
    #pn_poly
    county_cover = np.zeros_like(imgall_origin)
    for town_num in range(len(town_names)):
        print town_num + 1
        geo_filename = geojson_path + town_names[town_num] + '.geojson'
        geodata = datainterface.geojson_read(geo_filename)
        town_cover = imageprocess.pn_poly(imgall_origin, corner_origin, geodata)
        county_cover += town_cover
        town_origin = town_cover * imgall_origin
        town_vfc = town_cover * imgall_vfc
        town_Ts = town_cover * imgall_Ts
        town_tvdi = town_cover * imgall_tvdi
        town_vfc_4d = np.zeros((vfc_3d.shape[0], vfc_3d.shape[1], 4))
        town_tvdi_4d = np.zeros((tvdi_3d.shape[0], tvdi_3d.shape[1], 4))
        for i in range(3):
            town_vfc_4d[:, :, i] = vfc_3d[:, :, i] / 255.0
            town_tvdi_4d[:, :, i] = tvdi_3d[:, :, i] / 255.0
        town_vfc_4d[:,:,3] = town_cover
        town_tvdi_4d[:,:,3] = town_cover
        var_names = ('town_origin', 'town_vfc', 'town_Ts', 'town_tvdi',\
                     'town_vfc_4d', 'town_tvdi_4d')
        for var_name in var_names:
            save_filename = 'output/' + case_name[ii] + town_names[town_num] + var_name + '_' + '.png'
            print 'saving images of '+ town_names[town_num] + var_name + '...'
            if (var_name != 'town_vfc_4d') and (var_name != 'town_tvdi_4d'):
                imsave(save_filename, eval(var_name) * town_cover)
            else:
#                img_temp = np.zeros((town_cover.shape[0], town_cover.shape[1],4))
#                img_temp[:,:,0:3] = eval(var_name)
#                img_temp[:,:,3] = town_cover
#                imsave(save_filename, img_temp)
                imsave(save_filename, eval(var_name))
    print 'saving images of county...'
    county_origin = county_cover * imgall_origin
    county_vfc = county_cover * imgall_vfc
    county_Ts = county_cover * imgall_Ts
    county_tvdi = county_cover * imgall_tvdi
    county_vfc_4d = np.zeros((vfc_3d.shape[0], vfc_3d.shape[1], 4))
    county_tvdi_4d = np.zeros((tvdi_3d.shape[0], tvdi_3d.shape[1], 4))
    for i in range(3):
        county_vfc_4d[:, :, i] = vfc_3d[:, :, i] / 255.0
        county_tvdi_4d[:, :, i] = tvdi_3d[:, :, i] / 255.0
    county_vfc_4d[:,:,3] = county_cover
    county_tvdi_4d[:,:,3] = county_cover
    # save county
    var_names = ('county_origin', 'county_vfc', 'county_Ts', 'county_tvdi',\
                 'county_vfc_4d', 'county_tvdi_4d')
    for var_name in var_names:
        print var_name
        save_filename = 'output/' + case_name[ii] + var_name + '_' + '.png'
        print 'saving images of ' + var_name +'...'
        if (var_name != 'county_vfc_4d') and (var_name != 'county_tvdi_4d'):
            imsave(save_filename, eval(var_name) * county_cover)
        else:
            imsave(save_filename, eval(var_name))
#            img_temp = np.zeros((county_cover.shape[0], county_cover.shape[1],4))
#            img_temp[:,:,0:3] = eval(var_name)
#            img_temp[:,:,3] = county_cover
#            imsave(save_filename, img_temp)
    """
| 41.39521 | 117 | 0.667872 |
fb4fc030e59a7a7273510289cf7ba58993b6464b | 2,593 | py | Python | tests/tests_lambda.py | schwin007/Lambda-Metric-Shipper | 8659794cfbf54fe74eaa8bb3f956555d101af604 | [
"Apache-2.0"
] | null | null | null | tests/tests_lambda.py | schwin007/Lambda-Metric-Shipper | 8659794cfbf54fe74eaa8bb3f956555d101af604 | [
"Apache-2.0"
] | 2 | 2019-04-05T21:38:16.000Z | 2019-12-25T07:15:37.000Z | tests/tests_lambda.py | schwin007/Lambda-Metric-Shipper | 8659794cfbf54fe74eaa8bb3f956555d101af604 | [
"Apache-2.0"
] | 6 | 2018-07-29T11:41:35.000Z | 2020-12-02T12:22:52.000Z | import logging
import os
import unittest
from logging.config import fileConfig
from src.lambda_function import validate_configurations as validate
# create logger assuming running from ./run script
# (fileConfig loads handlers/formatters from the ini; the path is relative
# to the working directory, so this raises if started from elsewhere)
fileConfig('tests/logging_config.ini')
logger = logging.getLogger(__name__)
if __name__ == '__main__':
unittest.main() | 35.520548 | 103 | 0.687235 |
fb4fd1b54e5406173715c8f8b6132187b8fbeda2 | 1,954 | py | Python | script/QA_LSTM.py | xjtushilei/Answer_Selection | 4a827f64e5361eab951713c2350632c5278404dd | [
"MIT"
] | 4 | 2017-06-19T01:15:55.000Z | 2020-02-29T03:45:26.000Z | script/QA_LSTM_v2.py | xjtushilei/Answer_Selection | 4a827f64e5361eab951713c2350632c5278404dd | [
"MIT"
] | null | null | null | script/QA_LSTM_v2.py | xjtushilei/Answer_Selection | 4a827f64e5361eab951713c2350632c5278404dd | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow
from keras import Input, optimizers
from keras import backend as K
from keras.engine import Model
from keras import layers
from keras.layers import Bidirectional, LSTM, merge, Reshape, Lambda, Dense, BatchNormalization
# Reset any previous Keras graph/session state before building the model.
K.clear_session()
print("...")
# tendorflow
# Let TensorFlow grow GPU memory on demand instead of grabbing it all.
config = tensorflow.ConfigProto()
config.gpu_options.allow_growth = True
session = tensorflow.Session(config=config)
# Fixed-length token windows; inputs are pre-embedded word vectors.
question_max_len = 40
answer_max_len = 40
embedding_dim = 300
input_question = Input(shape=(question_max_len, embedding_dim))
input_answer = Input(shape=(answer_max_len, embedding_dim))
# lstm
# Separate (unshared) BiLSTM encoders for the question and answer sides.
question_lstm = Bidirectional(LSTM(64))
answer_lstm = Bidirectional(LSTM(64))
encoded_question = question_lstm(input_question)
encoded_answer = answer_lstm(input_answer)
# Cosine distance between the two encodings, turned into a similarity score
# and fed through a sigmoid to predict the match label.
cos_distance = merge([encoded_question, encoded_answer], mode='cos', dot_axes=1)
cos_distance = Reshape((1,))(cos_distance)
cos_similarity = Lambda(lambda x: 1 - x)(cos_distance)
predictions = Dense(1, activation='sigmoid')(cos_similarity)
model = Model([input_question, input_answer], [predictions])
sgd = optimizers.SGD(lr=0.1, clipvalue=0.5)
model.compile(optimizer=sgd,
              loss='binary_crossentropy',
              metrics=['binary_accuracy'])
model.summary()
#
# Pre-vectorised training data produced by an earlier preprocessing step.
questions = np.load('train' + '_' + 'questions' + '.npy')
answers = np.load('train' + '_' + 'answers' + '.npy')
labels = np.load('train' + '_' + 'labels' + '.npy')
# dev
dev_questions = np.load('dev' + '_' + 'questions' + '.npy')
dev_answers = np.load('dev' + '_' + 'answers' + '.npy')
dev_labels = np.load('dev' + '_' + 'labels' + '.npy')
#
model.fit([questions, answers], [labels],
          epochs=2,
          batch_size=256,
          validation_data=([dev_questions, dev_answers], [dev_labels]))
#
# Score the dev set and persist the raw sigmoid outputs.
print('')
predict = model.predict([dev_questions, dev_answers], verbose=1, batch_size=256)
print(predict)
np.save('predict.npy', predict)
| 31.516129 | 95 | 0.720061 |
fb515bfaa92002625ae59283942eea3a360391f0 | 467 | py | Python | lib/mysocket.py | vanphuong12a2/pposter | fac6e289985909de059150ca860677dba9ade6c9 | [
"MIT"
] | null | null | null | lib/mysocket.py | vanphuong12a2/pposter | fac6e289985909de059150ca860677dba9ade6c9 | [
"MIT"
] | null | null | null | lib/mysocket.py | vanphuong12a2/pposter | fac6e289985909de059150ca860677dba9ade6c9 | [
"MIT"
] | null | null | null | from flask_socketio import SocketIO
# Socket.IO event name used when pushing notifications to connected clients.
NOTI = 'notification'
| 23.35 | 81 | 0.62955 |
fb51e6590cca3f878c6c2b90af8869c140eb763b | 2,128 | py | Python | server/tests/steps/sql_translator/test_filter.py | davinov/weaverbird | 3f907f080729ba70be8872d6c5ed0fdcec9b8a9a | [
"BSD-3-Clause"
] | 54 | 2019-11-20T15:07:39.000Z | 2022-03-24T22:13:51.000Z | server/tests/steps/sql_translator/test_filter.py | ToucanToco/weaverbird | 7cbd3cc612437a876470cc872efba69526694d62 | [
"BSD-3-Clause"
] | 786 | 2019-10-20T11:48:37.000Z | 2022-03-23T08:58:18.000Z | server/tests/steps/sql_translator/test_filter.py | davinov/weaverbird | 3f907f080729ba70be8872d6c5ed0fdcec9b8a9a | [
"BSD-3-Clause"
] | 10 | 2019-11-21T10:16:16.000Z | 2022-03-21T10:34:06.000Z | import pytest
from weaverbird.backends.sql_translator.metadata import SqlQueryMetadataManager
from weaverbird.backends.sql_translator.steps import translate_filter
from weaverbird.backends.sql_translator.types import SQLQuery
from weaverbird.pipeline.conditions import ComparisonCondition
from weaverbird.pipeline.steps import FilterStep
| 40.150943 | 111 | 0.706767 |
fb5248790acad20b7f6c753089b4b879cf218187 | 4,322 | py | Python | nose2/plugins/loader/testcases.py | leth/nose2 | a8fb776a0533264e0b123fc01237b9d2a039e9d0 | [
"BSD-2-Clause"
] | null | null | null | nose2/plugins/loader/testcases.py | leth/nose2 | a8fb776a0533264e0b123fc01237b9d2a039e9d0 | [
"BSD-2-Clause"
] | null | null | null | nose2/plugins/loader/testcases.py | leth/nose2 | a8fb776a0533264e0b123fc01237b9d2a039e9d0 | [
"BSD-2-Clause"
] | null | null | null | """
Load tests from :class:`unittest.TestCase` subclasses.
This plugin implements :func:`loadTestsFromName` and
:func:`loadTestsFromModule` to load tests from
:class:`unittest.TestCase` subclasses found in modules or named on the
command line.
"""
# Adapted from unittest2/loader.py from the unittest2 plugins branch.
# This module contains some code copied from unittest2/loader.py and other
# code developed in reference to that module and others within unittest2.
# unittest2 is Copyright (c) 2001-2010 Python Software Foundation; All
# Rights Reserved. See: http://docs.python.org/license.html
import logging
import unittest
from nose2 import events, util
__unittest = True
log = logging.getLogger(__name__)
| 37.258621 | 85 | 0.635817 |
fb52ea45a86609e7040cf2f5adb9df43b0bf1496 | 265 | py | Python | todo/main.py | shuayb/simple-todo | 7a6c840d38ada098b5cc3458d652c7db02ffd791 | [
"MIT"
] | null | null | null | todo/main.py | shuayb/simple-todo | 7a6c840d38ada098b5cc3458d652c7db02ffd791 | [
"MIT"
] | null | null | null | todo/main.py | shuayb/simple-todo | 7a6c840d38ada098b5cc3458d652c7db02ffd791 | [
"MIT"
] | null | null | null | from app import app, db
import models
import views
# Entry point for the simple-todo app; debug mode is taken from config.py
# rather than being passed to run().
if __name__ == '__main__':
    app.run()
# No need to do (debug=True), as in config.py, debug = true is already set.
# app.run(debug=True)
# app.run(debug=True, use_debugger=False, use_reloader=False)
| 26.5 | 79 | 0.683019 |
fb53c7de261609a0deb36f13cdae3c4c1cc92433 | 789 | py | Python | Libraries/DUTs/Community/di_vsphere/pysphere/revertToNamedSnapshot.py | nneul/iTest-assets | 478659d176891e45d81f7fdb27440a86a21965bb | [
"MIT"
] | 10 | 2017-12-28T10:15:56.000Z | 2020-10-19T18:13:58.000Z | Libraries/DUTs/Community/di_vsphere/pysphere/revertToNamedSnapshot.py | nneul/iTest-assets | 478659d176891e45d81f7fdb27440a86a21965bb | [
"MIT"
] | 37 | 2018-03-07T00:48:37.000Z | 2021-03-22T20:03:48.000Z | Libraries/DUTs/Community/di_vsphere/pysphere/revertToNamedSnapshot.py | nneul/iTest-assets | 478659d176891e45d81f7fdb27440a86a21965bb | [
"MIT"
] | 27 | 2018-03-06T19:56:01.000Z | 2022-03-23T04:18:23.000Z | import sys
# NOTE: Python 2 script (print statements, `except X, err` syntax).
# Reverts a vSphere VM to a named snapshot.
sys.path.append("./pysphere")
from pysphere import VIServer
from pysphere.resources.vi_exception import VIException, VIApiException, \
    FaultTypes
# (duplicate `import sys` — harmless, sys is already imported above)
import sys
# Usage: revertToNamedSnapshot.py <server> <login> <password> <vm_name> <snapshot>
if len(sys.argv) != 6:
    sys.exit("error = please check arguments")
serverName = sys.argv[1]
login = sys.argv[2]
passwd = sys.argv[3]
vm_name = sys.argv[4]
snap_name = sys.argv[5]
# Connect to the vSphere server and look up the target VM by name.
server = VIServer()
server.connect(serverName, login, passwd)
myVm = server.get_vm_by_name(vm_name)
try:
    revertTask = myVm.revert_to_named_snapshot(snap_name)
    server.disconnect()
except (VIException), err:
    # Report the failure on stdout in the key=value form the caller parses.
    print "RevertResult = " + err.message
    sys.exit(1)
if revertTask is None:
print "RevertResult = success"
else:
print "RevertResult = failure" | 29.222222 | 75 | 0.673004 |
fb56942879beca982f2985123f64367d7b06b779 | 1,431 | py | Python | easy_rec/python/utils/fg_util.py | xia-huang-411303/EasyRec | 7b2050dddc0bfec9e551e2199a36414a3ee82588 | [
"Apache-2.0"
] | 61 | 2021-08-19T06:10:03.000Z | 2021-10-09T06:44:54.000Z | easy_rec/python/utils/fg_util.py | xia-huang-411303/EasyRec | 7b2050dddc0bfec9e551e2199a36414a3ee82588 | [
"Apache-2.0"
] | 41 | 2021-09-08T03:02:42.000Z | 2021-09-29T09:00:57.000Z | easy_rec/python/utils/fg_util.py | xia-huang-411303/EasyRec | 7b2050dddc0bfec9e551e2199a36414a3ee82588 | [
"Apache-2.0"
] | 11 | 2021-08-20T06:19:08.000Z | 2021-10-02T14:55:39.000Z | import json
import logging
import tensorflow as tf
from easy_rec.python.protos.dataset_pb2 import DatasetConfig
from easy_rec.python.protos.feature_config_pb2 import FeatureConfig
from easy_rec.python.utils.config_util import get_compatible_feature_configs
from easy_rec.python.utils.convert_rtp_fg import load_input_field_and_feature_config # NOQA
# Run the TF1-style code below under TensorFlow 2.x via the compat shim.
# NOTE(review): lexicographic version comparison — would misorder e.g.
# '10.0' vs '2.0'; fine for current 1.x/2.x releases.
if tf.__version__ >= '2.0':
  tf = tf.compat.v1
| 34.071429 | 92 | 0.811321 |
fb56b6a8fcadb6c716511c7be794553961db8e2e | 529 | py | Python | modules/api/functional_test/live_tests/conftest.py | exoego/vinyldns | aac4c2afe4c599ac8c96ad3a826f3a6dff887104 | [
"Apache-2.0"
] | null | null | null | modules/api/functional_test/live_tests/conftest.py | exoego/vinyldns | aac4c2afe4c599ac8c96ad3a826f3a6dff887104 | [
"Apache-2.0"
] | 1 | 2019-02-06T21:38:12.000Z | 2019-02-06T21:38:12.000Z | modules/api/functional_test/live_tests/conftest.py | exoego/vinyldns | aac4c2afe4c599ac8c96ad3a826f3a6dff887104 | [
"Apache-2.0"
] | null | null | null | import pytest
| 18.241379 | 62 | 0.725898 |
fb57d98140afeca2dc5e728adfc2de4c920c0f82 | 15,642 | py | Python | calculate_best_ball_scores.py | arnmishra/sleeper-best-ball | 926d673eebe3a0f114a60f4749dcc451db792b4d | [
"MIT"
] | null | null | null | calculate_best_ball_scores.py | arnmishra/sleeper-best-ball | 926d673eebe3a0f114a60f4749dcc451db792b4d | [
"MIT"
] | null | null | null | calculate_best_ball_scores.py | arnmishra/sleeper-best-ball | 926d673eebe3a0f114a60f4749dcc451db792b4d | [
"MIT"
] | null | null | null | from enum import Enum
import requests
import argparse
import nflgame
def get_user_id_to_team_name(league_id):
    """
    Fetch the league's members from the Sleeper API and map each
    fantasy owner's user id to their display (team) name.
    """
    response = requests.get("https://api.sleeper.app/v1/league/%s/users" % league_id)
    return {member['user_id']: member['display_name'] for member in response.json()}
def get_roster_id_to_owner(user_id_to_team_name, league_id):
    """
    Fetch the league's rosters from the Sleeper API and map each roster id
    to the owning team's name (looked up via user_id_to_team_name).
    """
    response = requests.get('https://api.sleeper.app/v1/league/%s/rosters' % league_id)
    return {
        roster['roster_id']: user_id_to_team_name[roster['owner_id']]
        for roster in response.json()
    }
def get_owner_to_roster(player_id_to_custom_id, roster_id_to_owner, league_id, week):
    """
    Fetch one week's matchups from the Sleeper API.

    Returns a tuple of:
      * owner team name -> list of custom player ids on that roster
      * matchup id -> the two owner names facing each other that week
    """
    owner_to_roster = {}
    matchup_id_to_owners = {}
    response = requests.get('https://api.sleeper.app/v1/league/%s/matchups/%s' %
                            (league_id, week))
    for entry in response.json():
        owner = roster_id_to_owner[entry['roster_id']]
        owner_to_roster[owner] = [
            player_id_to_custom_id[pid] for pid in entry['players']
        ]
        # Both rosters in a head-to-head game share the same matchup id.
        matchup_id_to_owners.setdefault(entry['matchup_id'], []).append(owner)
    return owner_to_roster, matchup_id_to_owners
def get_player_id(first_name, last_name, team):
    """
    Build a custom player ID of first initial + last name + team,
    e.g. Tom Brady in New England becomes "T.Brady-NE".

    A missing team (None, e.g. a free agent) is rendered as the literal
    string "None" so the ID is always well-formed.
    """
    # PEP 8: compare against None with `is`, not `==`.
    if team is None:
        team = 'None'
    return first_name[0] + "." + last_name + "-" + team
def get_custom_id_to_info():
    """
    Fetch every NFL player from the Sleeper API and build two maps:
      * custom id ("T.Brady-NE") -> primary fantasy position
      * Sleeper player id -> custom id
    Only the skill positions used for best ball (QB/RB/WR/TE) are kept.
    """
    custom_id_to_info = {}
    player_id_to_custom_id = {}
    response = requests.get('https://api.sleeper.app/v1/players/nfl')
    for sleeper_id, player in response.json().items():
        positions = player['fantasy_positions']
        if not positions:
            continue
        position = positions[0]
        if position not in ('RB', 'WR', 'QB', 'TE'):
            continue
        custom_id = get_player_id(player['first_name'], player['last_name'], player['team'])
        if not custom_id:
            continue
        player_id_to_custom_id[sleeper_id] = custom_id
        custom_id_to_info[custom_id] = position
    return custom_id_to_info, player_id_to_custom_id
def get_player_to_points(year, week, custom_id_to_info):
    """
    Map each known player's custom id to a (fantasy points, position) tuple
    for one NFL week, using nflgame's combined box-score stats. Players not
    present in custom_id_to_info are ignored.
    """
    week_games = nflgame.games(int(year), week=int(week))
    combined_stats = nflgame.combine_game_stats(week_games)
    player_id_to_points = {}
    for player in combined_stats:
        custom_id = player.name + "-" + player.team
        if custom_id not in custom_id_to_info:
            continue
        player_id_to_points[custom_id] = (
            calculate_player_points(player),
            custom_id_to_info[custom_id],
        )
    print (player_id_to_points)
    return player_id_to_points
def get_points(rbs, wrs, qbs, tes, roster_count):
    """
    Total the best-ball points for one owner.

    Each positional list must already be sorted in descending order; the
    top scores fill that position's starting slots, and the leftover
    RB/WR/TE scores compete for the flex slots.
    """
    n_rb = roster_count['rb']
    n_wr = roster_count['wr']
    n_te = roster_count['te']
    # Scores that did not make a positional slot are flex candidates.
    flex_pool = sorted(rbs[n_rb:] + wrs[n_wr:] + tes[n_te:], reverse=True)
    starters = (rbs[:n_rb] + wrs[:n_wr] + qbs[:roster_count['qb']] +
                tes[:n_te] + flex_pool[:roster_count['flex']])
    return sum(starters)
def get_owner_to_score(owner_to_roster, player_to_points, roster_count):
    """
    Compute each owner's best-ball score for the week.

    Roster players with no stat line in player_to_points are skipped;
    the per-position score lists are sorted descending before being
    handed to get_points.
    """
    owner_to_score = {}
    for owner, roster in owner_to_roster.items():
        by_position = {'RB': [], 'WR': [], 'QB': [], 'TE': []}
        for player in roster:
            if player not in player_to_points:
                continue
            points, position = player_to_points[player]
            if position in by_position:
                by_position[position].append(points)
        for scores in by_position.values():
            scores.sort(reverse=True)
        owner_to_score[owner] = get_points(
            by_position['RB'], by_position['WR'],
            by_position['QB'], by_position['TE'], roster_count)
    return owner_to_score
def get_owner_to_weekly_record(matchup_id_to_owners, final_owner_to_score):
    """
    Turn one week's head-to-head matchups into per-owner
    [wins, losses, ties] triples based on best-ball scores.
    """
    owner_to_record = {}
    for owners in matchup_id_to_owners.values():
        home, away = owners[0], owners[1]
        home_score = final_owner_to_score[home]
        away_score = final_owner_to_score[away]
        if home_score == away_score:
            owner_to_record[home] = [0, 0, 1]
            owner_to_record[away] = [0, 0, 1]
        elif home_score > away_score:
            owner_to_record[home] = [1, 0, 0]
            owner_to_record[away] = [0, 1, 0]
        else:
            owner_to_record[home] = [0, 1, 0]
            owner_to_record[away] = [1, 0, 0]
    return owner_to_record
# CLI entry point: compute best-ball scores/records for one week or a whole
# season and print a table sorted by the user's chosen column.
if __name__ == "__main__":
    # NOTE(review): the bare string below is a no-op statement, not a docstring.
    "Parses all the arguments into variables"
    args = parse_args()
    league_id = args['league_id']
    year = args['year']
    week = args['week']
    end_week = args['end_week']
    # Starting-lineup slot counts per position, driven by CLI flags.
    roster_count = {}
    roster_count['rb'] = args['num_rb']
    roster_count['wr'] = args['num_wr']
    roster_count['qb'] = args['num_qb']
    roster_count['te'] = args['num_te']
    roster_count['flex'] = args['num_flex']
    # Gets a map of the user id to the owner team name
    user_id_to_team_name = get_user_id_to_team_name(league_id)
    # Gets a map of the roster id to the owner team name
    roster_id_to_owner = get_roster_id_to_owner(user_id_to_team_name, league_id)
    # Gets a map of each player id to their name and position
    custom_id_to_info, player_id_to_custom_id = get_custom_id_to_info()
    # A map to track the owner name to its best ball score
    final_owner_to_score = {}
    # A map of each owner to their best ball record
    final_owner_to_record = {}
    # A map of each owner to their best ball rank
    final_owner_to_rank = {}
    # A map of each owner to number of top 6 best ball performances
    final_owner_to_top_half_or_bottom = {}
    num_teams = len(user_id_to_team_name)
    if week:
        # If we are getting it for an individual week, calculate that data
        # Get the number of fantasy points each player scored that week
        player_to_points = get_player_to_points(year, week, custom_id_to_info)
        # Gets the map of each owner to their players and which two teams are playing each other
        owner_to_roster, matchup_id_to_owners = get_owner_to_roster(
            player_id_to_custom_id, roster_id_to_owner, league_id, week)
        # Gets the best ball score for each owner
        final_owner_to_score = get_owner_to_score(owner_to_roster, player_to_points, roster_count)
        # Gets the best ball record for each owner
        final_owner_to_record = get_owner_to_weekly_record(
            matchup_id_to_owners, final_owner_to_score)
        # Sorts the teams by score and determines if they are top 6
        # (sorted ascending, so rank = num_teams - index; indexes >= 6 are
        # the higher scorers — assumes a 12-team league. TODO confirm.)
        sorted_by_score = sorted(final_owner_to_score.items(), key=lambda kv: kv[1])
        for i in range(len(sorted_by_score)):
            owner = sorted_by_score[i][0]
            final_owner_to_rank[owner] = [num_teams-i]
            if(i >= 6):
                final_owner_to_top_half_or_bottom[owner] = 1
    else:
        # If we are getting it for the whole season, calculate that data for each week
        for week in range(1, end_week + 1):
            # Get the number of fantasy points each player scored that week
            player_to_points = get_player_to_points(year, week, custom_id_to_info)
            # Gets the map of each owner to their players and which two teams are playing each other
            owner_to_roster, matchup_id_to_owners = get_owner_to_roster(
                player_id_to_custom_id, roster_id_to_owner, league_id, week)
            # Gets the best ball score for each owner
            owner_to_score = get_owner_to_score(owner_to_roster, player_to_points, roster_count)
            # Gets the best ball record for each owner
            owner_to_record = get_owner_to_weekly_record(
                matchup_id_to_owners, owner_to_score)
            # Adds the total scores and records for each team
            for owner in owner_to_score:
                if owner in final_owner_to_score:
                    final_owner_to_score[owner] += owner_to_score[owner]
                    records = final_owner_to_record[owner]
                    new_record = owner_to_record[owner]
                    final_owner_to_record[owner] = [sum(x) for x in zip(records, new_record)]
                else:
                    final_owner_to_score[owner] = owner_to_score[owner]
                    final_owner_to_record[owner] = owner_to_record[owner]
            # Creates list of tuple of (owner, score) sorted by score
            # (ranks by cumulative season score after each week, not by the
            # week's score alone)
            sorted_by_score = sorted(final_owner_to_score.items(), key=lambda kv: kv[1])
            # Sorts the teams by score and determines if they are top 6
            for i in range(num_teams):
                owner = sorted_by_score[i][0]
                if owner in final_owner_to_rank:
                    final_owner_to_rank[owner].append(num_teams-i)
                else:
                    final_owner_to_rank[owner] = [num_teams-i]
                if(i >= 6):
                    if owner in final_owner_to_top_half_or_bottom:
                        final_owner_to_top_half_or_bottom[owner] += 1
                    else:
                        final_owner_to_top_half_or_bottom[owner] = 1
    # Prints out all the information sorted as the user wants
    # (records become "W-L-T" strings and ranks become season averages here,
    # so the dicts change type before printing)
    for owner in final_owner_to_record:
        final_owner_to_record[owner] = ("-").join([str(elem) for elem in final_owner_to_record[owner]])
        final_owner_to_rank[owner] = round(float(sum(final_owner_to_rank[owner])) / len(final_owner_to_rank[owner]), 2)
        if owner not in final_owner_to_top_half_or_bottom:
            final_owner_to_top_half_or_bottom[owner] = 0
    if args['sort_by'] == 'record':
        sorted_records = final_owner_to_record.items()
        sorted_records = sorted(sorted_records, key=lambda tup: int(tup[1].split("-")[0])) # sort by the records
        print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format('Team', 'Record(W-L-T)', 'Score', 'Top 6 Performances', 'Average Rank'))
        for record in sorted_records:
            owner = record[0]
            print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format(owner, record[1], final_owner_to_score[owner], final_owner_to_top_half_or_bottom[owner], final_owner_to_rank[owner]))
    elif args['sort_by'] == 'rank':
        sorted_rank = final_owner_to_rank.items()
        sorted_rank = sorted(sorted_rank, key=lambda tup: tup[1], reverse=True) # sort by the ranks
        print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format('Team', 'Average Rank', 'Score', 'Record(W-L-T)', 'Top 6 Performances'))
        for rank in sorted_rank:
            owner = rank[0]
            print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format(owner, rank[1], final_owner_to_score[owner], final_owner_to_record[owner], final_owner_to_top_half_or_bottom[owner]))
    elif args['sort_by'] == 'top6':
        sorted_top6 = final_owner_to_top_half_or_bottom.items()
        sorted_top6 = sorted(sorted_top6, key=lambda tup: tup[1]) # sort by the top 6 performances
        print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format('Team', 'Top 6 Performances', 'Score', 'Record(W-L-T)', 'Average Rank'))
        for top6 in sorted_top6:
            owner = top6[0]
            print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format(owner, top6[1], final_owner_to_score[owner], final_owner_to_record[owner], final_owner_to_rank[owner]))
    elif args['sort_by'] == 'score':
        sorted_scores = final_owner_to_score.items()
        sorted_scores = sorted(sorted_scores, key=lambda tup: tup[1]) # sort by the scores
        print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format('Team', 'Score', 'Record(W-L-T)', 'Top 6 Performances', 'Average Rank'))
        for score in sorted_scores:
            owner = score[0]
            print("{0:<20}{1:<20}{2:<20}{3:<20}{4:<20}".format(owner, score[1], final_owner_to_record[owner], final_owner_to_top_half_or_bottom[owner], final_owner_to_rank[owner]))
    else:
        print("Please enter either 'score', 'record', 'rank', or 'top6' for the sort option. %s isn't recognized" % args['sort_by'])
| 47.256798 | 180 | 0.637642 |
fb593493c97b14b708bc0b8b5a7f5e7166948d28 | 10,489 | py | Python | Kelp/kelp.py | trondkr/particleDistributions | 1f5be088150db92c985c00210951ab62521bf694 | [
"MIT"
] | null | null | null | Kelp/kelp.py | trondkr/particleDistributions | 1f5be088150db92c985c00210951ab62521bf694 | [
"MIT"
] | 1 | 2019-07-11T15:02:32.000Z | 2019-07-11T16:57:03.000Z | Kelp/kelp.py | trondkr/particleDistributions | 1f5be088150db92c985c00210951ab62521bf694 | [
"MIT"
] | 1 | 2019-12-23T06:49:29.000Z | 2019-12-23T06:49:29.000Z | #!/usr/bin/env python
from datetime import datetime, timedelta
import numpy as np
from opendrift.readers import reader_basemap_landmask
from opendrift.readers import reader_ROMS_native
from kelp.kelpClass import PelagicPlanktonDrift
from opendrift.readers import reader_netCDF_CF_generic
import logging
import gdal
import os
from netCDF4 import Dataset, datetime, date2num,num2date
from numpy.random import RandomState
import random
import glob
import matplotlib.pyplot as plt
try:
import ogr
import osr
except Exception as e:
print(e)
raise ValueError('OGR library is needed to read shapefiles.')
#########################
# SETUP FOR KELP PROJECT
#########################
startTime=datetime(2016,4,10,12,0,0)
endTime=datetime(2016,5,26,23,0,0)
startReleaseTime=startTime
endReleaseTime=datetime(2016,4,12,12,0,0)
releaseParticles=4 # Per timestep multiplied by gaussian bell (so maximum is releaseParticles and minimum is close to zero)
lowDepth, highDepth = -7, -2 # in negative meters
verticalBehavior=False
hoursBetweenTimestepInROMSFiles=1
#kinoDirectory='/work/users/trondk/KINO/FORWARD/Run/RESULTS/'+str(startTime.year)+'/'
kinoDirectory='/work/shared/nn9297k/Nordfjord/'
kinoDirectory='/imr/vol1/NorFjords5/Malangen-160m_AUG2015-AUG2016/'
svimDirectory='/work/shared/imr/SVIM/'+str(startTime.year)+'/'
firstkino = int(date2num(startTime,units="days since 1948-01-01 00:00:00",calendar="standard"))
lastkino = int(date2num(endTime,units="days since 1948-01-01 00:00:00",calendar="standard"))
apattern = 'norfjords_160m_his.nc4_%s*'%(startTime.year)
argument="%s%s"%(kinoDirectory,apattern)
pattern_kino = glob.glob(argument)
pattern_kino.sort()
print(pattern_kino)
pattern_svim='ocean_avg_*.nc'
shapefile='/work/shared/nn9297k/Kelp/Shapefile/KelpExPol_utenNASAland.shp'
print("=> Using shapefile %s"%(shapefile))
s = ogr.Open(shapefile)
for layer in s:
polygons=[x+1 for x in range(layer.GetFeatureCount()-1)]
#polygons=[1,2,3,4,7] #N.Trench,Dogger bank C, Dogger bank, German bight, Viking bank
#polygons=[2] #N.Trench,Dogger bank C, Dogger bank, German bight, Viking bank
for polygonIndex in polygons:
feature = layer.GetFeature(polygonIndex-1)
print("Area",feature.GetGeometryRef().GetArea())
geom = feature.GetGeometryRef()
points = geom.GetGeometryCount()
ring = geom.GetGeometryRef(0)
print("jj",polygonIndex, points)
if ring.GetPointCount() > 3:
outputFilename, animationFilename, plotFilename = createOutputFilenames(startTime,endTime,polygonIndex,shapefile,verticalBehavior)
print("Result files will be stored as:\nnetCDF=> %s\nmp4=> %s"%(outputFilename,animationFilename))
createAndRunSimulation(lowDepth,highDepth,endTime,
layer,polygonIndex,shapefile,
outputFilename,animationFilename,plotFilename,releaseParticles,
kinoDirectory,pattern_kino,svimDirectory,pattern_svim,verticalBehavior)
| 38.992565 | 218 | 0.695586 |
fb59a435d0311305f0e15444f804e1c503ccd050 | 6,818 | py | Python | evernotebot/bot/storage.py | AuroraDysis/evernote-telegram-bot | eca7b7c53d2e034e366f1e715211dbe98b4991f7 | [
"MIT"
] | 1 | 2021-03-29T07:31:22.000Z | 2021-03-29T07:31:22.000Z | evernotebot/bot/storage.py | AuroraDysis/evernote-telegram-bot | eca7b7c53d2e034e366f1e715211dbe98b4991f7 | [
"MIT"
] | null | null | null | evernotebot/bot/storage.py | AuroraDysis/evernote-telegram-bot | eca7b7c53d2e034e366f1e715211dbe98b4991f7 | [
"MIT"
] | null | null | null | import json
import sqlite3
import typing
from typing import Optional, Dict
from copy import deepcopy
from contextlib import suppress
from bson.objectid import ObjectId
from pymongo import MongoClient
from pymongo.errors import ConfigurationError
| 37.668508 | 93 | 0.589909 |
fb59b4889f363415f77eaf0d9d1624d307371014 | 13,486 | py | Python | tests/models/programdb/opstress/opstress_integration_test.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 26 | 2019-05-15T02:03:47.000Z | 2022-02-21T07:28:11.000Z | tests/models/programdb/opstress/opstress_integration_test.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 815 | 2019-05-10T12:31:52.000Z | 2022-03-31T12:56:26.000Z | tests/models/programdb/opstress/opstress_integration_test.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 9 | 2019-04-20T23:06:29.000Z | 2022-01-24T21:21:04.000Z | # pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.models.opstress.opstress_integration_test.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Class for testing operating stress integrations."""
# Third Party Imports
import pytest
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models import RAMSTKOpStressRecord, RAMSTKOpStressTable
| 39.899408 | 88 | 0.710811 |
fb5a0434cd62419bd4c146c19aafa8c01ca37afb | 1,159 | py | Python | rssant_async/views.py | landlordlycat/rssant | 12d9182154a3ffaa35310f1258de4be4822cf1e6 | [
"BSD-3-Clause"
] | null | null | null | rssant_async/views.py | landlordlycat/rssant | 12d9182154a3ffaa35310f1258de4be4822cf1e6 | [
"BSD-3-Clause"
] | null | null | null | rssant_async/views.py | landlordlycat/rssant | 12d9182154a3ffaa35310f1258de4be4822cf1e6 | [
"BSD-3-Clause"
] | null | null | null | import os
from validr import T
from aiohttp.web import json_response
from aiohttp.web_request import Request
from rssant_common import timezone
from rssant_common.image_token import ImageToken, ImageTokenDecodeError
from rssant_config import CONFIG
from .rest_validr import ValidrRouteTableDef
from .image_proxy import image_proxy
routes = ValidrRouteTableDef()
| 26.340909 | 71 | 0.724763 |
fb5bb09cbb3eed2ec9bb972fd01943b3b7af90ee | 2,785 | py | Python | tests/test_matching.py | grickly-nyu/grickly | 39fbf796ea5918d0183b3aa1b3ae23dcb3d84f22 | [
"MIT"
] | 3 | 2021-02-04T02:53:35.000Z | 2021-07-22T01:09:36.000Z | tests/test_matching.py | grickly-nyu/grickly | 39fbf796ea5918d0183b3aa1b3ae23dcb3d84f22 | [
"MIT"
] | 17 | 2021-02-19T23:25:29.000Z | 2021-05-16T04:18:00.000Z | tests/test_matching.py | grickly-nyu/grickly | 39fbf796ea5918d0183b3aa1b3ae23dcb3d84f22 | [
"MIT"
] | null | null | null | from testing_config import BaseTestConfig
from application.models import User
from application.models import Chatroom
import json
from application.utils import auth
| 29.315789 | 76 | 0.560144 |
fb5d367b92efd326d4327262afe891263095720b | 1,633 | py | Python | examples/pycaffe/layers/aggregation_cross_entropy_layer.py | HannaRiver/all-caffe | eae31715d903c1e3ef7035702d66b23d9cdf45c3 | [
"BSD-2-Clause"
] | null | null | null | examples/pycaffe/layers/aggregation_cross_entropy_layer.py | HannaRiver/all-caffe | eae31715d903c1e3ef7035702d66b23d9cdf45c3 | [
"BSD-2-Clause"
] | null | null | null | examples/pycaffe/layers/aggregation_cross_entropy_layer.py | HannaRiver/all-caffe | eae31715d903c1e3ef7035702d66b23d9cdf45c3 | [
"BSD-2-Clause"
] | null | null | null | import sys
sys.path.insert(0, '/home/hena/caffe-ocr/buildcmake/install/python')
sys.path.insert(0, '/home/hena/tool/protobuf-3.1.0/python')
import caffe
import math
import numpy as np
| 29.160714 | 82 | 0.515615 |
fb5dda247bf82e8dba4c4c4fbaea1e533adc2c8f | 2,530 | py | Python | pyroms/sta_hgrid.py | ChuningWang/pyroms2 | 090a1a6d614088612f586f80b335ddb0dc0077a2 | [
"MIT"
] | null | null | null | pyroms/sta_hgrid.py | ChuningWang/pyroms2 | 090a1a6d614088612f586f80b335ddb0dc0077a2 | [
"MIT"
] | null | null | null | pyroms/sta_hgrid.py | ChuningWang/pyroms2 | 090a1a6d614088612f586f80b335ddb0dc0077a2 | [
"MIT"
] | null | null | null | """
Tools for creating and working with Line (Station) Grids
"""
from typing import Union
import pyproj
import numpy as np
_atype = Union[type(None), np.ndarray]
_ptype = Union[type(None), pyproj.Proj]
| 24.326923 | 75 | 0.527273 |
fb5e69fb9347917ede848ed32aab714b5ee1edac | 3,364 | py | Python | lstchain/visualization/camera.py | misabelber/cta-lstchain | 08fc4dccfe8a05a77fa46fb4ffb6e26f439c0a93 | [
"BSD-3-Clause"
] | null | null | null | lstchain/visualization/camera.py | misabelber/cta-lstchain | 08fc4dccfe8a05a77fa46fb4ffb6e26f439c0a93 | [
"BSD-3-Clause"
] | null | null | null | lstchain/visualization/camera.py | misabelber/cta-lstchain | 08fc4dccfe8a05a77fa46fb4ffb6e26f439c0a93 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from ..reco.disp import disp_vector
import astropy.units as u
import matplotlib.pyplot as plt
from ctapipe.visualization import CameraDisplay
__all__ = [
'overlay_disp_vector',
'overlay_hillas_major_axis',
'overlay_source',
'display_dl1_event',
]
def display_dl1_event(event, camera_geometry, tel_id=1, axes=None, **kwargs):
    """
    Display a DL1 event (image and pulse time map) side by side

    Parameters
    ----------
    event: ctapipe event containing DL1 data for telescope `tel_id`
    camera_geometry: camera geometry used for both `CameraDisplay`s
    tel_id: int
        telescope id looked up in `event.dl1.tel`
    axes: list of `matplotlib.pyplot.axes` of shape (2,) or None
        if None, a new figure with two side-by-side axes is created
    kwargs: kwargs for `ctapipe.visualization.CameraDisplay`

    Returns
    -------
    axes: `matplotlib.pyplot.axes`

    Raises
    ------
    ValueError
        if the telescope has no calibrated image or no pulse time map
    """
    if axes is None:
        # the figure handle is never used, only the pair of axes
        _, axes = plt.subplots(1, 2, figsize=(12, 5))
    image = event.dl1.tel[tel_id].image
    peak_time = event.dl1.tel[tel_id].peak_time
    if image is None or peak_time is None:
        # ValueError is a subclass of Exception, so existing handlers keep working
        raise ValueError(
            f"There is no calibrated image or pulse time map for telescope {tel_id}")
    d1 = CameraDisplay(camera_geometry, image, ax=axes[0], **kwargs)
    d1.add_colorbar(ax=axes[0])
    d2 = CameraDisplay(camera_geometry, peak_time, ax=axes[1], **kwargs)
    d2.add_colorbar(ax=axes[1])
    return axes
def overlay_source(display, source_pos_x, source_pos_y, **kwargs):
    """
    Mark the source (event) position on a camera display.

    Parameters
    ----------
    display: `ctapipe.visualization.CameraDisplay`
    source_pos_x: `astropy.units.Quantity`
    source_pos_y: `astropy.units.Quantity`
    kwargs: args for `matplotlib.pyplot.scatter`
    """
    # default to a red cross unless the caller picked their own styling
    kwargs.setdefault('marker', 'x')
    kwargs.setdefault('color', 'red')
    display.axes.scatter(source_pos_x, source_pos_y, **kwargs)
def overlay_disp_vector(display, disp, hillas, **kwargs):
    """
    Overlay disp vector on a CameraDisplay

    The arrow starts at the Hillas centroid (hillas.x, hillas.y) and points
    along (disp.dx, disp.dy).

    Parameters
    ----------
    display: `ctapipe.visualization.CameraDisplay`
    disp: `DispContainer`
    hillas: `ctapipe.containers.HillasParametersContainer`
    kwargs: args for `matplotlib.pyplot.quiver`
    """
    # the centroid must be finite, otherwise the arrow has no origin
    assert np.isfinite([hillas.x.value, hillas.y.value]).all()
    if not np.isfinite([disp.dx.value, disp.dy.value]).all():
        # NOTE(review): the return value of disp_vector is discarded —
        # presumably it fills disp.dx/disp.dy in place; confirm against
        # the implementation in ..reco.disp
        disp_vector(disp)

    display.axes.quiver(hillas.x, hillas.y,
                        disp.dx, disp.dy,
                        units='xy', scale=1*u.m,
                        angles='xy',
                        **kwargs,
                        )
    # NOTE(review): a second, unit-stripped arrow is drawn on top without
    # forwarding **kwargs — looks like a leftover debugging call; confirm
    # whether both quivers are intended
    display.axes.quiver(hillas.x.value, hillas.y.value, disp.dx.value, disp.dy.value, units='xy', scale=1)
def overlay_hillas_major_axis(display, hillas, **kwargs):
    """
    Draw the major axis of the Hillas ellipse on a camera display.

    Parameters
    ----------
    display: `ctapipe.visualization.CameraDisplay`
    hillas: `ctapipe.containers.HillaParametersContainer`
    kwargs: args for `matplotlib.pyplot.plot`
    """
    kwargs.setdefault('color', 'black')
    # 10 sample offsets along the axis, from -2*length up to +1.6*length
    half_span = hillas.length * 2
    offsets = -half_span + 2 * half_span * np.arange(10) / 10
    psi_rad = hillas.psi.to(u.rad).value
    display.axes.plot(hillas.x + offsets * np.cos(psi_rad),
                      hillas.y + offsets * np.sin(psi_rad),
                      **kwargs,
                      )
fb5ee7c913a1ddd435fb481e4af6d53922603786 | 14,537 | py | Python | QFlow-2.0/QFlow/Process_Data.py | jpzwolak/QFlow-suite | d34d74d8690908137adbce0e71587884758b5ecf | [
"MIT"
] | null | null | null | QFlow-2.0/QFlow/Process_Data.py | jpzwolak/QFlow-suite | d34d74d8690908137adbce0e71587884758b5ecf | [
"MIT"
] | null | null | null | QFlow-2.0/QFlow/Process_Data.py | jpzwolak/QFlow-suite | d34d74d8690908137adbce0e71587884758b5ecf | [
"MIT"
] | 1 | 2022-02-16T22:25:22.000Z | 2022-02-16T22:25:22.000Z | import numpy as np
import random
from scipy.stats import skew as scipy_skew
from skimage.transform import resize as skimage_resize
from QFlow import config
## set of functions for loading and preparing a dataset for training.
def get_num_min_class(labels):
    '''
    Get the number of examples of the least represented class in a label
    vector. Used for resampling data.

    input:
        labels: np.ndarray of one-hot / fractional labels with shape
            (num_examples, num_classes); the argmax over the last axis is
            taken as each example's class.
    outputs:
        num_samples: int number of examples in the minimum class
            (0 if some class never appears).
    '''
    # use argmax as example's class
    argmax_labels = np.argmax(labels, axis=-1)
    # count every class id in one pass; minlength keeps classes with zero
    # examples in the counts so they still drive the minimum to 0
    counts = np.bincount(argmax_labels, minlength=labels.shape[-1])
    return int(counts.min())
def resample_data(features, state_labels, labels=None, seed=None):
    '''
    Resample data to be evenly distributed across classes in labels by cutting
    number of examples for each class to be equal to the number of examples
    in the least represented class. (classes assumed to be last axis of
    labels). Shuffles after resampling.
    inputs:
        features: ndarray of features to be resampled. Resample along first axis.
        state_labels: ndarray of labels to be used for resampling
        labels: optional ndarray of labels to be resampled alongside features;
            if None, the resampled state_labels are returned instead.
        seed: Seed of random number generator for shuffling idxs during resample
        and for shuffling resampled features and labels.
    outputs:
        features: ndarray of resampled features
        labels: ndarray of resampled labels (state_labels if labels is None)
    '''
    rng = np.random.default_rng(seed)
    # size of the least represented class; every class is cut to this count
    num_samples = get_num_min_class(state_labels)
    features_resamp = []; state_labels_resamp = []; labels_resamp = []
    for i in range(state_labels.shape[-1]):
        # boolean mask selecting examples whose argmax class is i
        s_idxs = state_labels.argmax(axis=-1)==i
        # first get full array of single state
        features_s_full = features[s_idxs]
        state_labels_s_full = state_labels[s_idxs]
        if labels is not None:
            labels_s_full = labels[s_idxs]
        # then get idxs (0-length), shuffle, and slice to num_samples
        # shuffle idxs to be sure labels and features are shuffled together
        idxs = list(range(features_s_full.shape[0]))
        rng.shuffle(idxs)
        features_resamp.append(features_s_full[idxs[:num_samples]])
        state_labels_resamp.append(state_labels_s_full[idxs[:num_samples]])
        if labels is not None:
            labels_resamp.append(labels_s_full[idxs[:num_samples]])
    features_resamp_arr = np.concatenate(features_resamp, axis=0)
    state_labels_resamp_arr = np.concatenate(state_labels_resamp, axis=0)
    if labels is not None:
        labels_resamp_arr = np.concatenate(labels_resamp, axis=0)
    # final shuffle so classes are interleaved rather than grouped
    idxs = list(range(features_resamp_arr.shape[0]))
    rng.shuffle(idxs)
    if labels is not None:
        return features_resamp_arr[idxs], labels_resamp_arr[idxs]
    elif labels is None:
        return features_resamp_arr[idxs], state_labels_resamp_arr[idxs]
def noise_mag_to_class(state_labels, noise_mags,
                       low_thresholds=None, high_thresholds=None):
    '''
    Convert noise magnitudes into (fractional) noise-quality class labels.

    Per-state low/high thresholds split each noise magnitude into three
    quality classes; the per-state one-hot class vectors are then averaged
    with the (possibly fractional) state labels as weights. Thresholds for
    states order is: no dot, left dot, central dot, right dot, double dot.
    Default low thresholds are the linear extrapolation to 100 % accuracy
    of an average noisy-trained model vs. noise_mag. Default high
    thresholds are from linear extrapolation to 0 % accuracy of an
    average noisy trained model vs. noise_mag.

    inputs:
        state_labels: array of state labels. shape assumed to be
            (num_examples, num_states).
        noise_mags: array of float noise_mags for state_labels. shape
            assumed to be (num_examples, ).
        low_thresholds: list of floats of shape (num_states, ) specifying
            high/moderate signal-to-noise class thresholds.
        high_thresholds: list of floats of shape (num_states, ) specifying
            moderate/low signal-to-noise class thresholds.
    outputs:
        quality_classes: array of shape (num_examples, num_quality_classes).
    '''
    # number of quality classes is fixed by config; the boolean indexing
    # below hard-codes 3 classes, so no other value is supported.
    num_quality_classes = config.NUM_QUALITY_CLASSES
    # number of states comes from the labels themselves; the threshold
    # lists must have this same length.
    num_states = state_labels.shape[-1]
    # set default thresholds
    if high_thresholds is None:
        high_thresholds = [1.22, 1.00, 1.21, 0.68, 2.00]
    if low_thresholds is None:
        low_thresholds = [0.31, 0.32, 0.41, 0.05, 0.47]
    low_thresholds = np.array(low_thresholds)
    high_thresholds = np.array(high_thresholds)
    # one-hot quality class per (example, class, state), filled by
    # boolean-mask indexing from the thresholds
    per_state_classes = np.zeros(
        noise_mags.shape + (num_quality_classes,) + (num_states,))
    for i in range(num_states):
        per_state_classes[noise_mags <= low_thresholds[i], 0, i] = 1
        per_state_classes[(noise_mags > low_thresholds[i]) &
                          (noise_mags <= high_thresholds[i]), 1, i] = 1
        per_state_classes[noise_mags > high_thresholds[i], 2, i] = 1
    # weighted average over states: fractional state labels yield
    # fractional quality labels
    quality_classes = np.einsum('ijk,ik->ij', per_state_classes, state_labels)
    return quality_classes
def get_data(f, train_test_split=0.9,
             dat_key='sensor', label_key='state',
             resample=True, seed=None,
             low_thresholds=None, high_thresholds=None):
    '''
    Reads in the subregion data and converts it to a format useful for training
    Note that the data is shuffled after reading in.
    inputs:
        f: one of:
            str path to .npz file containing cropped data
            dict of cropped data.
        train_test_split: float fraction of data to use for training.
        dat_key: string key of the feature array within each example dict.
        resample: bool specifying whether to resample data to get even state
            representation.
        seed: int random seed for file shuffling.
        label_key: string key for data used for the label. One of:
            'data_quality', 'noise_mag_factor', 'state'.
        low_thresholds: list of noise levels to use for high/moderate signal
            to noise ratio threshold (only used for 'data_quality').
        high_thresholds: list of noise levels to use for moderate/low signal
            to noise ratio threshold (only used for 'data_quality').
    outputs:
        train_data: np.ndarray of training data.
        train_labels: np.ndarray of training labels.
        eval_data: np.ndarray of evaluation data.
        eval_labels: np.ndarray of evaluation labels.
    '''
    # treat f as path, or if TypeError treat as dict.
    try:
        dict_of_dicts = np.load(f, allow_pickle = True)
        file_on_disk = True
    except TypeError:
        dict_of_dicts = f
        file_on_disk = False
    # shuffle example keys so the train/eval split is random
    files = list(dict_of_dicts.keys())
    random.Random(seed).shuffle(files)
    inp = []
    oup_state = []
    # if we want a nonstate label load it so we can resample
    if label_key!='state':
        oup_labels = []
    else:
        oup_labels = None
        train_labels = None
        eval_labels = None
    # if label is noise class, we need to get noise mag labels first
    # then process to turn the mag into a class label
    if label_key == 'data_quality':
        data_quality = True
        label_key = 'noise_mag_factor'
    else:
        data_quality = False
    for file in files:
        # for compressed data, file is the key of the dict of dicts
        if file_on_disk:
            data_dict = dict_of_dicts[file].item()
        else:
            data_dict = dict_of_dicts[file]
        dat = data_dict[dat_key]
        # generates a list of arrays; one channel axis appended for the CNN
        inp.append(dat.reshape(config.SUB_SIZE,config.SUB_SIZE,1))
        oup_state.append(data_dict['state']) # generates a list of arrays
        if oup_labels is not None:
            oup_labels.append(data_dict[label_key])
    inp = np.array(inp) # converts the list to np.array
    oup_state = np.array(oup_state) # converts the list to np.array
    if oup_labels is not None:
        oup_labels = np.array(oup_labels)
    # split data into train and evaluation data/labels
    n_samples = inp.shape[0]
    print("Total number of samples :", n_samples)
    n_train = int(train_test_split * n_samples)
    train_data = inp[:n_train]
    print("Training data info:", train_data.shape)
    train_states = oup_state[:n_train]
    if oup_labels is not None:
        train_labels = oup_labels[:n_train]
    eval_data = inp[n_train:]
    print("Evaluation data info:", eval_data.shape)
    eval_states = oup_state[n_train:]
    if oup_labels is not None:
        eval_labels = oup_labels[n_train:]
    # convert noise mag to class before resampling/getting noise mags if
    # needed because resampling doesnt return state labels
    if data_quality:
        train_labels = noise_mag_to_class(
            train_states, train_labels,
            low_thresholds=low_thresholds,
            high_thresholds=high_thresholds,
        )
        eval_labels = noise_mag_to_class(
            eval_states, eval_labels,
            low_thresholds=low_thresholds,
            high_thresholds=high_thresholds,
        )
    # resample to make state representation even
    if resample:
        train_data, train_labels = resample_data(
            train_data, train_states, train_labels)
        eval_data, eval_labels = resample_data(
            eval_data, eval_states, eval_labels)
    elif not resample and label_key=='state':
        train_labels = train_states
        eval_labels = eval_states
    # expand dim of labels to make sure that they have proper shape
    # NOTE(review): np.expand_dims does not operate in place and its return
    # value is discarded here, so these two calls are no-ops — presumably
    # `train_labels = np.expand_dims(...)` was intended; confirm whether
    # downstream code relies on the current 1-D shape before fixing.
    if oup_labels is not None and len(train_labels.shape)==1:
        np.expand_dims(train_labels, 1)
    if oup_labels is not None and len(eval_labels.shape)==1:
        np.expand_dims(eval_labels, 1)
    return train_data, train_labels, eval_data, eval_labels
## preprocess functions
def gradient(x):
    '''
    Return the horizontal gradient of an array; thin wrapper around
    np.gradient(). Note that x -> axis=1 and y -> axis=0.
    input:
        x: numpy ndarray to differentiate
    output:
        numpy ndarray containing the gradient along axis 1 (x direction).
    '''
    grad_x = np.gradient(x, axis=1)
    return grad_x
def apply_threshold(x, threshold_val=10, threshold_to=0):
    '''
    Suppress small values of a numpy ndarray, in place.
    Args:
        x = numpy array with data to be filtered (modified in place and
            also returned)
        threshold_val = percentile of the data; entries below the absolute
            value of that percentile are replaced
        threshold_to = value the suppressed entries are set to
    '''
    cutoff = np.abs(np.percentile(x.flatten(), threshold_val))
    below_cutoff = x < cutoff
    x[below_cutoff] = threshold_to
    return x
def apply_clipping(x, clip_val=3, clip_to='clip_val'):
    '''
    Clip input symmetrically at clip_val number of std devs from the mean.
    Do not zscore norm x, but apply thresholds using normed x.

    Args:
        x: numpy ndarray to clip (the input is not modified; a clipped
            copy is returned).
        clip_val: number of standard deviations beyond which values are
            clipped.
        clip_to: 'clip_val' to pin outliers to the threshold value itself,
            or 'mean' to replace outliers with the mean.
    Returns:
        Clipped copy of x.
    Raises:
        KeyError: if clip_to is not one of 'clip_val' or 'mean'.
    '''
    mean = np.mean(x)
    std = np.std(x)
    if clip_to.lower() == 'clip_val':
        # equivalent to masking norm_x beyond +/-clip_val: values outside
        # [mean - clip_val*std, mean + clip_val*std] are pinned to the bound
        x_clipped = np.clip(x, -clip_val * std + mean, clip_val * std + mean)
    elif clip_to.lower() == 'mean':
        x_clipped = np.copy(x)
        norm_x = (x - mean) / std
        x_clipped[np.abs(norm_x) > clip_val] = mean
    else:
        raise KeyError('"clip_to" option not valid: ' + str(clip_to) +
                       ' Valid options: clip_val, mean')
    return x_clipped
def autoflip_skew(data):
    '''
    Automatically flip the sign of a numpy ndarray based on the skew of
    its values (effective for gradient data). The array is multiplied by
    the sign of its skewness, so a zero-skew input yields all zeros.
    '''
    flip_sign = np.sign(scipy_skew(np.ravel(data)))
    return flip_sign * data
def zscore_norm(x):
    '''
    Z-score normalize a numpy ndarray: subtract the mean, divide by the
    standard deviation.
    '''
    mean, std = x.mean(), x.std()
    return (x - mean) / std
| 35.982673 | 81 | 0.65323 |
fb5eea86a746925440911830c3d41121077c7f7a | 472 | py | Python | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/star/star_needs_assignment_target_py35.py | ciskoinch8/vimrc | 5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8 | [
"MIT"
] | 463 | 2015-01-15T08:17:42.000Z | 2022-03-28T15:10:20.000Z | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/star/star_needs_assignment_target_py35.py | ciskoinch8/vimrc | 5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8 | [
"MIT"
] | 52 | 2015-01-06T02:43:59.000Z | 2022-03-14T11:15:21.000Z | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/star/star_needs_assignment_target_py35.py | ciskoinch8/vimrc | 5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8 | [
"MIT"
] | 249 | 2015-01-07T22:49:49.000Z | 2022-03-18T02:32:06.000Z | """
Test PEP 0448 -- Additional Unpacking Generalizations
https://www.python.org/dev/peps/pep-0448/
"""
# pylint: disable=superfluous-parens, unnecessary-comprehension
UNPACK_TUPLE = (*range(4), 4)
UNPACK_LIST = [*range(4), 4]
UNPACK_SET = {*range(4), 4}
UNPACK_DICT = {'a': 1, **{'b': '2'}}
UNPACK_DICT2 = {**UNPACK_DICT, "x": 1, "y": 2}
UNPACK_DICT3 = {**{'a': 1}, 'a': 2, **{'a': 3}}
UNPACK_IN_COMP = {elem for elem in (*range(10))} # [star-needs-assignment-target]
| 29.5 | 81 | 0.641949 |
fb5ffc354d2d854524531b1d4f70227336db8f87 | 238 | py | Python | src/ikazuchi/errors.py | t2y/ikazuchi | 7023111e92fa47360c50cfefd1398c554475f2c6 | [
"Apache-2.0"
] | null | null | null | src/ikazuchi/errors.py | t2y/ikazuchi | 7023111e92fa47360c50cfefd1398c554475f2c6 | [
"Apache-2.0"
] | null | null | null | src/ikazuchi/errors.py | t2y/ikazuchi | 7023111e92fa47360c50cfefd1398c554475f2c6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
| 19.833333 | 44 | 0.693277 |
fb607c62040621d2cd1122da4b43413ea79de0be | 4,331 | py | Python | engine/resources.py | gerizim16/MP2_GRP19 | 591fbb47fec6c5471d4e63151f494641452b4cb7 | [
"CC0-1.0"
] | 1 | 2020-09-25T02:46:00.000Z | 2020-09-25T02:46:00.000Z | engine/resources.py | gerizim16/MP2_GRP19 | 591fbb47fec6c5471d4e63151f494641452b4cb7 | [
"CC0-1.0"
] | null | null | null | engine/resources.py | gerizim16/MP2_GRP19 | 591fbb47fec6c5471d4e63151f494641452b4cb7 | [
"CC0-1.0"
] | null | null | null | import pyglet
print('Loading resources')
def center_image(image):
    """Move an image's anchor point to its geometric center."""
    half_width = image.width / 2
    half_height = image.height / 2
    image.anchor_x = half_width
    image.anchor_y = half_height
# Tell pyglet where to find the resources
pyglet.resource.path = ['./resources', './resources/backgrounds']
pyglet.resource.reindex()
images = list()
# Load the three main resources and get them to draw centered
tank_body_img = pyglet.resource.image('tank_body.png')
images.append(tank_body_img)
tank_head_img = pyglet.resource.image('tank_head.png')
images.append(tank_head_img)
boxlife_img = pyglet.resource.image('boxlife.png')
images.append(boxlife_img)
boxlife_dead_img = pyglet.resource.image('boxlife_dead.png')
images.append(boxlife_dead_img)
wheel_img = pyglet.resource.image('wheel.png')
images.append(wheel_img)
thread_img = pyglet.resource.image('thread.png')
images.append(thread_img)
motorbike_chassis_img = pyglet.resource.image('motorbike_chassis.png')
images.append(motorbike_chassis_img)
mb_wheel_img = pyglet.resource.image('mb_wheel.png')
images.append(mb_wheel_img)
mb_holder_img = pyglet.resource.image('mb_holder.png')
images.append(mb_holder_img)
vbv_chassis_img = pyglet.resource.image('vbv_chassis.png')
images.append(vbv_chassis_img)
vbv_wheels_img = pyglet.resource.image('vbv_wheels.png')
images.append(vbv_wheels_img)
vbv_platform_img = pyglet.resource.image('vbv_platform.png')
images.append(vbv_platform_img)
vb_net_img = pyglet.resource.image('vb_net.png')
images.append(vb_net_img)
vb_ball_img = pyglet.resource.image('vb_ball.png')
images.append(vb_ball_img)
game1_button_img = pyglet.resource.image('game1.png')
images.append(game1_button_img)
game1_button_hover_img = pyglet.resource.image('game1_hover.png')
images.append(game1_button_hover_img)
game2_button_img = pyglet.resource.image('game2.png')
images.append(game2_button_img)
game2_button_hover_img = pyglet.resource.image('game2_hover.png')
images.append(game2_button_hover_img)
game3_button_img = pyglet.resource.image('game3.png')
images.append(game3_button_img)
game3_button_hover_img = pyglet.resource.image('game3_hover.png')
images.append(game3_button_hover_img)
game1_hs_button_img = pyglet.resource.image('game1_hs.png')
images.append(game1_hs_button_img)
game1_hs_button_hover_img = pyglet.resource.image('game1_hs_hover.png')
images.append(game1_hs_button_hover_img)
game2_hs_button_img = pyglet.resource.image('game2_hs.png')
images.append(game2_hs_button_img)
game2_hs_button_hover_img = pyglet.resource.image('game2_hs_hover.png')
images.append(game2_hs_button_hover_img)
menu_button_img = pyglet.resource.image('menu.png')
images.append(menu_button_img)
gravity_button_img = pyglet.resource.image('gravity.png')
images.append(gravity_button_img)
fullscreen_button_img = pyglet.resource.image('fullscreen.png')
images.append(fullscreen_button_img)
restart_button_img = pyglet.resource.image('restart_button.png')
images.append(restart_button_img)
enter_button_img = pyglet.resource.image('enter_button.png')
images.append(enter_button_img)
enter_button_hover_img = pyglet.resource.image('enter_button_hover.png')
images.append(enter_button_hover_img)
circle_meter_img = pyglet.resource.image('circle_meter.png')
images.append(circle_meter_img)
pointer_img = pyglet.resource.image('pointer.png')
images.append(pointer_img)
finishflag_img = pyglet.resource.image('finishflag.png')
images.append(finishflag_img)
goal_meter_img = pyglet.resource.image('goal_meter.png')
images.append(goal_meter_img)
bg_goal_meter_img = pyglet.resource.image('bg_goal_meter.png')
images.append(bg_goal_meter_img)
background_img = pyglet.resource.image('background.png')
images.append(background_img)
for image in images:
center_image(image)
# load backgrounds
# each entry of layer_counts is the number of parallax layers of one
# background set; images are named '<set_index>layer_<layer_index>.png'
parallax_bgs = list()
layer_counts = (3, 2, 2, 2, 3, 4)
for bg_i, layer_count in enumerate(layer_counts):
    bg_set = list()
    for layer_i in range(layer_count):
        bg_set.append(pyglet.resource.image('{}layer_{}.png'.format(bg_i, layer_i)))
    # freeze each set as a tuple so it cannot be mutated later
    parallax_bgs.append(tuple(bg_set))
parallax_bgs = tuple(parallax_bgs)
# Load sfx without streaming (streaming=False preloads the sound so it
# can be played repeatedly/instantly; see pyglet.media.load)
engine_sfx = pyglet.media.load('./resources/engine_sfx.wav', streaming=False)
bg_music = pyglet.media.load('./resources/bg_music.wav', streaming=False)
print('Resource loading successful')
| 30.286713 | 84 | 0.803048 |
fb61944ce32d6c5a99c9e008904e108e5bfd2d77 | 2,517 | py | Python | search_for_similar_images__perceptual_hash__phash/ui/SelectDirBox.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 117 | 2015-12-18T07:18:27.000Z | 2022-03-28T00:25:54.000Z | search_for_similar_images__perceptual_hash__phash/ui/SelectDirBox.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 8 | 2018-10-03T09:38:46.000Z | 2021-12-13T19:51:09.000Z | search_for_similar_images__perceptual_hash__phash/ui/SelectDirBox.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 28 | 2016-08-02T17:43:47.000Z | 2022-03-21T08:31:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://github.com/gil9red/VideoStreamingWithEncryption/blob/37cf7f501460a286ec44a20db7b2403e8cb05d97/server_GUI_Qt/inner_libs/gui/SelectDirBox.py
import os
from PyQt5.QtWidgets import QWidget, QLineEdit, QLabel, QPushButton, QHBoxLayout, QFileDialog, QStyle
from PyQt5.QtCore import pyqtSignal
if __name__ == '__main__':
    # manual smoke test: show the widget and print each selected directory
    from PyQt5.QtWidgets import QApplication

    app = QApplication([])
    mw = SelectDirBox()
    # valueChanged is emitted with the chosen directory path
    mw.valueChanged.connect(
        lambda value: print(f'Selected directory: {value}')
    )
    mw.show()
    app.exec()
| 28.931034 | 156 | 0.684148 |
fb623387f3a45681b01c77927c90b4d6cbbd3ef4 | 3,503 | py | Python | ImageStabilizer.py | arthurscholz/UMN-AFM-Scripts | 86b4d11f9f70f378200899c930d1fa38ad393c66 | [
"MIT"
] | null | null | null | ImageStabilizer.py | arthurscholz/UMN-AFM-Scripts | 86b4d11f9f70f378200899c930d1fa38ad393c66 | [
"MIT"
] | null | null | null | ImageStabilizer.py | arthurscholz/UMN-AFM-Scripts | 86b4d11f9f70f378200899c930d1fa38ad393c66 | [
"MIT"
] | null | null | null | import numpy as np
import picoscript
import cv2
import HeightTracker
import atexit
print "Setting Parameters..."
zDacRange = 0.215 # Sensor specific number
windowSize = 3e-6 # window size in meters
windowBLHCX = 3.5e-6 # window bottom left hand corner X-axis in meters
windowBLHCY = 3.5e-6 # window bottom left hand corner Y-axis in meters
imageBuffer = 0 # buffer for tracking image (0-7)
binary = True
servoRange = picoscript.GetServoTopographyRange()
imageRange = servoRange * zDacRange
MAX_SHORT = 2**15
# Calculates scan offset for new image. Takes an image, roi template and bottom
# left hand corner
if __name__ == "__main__":
    # make sure the PicoScript connection is closed on any exit path
    atexit.register(picoscript.Disconnect)
    # background height-tracking thread
    heighttrack = HeightTracker.Track()
    heighttrack.start()
    RunStabilize = True
    print "Waiting for current scan to end..."
    picoscript.WaitForStatusScanning(False)
    print "Starting stabilization..."
    while True:
        if RunStabilize:
            Stabilize()
        # remember the approach position before starting the next scan
        position = picoscript.GetStatusApproachPosition()
        picoscript.ScanStartDown()
        # wait for the scan to start, then to finish
        picoscript.WaitForStatusScanning(True)
        picoscript.WaitForStatusScanning(False)
        # skip stabilization next pass if the approach position changed
        # during the scan (NOTE(review): presumably a change means a
        # re-approach occurred — confirm intent)
        RunStabilize = position == picoscript.GetStatusApproachPosition()
| 30.198276 | 96 | 0.663717 |
fb6262762a9edf203b455a0bed2e167c184ce590 | 1,947 | py | Python | Twitter Data Extraction.py | scottblender/twitter-covid-19-vaccine-analysis | a4d273b8b885fc33db075dfc910fa39645fa3789 | [
"MIT"
] | null | null | null | Twitter Data Extraction.py | scottblender/twitter-covid-19-vaccine-analysis | a4d273b8b885fc33db075dfc910fa39645fa3789 | [
"MIT"
] | null | null | null | Twitter Data Extraction.py | scottblender/twitter-covid-19-vaccine-analysis | a4d273b8b885fc33db075dfc910fa39645fa3789 | [
"MIT"
] | null | null | null | import snscrape.modules.twitter as sntwitter
import pandas as pd

# Column layout shared by both extracts below.
_COLUMNS = ['Datetime', 'Tweet Id', 'Text', 'Username', 'Verified', 'Followers Count',
            'Friends Count', 'Like Count', 'Retweet Count', 'Quote Count', 'Created',
            'Location', 'Display Name', 'Language', 'Coordinates', 'Place']


def _scrape_tweets(query, limit):
    """Scrape tweets matching `query` and return them as a DataFrame.

    Stops once the enumerate index exceeds `limit` (i.e. up to `limit` + 1
    tweets), preserving the off-by-one of the original copy-pasted loops.
    """
    rows = []
    for i, tweet in enumerate(sntwitter.TwitterSearchScraper(query).get_items()):
        if i > limit:
            break
        rows.append([tweet.date, tweet.id, tweet.content, tweet.user.username,
                     tweet.user.verified, tweet.user.followersCount,
                     tweet.user.friendsCount, tweet.likeCount, tweet.retweetCount,
                     tweet.quoteCount, tweet.user.created, tweet.user.location,
                     tweet.user.displayname, tweet.lang, tweet.coordinates,
                     tweet.place])
    return pd.DataFrame(rows, columns=_COLUMNS)


# First extract: tweets mentioning "covid vaccine" up to 2021-05-24.
tweets_df2 = _scrape_tweets('covid vaccine until:2021-05-24', 100000)
tweets_df2.to_csv('First Extract.csv')

# Second extract: same query up to 2021-05-13.
tweets_df3 = _scrape_tweets('covid vaccine until:2021-05-13', 100000)
tweets_df3.to_csv('Second Extract.csv')
| 69.535714 | 320 | 0.757062 |
fb62f3f5a5769a80a5d13a6f4d1ccd457d5f9675 | 138 | py | Python | hugs/__init__.py | Bogdanp/hugs | e7f16f15369fbe3da11d89882d76c7ef432f3709 | [
"BSD-3-Clause"
] | 22 | 2017-07-20T18:02:27.000Z | 2021-06-10T13:06:22.000Z | hugs/__init__.py | Bogdanp/hugs | e7f16f15369fbe3da11d89882d76c7ef432f3709 | [
"BSD-3-Clause"
] | null | null | null | hugs/__init__.py | Bogdanp/hugs | e7f16f15369fbe3da11d89882d76c7ef432f3709 | [
"BSD-3-Clause"
] | 2 | 2019-12-11T20:44:08.000Z | 2021-02-02T04:37:04.000Z | from .repository import Repository
from .manager import Manager
__all__ = ["Manager", "Repository", "__version__"]
__version__ = "0.2.0"
| 23 | 50 | 0.746377 |
fb62f6a8cb550f9476912173180ad44a3f1fe7d0 | 44,876 | py | Python | Source/Git/wb_git_project.py | barry-scott/git-workbench | 9f352875ab097ce5e45f85bf255b1fa02a196807 | [
"Apache-2.0"
] | 24 | 2017-03-23T06:24:02.000Z | 2022-03-19T13:35:44.000Z | Source/Git/wb_git_project.py | barry-scott/scm-workbench | 5607f12056f8245e0178816603e4922b7f5805ac | [
"Apache-2.0"
] | 14 | 2016-06-21T10:06:27.000Z | 2020-07-25T11:56:23.000Z | Source/Git/wb_git_project.py | barry-scott/git-workbench | 9f352875ab097ce5e45f85bf255b1fa02a196807 | [
"Apache-2.0"
] | 11 | 2016-12-25T12:36:16.000Z | 2022-03-23T14:25:25.000Z | '''
====================================================================
Copyright (c) 2016-2017 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_git_project.py
'''
import sys
import os
import pathlib
import wb_annotate_node
import wb_platform_specific
import wb_git_callback_server
import git
import git.exc
import git.index
GitCommandError = git.exc.GitCommandError
__callback_server = None
git_extra_environ = {}
| 33.741353 | 130 | 0.586661 |
fb64c2c423679d3b9a605145467c5cb4184c77b4 | 443 | py | Python | stackflowCrawl/spiders/stackoverflow/constants/consult.py | matheuslins/stackflowCrawl | b6adacc29bfc2e6210a24968f691a54854952b2e | [
"MIT"
] | null | null | null | stackflowCrawl/spiders/stackoverflow/constants/consult.py | matheuslins/stackflowCrawl | b6adacc29bfc2e6210a24968f691a54854952b2e | [
"MIT"
] | 2 | 2021-03-31T19:47:59.000Z | 2021-12-13T20:41:06.000Z | stackflowCrawl/spiders/stackoverflow/constants/consult.py | matheuslins/stackflowCrawl | b6adacc29bfc2e6210a24968f691a54854952b2e | [
"MIT"
] | null | null | null |
# XPath selectors for scraping the Stack Overflow jobs listing pages.
# NOTE(review): "XPAHS" looks like a typo for "XPATHS"; the name is kept
# because renaming would break any module that imports this constant.
XPAHS_CONSULT = {
    # Links to individual job postings on a results page.
    'jobs_urls': '//div[contains(@class, "listResults")]//div[contains(@data-jobid, "")]//h2//a/@href',
    # Text summarising how many results the search returned.
    'results': '//span[@class="description fc-light fs-body1"]//text()',
    # Last pagination item's label; reads "next" only when another page exists.
    'pagination_indicator': '//a[contains(@class, "s-pagination--item")][last()]//span[contains(text(), "next")]',
    # Relative URL of the last pagination item (the "next" page link).
    'pagination_url': '//a[contains(@class, "s-pagination--item")][last()]/@href',
}

# Entry point for the crawl.
START_URL = 'https://stackoverflow.com/jobs/'
| 44.3 | 114 | 0.629797 |
fb6699684cb8142168142ff3619e29cd5107fcf6 | 3,676 | py | Python | mainSample.py | snipeso/sample_psychopy | 332cd34cf2c584f9ba01302050964649dd2e5367 | [
"Linux-OpenIB"
] | null | null | null | mainSample.py | snipeso/sample_psychopy | 332cd34cf2c584f9ba01302050964649dd2e5367 | [
"Linux-OpenIB"
] | 3 | 2021-06-02T00:56:48.000Z | 2021-09-08T01:35:53.000Z | mainSample.py | snipeso/sample_psychopy | 332cd34cf2c584f9ba01302050964649dd2e5367 | [
"Linux-OpenIB"
] | null | null | null | import logging
import os
import random
import time
import datetime
import sys
import math
from screen import Screen
from scorer import Scorer
from trigger import Trigger
from psychopy import core, event, sound
from psychopy.hardware import keyboard
from pupil_labs import PupilCore
from datalog import Datalog
from config.configSample import CONF
#########################################################################
######################################
# Initialize screen, logger and inputs
# Debug log for the script itself; prints timestamped messages to the terminal.
logging.basicConfig(
    level=CONF["loggingLevel"],
    format='%(asctime)s-%(levelname)s-%(message)s',
) # This is a log for debugging the script, and prints messages to the terminal
# needs to be first, so that if it doesn't succeed, it doesn't freeze everything
eyetracker = PupilCore(ip=CONF["pupillometry"]
                       ["ip"], port=CONF["pupillometry"]["port"], shouldRecord=CONF["recordEyetracking"])
# Trigger sender on the configured serial device, with configured labels.
trigger = Trigger(CONF["trigger"]["serial_device"],
                  CONF["sendTriggers"], CONF["trigger"]["labels"])
screen = Screen(CONF)
# Data log written under output/<participant>_<session>/<date>/.
datalog = Datalog(OUTPUT_FOLDER=os.path.join(
    'output', CONF["participant"] + "_" + CONF["session"],
    datetime.datetime.now().strftime("%Y-%m-%d")), CONF=CONF) # This is for saving data
kb = keyboard.Keyboard()
mainClock = core.MonotonicClock() # starts clock for timestamping events
# Session sounds; questionnaireReminder is played at the end of the session
# (no playback of `alarm` is visible in this part of the script).
alarm = sound.Sound(os.path.join('sounds', CONF["instructions"]["alarm"]),
                    stereo=True)
questionnaireReminder = sound.Sound(os.path.join(
    'sounds', CONF["instructions"]["questionnaireReminder"]), stereo=True)
scorer = Scorer()
logging.info('Initialization completed')
#########################################################################
def quitExperimentIf(shouldQuit):
    """Abort the session when `shouldQuit` is True; otherwise do nothing.

    On abort: send the "Quit" trigger, report the score, stop the eyetracker
    recording, reset the trigger device and exit with status 2.
    """
    if not shouldQuit:
        return
    trigger.send("Quit")
    scorer.getScore()
    logging.info('quit experiment')
    eyetracker.stop_recording()
    trigger.reset()
    sys.exit(2)
def onFlip(stimName, logName):
    """Run on a screen flip: send the `stimName` trigger, restart the keyboard
    clock (so response times count from stimulus onset) and store the flip
    time under `logName` in the datalog."""
    trigger.send(stimName)
    kb.clock.reset() # this starts the keyboard clock as soon as stimulus appears
    datalog[logName] = mainClock.getTime()
##############
# Introduction
##############

# Display overview of session
screen.show_overview()
core.wait(CONF["timing"]["overview"])

# Optionally, display instructions; pressing 'q' aborts the session
if CONF["showInstructions"]:
    screen.show_instructions()
    key = event.waitKeys()
    quitExperimentIf(key[0] == 'q')

# Start the eyetracking recording for this participant/session/task
eyetracker.start_recording(os.path.join(
    CONF["participant"], CONF["session"], CONF["task"]["name"]))

# Blank screen for initial rest, bracketed by Start/EndBlank triggers
screen.show_blank()
logging.info('Starting blank period')

trigger.send("StartBlank")
core.wait(CONF["timing"]["rest"])
trigger.send("EndBlank")

# Cue start of the experiment
screen.show_cue("START")
trigger.send("Start")
core.wait(CONF["timing"]["cue"])

#################
# Main experiment
#################

# customize: replace this sample trial with the actual task
datalog["trialID"] = trigger.sendTriggerId()
eyetracker.send_trigger("Stim", {"id": 1, "condition": "sample"})
datalog["pupilSize"] = eyetracker.getPupildiameter()

# save data to file
datalog.flush()

###########
# Conclusion
###########

# End main experiment
screen.show_cue("DONE!")
trigger.send("End")
core.wait(CONF["timing"]["cue"])

# Blank screen for final rest, same trigger bracket as the initial one
screen.show_blank()
logging.info('Starting blank period')

trigger.send("StartBlank")
core.wait(CONF["timing"]["rest"])
trigger.send("EndBlank")

# Tear down: score, reset trigger device, stop eyetracking, remind about
# the questionnaire, then give the sound time to finish playing
logging.info('Finished')
scorer.getScore()
trigger.reset()
eyetracker.stop_recording()
questionnaireReminder.play()
core.wait(2)
| 24.344371 | 105 | 0.658052 |
fb67712141abf405660b20968e896ccaf386184f | 3,696 | py | Python | src/commercetools/services/inventory.py | labd/commercetools-python-sdk | d8ec285f08d56ede2e4cad45c74833f5b609ab5c | [
"MIT"
] | 15 | 2018-11-02T14:35:52.000Z | 2022-03-16T07:51:44.000Z | src/commercetools/services/inventory.py | lime-green/commercetools-python-sdk | 63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6 | [
"MIT"
] | 84 | 2018-11-02T12:50:32.000Z | 2022-03-22T01:25:54.000Z | src/commercetools/services/inventory.py | lime-green/commercetools-python-sdk | 63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6 | [
"MIT"
] | 13 | 2019-01-03T09:16:50.000Z | 2022-02-15T18:37:19.000Z | # DO NOT EDIT! This file is automatically generated
import typing
from commercetools.helpers import RemoveEmptyValuesMixin
from commercetools.platform.models.inventory import (
InventoryEntry,
InventoryEntryDraft,
InventoryEntryUpdate,
InventoryEntryUpdateAction,
InventoryPagedQueryResponse,
)
from commercetools.typing import OptionalListStr
from . import abstract, traits
| 30.545455 | 88 | 0.623106 |
fb685e91f9d3ddb25b69ea95c37b26cc21ab500f | 8,008 | py | Python | qmla/remote_model_learning.py | flynnbr11/QMD | ac8cfe1603658ee9b916452f29b99460ee5e3d44 | [
"MIT"
] | 9 | 2021-01-08T12:49:01.000Z | 2021-12-29T06:59:32.000Z | qmla/remote_model_learning.py | flynnbr11/QMD | ac8cfe1603658ee9b916452f29b99460ee5e3d44 | [
"MIT"
] | 2 | 2021-02-22T20:42:25.000Z | 2021-02-22T22:22:59.000Z | qmla/remote_model_learning.py | flynnbr11/QMD | ac8cfe1603658ee9b916452f29b99460ee5e3d44 | [
"MIT"
] | 9 | 2021-02-15T14:18:48.000Z | 2021-12-17T04:02:07.000Z | from __future__ import print_function # so print doesn't show brackets
import copy
import numpy as np
import time as time
import matplotlib.pyplot as plt
import pickle
import redis
import qmla.model_for_learning
import qmla.redis_settings
import qmla.logging
pickle.HIGHEST_PROTOCOL = 4
plt.switch_backend("agg")
__all__ = ["remote_learn_model_parameters"]
def remote_learn_model_parameters(
    name,
    model_id,
    branch_id,
    exploration_rule,
    qmla_core_info_dict=None,
    remote=False,
    host_name="localhost",
    port_number=6379,
    qid=0,
    log_file="rq_output.log",
):
    """
    Standalone function to perform Quantum Hamiltonian Learning on individual models.

    Used in conjunction with redis databases so this calculation can be
    performed without any knowledge of the QMLA instance.
    Given model ids and names are used to instantiate
    the ModelInstanceForLearning class, which is then used
    for learning the models parameters.
    QMLA info is unpickled from a redis databse, containing
    true operator, params etc.
    Once parameters are learned, we pickle the results to dictionaries
    held on a redis database which can be accessed by other actors.

    :param str name: model name string
    :param int model_id: unique model id
    :param int branch_id: QMLA branch where the model was generated
    :param str exploration_rule: string corresponding to a unique exploration strategy,
        used by get_exploration_class to generate a
        ExplorationStrategy (or subclass) instance.
    :param dict qmla_core_info_dict: crucial data for QMLA, such as number
        of experiments/particles etc. Default None: core info is stored on the
        redis database so can be retrieved there on a server; if running locally,
        can be passed to save pickling.
    :param bool remote: whether QMLA is running remotely via RQ workers.
    :param str host_name: name of host server on which redis database exists.
    :param int port_number: this QMLA instance's unique port number,
        on which redis database exists.
    :param int qid: QMLA id, unique to a single instance within a run.
        Used to identify the redis database corresponding to this instance.
    :param str log_file: Path of the log file.
    """
    log_print(["Starting QHL for Model {} on branch {}".format(model_id, branch_id)])
    time_start = time.time()
    num_redis_retries = 5

    # Access databases
    redis_databases = qmla.redis_settings.get_redis_databases_by_qmla_id(
        host_name, port_number, qid
    )
    qmla_core_info_database = redis_databases["qmla_core_info_database"]
    learned_models_info_db = redis_databases["learned_models_info_db"]
    learned_models_ids = redis_databases["learned_models_ids"]
    active_branches_learning_models = redis_databases["active_branches_learning_models"]
    any_job_failed_db = redis_databases["any_job_failed"]

    if qmla_core_info_dict is not None:
        # for local runs, qmla_core_info_dict passed, with probe_dict included
        # in it.
        probe_dict = qmla_core_info_dict["probe_dict"]
    else:
        # remote runs retrieve settings and probes from the redis database
        qmla_core_info_dict = pickle.loads(qmla_core_info_database["qmla_settings"])
        probe_dict = pickle.loads(qmla_core_info_database["probes_system"])

    true_model_terms_matrices = qmla_core_info_dict["true_oplist"]
    qhl_plots = qmla_core_info_dict["qhl_plots"]
    plots_directory = qmla_core_info_dict["plots_directory"]
    long_id = qmla_core_info_dict["long_id"]

    # Generate model instance
    qml_instance = qmla.model_for_learning.ModelInstanceForLearning(
        model_id=model_id,
        model_name=name,
        qid=qid,
        log_file=log_file,
        exploration_rule=exploration_rule,
        host_name=host_name,
        port_number=port_number,
    )

    try:
        # Learn parameters
        update_timer_start = time.time()
        qml_instance.update_model()
        log_print(
            ["Time for update alone: {}".format(time.time() - update_timer_start)]
        )
        # Evaluate learned parameterisation
        # qml_instance.compute_likelihood_after_parameter_learning()
    except BaseException:
        # Previously NameError and BaseException were handled by two identical
        # blocks; a single handler keeps the behaviour: flag the failure on the
        # shared database so other actors can react, then re-raise.
        log_print(
            [
                "Model learning failed. QHL failed for model id {}. Setting job failure model_building_utilities.".format(
                    model_id
                )
            ]
        )
        any_job_failed_db.set("Status", 1)
        raise

    if qhl_plots:
        log_print(["Drawing plots for QHL"])
        try:
            if len(true_model_terms_matrices) == 1:  # TODO buggy
                qml_instance.plot_distribution_progression(
                    save_to_file=str(
                        plots_directory
                        + "qhl_distribution_progression_"
                        + str(long_id)
                        + ".png"
                    )
                )

                qml_instance.plot_distribution_progression(
                    renormalise=False,
                    save_to_file=str(
                        plots_directory
                        + "qhl_distribution_progression_uniform_"
                        + str(long_id)
                        + ".png"
                    ),
                )
        except BaseException:
            # plotting is best-effort; never fail the job over it
            pass

    # Throw away model instance; only need to store results.
    updated_model_info = copy.deepcopy(qml_instance.learned_info_dict())
    compressed_info = pickle.dumps(updated_model_info, protocol=4)

    # Store the (compressed) result set on the redis database,
    # retrying a few times before flagging the job as failed.
    for k in range(num_redis_retries):
        try:
            learned_models_info_db.set(str(model_id), compressed_info)
            log_print(
                [
                    "learned_models_info_db added to db for model {} after {} attempts".format(
                        str(model_id), k
                    )
                ]
            )
            break
        except Exception as e:
            if k == num_redis_retries - 1:
                log_print(
                    ["Model learning failed at the storage stage. Error: {}".format(e)]
                )
                any_job_failed_db.set("Status", 1)

    # Update databases to record that this model has finished.
    for k in range(num_redis_retries):
        try:
            active_branches_learning_models.incr(int(branch_id), 1)
            learned_models_ids.set(str(model_id), 1)
            log_print(
                [
                    "Updated model/branch learned on redis db {}/{}".format(
                        model_id, branch_id
                    )
                ]
            )
            break
        except Exception as e:
            if k == num_redis_retries - 1:
                log_print(["Model learning failed to update branch info. Error: ", e])
                any_job_failed_db.set("Status", 1)

    if remote:
        # results live on redis; free the local copies on the worker
        del updated_model_info
        del compressed_info
        del qml_instance
        log_print(
            [
                "Learned model; remote time:",
                str(np.round((time.time() - time_start), 2)),
            ]
        )
        return None
    else:
        return updated_model_info
| 35.591111 | 123 | 0.604146 |
fb69066846fdd4ee95649e7481b0ff3dce03d604 | 9,390 | py | Python | pylinex/quantity/CompiledQuantity.py | CU-NESS/pylinex | b6f342595b6a154e129eb303782e5268088f34d5 | [
"Apache-2.0"
] | null | null | null | pylinex/quantity/CompiledQuantity.py | CU-NESS/pylinex | b6f342595b6a154e129eb303782e5268088f34d5 | [
"Apache-2.0"
] | null | null | null | pylinex/quantity/CompiledQuantity.py | CU-NESS/pylinex | b6f342595b6a154e129eb303782e5268088f34d5 | [
"Apache-2.0"
] | null | null | null | """
File: pylinex/quantity/CompiledQuantity.py
Author: Keith Tauscher
Date: 3 Sep 2017
Description: File containing a class representing a list of Quantities to be
evaluated with the same (or overlapping) arguments. When it is
called, each underlying Quantity is called.
"""
from ..util import int_types, sequence_types, Savable, Loadable
from .Quantity import Quantity
from .AttributeQuantity import AttributeQuantity
from .ConstantQuantity import ConstantQuantity
from .FunctionQuantity import FunctionQuantity
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
def __add__(self, other):
"""
Appends other to this CompiledQuantity.
other: CompiledQuantity (or some other Quantity)
returns: if other is another CompiledQuantity, names quantity lists of
both CompiledQuantity
objects are combined
otherwise, other must be a Quantity object. It will be added
to the quantity list of this CompiledQuantity
(whose name won't change)
"""
if isinstance(other, CompiledQuantity):
new_name = '{0!s}+{1!s}'.format(self.name, other.name)
new_quantities = self.quantities + other.quantities
elif isinstance(other, Quantity):
new_name = self.name
new_quantities = [quantity for quantity in self.quantities]
new_quantities.append(other)
else:
raise TypeError("Only Quantity objects can be added to " +\
"compiled quantities.")
return CompiledQuantity(new_name, *new_quantities)
    def __call__(self, *args, **kwargs):
        """
        Finds the values of all of the Quantity objects underlying this object.

        args: list of arguments to pass on to the constituent Quantity objects
        kwargs: list of keyword arguments to pass on to the constituent
                Quantity objects

        returns: list containing the values of all of the Quantity objects
                 underlying this one, in the same order as self.quantities
        """
        return [quantity(*args, **kwargs) for quantity in self.quantities]
def __contains__(self, key):
"""
Checks if a quantity with the given name exists in this
CompiledQuantity.
key: string name of Quantity to check for
returns: True if there exists at least one Quantity named key
"""
return any([(quantity.name == key) for quantity in self.quantities])
    def fill_hdf5_group(self, group, exclude=[]):
        """
        Fills given hdf5 file group with data about this CompiledQuantity.

        group: hdf5 file group to fill with data about this CompiledQuantity
        exclude: sequence of quantity names to leave out of the group
                 (NOTE(review): mutable default, but it is only read here)
        """
        iquantity = 0
        group.attrs['name'] = self.name
        group.attrs['class'] = 'CompiledQuantity'
        for quantity in self.quantities:
            excluded = (quantity.name in exclude)
            savable = isinstance(quantity, Savable)
            if (not excluded) and savable:
                subgroup = group.create_group('quantity_{}'.format(iquantity))
                # NOTE(review): `savable` already guarantees this isinstance
                # check is True, so the else/TypeError branch below is
                # unreachable; non-Savable quantities are silently skipped.
                if isinstance(quantity, Savable):
                    quantity.fill_hdf5_group(subgroup)
                else:
                    raise TypeError("This CompiledQuantity cannot be saved " +\
                        "because it contains Quantity objects which cannot " +\
                        "be saved.")
            iquantity += 1
| 39.620253 | 79 | 0.583174 |
fb6a3d12a6011f130cb0dca995f5e63b890b615a | 184 | py | Python | Desafios/desafio48.py | ArthurBrito1/MY-SCRIPTS-PYTHON | 86967fe293715a705ac50e908d3369fa3257b5a2 | [
"MIT"
] | 1 | 2019-11-21T02:08:58.000Z | 2019-11-21T02:08:58.000Z | Desafios/desafio48.py | ArthurBrito1/MY-SCRIPTS-PYTHON | 86967fe293715a705ac50e908d3369fa3257b5a2 | [
"MIT"
] | null | null | null | Desafios/desafio48.py | ArthurBrito1/MY-SCRIPTS-PYTHON | 86967fe293715a705ac50e908d3369fa3257b5a2 | [
"MIT"
] | null | null | null | s = 0
# Sum and count the odd multiples of 3 in [1, 500]. The running sum is
# (re)initialised here so the block is self-contained.
s = 0
cont = 0
# Odd multiples of 3 in [1, 500] are exactly 3, 9, 15, ..., 495 (step 6),
# so walk them directly instead of testing every odd number with `% 3`.
for valor in range(3, 501, 6):
    s = s + valor
    cont = cont + 1
print('A soma de todos os {} valores solicitados {}'.format(cont, s))
| 20.444444 | 72 | 0.483696 |
fb6d1002b4582dd7b3bb100c3a8d3d43f66ca13b | 8,588 | py | Python | tests/test_k8s_cronjob.py | riconnon/kubernetes-py | 42a4537876985ed105ee44b6529763ba5d57c179 | [
"Apache-2.0"
] | null | null | null | tests/test_k8s_cronjob.py | riconnon/kubernetes-py | 42a4537876985ed105ee44b6529763ba5d57c179 | [
"Apache-2.0"
] | null | null | null | tests/test_k8s_cronjob.py | riconnon/kubernetes-py | 42a4537876985ed105ee44b6529763ba5d57c179 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
import time
import uuid
from kubernetes.K8sCronJob import K8sCronJob
from kubernetes.K8sPod import K8sPod
from kubernetes.models.v2alpha1.CronJob import CronJob
from kubernetes.K8sExceptions import CronJobAlreadyRunningException
from tests import _constants
from tests import _utils
from tests.BaseTest import BaseTest
| 35.053061 | 115 | 0.577317 |
fb6d4b7447432a3d88c9b0ce1e3fc024eb47008f | 9,054 | py | Python | code/nn/optimization.py | serced/rcnn | 1c5949c7ae5652a342b359e9defa72b2a6a6666b | [
"Apache-2.0"
] | 372 | 2016-01-26T02:41:51.000Z | 2022-03-31T02:03:13.000Z | code/nn/optimization.py | serced/rcnn | 1c5949c7ae5652a342b359e9defa72b2a6a6666b | [
"Apache-2.0"
] | 17 | 2016-08-23T17:28:02.000Z | 2020-05-11T15:54:50.000Z | code/nn/optimization.py | serced/rcnn | 1c5949c7ae5652a342b359e9defa72b2a6a6666b | [
"Apache-2.0"
] | 143 | 2016-01-13T05:33:33.000Z | 2021-12-10T16:48:42.000Z | '''
This file implements various optimization methods, including
-- SGD with gradient norm clipping
-- AdaGrad
-- AdaDelta
-- Adam
Transparent to switch between CPU / GPU.
@author: Tao Lei (taolei@csail.mit.edu)
'''
import random
from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import HostFromGpu
from theano.sandbox.cuda.var import CudaNdarraySharedVariable
from theano.printing import debugprint
from .initialization import default_mrng
def get_similar_subtensor(matrix, indexes, param_op):
    '''
        Mirror on `matrix` the subtensor operation that produced `param_op`.

        Only the two subtensor forms used in this codebase are handled:
        - AdvancedSubtensor1 (fancy indexing): `indexes` is an index array
          and the result is matrix[indexes];
        - otherwise a `[start:]` slice is assumed, with `indexes` as the
          start index.
    '''
    if isinstance(param_op.owner.op, T.AdvancedSubtensor1):
        return matrix[indexes]
    else:
        # indexes is start index in this case
        return matrix[indexes:]
| 37.882845 | 93 | 0.587254 |
fb704422d64cb57af346521f6ec226890742b70a | 883 | py | Python | projectlaika/looseWindow.py | TheSgtPepper23/LaikaIA | fc73aa17f74462b211c4a4159b663ed7c3cdb1bd | [
"MIT"
] | null | null | null | projectlaika/looseWindow.py | TheSgtPepper23/LaikaIA | fc73aa17f74462b211c4a4159b663ed7c3cdb1bd | [
"MIT"
] | null | null | null | projectlaika/looseWindow.py | TheSgtPepper23/LaikaIA | fc73aa17f74462b211c4a4159b663ed7c3cdb1bd | [
"MIT"
] | null | null | null | import sys
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5 import uic
from internationalization import LANGUAGE | 35.32 | 82 | 0.698754 |
fb705b9a868266542cc3de66cc5408c3859e9bcd | 617 | py | Python | messenger/helper/http/post.py | gellowmellow/python-messenger-bot | 01aaba569add8a6ed1349fc4774e3c7e64439dc0 | [
"MIT"
] | null | null | null | messenger/helper/http/post.py | gellowmellow/python-messenger-bot | 01aaba569add8a6ed1349fc4774e3c7e64439dc0 | [
"MIT"
] | null | null | null | messenger/helper/http/post.py | gellowmellow/python-messenger-bot | 01aaba569add8a6ed1349fc4774e3c7e64439dc0 | [
"MIT"
] | null | null | null | import requests | 30.85 | 87 | 0.593193 |
fb70843b616618f2f4796598ec6f5433ecaca7a0 | 2,424 | py | Python | scripts/Feature_Generation/calculateSREstrengthDifferenceBetweenWTandMUT_perCluster.py | JayKu4418/Computational-Experimental-framework-for-predicting-effects-of-variants-on-alternative-splicing | 4cb33b94bb8a864bc63fd5a3c96dae547914b20f | [
"CC0-1.0"
] | null | null | null | scripts/Feature_Generation/calculateSREstrengthDifferenceBetweenWTandMUT_perCluster.py | JayKu4418/Computational-Experimental-framework-for-predicting-effects-of-variants-on-alternative-splicing | 4cb33b94bb8a864bc63fd5a3c96dae547914b20f | [
"CC0-1.0"
] | null | null | null | scripts/Feature_Generation/calculateSREstrengthDifferenceBetweenWTandMUT_perCluster.py | JayKu4418/Computational-Experimental-framework-for-predicting-effects-of-variants-on-alternative-splicing | 4cb33b94bb8a864bc63fd5a3c96dae547914b20f | [
"CC0-1.0"
] | null | null | null | import argparse
import Functions_Features.functionsToDetermineMotifStrength as fdm
import pandas as pd
# Command-line interface.
parser = argparse.ArgumentParser()
parser.add_argument("-w","--tmpfolder",type=str,help="Input the upperlevel folder containing folder to Write to")
parser.add_argument("-t","--foldertitle",type=str,help="Input the title of the mutation file")
parser.add_argument("-m","--mutationfile",type=str,help="Input a mutation file")
parser.add_argument("-q","--quantile",nargs='?',default=0.95,type=float,help="Input a quantile value to set a threshold strength score for each motif cluster, default is 0.95")
args = parser.parse_args()
TMPfolder=args.tmpfolder
folderTitle=args.foldertitle
MUTATION_FILE=args.mutationfile
QUANTILE=args.quantile
# Number of motif clusters per splicing-regulatory-element class
# (E/I = exonic/intronic, SE/SS presumably enhancer/silencer — see filenames).
dict_NumCluster={"ESE":8,"ESS":7,"ISE":7,"ISS":8}
# Per-cluster strength threshold at the requested quantile.
strength_threshold_dict=fdm.createSREclusterThresholdDictionary(TMPfolder,dict_NumCluster,QUANTILE)
# Mutation file is tab-separated; first column is the mutation identifier.
with open(MUTATION_FILE) as f:
#with open("../data/MAPT_MUTs_ToTest.tsv") as f:
    mutations=[line.strip().split("\t") for line in f]
#mutsToIgnore=["Mut3","Mut10","Mut33"]
to_write = []
# Go through each mutation, collecting per-cluster score differences
# for every SRE class.
for mut in mutations:
    mutID=mut[0]
    ESE_motifStrengths = fdm.getSumOfMotifScoreDiffsPerSRECluster(TMPfolder,folderTitle,mutID,"ESE",dict_NumCluster["ESE"],strength_threshold_dict)
    ESS_motifStrengths = fdm.getSumOfMotifScoreDiffsPerSRECluster(TMPfolder,folderTitle,mutID,"ESS",dict_NumCluster["ESS"],strength_threshold_dict)
    ISE_motifStrengths = fdm.getSumOfMotifScoreDiffsPerSRECluster(TMPfolder,folderTitle,mutID,"ISE",dict_NumCluster["ISE"],strength_threshold_dict)
    ISS_motifStrengths = fdm.getSumOfMotifScoreDiffsPerSRECluster(TMPfolder,folderTitle,mutID,"ISS",dict_NumCluster["ISS"],strength_threshold_dict)
    motifStrengths_forMut = [mutID]+ESE_motifStrengths+ESS_motifStrengths+ISE_motifStrengths+ISS_motifStrengths
    to_write.append(motifStrengths_forMut)
# Write a TSV: header row of <class>_Cluster<n> columns, then one row per
# mutation. NOTE(review): "\t".join(i) requires the cluster scores to already
# be strings — TODO confirm what fdm returns.
with open(TMPfolder+MUTATION_FILE.split("/")[2].split(".")[0]+"_SREstrengthsDifferences_perCluster.tsv","w") as fw:
#with open(TMPfolder+motifType+"_MUTsToTest_ScoreDifferences.tsv","w") as fw:
    fw.write("MutID")
    for motifType in ["ESE","ESS","ISE","ISS"]:
        for cluster in range(1,dict_NumCluster[motifType]+1):
            fw.write("\t")
            fw.write(motifType+"_Cluster"+str(cluster))
    fw.write("\n")
    for i in to_write:
        fw.write("\t".join(i))
fw.write("\n") | 47.529412 | 176 | 0.757426 |
fb7155177920bff87f0b52005b1ab66f25856784 | 964 | py | Python | asciinema/asciicast.py | alex/asciinema | ff23896174c07719d3b2ace6320a193934a0ac71 | [
"MIT"
] | 1 | 2015-11-08T13:00:51.000Z | 2015-11-08T13:00:51.000Z | asciinema/asciicast.py | alex/asciinema | ff23896174c07719d3b2ace6320a193934a0ac71 | [
"MIT"
] | null | null | null | asciinema/asciicast.py | alex/asciinema | ff23896174c07719d3b2ace6320a193934a0ac71 | [
"MIT"
] | null | null | null | import os
import subprocess
import time
| 26.054054 | 60 | 0.520747 |
fb71ff02d4840f857aab0f05feb1b65683b1dfad | 88 | py | Python | software_engineering-project/project/admin.py | mahdiieh/software_engineering_PROJECT | f0c40ccf0452f6da83fbb253050848b49c4f6153 | [
"MIT"
] | null | null | null | software_engineering-project/project/admin.py | mahdiieh/software_engineering_PROJECT | f0c40ccf0452f6da83fbb253050848b49c4f6153 | [
"MIT"
] | null | null | null | software_engineering-project/project/admin.py | mahdiieh/software_engineering_PROJECT | f0c40ccf0452f6da83fbb253050848b49c4f6153 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Movie
# Make the Movie model manageable from the Django admin interface.
admin.site.register(Movie)
| 14.666667 | 32 | 0.806818 |
fb731e02437da6274e8e54fa035f9eeb59f57f17 | 17,808 | py | Python | datasets/alt/alt.py | NihalHarish/datasets | 67574a8d74796bc065a8b9b49ec02f7b1200c172 | [
"Apache-2.0"
] | 9 | 2021-04-26T14:43:52.000Z | 2021-11-08T09:47:24.000Z | datasets/alt/alt.py | NihalHarish/datasets | 67574a8d74796bc065a8b9b49ec02f7b1200c172 | [
"Apache-2.0"
] | null | null | null | datasets/alt/alt.py | NihalHarish/datasets | 67574a8d74796bc065a8b9b49ec02f7b1200c172 | [
"Apache-2.0"
] | 1 | 2021-03-24T18:33:32.000Z | 2021-03-24T18:33:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Asian Language Treebank (ALT) Project"""
from __future__ import absolute_import, division, print_function
import os
import datasets
_CITATION = """\
@inproceedings{riza2016introduction,
title={Introduction of the asian language treebank},
author={Riza, Hammam and Purwoadi, Michael and Uliniansyah, Teduh and Ti, Aw Ai and Aljunied, Sharifah Mahani and Mai, Luong Chi and Thang, Vu Tat and Thai, Nguyen Phuong and Chea, Vichet and Sam, Sethserey and others},
booktitle={2016 Conference of The Oriental Chapter of International Committee for Coordination and Standardization of Speech Databases and Assessment Techniques (O-COCOSDA)},
pages={1--6},
year={2016},
organization={IEEE}
}
"""
_HOMEPAGE = "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/"
_DESCRIPTION = """\
The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through the open collaboration for developing and using ALT. It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016). Then, it was developed under ASEAN IVO as described in this Web page. The process of building ALT began with sampling about 20,000 sentences from English Wikinews, and then these sentences were translated into the other languages. ALT now has 13 languages: Bengali, English, Filipino, Hindi, Bahasa Indonesia, Japanese, Khmer, Lao, Malay, Myanmar (Burmese), Thai, Vietnamese, Chinese (Simplified Chinese).
"""
_URLs = {
"alt": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/ALT-Parallel-Corpus-20191206.zip",
"alt-en": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/English-ALT-20170107.zip",
"alt-jp": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/Japanese-ALT-20170330.zip",
"alt-my": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/my-alt-190530.zip",
"alt-my-transliteration": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/my-en-transliteration.zip",
"alt-my-west-transliteration": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/western-myanmar-transliteration.zip",
"alt-km": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/km-nova-181101.zip",
}
_SPLIT = {
"train": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-train.txt",
"dev": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-dev.txt",
"test": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-test.txt",
}
_WIKI_URL = "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/ALT-Parallel-Corpus-20191206/URL.txt"
| 41.901176 | 706 | 0.500898 |
fb747216a8e33dd3a7c21862ae471a10d4ad3882 | 246 | py | Python | setup.py | whistlebee/awis-py | 01793c72b369e5e41c4d11b7ba67f71e47cee3ef | [
"Apache-2.0"
] | 1 | 2020-09-04T18:50:32.000Z | 2020-09-04T18:50:32.000Z | setup.py | whistlebee/awis-py | 01793c72b369e5e41c4d11b7ba67f71e47cee3ef | [
"Apache-2.0"
] | 1 | 2020-09-06T05:51:43.000Z | 2020-09-19T09:27:56.000Z | setup.py | whistlebee/awis-py | 01793c72b369e5e41c4d11b7ba67f71e47cee3ef | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
# Distribution metadata for the awis-py package.
setup(
    name='awis-py',
    version='0.0.2',
    url='https://github.com/whistlebee/awis-py',
    packages=find_packages(),
    install_requires=['requests', 'lxml'],
    python_requires='>=3.6'
)
| 20.5 | 48 | 0.658537 |
fb74a99668bbeadd3a3026fa2344b01e7a173609 | 17,918 | py | Python | src/validation/aux_functions.py | christianhilscher/dynasim | 881cfd3bd9d4b9291d289d703ec7da4a617a479a | [
"MIT"
] | null | null | null | src/validation/aux_functions.py | christianhilscher/dynasim | 881cfd3bd9d4b9291d289d703ec7da4a617a479a | [
"MIT"
] | 2 | 2020-08-06T10:01:59.000Z | 2021-05-17T12:14:44.000Z | src/validation/aux_functions.py | christianhilscher/dynasim | 881cfd3bd9d4b9291d289d703ec7da4a617a479a | [
"MIT"
] | 2 | 2020-08-19T06:52:09.000Z | 2021-12-10T08:57:54.000Z | import sys
from pathlib import Path
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource
from bokeh.io import export_png
from bokeh.plotting import figure
def gini_coefficient(x):
    """Compute Gini coefficient of array of values.

    Equivalent to summing |x_i - x_j| over all unordered pairs and dividing
    by n**2 times the mean, but uses the sorted-value identity

        sum_{i<j} |x_i - x_j| = sum_k (2k - n - 1) * x_(k)

    (x_(k) sorted ascending, k = 1..n), so it runs in O(n log n) instead of
    the O(n^2) pairwise loop.

    Args:
        x: 1-D sequence or numpy array of values.

    Returns:
        The Gini coefficient as a float (0 for perfectly equal values).
    """
    xs = np.sort(np.asarray(x, dtype=float))
    n = len(xs)
    # weights (2k - n - 1) reproduce the pairwise absolute-difference sum
    weights = 2.0 * np.arange(1, n + 1) - n - 1.0
    diffsum = np.sum(weights * xs)
    return diffsum / (n**2 * np.mean(x))
fb7583ba835e078f93bcf270c20be6606ba135d8 | 98 | py | Python | test.py | krithikV/vaccineregistration | 5d9aa52c7d8c9b196e23a73525dbaaf1e791e3e2 | [
"Apache-2.0"
] | null | null | null | test.py | krithikV/vaccineregistration | 5d9aa52c7d8c9b196e23a73525dbaaf1e791e3e2 | [
"Apache-2.0"
] | null | null | null | test.py | krithikV/vaccineregistration | 5d9aa52c7d8c9b196e23a73525dbaaf1e791e3e2 | [
"Apache-2.0"
] | null | null | null | from multiprocessing import Process
# Run the app's server loop in a separate process, then stop it.
# NOTE(review): `app` is not defined in this fragment — presumably imported
# elsewhere; confirm before running.
server = Process(target=app.run)# ...
# NOTE(review): terminate() is called without a preceding start().
server.terminate()
| 19.6 | 38 | 0.734694 |
fb7811b122904a7fba10519297aa03213ea6aa2e | 755 | py | Python | deep-rl/lib/python2.7/site-packages/OpenGL/GLES2/vboimplementation.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 210 | 2016-04-09T14:26:00.000Z | 2022-03-25T18:36:19.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/GLES2/vboimplementation.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 72 | 2016-09-04T09:30:19.000Z | 2022-03-27T17:06:53.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/GLES2/vboimplementation.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 64 | 2016-04-09T14:26:49.000Z | 2022-03-21T11:19:47.000Z | from OpenGL.arrays import vbo
from OpenGL.GLES2.VERSION import GLES2_2_0
from OpenGL.GLES2.OES import mapbuffer
Implementation.register()
| 35.952381 | 73 | 0.561589 |
fb79110d81706eec2a558890fdef6435d3ebf1bb | 8,457 | py | Python | tests/test_model.py | zeta1999/OpenJij | 0fe03f07af947f519a32ad58fe20423919651634 | [
"Apache-2.0"
] | null | null | null | tests/test_model.py | zeta1999/OpenJij | 0fe03f07af947f519a32ad58fe20423919651634 | [
"Apache-2.0"
] | null | null | null | tests/test_model.py | zeta1999/OpenJij | 0fe03f07af947f519a32ad58fe20423919651634 | [
"Apache-2.0"
] | 1 | 2021-04-09T09:13:56.000Z | 2021-04-09T09:13:56.000Z | import unittest
import numpy as np
import openjij as oj
import cxxjij as cj
# Run the test suite when executed directly.
# NOTE(review): no TestCase definitions are visible in this fragment —
# they appear to have been stripped from this copy of the file.
if __name__ == '__main__':
    unittest.main()
| 37.255507 | 126 | 0.578101 |
fb795f78cbeba316633c5e08d2759b19e5be4e41 | 18,273 | py | Python | src/bgp-acl-agent/bgp-acl-agent.py | jbemmel/srl-bgp-acl | 18d2b625ea24cc1a269513798e0e58e84f3eaca8 | [
"Apache-2.0"
] | 1 | 2022-01-25T16:03:02.000Z | 2022-01-25T16:03:02.000Z | src/bgp-acl-agent/bgp-acl-agent.py | jbemmel/srl-bgp-acl | 18d2b625ea24cc1a269513798e0e58e84f3eaca8 | [
"Apache-2.0"
] | null | null | null | src/bgp-acl-agent/bgp-acl-agent.py | jbemmel/srl-bgp-acl | 18d2b625ea24cc1a269513798e0e58e84f3eaca8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
import grpc
from datetime import datetime
import sys
import logging
import socket
import os
from ipaddress import ip_network, ip_address, IPv4Address
import json
import signal
import traceback
import re
from concurrent.futures import ThreadPoolExecutor
import sdk_service_pb2
import sdk_service_pb2_grpc
import config_service_pb2
# To report state back
import telemetry_service_pb2
import telemetry_service_pb2_grpc
from pygnmi.client import gNMIclient, telemetryParser
from logging.handlers import RotatingFileHandler
############################################################
## Agent will start with this name
############################################################
agent_name='bgp_acl_agent'  # name used when registering this agent with the SDK manager
acl_sequence_start=1000 # Default ACL sequence number base, can be configured
acl_count=0 # Number of ACL entries created/managed
############################################################
## Open a GRPC channel to connect to sdk_mgr on the dut
## sdk_mgr will be listening on 50053
############################################################
channel = grpc.insecure_channel('unix:///opt/srlinux/var/run/sr_sdk_service_manager:50053')
# channel = grpc.insecure_channel('127.0.0.1:50053')
metadata = [('agent_name', agent_name)]  # gRPC metadata identifying this agent
stub = sdk_service_pb2_grpc.SdkMgrServiceStub(channel)  # client stub for the SDK manager service
############################################################
## Subscribe to required event
## This proc handles subscription of: Interface, LLDP,
## Route, Network Instance, Config
############################################################
############################################################
## Subscribe to all the events that Agent needs
############################################################
def Subscribe_Notifications(stream_id):
    '''
    Agent will receive notifications to what is subscribed here.

    Logs and returns False when no stream_id is supplied; otherwise
    subscribes the stream to configuration-change ('cfg') notifications.
    '''
    if not stream_id:
        logging.info("Stream ID not sent.")
        return False
    # Subscribe to config changes, first
    # (Subscribe() is defined elsewhere in this file — not visible in this chunk)
    Subscribe(stream_id, 'cfg')
##################################################################
## Proc to process the config Notifications received by auto_config_agent
## At present processing config from js_path = .fib-agent
##################################################################
#
# Checks if this is an IPv4 or IPv6 address, and normalizes host prefixes
#
#
# Because it is possible that ACL entries get saved to 'startup', the agent may
# not have a full map of sequence number to peer_ip. Therefore, we perform a
# lookup based on IP address each time
# Since 'prefix' is not a key, we have to loop through all entries with a prefix
#
##################################################################################################
## This is the main proc where all processing for auto_config_agent starts.
## Agent registration, notification registration, Subscrition to notifications.
## Waits on the subscribed Notifications and once any config is received, handles that config
## If there are critical errors, Unregisters the fib_agent gracefully.
##################################################################################################
############################################################
## Gracefully handle SIGTERM signal
## When called, will unregister Agent and gracefully exit
############################################################
##################################################################################################
## Main from where the Agent starts
## Log file is written to: /var/log/srlinux/stdout/bgp_acl_agent.log
## Signals handled for graceful exit: SIGTERM
##################################################################################################
if __name__ == '__main__':
    # hostname = socket.gethostname()
    # Log to SR Linux's stdout directory, creating it if needed.
    stdout_dir = '/var/log/srlinux/stdout' # PyTEnv.SRL_STDOUT_DIR
    # Unregister the agent cleanly on SIGTERM (handler defined above).
    signal.signal(signal.SIGTERM, Exit_Gracefully)
    if not os.path.exists(stdout_dir):
        os.makedirs(stdout_dir, exist_ok=True)
    log_filename = f'{stdout_dir}/{agent_name}.log'
    # Rotate the agent log at ~3 MB, keeping 5 backups.
    logging.basicConfig(
        handlers=[RotatingFileHandler(log_filename, maxBytes=3000000,backupCount=5)],
        format='%(asctime)s,%(msecs)03d %(name)s %(levelname)s %(message)s',
        datefmt='%H:%M:%S', level=logging.INFO)
    logging.info("START TIME :: {}".format(datetime.now()))
    # Run() (defined elsewhere in this file) blocks until the agent exits.
    if Run():
        logging.info('Agent unregistered')
    else:
        logging.info('Should not happen')
| 46.614796 | 150 | 0.591747 |
fb7b00cf08bb11ddac21a6f98e99ab8e31ed948a | 1,003 | py | Python | tests/test_35_cfgrib_.py | shoyer/cfgrib | fe11a1b638b1779e51da87eaa30f1f12b2d0911c | [
"Apache-2.0"
] | null | null | null | tests/test_35_cfgrib_.py | shoyer/cfgrib | fe11a1b638b1779e51da87eaa30f1f12b2d0911c | [
"Apache-2.0"
] | null | null | null | tests/test_35_cfgrib_.py | shoyer/cfgrib | fe11a1b638b1779e51da87eaa30f1f12b2d0911c | [
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import os.path
import pytest
xr = pytest.importorskip('xarray') # noqa
from cfgrib import cfgrib_
# Paths to the GRIB fixtures bundled with the test suite.
SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), 'sample-data')
TEST_DATA = os.path.join(SAMPLE_DATA_FOLDER, 'era5-levels-members.grib')
| 31.34375 | 97 | 0.705882 |
fb7b2416a898fcaefa5788048c29968341cc3540 | 4,578 | py | Python | apps/stream_ty_gn_threaded/camera_processor.py | MichelleLau/ncappzoo | d222058e9bf16fbfa2670ff686d11fe521a976e0 | [
"MIT"
] | 1 | 2018-11-23T01:48:59.000Z | 2018-11-23T01:48:59.000Z | apps/stream_ty_gn_threaded/camera_processor.py | MichelleLau/ncappzoo | d222058e9bf16fbfa2670ff686d11fe521a976e0 | [
"MIT"
] | 1 | 2018-03-25T19:36:41.000Z | 2018-03-25T19:53:27.000Z | apps/stream_ty_gn_threaded/camera_processor.py | MichelleLau/ncappzoo | d222058e9bf16fbfa2670ff686d11fe521a976e0 | [
"MIT"
] | 1 | 2020-10-01T15:38:04.000Z | 2020-10-01T15:38:04.000Z | #! /usr/bin/env python3
# Copyright(c) 2017 Intel Corporation.
# License: MIT See LICENSE file in root directory.
# NPS
# pulls images from camera device and places them in a Queue
# if the queue is full will start to skip camera frames.
#import numpy as np
import cv2
import queue
import threading
import time
| 42 | 118 | 0.676933 |
fb7bbc55fb1c9399f6c93d62c5a66100a843787f | 175 | py | Python | examples/domainby.py | ipfinder/ip-finder-python | 48ba093801d244c12a4583c138d62c94355baf28 | [
"Apache-2.0"
] | 8 | 2019-07-12T22:20:49.000Z | 2022-03-01T09:03:58.000Z | examples/domainby.py | ipfinder/ip-finder-python | 48ba093801d244c12a4583c138d62c94355baf28 | [
"Apache-2.0"
] | 2 | 2019-08-29T23:24:57.000Z | 2021-02-01T15:15:16.000Z | examples/domainby.py | ipfinder/ip-finder-python | 48ba093801d244c12a4583c138d62c94355baf28 | [
"Apache-2.0"
] | 5 | 2019-07-12T23:01:03.000Z | 2021-07-07T11:11:44.000Z | import ipfinder
# NOTE(review): a real API token is hard-coded and committed to source —
# rotate it and load it from an environment variable instead.
con = ipfinder.config('f67f788f8a02a188ec84502e0dff066ed4413a85') # YOUR_TOKEN_GOES_HERE
# domain name
by = 'DZ';
dby = con.getDomainBy(by)
print(dby.all)
| 15.909091 | 88 | 0.771429 |
fb7be7756110402e4a2ea628f2c6bc51fd0dd0f4 | 139 | py | Python | manager.py | thangbk2209/pretraining_auto_scaling_ng | 0b98b311c75ec4b87b3e8168f93eeb53ed0d16f5 | [
"MIT"
] | null | null | null | manager.py | thangbk2209/pretraining_auto_scaling_ng | 0b98b311c75ec4b87b3e8168f93eeb53ed0d16f5 | [
"MIT"
] | null | null | null | manager.py | thangbk2209/pretraining_auto_scaling_ng | 0b98b311c75ec4b87b3e8168f93eeb53ed0d16f5 | [
"MIT"
] | null | null | null | """
Author: bkc@data_analysis
Project: autoencoder_ng
Created: 7/29/20 10:51
Purpose: START SCRIPT FOR AUTOENCODER_NG PROJECT
"""
| 19.857143 | 50 | 0.726619 |
fb7dc85f21a97ece3e0b036a3c4e6d6962f9001a | 49 | py | Python | netvisor_api_client/schemas/sales_payments/__init__.py | kiuru/netvisor-api-client | 2af3e4ca400497ace5a86d0a1807ec3b9c530cf4 | [
"MIT"
] | 5 | 2019-04-17T08:10:47.000Z | 2021-11-27T12:26:15.000Z | netvisor_api_client/schemas/sales_payments/__init__.py | kiuru/netvisor-api-client | 2af3e4ca400497ace5a86d0a1807ec3b9c530cf4 | [
"MIT"
] | 7 | 2019-06-25T17:02:50.000Z | 2021-07-21T10:14:38.000Z | netvisor_api_client/schemas/sales_payments/__init__.py | kiuru/netvisor-api-client | 2af3e4ca400497ace5a86d0a1807ec3b9c530cf4 | [
"MIT"
] | 10 | 2019-06-25T15:37:33.000Z | 2021-10-16T19:40:37.000Z | from .list import SalesPaymentListSchema # noqa
| 24.5 | 48 | 0.816327 |
fb7ec52a5b793917a80604774ec9ccdc87a89f1d | 858 | py | Python | derive_cubic.py | vuonglv1612/page-dewarp | 68063db040ba97964a22f68a6056467dacd2952f | [
"MIT"
] | 9 | 2021-05-15T21:18:03.000Z | 2022-03-31T16:56:36.000Z | derive_cubic.py | vuonglv1612/page-dewarp | 68063db040ba97964a22f68a6056467dacd2952f | [
"MIT"
] | 5 | 2021-04-23T17:59:23.000Z | 2021-05-23T17:03:40.000Z | derive_cubic.py | vuonglv1612/page-dewarp | 68063db040ba97964a22f68a6056467dacd2952f | [
"MIT"
] | 3 | 2022-02-22T12:09:49.000Z | 2022-03-16T21:33:49.000Z | import numpy as np
from matplotlib import pyplot as plt
from sympy import symbols, solve
# Symbolic names: cubic coefficients a..d, the variable x, and the two
# endpoint-slope parameters. This copy of the file had its non-ASCII
# identifiers (Greek alpha/beta) stripped by an encoding error, which left
# syntax errors; they are reconstructed here as ASCII `alpha` and `beta`.
a, b, c, d, x, alpha, beta = symbols("a b c d x alpha beta")

# polynomial function f(x) = a*x^3 + b*x^2 + c*x + d
f = a * x ** 3 + b * x ** 2 + c * x + d
fp = f.diff(x)  # derivative f'(x)

# evaluate both at x=0 and x=1
f0, f1 = [f.subs(x, i) for i in range(2)]
fp0, fp1 = [fp.subs(x, i) for i in range(2)]

# we want a, b, c, d such that the following conditions hold:
#
#   f(0)  = 0
#   f(1)  = 0
#   f'(0) = alpha
#   f'(1) = beta
S = solve([f0, f1, fp0 - alpha, fp1 - beta], [a, b, c, d])

# print the analytic solution and plot a graphical example
coeffs = []
num_alpha = 0.3
num_beta = -0.03
for key in [a, b, c, d]:
    print(key, "=", S[key])
    # string keys are sympified by subs(), matching the symbols above
    coeffs.append(S[key].subs(dict(alpha=num_alpha, beta=num_beta)))

xvals = np.linspace(0, 1, 101)
yvals = np.polyval(coeffs, xvals)
plt.plot(xvals, yvals)
plt.show()
| 21.45 | 61 | 0.581585 |
fb80aeb666b891b4a2d73d6188ae90784a764de1 | 13,614 | py | Python | test/test_admin.py | image72/browserscope | 44a63558ee376704d996851099bc7703128201cc | [
"Apache-2.0"
] | 22 | 2015-10-26T15:20:37.000Z | 2022-03-11T06:38:17.000Z | test/test_admin.py | image72/browserscope | 44a63558ee376704d996851099bc7703128201cc | [
"Apache-2.0"
] | 10 | 2016-01-22T18:46:19.000Z | 2019-07-19T12:49:51.000Z | test/test_admin.py | mcauer/browserscope | a9c0e1a250774f14689e06f93ad274d0b9d725e4 | [
"Apache-2.0"
] | 12 | 2015-10-17T09:40:44.000Z | 2019-06-08T19:54:36.000Z | #!/usr/bin/python2.5
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test admin_rankers."""
__author__ = 'slamm@google.com (Stephen Lamm)'
import datetime
import logging
import unittest
import mock_data
import settings
from django.test.client import Client
from django.utils import simplejson
from google.appengine.api import memcache
from google.appengine.ext import db
from categories import all_test_sets
from models import result_stats
from models.result import ResultParent
from models.result import ResultTime
from models.user_agent import UserAgent
from third_party import mox
from base import admin
# Canned Firefox user-agent strings keyed by version label, used as fixtures
# in this test module. NOTE(review): all three reuse the rv:1.9.0.6 token —
# confirm whether that is intentional for the versions being simulated.
USER_AGENT_STRINGS = {
  'Firefox 3.0.6': ('Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) '
                    'Gecko/2009011912 Firefox/3.0.6'),
  'Firefox 3.5': ('Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) '
                  'Gecko/2009011912 Firefox/3.5'),
  'Firefox 3.0.9': ('Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) '
                    'Gecko/2009011912 Firefox/3.0.9'),
}
| 36.40107 | 80 | 0.659027 |
fb8120f79917456a521cb4d10307f0c3faeada82 | 3,198 | py | Python | ml/rl/models/example_sequence_model.py | ccphillippi/Horizon | a18d8941f663eea55488781c804e6305a36f1b58 | [
"BSD-3-Clause"
] | 1 | 2020-07-30T06:15:20.000Z | 2020-07-30T06:15:20.000Z | ml/rl/models/example_sequence_model.py | ccphillippi/Horizon | a18d8941f663eea55488781c804e6305a36f1b58 | [
"BSD-3-Clause"
] | null | null | null | ml/rl/models/example_sequence_model.py | ccphillippi/Horizon | a18d8941f663eea55488781c804e6305a36f1b58 | [
"BSD-3-Clause"
] | 1 | 2019-06-05T15:52:18.000Z | 2019-06-05T15:52:18.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from dataclasses import dataclass
from typing import Dict, List
import torch
import torch.nn as nn
from ml.rl import types as rlt
from ml.rl.models.base import ModelBase
logger = logging.getLogger(__name__)
# Sequence history length. NOTE(review): its consumer is in code not visible
# in this fragment — confirm usage before changing.
HISTORY_LENGTH = 5
class ExampleSequenceModel(ModelBase):
| 29.072727 | 88 | 0.6601 |
fb8171ca82d4da70cd7cdba0a82012d267002dc5 | 4,227 | py | Python | compuG/transformacionesPCarte copia.py | alejoso76/Computaci-n-gr-fica | 474a498a328b8951aa0bfa1db2d0d1f3d8cc914b | [
"MIT"
] | null | null | null | compuG/transformacionesPCarte copia.py | alejoso76/Computaci-n-gr-fica | 474a498a328b8951aa0bfa1db2d0d1f3d8cc914b | [
"MIT"
] | null | null | null | compuG/transformacionesPCarte copia.py | alejoso76/Computaci-n-gr-fica | 474a498a328b8951aa0bfa1db2d0d1f3d8cc914b | [
"MIT"
] | null | null | null | import pygame
import math
#Dibuja triangulo y lo escala con el teclado
ANCHO=600
ALTO=480
def dibujarTriangulo(a, b, c, plano):
    """Draw a filled green triangle with vertices a, b, c on surface `plano`.

    Refreshes the pygame display and returns the vertices unchanged.
    """
    verde = [0, 255, 0]
    vertices = [a, b, c]
    pygame.draw.polygon(plano, verde, vertices)
    pygame.display.flip()
    return a, b, c
if __name__ == '__main__':
    pygame.init()
    pantalla=pygame.display.set_mode([ANCHO, ALTO]) # create the window
    o=[ANCHO/2, ALTO/2]  # plane origin at the screen centre
    # NOTE(review): dibujarPlano, mostrarPos, calcularPosPlano,
    # calcularPosPantalla, rotacionHoraria and rotacionAntiHoraria are
    # defined elsewhere in this file (not visible in this chunk).
    dibujarPlano(o, pantalla)
    pygame.display.flip()
    print 'Funciona'
    cont=0   # number of vertices clicked so far
    lista=[] # clicked vertices (screen coordinates)
    fin=False
    while not fin:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                fin=True
            if event.type == pygame.MOUSEBUTTONDOWN:
                # collect one triangle vertex per mouse click
                cont+=1
                lista.append(mostrarPos())
            if event.type == pygame.KEYDOWN:
                # right arrow: redraw plane, convert screen -> plane coords,
                # rotate clockwise, convert back and redraw the triangle
                if event.key==pygame.K_RIGHT:
                    pantalla.fill([0, 0, 0])
                    dibujarPlano(o, pantalla)
                    pygame.display.flip()
                    puntos=lista
                    puntos[0]=calcularPosPlano(o, puntos[0])
                    puntos[1]=calcularPosPlano(o, puntos[1])
                    puntos[2]=calcularPosPlano(o, puntos[2])
                    '''
                    print 'Puntos iniciales:'
                    print puntos
                    '''
                    puntos[0]=rotacionHoraria(puntos[0])
                    puntos[1]=rotacionHoraria(puntos[1])
                    puntos[2]=rotacionHoraria(puntos[2])
                    puntos[0]=calcularPosPantalla(o, puntos[0])
                    puntos[1]=calcularPosPantalla(o, puntos[1])
                    puntos[2]=calcularPosPantalla(o, puntos[2])
                    ''''
                    print 'Puntos finales:'
                    print puntos
                    '''
                    dibujarTriangulo(puntos[0], puntos[1], puntos[2], pantalla)
                # left arrow: same pipeline but rotate counter-clockwise
                if event.key==pygame.K_LEFT:
                    pantalla.fill([0, 0, 0])
                    dibujarPlano(o, pantalla)
                    pygame.display.flip()
                    puntos=lista
                    puntos[0]=calcularPosPlano(o, puntos[0])
                    puntos[1]=calcularPosPlano(o, puntos[1])
                    puntos[2]=calcularPosPlano(o, puntos[2])
                    '''
                    print 'Puntos iniciales:'
                    print puntos
                    '''
                    puntos[0]=rotacionAntiHoraria(puntos[0])
                    puntos[1]=rotacionAntiHoraria(puntos[1])
                    puntos[2]=rotacionAntiHoraria(puntos[2])
                    puntos[0]=calcularPosPantalla(o, puntos[0])
                    puntos[1]=calcularPosPantalla(o, puntos[1])
                    puntos[2]=calcularPosPantalla(o, puntos[2])
                    '''
                    print 'Puntos finales:'
                    print puntos
                    '''
                    dibujarTriangulo(puntos[0], puntos[1], puntos[2], pantalla)
            # once three vertices are collected, draw the triangle
            if cont==3:
                dibujarTriangulo(lista[0], lista[1], lista[2], pantalla)
                cont=0
                #lista=[]
| 32.767442 | 79 | 0.492548 |
fb81ef1eb1f00a9abc20cdf31b290c4e33722c10 | 10,692 | py | Python | eval.py | CLT29/pvse | bf5232148396ee5051564ef68a48538de0ddbc84 | [
"MIT"
] | 119 | 2019-06-18T19:10:04.000Z | 2022-03-25T02:24:26.000Z | eval.py | CLT29/pvse | bf5232148396ee5051564ef68a48538de0ddbc84 | [
"MIT"
] | 18 | 2019-08-28T09:32:24.000Z | 2021-09-08T15:25:01.000Z | eval.py | CLT29/pvse | bf5232148396ee5051564ef68a48538de0ddbc84 | [
"MIT"
] | 19 | 2019-07-11T08:19:18.000Z | 2022-02-07T12:59:05.000Z | from __future__ import print_function
import os, sys
import pickle
import time
import glob
import numpy as np
import torch
from model import PVSE
from loss import cosine_sim, order_sim
from vocab import Vocabulary
from data import get_test_loader
from logger import AverageMeter
from option import parser, verify_input_args
ORDER_BATCH_SIZE = 100
def encode_data(model, data_loader, use_gpu=False):
  """Encode all images and sentences loadable by data_loader.

  Runs the model in eval mode over every batch and collects image and text
  embeddings into two aligned buffers (torch tensors on GPU when use_gpu is
  True, numpy arrays otherwise), indexed by the dataset ids yielded by the
  loader.
  """
  # switch to evaluate mode
  model.eval()
  # the model may be wrapped in nn.DataParallel; reach through .module if so
  use_mil = model.module.mil if hasattr(model, 'module') else model.mil
  # numpy array to keep all the embeddings
  img_embs, txt_embs = None, None
  for i, data in enumerate(data_loader):
    img, txt, txt_len, ids = data
    if torch.cuda.is_available():
      img, txt, txt_len = img.cuda(), txt.cuda(), txt_len.cuda()
    # compute the embeddings
    img_emb, txt_emb, _, _, _, _ = model.forward(img, txt, txt_len)
    del img, txt, txt_len  # free batch inputs before allocating output buffers
    # initialize the output embeddings (lazily, once embedding sizes are known;
    # MIL embeddings are 3-D: dataset x K x dim)
    if img_embs is None:
      if use_gpu:
        emb_sz = [len(data_loader.dataset), img_emb.size(1), img_emb.size(2)] \
            if use_mil else [len(data_loader.dataset), img_emb.size(1)]
        img_embs = torch.zeros(emb_sz, dtype=img_emb.dtype, requires_grad=False).cuda()
        txt_embs = torch.zeros(emb_sz, dtype=txt_emb.dtype, requires_grad=False).cuda()
      else:
        emb_sz = (len(data_loader.dataset), img_emb.size(1), img_emb.size(2)) \
            if use_mil else (len(data_loader.dataset), img_emb.size(1))
        img_embs = np.zeros(emb_sz)
        txt_embs = np.zeros(emb_sz)
    # preserve the embeddings by copying from gpu and converting to numpy
    img_embs[ids] = img_emb if use_gpu else img_emb.data.cpu().numpy().copy()
    txt_embs[ids] = txt_emb if use_gpu else txt_emb.data.cpu().numpy().copy()
  return img_embs, txt_embs
def i2t(images, sentences, nreps=1, npts=None, return_ranks=False, order=False, use_gpu=False):
  """
  Images->Text (Image Annotation)
  Images: (nreps*N, K) matrix of images
  Captions: (nreps*N, K) matrix of sentences

  Each image appears nreps times (once per ground-truth caption). For every
  distinct image the captions are ranked by similarity and the best rank
  among its nreps ground-truth captions is recorded. Returns
  (R@1, R@5, R@10, median rank, mean rank); when return_ranks is True the
  per-image ranks and top-1 retrieved indices are returned as well.
  """
  if use_gpu:
    assert not order, 'Order embedding not supported in GPU mode'
  if npts is None:
    npts = int(images.shape[0] / nreps)
  index_list = []
  ranks, top1 = np.zeros(npts), np.zeros(npts)
  for index in range(npts):
    # Get query image
    im = images[nreps * index]
    im = im.reshape((1,) + im.shape)
    # Compute scores
    if use_gpu:
      # GPU path: similarity by matrix multiply; for 3-D (multi-instance)
      # embeddings take the max over all K x K embedding-pair similarities
      if len(sentences.shape) == 2:
        sim = im.mm(sentences.t()).view(-1)
      else:
        _, K, D = im.shape
        sim_kk = im.view(-1, D).mm(sentences.view(-1, D).t())
        sim_kk = sim_kk.view(im.size(0), K, sentences.size(0), K)
        sim_kk = sim_kk.permute(0,1,3,2).contiguous()
        sim_kk = sim_kk.view(im.size(0), -1, sentences.size(0))
        sim, _ = sim_kk.max(dim=1)
        sim = sim.flatten()
    else:
      if order:
        # order-embedding similarities are computed one batch of
        # ORDER_BATCH_SIZE query images at a time, then sliced per image
        if index % ORDER_BATCH_SIZE == 0:
          mx = min(images.shape[0], nreps * (index + ORDER_BATCH_SIZE))
          im2 = images[nreps * index:mx:nreps]
          sim_batch = order_sim(torch.Tensor(im2).cuda(), torch.Tensor(sentences).cuda())
          sim_batch = sim_batch.cpu().numpy()
        sim = sim_batch[index % ORDER_BATCH_SIZE]
      else:
        sim = np.tensordot(im, sentences, axes=[2, 2]).max(axis=(0,1,3)).flatten() \
            if len(sentences.shape) == 3 else np.dot(im, sentences.T).flatten()
    if use_gpu:
      _, inds_gpu = sim.sort()
      inds = inds_gpu.cpu().numpy().copy()[::-1]  # indices by descending similarity
    else:
      inds = np.argsort(sim)[::-1]  # indices by descending similarity
    index_list.append(inds[0])
    # Score: best (lowest) rank among this image's nreps ground-truth captions
    rank = 1e20
    for i in range(nreps * index, nreps * (index + 1), 1):
      tmp = np.where(inds == i)[0][0]
      if tmp < rank:
        rank = tmp
    ranks[index] = rank
    top1[index] = inds[0]
  # Compute metrics: Recall@k as percentages; medr/meanr are 1-based ranks
  r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
  r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
  r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
  medr = np.floor(np.median(ranks)) + 1
  meanr = ranks.mean() + 1
  if return_ranks:
    return (r1, r5, r10, medr, meanr), (ranks, top1)
  else:
    return (r1, r5, r10, medr, meanr)
def t2i(images, sentences, nreps=1, npts=None, return_ranks=False, order=False, use_gpu=False):
  """
  Text->Images (Image Search)
  Images: (nreps*N, K) matrix of images
  Captions: (nreps*N, K) matrix of sentences

  Each image appears nreps times in `images`; a single copy per image is
  kept and, for every caption, all images are ranked by similarity. Returns
  (R@1, R@5, R@10, median rank, mean rank) over all nreps*npts captions;
  when return_ranks is True the per-caption ranks and top-1 retrieved
  indices are returned as well.
  """
  if use_gpu:
    assert not order, 'Order embedding not supported in GPU mode'
  if npts is None:
    npts = int(images.shape[0] / nreps)
  # keep a single copy of each image (they are repeated nreps times)
  if use_gpu:
    ims = torch.stack([images[i] for i in range(0, len(images), nreps)])
  else:
    ims = np.array([images[i] for i in range(0, len(images), nreps)])
  ranks, top1 = np.zeros(nreps * npts), np.zeros(nreps * npts)
  for index in range(npts):
    # Get query sentences
    queries = sentences[nreps * index:nreps * (index + 1)]
    # Compute scores
    if use_gpu:
      # GPU path: similarity by matrix multiply; for 3-D (multi-instance)
      # embeddings take the max over all K x K embedding-pair similarities
      if len(sentences.shape) == 2:
        sim = queries.mm(ims.t())
      else:
        sim_kk = queries.view(-1, queries.size(-1)).mm(ims.view(-1, ims.size(-1)).t())
        sim_kk = sim_kk.view(queries.size(0), queries.size(1), ims.size(0), ims.size(1))
        sim_kk = sim_kk.permute(0,1,3,2).contiguous()
        sim_kk = sim_kk.view(queries.size(0), -1, ims.size(0))
        sim, _ = sim_kk.max(dim=1)
    else:
      if order:
        # order-embedding similarities are computed one batch of
        # ORDER_BATCH_SIZE captions at a time, then sliced per query
        if nreps * index % ORDER_BATCH_SIZE == 0:
          mx = min(sentences.shape[0], nreps * index + ORDER_BATCH_SIZE)
          sentences_batch = sentences[nreps * index:mx]
          sim_batch = order_sim(torch.Tensor(images).cuda(),
                                torch.Tensor(sentences_batch).cuda())
          sim_batch = sim_batch.cpu().numpy()
        sim = sim_batch[:, (nreps * index) % ORDER_BATCH_SIZE:(nreps * index) % ORDER_BATCH_SIZE + nreps].T
      else:
        sim = np.tensordot(queries, ims, axes=[2, 2]).max(axis=(1,3)) \
            if len(sentences.shape) == 3 else np.dot(queries, ims.T)
    inds = np.zeros(sim.shape)
    for i in range(len(inds)):
      if use_gpu:
        _, inds_gpu = sim[i].sort()
        inds[i] = inds_gpu.cpu().numpy().copy()[::-1]  # descending similarity
      else:
        inds[i] = np.argsort(sim[i])[::-1]  # descending similarity
      # rank of the ground-truth image for this caption
      ranks[nreps * index + i] = np.where(inds[i] == index)[0][0]
      top1[nreps * index + i] = inds[i][0]
  # Compute metrics: Recall@k as percentages; medr/meanr are 1-based ranks
  r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
  r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
  r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
  medr = np.floor(np.median(ranks)) + 1
  meanr = ranks.mean() + 1
  if return_ranks:
    return (r1, r5, r10, medr, meanr), (ranks, top1)
  else:
    return (r1, r5, r10, medr, meanr)
if __name__ == '__main__':
  multi_gpu = torch.cuda.device_count() > 1
  args = verify_input_args(parser.parse_args())
  # NOTE(review): `opt` duplicates `args` and is unused in this chunk — verify.
  opt = verify_input_args(parser.parse_args())
  # load vocabulary used by the model
  with open('./vocab/%s_vocab.pkl' % args.data_name, 'rb') as f:
    vocab = pickle.load(f)
  args.vocab_size = len(vocab)
  # load model and options
  assert os.path.isfile(args.ckpt)
  model = PVSE(vocab.word2idx, args)
  if torch.cuda.is_available():
    model = torch.nn.DataParallel(model).cuda() if multi_gpu else model
    torch.backends.cudnn.benchmark = True
  model.load_state_dict(torch.load(args.ckpt))
  # evaluate (evalrank is defined elsewhere in this file)
  metrics = evalrank(model, args, split='test')
| 36.742268 | 107 | 0.607931 |
fb82fbf2f9c2c62b4641f184634a4204b573ebe0 | 13,286 | py | Python | paddlex/ppdet/modeling/heads/detr_head.py | xiaolao/PaddleX | 4fa58cd0e485418ba353a87414052bd8a19204e5 | [
"Apache-2.0"
] | 3,655 | 2020-03-28T09:19:50.000Z | 2022-03-31T13:28:39.000Z | paddlex/ppdet/modeling/heads/detr_head.py | BDGZhengzhou/PaddleX | a07b54c6eaa363cb8008b26cba83eed734c52044 | [
"Apache-2.0"
] | 829 | 2020-03-28T04:03:18.000Z | 2022-03-31T14:34:30.000Z | paddlex/ppdet/modeling/heads/detr_head.py | BDGZhengzhou/PaddleX | a07b54c6eaa363cb8008b26cba83eed734c52044 | [
"Apache-2.0"
] | 738 | 2020-03-28T03:56:46.000Z | 2022-03-31T13:11:03.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlex.ppdet.core.workspace import register
import pycocotools.mask as mask_util
from ..initializer import linear_init_, constant_
from ..transformers.utils import inverse_sigmoid
__all__ = ['DETRHead', 'DeformableDETRHead']
| 37.215686 | 110 | 0.553967 |
fb83c2db2b3565baeaebf20f605f38a7b225d465 | 3,911 | py | Python | python/raspberryPi.py | FirewallRobotics/Vinnie-2019 | 2bc74f9947d41960ffe06e39bfc8dbe321ef2ae1 | [
"BSD-3-Clause"
] | null | null | null | python/raspberryPi.py | FirewallRobotics/Vinnie-2019 | 2bc74f9947d41960ffe06e39bfc8dbe321ef2ae1 | [
"BSD-3-Clause"
] | null | null | null | python/raspberryPi.py | FirewallRobotics/Vinnie-2019 | 2bc74f9947d41960ffe06e39bfc8dbe321ef2ae1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import json
import time
import sys
#import numpy as np
import cv2
from cscore import CameraServer, VideoSource, CvSource, VideoMode, CvSink, UsbCamera
from networktables import NetworkTablesInstance
cap1 = cv2.VideoCapture(0)  # first USB camera
cap2 = cv2.VideoCapture(1)  # second USB camera
#HatchPanel = HatchPanelPipeline()
# NOTE(review): `team` is never assigned a real team number before
# startClientTeam() is called — confirm it is set for deployment.
team = None
ntinst = NetworkTablesInstance.getDefault()
ntinst.startClientTeam(team)
SmartDashBoardValues = ntinst.getTable('SmartDashboard')
# Main vision loop: pick a camera from the dashboard, track, display frames.
# NOTE(review): a truthy "Camera to Use" value always selects cap1 (reported
# as camera 0); the elif only runs when the first getNumber returned 0 or the
# key was absent — verify the intended selection logic.
while(True):
    # Capture frame-by-frame
    if SmartDashBoardValues.getNumber("Camera to Use", 0):
        ret, frame = cap1.read() #use camera 0
        SmartDashBoardValues.putNumber("Using Camera", 0)
    elif SmartDashBoardValues.getNumber("Camera to Use", 1):
        ret, frame = cap2.read() #use camera 1
        SmartDashBoardValues.putNumber("Using Camera", 1)
    else:
        print("No camera selected using camera 0")
        ret, frame = cap1.read() #found no value for camera to use, using cam 0
        SmartDashBoardValues.putNumber("Using Camera", 2)
    # Our operations on the frame come here
    # (Track() is defined elsewhere — not visible in this chunk)
    Track(frame, SmartDashBoardValues)
    cv2.imshow('frame',frame)
    #print(type(mask))
    #res = cv2.bitwise_and(frame,frame, mask=mask)
    #cv2.imshow('frame',frame)
    #cv2.imshow('mask',mask)
    #cv2.imshow('res',res)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
        break
# When everything done, release the capture
cap1.release()
cap2.release()
cv2.destroyAllWindows() | 36.212963 | 112 | 0.604449 |
fb8406595f2e94d2539b01b5e80bd41db3092f8b | 1,667 | py | Python | soccer_stats_calc/soccer_var_ana.py | steffens21/diesunddas | 35222ff2ddac0b115dfd2e5b6764c6878af8d228 | [
"MIT"
] | null | null | null | soccer_stats_calc/soccer_var_ana.py | steffens21/diesunddas | 35222ff2ddac0b115dfd2e5b6764c6878af8d228 | [
"MIT"
] | null | null | null | soccer_stats_calc/soccer_var_ana.py | steffens21/diesunddas | 35222ff2ddac0b115dfd2e5b6764c6878af8d228 | [
"MIT"
] | null | null | null | import sys
from collections import deque
import soccer_toolbox
import csv_tools
if __name__ == "__main__":
main()
| 29.245614 | 96 | 0.619076 |
fb8546895b2c19d3192fb1d824c0ca8782071aeb | 895 | py | Python | HelloWorldWebsite/searchTest/views.py | 404NotFound-401/DjangoTutorial | 8218b5308245b309c7cb36386306152378602b6d | [
"MIT"
] | null | null | null | HelloWorldWebsite/searchTest/views.py | 404NotFound-401/DjangoTutorial | 8218b5308245b309c7cb36386306152378602b6d | [
"MIT"
] | 10 | 2019-09-07T20:30:34.000Z | 2019-09-08T19:22:11.000Z | HelloWorldWebsite/searchTest/views.py | 404NotFound-401/DjangoTutorial | 8218b5308245b309c7cb36386306152378602b6d | [
"MIT"
] | 1 | 2019-09-08T19:38:54.000Z | 2019-09-08T19:38:54.000Z | from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.template import loader
from django.http import HttpResponse
from django.views import generic
from .models import Movie
from . import searchapi
from django.urls import reverse
# Create your views here.
| 28.870968 | 66 | 0.684916 |
fb8662bca964b254e5cebea057da8c25555e063b | 27,693 | py | Python | mapreduce/handlers.py | igeeker/v2ex | 9fa81f7c82aa7d162a924d357494b241eb8a6207 | [
"BSD-3-Clause"
] | 161 | 2019-07-23T06:53:45.000Z | 2022-03-24T01:07:19.000Z | mapreduce/handlers.py | igeeker/v2ex | 9fa81f7c82aa7d162a924d357494b241eb8a6207 | [
"BSD-3-Clause"
] | null | null | null | mapreduce/handlers.py | igeeker/v2ex | 9fa81f7c82aa7d162a924d357494b241eb8a6207 | [
"BSD-3-Clause"
] | 26 | 2019-08-05T06:09:38.000Z | 2021-07-08T02:05:13.000Z | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Defines executor tasks handlers for MapReduce implementation."""
# Disable "Invalid method name"
# pylint: disable-msg=C6409
import datetime
import logging
import math
import os
from mapreduce.lib import simplejson
import time
from google.appengine.api import memcache
from google.appengine.api.labs import taskqueue
from google.appengine.ext import db
from mapreduce import base_handler
from mapreduce import context
from mapreduce import quota
from mapreduce import model
from mapreduce import quota
from mapreduce import util
# TODO(user): Make this a product of the reader or in quotas.py
_QUOTA_BATCH_SIZE = 20
# The amount of time to perform scanning in one slice. New slice will be
# scheduled as soon as current one takes this long.
_SLICE_DURATION_SEC = 15
# Delay between consecutive controller callback invocations.
_CONTROLLER_PERIOD_SEC = 2
| 35.188056 | 80 | 0.680605 |
fb886601d83ea5836e86da12edc2cb8f001f8166 | 382 | py | Python | radiomicsfeatureextractionpipeline/backend/src/logic/entities/ct_series.py | Maastro-CDS-Imaging-Group/SQLite4Radiomics | e3a7afc181eec0fe04c18da00edc3772064e6758 | [
"Apache-2.0"
] | null | null | null | radiomicsfeatureextractionpipeline/backend/src/logic/entities/ct_series.py | Maastro-CDS-Imaging-Group/SQLite4Radiomics | e3a7afc181eec0fe04c18da00edc3772064e6758 | [
"Apache-2.0"
] | 6 | 2021-06-09T19:39:27.000Z | 2021-09-30T16:41:40.000Z | radiomicsfeatureextractionpipeline/backend/src/logic/entities/ct_series.py | Maastro-CDS-Imaging-Group/SQLite4Radiomics | e3a7afc181eec0fe04c18da00edc3772064e6758 | [
"Apache-2.0"
] | null | null | null | """
This module is used to represent a CTSeries object from the DICOMSeries table in the database.
Inherits SeriesWithImageSlices module.
"""
from logic.entities.series_with_image_slices import SeriesWithImageSlices
| 29.384615 | 99 | 0.780105 |
fb88b8dcfd3fd4a86eaad1ea35d9e6acff02b1b6 | 8,108 | py | Python | models/pointSIFT_pointnet_age.py | KelvinTao/pointSIFT_Age2 | b0684ee989b5f6f3dca25e9bbf15b3c5fd8cf1cf | [
"Apache-2.0"
] | null | null | null | models/pointSIFT_pointnet_age.py | KelvinTao/pointSIFT_Age2 | b0684ee989b5f6f3dca25e9bbf15b3c5fd8cf1cf | [
"Apache-2.0"
] | null | null | null | models/pointSIFT_pointnet_age.py | KelvinTao/pointSIFT_Age2 | b0684ee989b5f6f3dca25e9bbf15b3c5fd8cf1cf | [
"Apache-2.0"
] | null | null | null | import os
import sys
import tensorflow as tf
import tf_utils.tf_util as tf_util
from tf_utils.pointSIFT_util import pointSIFT_module, pointSIFT_res_module, pointnet_fp_module, pointnet_sa_module
def get_model(point_cloud, is_training, num_class, bn_decay=None, feature=None):
    """ Build the pointSIFT/PointNet++ network for whole-cloud classification.

    Despite the original "semantic segmentation" note, this variant discards
    the feature-propagation (upsampling) branch (kept commented out below) and
    ends in fully connected layers, so it predicts one logit vector per cloud.

    :param point_cloud: B x N x 3 tensor of input point coordinates.
    :param is_training: bool tensor controlling batch norm / dropout.
    :param num_class: size of the final logit vector.
    :param bn_decay: batch-norm momentum decay schedule (or None).
    :param feature: optional B x N x C per-point features (None -> xyz only).
    :return: (logits, end_points) where logits is B x num_class and
             end_points caches intermediate tensors (currently only 'l0_xyz').
    """
    end_points = {}
    l0_xyz = point_cloud
    l0_points = feature
    end_points['l0_xyz'] = l0_xyz
    # Stage c0: pointSIFT residual block on the raw cloud, then downsample
    # to 1024 points with a set-abstraction layer.
    # c0: 1024*128
    c0_l0_xyz, c0_l0_points, c0_l0_indices = pointSIFT_res_module(l0_xyz, l0_points, radius=0.15, out_channel=64, is_training=is_training, bn_decay=bn_decay, scope='layer0_c0', merge='concat')
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(c0_l0_xyz, c0_l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    # Stage c1: repeat at a coarser scale (larger radius), down to 256 points.
    # c1: 256*256
    c0_l1_xyz, c0_l1_points, c0_l1_indices = pointSIFT_res_module(l1_xyz, l1_points, radius=0.25, out_channel=128, is_training=is_training, bn_decay=bn_decay, scope='layer1_c0')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(c0_l1_xyz, c0_l1_points, npoint=256, radius=0.2, nsample=32, mlp=[128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    # Stage c2: two stacked pointSIFT residual blocks; their outputs are
    # concatenated and fused back to 512 channels with a 1x1 conv.
    # c2: 256*512
    c0_l2_xyz, c0_l2_points, c0_l2_indices = pointSIFT_res_module(l2_xyz, l2_points, radius=0.5, out_channel=256, is_training=is_training, bn_decay=bn_decay, scope='layer2_c0')
    c1_l2_xyz, c1_l2_points, c1_l2_indices = pointSIFT_res_module(c0_l2_xyz, c0_l2_points, radius=0.5, out_channel=512, is_training=is_training, bn_decay=bn_decay, scope='layer2_c1', same_dim=True)
    l2_cat_points = tf.concat([c0_l2_points, c1_l2_points], axis=-1)
    fc_l2_points = tf_util.conv1d(l2_cat_points, 512, 1, padding='VALID', bn=True, is_training=is_training, scope='layer2_conv_c2', bn_decay=bn_decay)
    # Stage c3: final set abstraction down to 64 points with 512 channels.
    # c3: 64*512
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(c1_l2_xyz, fc_l2_points, npoint=64, radius=0.4, nsample=32, mlp=[512,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    # Per-point channel reduction (64 points x 256 -> 64 points x 128), then
    # flatten to a single 8192-dim vector per cloud for the dense head.
    # FC layers:64*256->64*128---8192
    net = tf_util.conv1d(l3_points, 256, 1, padding='VALID', bn=True, is_training=is_training, scope='layer4_conv', bn_decay=bn_decay)
    #net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='layer5_conv', bn_decay=bn_decay)
    ##flatten:B*8192
    flat = tf.reshape(net, [-1,64*128])
    # Two 4096-unit dense layers with dropout, then the raw-logit output
    # layer (no activation; softmax is applied by the loss).
    ##dense layer:4096
    dense = tf_util.fully_connected(flat,4096,scope='layer6_dense',bn=True,bn_decay=bn_decay,is_training=is_training)
    dense = tf_util.fully_connected(dense,4096,scope='layer7_dense',bn=True,bn_decay=bn_decay,is_training=is_training)
    dense = tf_util.dropout(dense, keep_prob=0.5, is_training=is_training, scope='dp')
    logits = tf_util.fully_connected(dense,num_class,scope='layer8_dense',activation_fn=None,bn=True,bn_decay=bn_decay,is_training=is_training)#logits
    return logits, end_points
'''
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [512,512], is_training, bn_decay, scope='fa_layer2')
_, l2_points_1, _ = pointSIFT_module(l2_xyz, l2_points, radius=0.5, out_channel=512, is_training=is_training, bn_decay=bn_decay, scope='fa_layer2_c0')
_, l2_points_2, _ = pointSIFT_module(l2_xyz, l2_points, radius=0.5, out_channel=512, is_training=is_training, bn_decay=bn_decay, scope='fa_layer2_c1')
_, l2_points_3, _ = pointSIFT_module(l2_xyz, l2_points, radius=0.5, out_channel=512, is_training=is_training, bn_decay=bn_decay, scope='fa_layer2_c2')
l2_points = tf.concat([l2_points_1, l2_points_2, l2_points_3], axis=-1)
l2_points = tf_util.conv1d(l2_points, 512, 1, padding='VALID', bn=True, is_training=is_training, scope='fa_2_fc', bn_decay=bn_decay)
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,256], is_training, bn_decay, scope='fa_layer3')
_, l1_points_1, _ = pointSIFT_module(l1_xyz, l1_points, radius=0.25, out_channel=256, is_training=is_training, bn_decay=bn_decay, scope='fa_layer3_c0')
_, l1_points_2, _ = pointSIFT_module(l1_xyz, l1_points_1, radius=0.25, out_channel=256, is_training=is_training, bn_decay=bn_decay, scope='fa_layer3_c1')
l1_points = tf.concat([l1_points_1, l1_points_2], axis=-1)
l1_points = tf_util.conv1d(l1_points, 256, 1, padding='VALID', bn=True, is_training=is_training, scope='fa_1_fc', bn_decay=bn_decay)
l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')
_, l0_points, _ = pointSIFT_module(l0_xyz, l0_points, radius=0.1, out_channel=128, is_training=is_training, bn_decay=bn_decay, scope='fa_layer4_c0')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')
return net, end_points
'''
def get_loss(logits, labels, num_class, smpws=1):
    """Weighted softmax cross-entropy over a bank of per-bin binary heads.

    The flat logit vector is viewed as ``num_class // 2`` independent 2-way
    classifiers; the per-bin cross-entropies are summed per sample, weighted
    by ``smpws``, and averaged over the batch.

    :param logits: B x num_class network output (num_class = C * 2).
    :param labels: B x C x 2 one-hot targets per bin.
    :param num_class: total number of logits (twice the bin count C).
    :param smpws: per-sample weight(s), scalar or shape B.
    :return: scalar classification loss (also added to the 'losses' collection).
    """
    targets = tf.cast(labels, tf.float32)
    per_bin_logits = tf.reshape(logits, [-1, num_class // 2, 2])
    per_bin_xent = tf.nn.softmax_cross_entropy_with_logits(logits=per_bin_logits, labels=targets)
    per_sample = tf.reduce_sum(per_bin_xent, 1)
    classify_loss = tf.reduce_mean(tf.multiply(per_sample, smpws))
    # Record tensor shapes for TensorBoard debugging (same tags as before).
    for axis in range(3):
        tf.summary.scalar('part logits shape %d' % axis, per_bin_logits.shape[axis])
    for axis in range(3):
        tf.summary.scalar('labels shape %d' % axis, targets.shape[axis])
    tf.add_to_collection('losses', classify_loss)
    return classify_loss
def eval_pred(logits, input_labels, num_class, wt=1):
    """Decode per-bin binary heads into an age estimate and report MAE.

    Each of the ``num_class // 2`` bins emits a softmax probability of being
    above its threshold; summing those probabilities over bins gives the
    predicted value. The label tensor is decoded the same way.

    :param logits: B x num_class network output (num_class = C * 2).
    :param input_labels: B x C x 2 one-hot targets per bin.
    :param num_class: total number of logits (twice the bin count C).
    :param wt: per-sample weight(s) for the weighted MAE.
    :return: (pred, mae, mae_wt) — per-sample predictions, mean absolute
             error, and weighted mean absolute error.
    """
    labels_f = tf.cast(input_labels, tf.float32)
    weights = tf.cast(wt, tf.float32)
    bin_logits = tf.reshape(logits, [-1, num_class // 2, 2])
    # Column 0 of each softmaxed bin is the "above threshold" probability.
    over_prob = tf.map_fn(lambda x: x[:, 0], tf.nn.softmax(bin_logits))
    pred = tf.cast(tf.reduce_sum(over_prob, 1), tf.float32)
    truth = tf.reduce_sum(tf.map_fn(lambda x: x[:, 0], labels_f), 1)
    abs_err = tf.abs(pred - truth)
    mae_wt = tf.reduce_mean(tf.multiply(abs_err, weights))
    mae = tf.reduce_mean(abs_err)
    return pred, mae, mae_wt
'''
def get_loss(pred, label, smpw):
"""
:param pred: BxNxC
:param label: BxN
:param smpw: BxN
:return:
"""
classify_loss = tf.losses.sparse_softmax_cross_entropy(labels=label, logits=pred, weights=smpw)
tf.summary.scalar('classify loss', classify_loss)
tf.add_to_collection('losses', classify_loss)
return classify_loss
'''
| 57.503546 | 218 | 0.741737 |
fb89161fb05f2325ee9a0854f9561e3db343bc07 | 89 | py | Python | cwl_airflow_parser/operators/__init__.py | lrodri29/cwl-airflow-parser | 3854022fc7a5c62cfd92e93fdb7a97d528918239 | [
"Apache-2.0"
] | 14 | 2018-05-01T01:31:07.000Z | 2019-09-02T15:41:06.000Z | cwl_airflow_parser/operators/__init__.py | lrodri29/cwl-airflow-parser | 3854022fc7a5c62cfd92e93fdb7a97d528918239 | [
"Apache-2.0"
] | 1 | 2018-08-06T17:28:51.000Z | 2018-08-27T16:05:10.000Z | cwl_airflow_parser/operators/__init__.py | lrodri29/cwl-airflow-parser | 3854022fc7a5c62cfd92e93fdb7a97d528918239 | [
"Apache-2.0"
] | 8 | 2018-08-06T16:47:31.000Z | 2020-05-12T20:21:01.000Z | from .cwljobdispatcher import CWLJobDispatcher
from .cwljobgatherer import CWLJobGatherer | 44.5 | 46 | 0.898876 |
fb8b63ad2ffbee810610ac48848eca279fdeb691 | 47 | py | Python | primeiro programa/primeiro_programa.py | Cesario115/Ola-mundo | 2949ff2c9dc1b5f8bc70825072751b19920019af | [
"MIT"
] | null | null | null | primeiro programa/primeiro_programa.py | Cesario115/Ola-mundo | 2949ff2c9dc1b5f8bc70825072751b19920019af | [
"MIT"
] | null | null | null | primeiro programa/primeiro_programa.py | Cesario115/Ola-mundo | 2949ff2c9dc1b5f8bc70825072751b19920019af | [
"MIT"
] | null | null | null | print('='*50)
print("Ol mundo!")
print('='*50) | 15.666667 | 19 | 0.574468 |
fb8c8cf1deb8bca5c92c0e2fc8aa8a95783848a5 | 6,922 | py | Python | src/third_party/swiftshader/third_party/subzero/pydir/wasm-run-torture-tests.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | src/third_party/swiftshader/third_party/subzero/pydir/wasm-run-torture-tests.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | src/third_party/swiftshader/third_party/subzero/pydir/wasm-run-torture-tests.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #!/usr/bin/env python2
#===- subzero/wasm-run-torture-tests.py - Subzero WASM Torture Test Driver ===//
#
# The Subzero Code Generator
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===-----------------------------------------------------------------------===//
from __future__ import print_function
import argparse
import glob
import multiprocessing
import os
import Queue
import shutil
import StringIO
import sys
import threading
IGNORED_TESTS = set([
# The remaining tests are known waterfall failures
'20010122-1.c.wasm',
'20031003-1.c.wasm',
'20071018-1.c.wasm',
'20071120-1.c.wasm',
'20071220-1.c.wasm',
'20071220-2.c.wasm',
'20101011-1.c.wasm',
'alloca-1.c.wasm',
'bitfld-3.c.wasm',
'bitfld-5.c.wasm',
'builtin-bitops-1.c.wasm',
'conversion.c.wasm',
'eeprof-1.c.wasm',
'frame-address.c.wasm',
'pr17377.c.wasm',
'pr32244-1.c.wasm',
'pr34971.c.wasm',
'pr36765.c.wasm',
'pr39228.c.wasm',
'pr43008.c.wasm',
'pr47237.c.wasm',
'pr60960.c.wasm',
'va-arg-pack-1.c.wasm',
'20000717-5.c.wasm', # abort() (also works without emcc)
'20001203-2.c.wasm', # assert fail (works without emcc)
'20040811-1.c.wasm', # OOB trap
'20070824-1.c.wasm', # abort() (also works without emcc)
'arith-rand-ll.c.wasm', # abort() (works without emcc)
'arith-rand.c.wasm', # abort() (works without emcc)
'pr23135.c.wasm', # OOB trap (works without emcc)
'pr34415.c.wasm', # (empty output?)
'pr36339.c.wasm', # abort() (works without emcc)
'pr38048-2.c.wasm', # abort() (works without emcc)
'pr42691.c.wasm', # abort() (works without emcc)
'pr43220.c.wasm', # OOB trap (works without emcc)
'pr43269.c.wasm', # abort() (works without emcc)
'vla-dealloc-1.c.wasm', # OOB trap (works without emcc)
'20051012-1.c.wasm', # error reading binary
'921208-2.c.wasm', # error reading binary
'920501-1.c.wasm', # error reading binary
'call-trap-1.c.wasm', # error reading binary
'pr44942.c.wasm', # error reading binary
'920625-1.c.wasm', # abort() (also fails without emcc)
'931004-10.c.wasm', # abort() (also fails without emcc)
'931004-12.c.wasm', # abort() (also fails without emcc)
'931004-14.c.wasm', # abort() (also fails without emcc)
'931004-6.c.wasm', # abort() (also fails without emcc)
'pr38051.c.wasm', # (empty output?) (fails without emcc)
'pr38151.c.wasm', # abort() (fails without emcc)
'pr44575.c.wasm', # abort() (fails without emcc)
'strct-stdarg-1.c.wasm', # abort() (fails without emcc)
'strct-varg-1.c.wasm', # abort() (fails without emcc)
'va-arg-22.c.wasm', # abort() (fails without emcc)
'stdarg-3.c.wasm', # abort() (fails without emcc)
'pr56982.c.wasm', # missing setjmp (wasm.js check did not catch)
'20010605-2.c.wasm', # missing __netf2
'20020413-1.c.wasm', # missing __lttf2
'20030914-1.c.wasm', # missing __floatsitf
'20040709-1.c.wasm', # missing __netf2
'20040709-2.c.wasm', # missing __netf2
'20050121-1.c.wasm', # missing __floatsitf
'20080502-1.c.wasm', # missing __eqtf2
'920501-8.c.wasm', # missing __extenddftf2
'930513-1.c.wasm', # missing __extenddftf2
'930622-2.c.wasm', # missing __floatditf
'960215-1.c.wasm', # missing __addtf3
'960405-1.c.wasm', # missing __eqtf2
'960513-1.c.wasm', # missing __subtf3
'align-2.c.wasm', # missing __eqtf2
'complex-6.c.wasm', # missing __subtf3
'complex-7.c.wasm', # missing __netf2
'pr49218.c.wasm', # missing __fixsfti
'pr54471.c.wasm', # missing __multi3
'regstack-1.c.wasm', # missing __addtf3
'stdarg-1.c.wasm', # missing __netf2
'stdarg-2.c.wasm', # missing __floatsitf
'va-arg-5.c.wasm', # missing __eqtf2
'va-arg-6.c.wasm', # missing __eqtf2
'struct-ret-1.c.wasm', # missing __extenddftf2
])
# Command-line interface: -v for verbose output, --translate-only to skip
# running the translated binaries, and an optional explicit list of tests.
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--translate-only', action='store_true')
parser.add_argument('tests', nargs='*')
args = parser.parse_args()
# Directory where translated test artifacts are written (recreated each run).
OUT_DIR = "./build/wasm-torture"
# Shared counters/lists mutated by worker threads; guarded by results_lock.
results_lock = threading.Lock()
compile_count = 0
compile_failures = []
run_count = 0
run_failures = []
verbose = args.verbose
if len(args.tests) > 0:
test_files = args.tests
else:
test_files = glob.glob("./emwasm-torture-out/*.wasm")
if os.path.exists(OUT_DIR):
shutil.rmtree(OUT_DIR)
os.mkdir(OUT_DIR)
tasks = Queue.Queue()
for i in range(multiprocessing.cpu_count()):
t = threading.Thread(target=worker)
t.daemon = True
t.start()
for test_file in test_files:
tasks.put(test_file)
tasks.join()
if len(compile_failures) > 0:
print()
print("Compilation failures:")
print("=====================\n")
for f in compile_failures:
print(" \033[1;31m" + f + "\033[1;m")
if len(run_failures) > 0:
print()
print("Run failures:")
print("=============\n")
for f in run_failures:
print(" \033[1;33m" + f + "\033[1;m")
print("\n\033[1;32m{}\033[1;m / \033[1;33m{}\033[1;m / {} tests passed"
.format(run_count, compile_count - run_count,
run_count + len(compile_failures) + len(run_failures)))
| 30.095652 | 80 | 0.647501 |
fb8c8d25a3c49500542e0cea8201311268f861d1 | 4,455 | py | Python | game_vs_ai.py | fernandojosuece/Minimax_AI_connect4 | e0110b7d3d25494b7e950c078eacd1337ee14f17 | [
"MIT"
] | null | null | null | game_vs_ai.py | fernandojosuece/Minimax_AI_connect4 | e0110b7d3d25494b7e950c078eacd1337ee14f17 | [
"MIT"
] | null | null | null | game_vs_ai.py | fernandojosuece/Minimax_AI_connect4 | e0110b7d3d25494b7e950c078eacd1337ee14f17 | [
"MIT"
] | null | null | null | import numpy as np
import pygame
import sys
import math
import random
from board import Board
from ai import Minimax_AI
# function to draw the board in pygame
if __name__ == '__main__':
# colors for game
colors = {"blue": (0, 0, 255),
"black": (0, 0, 0),
"red": (255, 0, 0),
"yellow": (255, 255, 0)}
# size of board
ROW_COUNT = 6
COLUMN_COUNT = 7
# create board
board = Board(ROW_COUNT, COLUMN_COUNT)
# players
players = [1, 2]
# initialize AI
ai_depth = 6
ai_player = random.choice(players)
ai = Minimax_AI(ai_depth, ai_player, ROW_COUNT, COLUMN_COUNT)
# decide turns; if turn is 0 player moves first
if ai_player == 2:
turn = 0
else:
turn = 1
pygame.init()
SQUARESIZE = 100
width = COLUMN_COUNT * SQUARESIZE
height = (ROW_COUNT+1) * SQUARESIZE
size = (width, height)
RADIUS = int(SQUARESIZE/2 - 5)
screen = pygame.display.set_mode(size)
draw_board(board.status)
pygame.display.update()
myfont = pygame.font.SysFont("monospace", 75)
game_over = False
while not game_over:
# Ask for Player 1 Input
if turn == 0:
turn_over = False
while not turn_over:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.MOUSEMOTION:
pygame.draw.rect(
screen, colors["black"], (0, 0, width, SQUARESIZE))
posx = event.pos[0]
if turn == 0:
pygame.draw.circle(
screen, colors["red"], (posx, int(SQUARESIZE/2)), RADIUS)
else:
pygame.draw.circle(
screen, colors["yellow"], (posx, int(SQUARESIZE/2)), RADIUS)
pygame.display.update()
if event.type == pygame.MOUSEBUTTONDOWN:
pygame.draw.rect(
screen, colors["black"], (0, 0, width, SQUARESIZE))
# print(event.pos)
posx = event.pos[0]
col = int(math.floor(posx/SQUARESIZE))
if board.is_valid_location(col):
row = board.get_next_open_row(col)
board.insert_piece(row, col, 1)
turn_over = True
if board.is_winning_position(1):
label = myfont.render(
"You win!!", 1, colors["red"])
screen.blit(label, (40, 10))
game_over = True
draw_board(board.status)
# Ask for Player 2 Input
else:
col = ai.make_move(board.status)
if board.is_valid_location(col):
row = board.get_next_open_row(col)
board.insert_piece(row, col, 2)
if board.is_winning_position(2):
label = myfont.render(
"AI win!!", 1, colors["red"])
screen.blit(label, (40, 10))
game_over = True
draw_board(board.status)
turn += 1
turn = turn % 2
if game_over:
pygame.time.wait(3000)
| 32.05036 | 101 | 0.489787 |
fb8cf968314b4148ab23bd50a8ea481955bbd517 | 963 | py | Python | {{cookiecutter.project_slug}}/sources/app/config/settings/components/security.py | AsheKR/cookiecutter-django | d0402aefcc2eeaffa747faa7ef50ad97286bfcca | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/sources/app/config/settings/components/security.py | AsheKR/cookiecutter-django | d0402aefcc2eeaffa747faa7ef50ad97286bfcca | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/sources/app/config/settings/components/security.py | AsheKR/cookiecutter-django | d0402aefcc2eeaffa747faa7ef50ad97286bfcca | [
"BSD-3-Clause"
] | null | null | null | # https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = True
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
SECURE_HSTS_SECONDS = 31536000
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = True
| 53.5 | 86 | 0.805815 |
fb8cffb83eb9a76e5f50b56af16a0174fdb1dc32 | 12,118 | py | Python | dabest/bootstrap_tools.py | nitishkumarmishra/DABEST | 82490f587e9b0180f29baa2daf44aa86cc3f52aa | [
"BSD-3-Clause-Clear"
] | null | null | null | dabest/bootstrap_tools.py | nitishkumarmishra/DABEST | 82490f587e9b0180f29baa2daf44aa86cc3f52aa | [
"BSD-3-Clause-Clear"
] | null | null | null | dabest/bootstrap_tools.py | nitishkumarmishra/DABEST | 82490f587e9b0180f29baa2daf44aa86cc3f52aa | [
"BSD-3-Clause-Clear"
] | null | null | null | #!/usr/bin/python
# -*-coding: utf-8 -*-
# Author: Joses Ho
# Email : joseshowh@gmail.com
from __future__ import division
def jackknife_indexes(data):
    """Yield one index array per jackknife (leave-one-out) replicate.

    Adapted from the scikits.bootstrap package: for a data set Y of length
    n, the i-th jackknife sample J[i] is Y with the i-th point removed, so
    this returns a generator of n arrays, each of length n - 1.
    """
    import numpy as np
    all_idx = np.arange(0, len(data))
    return (np.delete(all_idx, i) for i in all_idx)
def bca(data, alphas, statfunction, ostat, reps):
    '''
    Subroutine called to calculate the BCa statistics.
    Borrowed heavily from scikits.bootstrap code.

    Computes bias-corrected and accelerated (BCa) quantile positions:
    the nominal quantiles in `alphas` are adjusted using the bootstrap
    bias correction z0 and the jackknife acceleration a, then mapped to
    integer indexes into the sorted bootstrap distribution.

    :param data: tuple of sample arrays; the jackknife iterates over data[0].
    :param alphas: array of nominal quantile levels (e.g. [0.025, 0.975]).
    :param statarray: bootstrap replicates of the statistic, shape (reps, ...).
    :param statfunction: callable computing the statistic from *data.
    :param ostat: the statistic evaluated on the original (full) data.
    :param reps: number of bootstrap replicates in statarray.
    :return: integer array of adjusted indexes into the bootstrap replicates.
    '''
    import warnings
    import numpy as np
    from scipy.stats import norm
    # NOTE: unused in-function imports (pandas, seaborn, numpy.random.randint)
    # removed; they added import cost and a hard seaborn dependency for nothing.

    # The bias correction value.
    z0 = norm.ppf( ( 1.0*np.sum(statarray < ostat, axis = 0) ) / reps )

    # Statistics of the jackknife distribution
    jackindexes = jackknife_indexes(data[0])
    jstat = [statfunction(*(x[indexes] for x in data))
             for indexes in jackindexes]
    jmean = np.mean(jstat,axis = 0)

    # Acceleration value: skewness of the jackknife distribution.
    a = np.divide(np.sum( (jmean - jstat)**3, axis = 0 ),
                  ( 6.0 * np.sum( (jmean - jstat)**2, axis = 0)**1.5 )
                  )
    if np.any(np.isnan(a)):
        nanind = np.nonzero(np.isnan(a))
        # Fix: the original adjacent string literals lacked separating spaces,
        # producing a run-together warning message.
        warnings.warn("Some acceleration values were undefined. "
                      "This is almost certainly because all values "
                      "for the statistic were equal. Affected "
                      "confidence intervals will have zero width and "
                      "may be inaccurate (indexes: {})".format(nanind))

    # Shift and rescale the nominal quantiles, then map them back through the
    # normal CDF to get the adjusted coverage levels.
    zs = z0 + norm.ppf(alphas).reshape(alphas.shape+(1,)*z0.ndim)
    avals = norm.cdf(z0 + zs/(1-a*zs))
    # Convert adjusted levels to integer positions in the sorted replicates;
    # nan_to_num guards against NaN acceleration values above.
    nvals = np.round((reps-1)*avals)
    nvals = np.nan_to_num(nvals).astype('int')
    return nvals
| 36.173134 | 104 | 0.605793 |
fb8db21fcb68449e4d69cc6ce7881b29676db85c | 864 | py | Python | Numbers/floor_tiles_cost.py | lucasc896/Projects | 01ec687b07e4b56554c89ecc244fe5979c489826 | [
"MIT"
] | null | null | null | Numbers/floor_tiles_cost.py | lucasc896/Projects | 01ec687b07e4b56554c89ecc244fe5979c489826 | [
"MIT"
] | null | null | null | Numbers/floor_tiles_cost.py | lucasc896/Projects | 01ec687b07e4b56554c89ecc244fe5979c489826 | [
"MIT"
] | null | null | null | import math as ma
# note all sizes in m^2
# all costs in pounds
if __name__ == "__main__":
vals = get_details()
vals['tile_area'] = 0.04 #0.2m squared tiles
print "\n > Total cost: %.2f\n" % get_cost(vals) | 23.351351 | 81 | 0.572917 |
fb8ef4de168793f84748b01a69155748193991bb | 2,579 | py | Python | pyflow/demo/cwdDemo/cwdDemo.py | quejebo/pyflow | 99718942f9ea4ac0ceacde17c8006068ef19f2c8 | [
"BSD-2-Clause"
] | 3 | 2019-05-29T23:01:51.000Z | 2020-02-20T21:36:55.000Z | pyflow/demo/cwdDemo/cwdDemo.py | quejebo/pyflow | 99718942f9ea4ac0ceacde17c8006068ef19f2c8 | [
"BSD-2-Clause"
] | null | null | null | pyflow/demo/cwdDemo/cwdDemo.py | quejebo/pyflow | 99718942f9ea4ac0ceacde17c8006068ef19f2c8 | [
"BSD-2-Clause"
] | 2 | 2020-10-31T00:49:40.000Z | 2021-04-28T18:56:40.000Z | #!/usr/bin/env python
#
# pyFlow - a lightweight parallel task engine
#
# Copyright (c) 2012-2017 Illumina, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
#
# demonstrate/test addTask() cwd option
#
import os.path
import sys
# add module path by hand
#
scriptDir=os.path.abspath(os.path.dirname(__file__))
sys.path.append(scriptDir+"/../../src")
from pyflow import WorkflowRunner
# all pyflow workflows are written into classes derived from
# pyflow.WorkflowRunner:
#
# Instantiate the workflow
#
wflow = CwdWorkflow()
# Run the worklow:
#
retval=wflow.run(mode="local")
sys.exit(retval)
| 29.643678 | 92 | 0.711904 |