hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1069ebd444cd41fde8b4f2543e607693b51a94b8 | 2,394 | py | Python | subsurface/geological_formats/segy_reader.py | andieie/subsurface | 96d3cc4a0ef6eb549d1ea9d32b71a1380a1383e7 | [
"Apache-2.0"
] | 55 | 2019-05-09T12:26:28.000Z | 2021-11-05T07:35:15.000Z | subsurface/geological_formats/segy_reader.py | RajdeepTarafder/subsurface | 1308bc2a1d8e803db1680a1300682a91fec8d5fe | [
"Apache-2.0"
] | 33 | 2019-05-09T16:28:19.000Z | 2022-03-30T13:40:21.000Z | subsurface/geological_formats/segy_reader.py | RajdeepTarafder/subsurface | 1308bc2a1d8e803db1680a1300682a91fec8d5fe | [
"Apache-2.0"
] | 14 | 2019-05-09T12:26:33.000Z | 2021-09-01T11:31:27.000Z | from typing import Union
from scipy.spatial.qhull import Delaunay
from shapely.geometry import LineString
from subsurface.structs.base_structures import StructuredData
import numpy as np
try:
import segyio
segyio_imported = True
except ImportError:
segyio_imported = False
def read_in_segy(filepath: str, coords=None) -> StructuredData:
    """Reader for seismic data stored in sgy/segy files

    Args:
        filepath (str): the path of the sgy/segy file
        coords (dict): If data is a numpy array coords provides the values for
        the xarray dimension. These dimensions are 'x', 'y' and 'z'

    Returns: a StructuredData object with data, the traces with samples written
    into an xr.Dataset, optionally with labels defined by coords

    Note:
        ``coords`` is currently unused by this implementation.
    """
    # Open as a context manager so the file handle is released even if
    # copying the traces raises; the previous version leaked it on error.
    with segyio.open(filepath, ignore_geometry=True) as segyfile:
        # data holds traces * (samples per trace) values
        data = np.asarray([np.copy(tr) for tr in segyfile.trace[:]])
    sd = StructuredData.from_numpy(data)
    return sd
def create_mesh_from_coords(coords: Union[dict, LineString],
                            zmin: Union[float, int], zmax: Union[float, int] = 0.0):
    """Creates a mesh for plotting StructuredData

    Args:
        coords (Union[dict, LineString]): the x and y, i.e. latitude and longitude,
            location of the traces of the seismic profile
        zmin (float): the location in z where the lowest sample was taken
        zmax (float): the maximum elevation of the seismic profile, by default 0.0

    Returns: vertices and faces for creating an UnstructuredData object
    """
    # isinstance (instead of type(...) ==) also accepts LineString subclasses.
    if isinstance(coords, LineString):
        # Keep only x/y even if the LineString carries z coordinates.
        xy = np.array(coords.coords)[:, :2]
    else:
        xy = np.array([coords['x'], coords['y']]).T
    n = len(xy)
    # duplicating the line, once with z=zmin and another with z=zmax values
    vertices = np.zeros((2 * n, 3))
    vertices[:n, :2] = xy
    vertices[:n, 2] = zmin
    vertices[n:, :2] = xy
    vertices[n:, 2] = zmax
    #  i+n --- i+n+1
    #  |\      |
    #  | \     |
    #  |  \    |
    #  |   \   |
    #  i  --- i+1
    # Triangulate in the (x, z) plane to produce the faces of the vertical section.
    tri = Delaunay(vertices[:, [0, 2]])
    faces = tri.simplices
    return vertices, faces
| 32.351351 | 129 | 0.629908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,046 | 0.436926 |
106b9963fc2887b968d35680d06f2e568d43f337 | 1,069 | py | Python | python_learning/basic_learning/lesson_interview.py | suncht/sun-python | b02cba75c142adc44438192577cae171cb4837d4 | [
"Apache-2.0"
] | null | null | null | python_learning/basic_learning/lesson_interview.py | suncht/sun-python | b02cba75c142adc44438192577cae171cb4837d4 | [
"Apache-2.0"
] | null | null | null | python_learning/basic_learning/lesson_interview.py | suncht/sun-python | b02cba75c142adc44438192577cae171cb4837d4 | [
"Apache-2.0"
] | null | null | null | #生成器的创建,区分迭代器、生成器、推导式、生成器表达式
l_01 = [x for x in range(10)] #列表推导式
print(l_01)
l_02 = (x for x in range(10)) #列表生成器表达式
print(l_02)
class Fib:
    """Infinite Fibonacci sequence implemented as a class-based iterator."""
    def __init__(self):
        self.prev = 0
        self.curr = 1
    def __iter__(self):  # Fib is an iterable because it implements __iter__  --> class/object based
        return self
    def __next__(self):  # Fib is an iterator because it implements __next__  --> class/object based
        value = self.curr
        self.curr += self.prev
        self.prev = value
        return value
def fib():
    """Yield the Fibonacci numbers 1, 1, 2, 3, 5, ... forever.

    fib() is a generator function; a generator is a special kind of
    iterator built from a function with ``yield``.
    """
    previous, current = 0, 1
    while True:
        yield current
        previous, current = current, current + previous
def islice(fib, start, end):
    """Yield the next ``end - start`` values pulled from the iterator *fib*.

    Note: unlike itertools.islice, *start* is not an offset — only the
    difference ``end - start`` matters.
    """
    for _ in range(start, end):
        yield next(fib)
f = fib()
l_03 = list(islice(f, 0, 10))  # take the first ten Fibonacci numbers
print(l_03)
#yield
print('--------------------')
y = 0
def g():
    """Generator yielding 0..9 while updating the module-level ``y``."""
    global y
    print('1')
    for i in range(0, 10):
        yield i
        y = 5 + i
        print(y)
func_g = g()
print(func_g.__next__())  # equivalent to next(func_g)
print(func_g.__next__())
print(func_g.__next__())
print(func_g.send(10))  # send() resumes like __next__; the sent value is
                        # discarded because ``yield i`` is used as a statement
print(func_g.__next__())
106bc619d04ec9ede221c61b7b392e024005d45b | 2,290 | py | Python | hack/generateChartOptions.py | deissnerk/external-dns-management | f1e003f04ad9d19af576d8b0d037e892b687c121 | [
"Apache-2.0"
] | null | null | null | hack/generateChartOptions.py | deissnerk/external-dns-management | f1e003f04ad9d19af576d8b0d037e892b687c121 | [
"Apache-2.0"
] | null | null | null | hack/generateChartOptions.py | deissnerk/external-dns-management | f1e003f04ad9d19af576d8b0d037e892b687c121 | [
"Apache-2.0"
] | null | null | null | #!/bin/python
# should be started from project base directory
# helper script to regenerate helm chart file: partial of charts/external-dns-management/templates/deployment.yaml
import re
import os
# Temporary dump of the manager's --help output, parsed below.
helpFilename = "/tmp/dns-controller-manager-help.txt"
# Build the binary, then keep only the long-option lines of its help text.
rc = os.system("make build-local && ./dns-controller-manager --help | grep ' --' > {}".format(helpFilename))
if rc != 0:
    exit(rc)
f = open(helpFilename,"r")  # NOTE(review): never closed; a with-block would be safer
options = f.read()
os.remove(helpFilename)
def toCamelCase(name):
    """Convert a dashed/dotted CLI option name to camelCase.

    Splits *name* on '.' and '-', joins the words in camelCase, then fixes
    the provider names so the trailing acronym is upper-cased ("DNS").

    The previous version shadowed the builtin ``str`` and repeated the
    "googleClouddns" replacement twice; the fixes are now table-driven.
    """
    words = re.split("[.-]", name)
    camel = ''.join(w.capitalize() for w in words)
    camel = camel[0].lower() + camel[1:]
    acronym_fixes = {
        "alicloudDns": "alicloudDNS",
        "azureDns": "azureDNS",
        "googleClouddns": "googleCloudDNS",
        "ingressDns": "ingressDNS",
        "serviceDns": "serviceDNS",
        "cloudflareDns": "cloudflareDNS",
        "infobloxDns": "infobloxDNS",
    }
    for old, new in acronym_fixes.items():
        camel = camel.replace(old, new)
    return camel
# Option names (exact matches and patterns) that must not become chart values.
excluded = {"name", "help", "identifier", "dry-run"}
excludedPattern = [re.compile(".*cache-dir$"), re.compile(".*blocked-zone$"), re.compile(".*remote-access-.+")]

def isExcluded(name):
    """Return True when the option *name* must be left out of the chart."""
    if not name or name in excluded:
        return True
    return any(pattern.match(name) for pattern in excludedPattern)
# First pass: emit the templated flag lines for deployment.yaml, one per
# non-excluded long option found in the captured --help output.
for line in options.split("\n"):
    m = re.match(r"\s+(?:-[^-]+)?--(\S+)\s", line)
    if m:
        name = m.group(1)
        if not isExcluded(name):
            camelCase = toCamelCase(name)
            txt = """      {{- if .Values.configuration.%s }}
        - --%s={{ .Values.configuration.%s }}
        {{- end }}""" % (camelCase, name, camelCase)
            print(txt)
# Defaults pre-filled into values.yaml; all other options are emitted
# commented out.
defaultValues = {
    "controllers": "all",
    "persistentCache": "false",
    "persistentCacheStorageSize": "1Gi",
    "persistentCacheStorageSizeAlicloud": "20Gi",
    "serverPortHttp": "8080",
    "ttl": 120,
}
print("configuration:")
# Second pass: emit the values.yaml "configuration" section.
for line in options.split("\n"):
    m = re.match(r"\s+(?:-[^-]+)?--(\S+)\s", line)
    if m:
        name = m.group(1)
        if not isExcluded(name):
            camelCase = toCamelCase(name)
            if camelCase in defaultValues:
                txt = "  %s: %s" % (camelCase, defaultValues[camelCase])
            else:
                txt = "# %s:" % camelCase
            print(txt)
| 30.533333 | 114 | 0.621397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 944 | 0.412227 |
106c3eefe5f3cdcf470da1774e9ab84ee3897c21 | 1,353 | py | Python | test.py | ttran1904/MDP | 1c44f79dba1ff33de3f9f791612a91f4b2fad4b9 | [
"MIT"
] | null | null | null | test.py | ttran1904/MDP | 1c44f79dba1ff33de3f9f791612a91f4b2fad4b9 | [
"MIT"
] | null | null | null | test.py | ttran1904/MDP | 1c44f79dba1ff33de3f9f791612a91f4b2fad4b9 | [
"MIT"
] | null | null | null | from MDP import MDP
import unittest
class MDPTestCase(unittest.TestCase):
    """Tests for MDP's empirical transition-probability estimation."""
    def test_small1(self):
        """One sequence with a single successor per state.

        The expected result contains no self-transitions even though the
        sequence repeats each symbol, i.e. a->a pairs are not counted.
        """
        lst = [['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd']]
        self.__printInput(lst)
        mdp = MDP(lst)
        mdp.run()
        # Get the result Transition Probabilities (dictionary)
        tp = mdp.getTransitionProbs()
        self.__printOutput(tp)
        solution = {'a': {'b': 1}, 'b': {'c':1}, 'c' : {'d': 1}}
        self.assertEqual(tp, solution)
    def test_small2(self):
        """Two sequences whose counts must be pooled into fractions."""
        seq1 = ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd']
        seq2 = ['a', 'b', 'b', 'a', 'a', 'd', 'd', 'b', 'b', 'c', 'a']
        lst = [seq1, seq2]
        self.__printInput(lst)
        mdp = MDP(lst)
        mdp.run()
        # Get the result Transition Probabilities (dictionary)
        tp = mdp.getTransitionProbs()
        self.__printOutput(tp)
        solution = {'a': {'b': 2/3, 'd': 1/3}, 'b': {'c': 2/3, 'a': 1/3},
                    'c': {'d': 1/2, 'a': 1/2}, 'd': {'b': 1}}
        self.assertEqual(tp, solution)
    def __printInput(self, lst):
        """Debug hook; uncomment the print to see the test input."""
        # Uncomment HERE to see input
        # print("\n......Input: ", lst)
        pass
    def __printOutput(self, o):
        """Debug hook; uncomment the print to see the computed result."""
        # Uncomment HERE to see output
        # print(".....Output:", o)
        pass
# Allow running the tests directly: python test.py
if __name__ == '__main__':
    unittest.main()
106da9215eb8067c762142bbcb0cab6695c0b979 | 379 | py | Python | apps/purchases/migrations/0009_auto_20200502_0253.py | jorgesaw/kstore | 4ec6612eeeb96edb7b7bd374fd0520733c58451c | [
"MIT"
] | null | null | null | apps/purchases/migrations/0009_auto_20200502_0253.py | jorgesaw/kstore | 4ec6612eeeb96edb7b7bd374fd0520733c58451c | [
"MIT"
] | 5 | 2021-03-19T10:16:00.000Z | 2022-02-10T09:16:32.000Z | apps/purchases/migrations/0009_auto_20200502_0253.py | jorgesaw/kstore | 4ec6612eeeb96edb7b7bd374fd0520733c58451c | [
"MIT"
] | null | null | null | # Generated by Django 2.2.10 on 2020-05-02 05:53
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``ItemPurchase.supplier_price`` to ``ItemPurchase.price``."""
    dependencies = [
        ('purchases', '0008_auto_20200430_1617'),
    ]
    operations = [
        migrations.RenameField(
            model_name='itempurchase',
            old_name='supplier_price',
            new_name='price',
        ),
    ]
| 19.947368 | 49 | 0.601583 | 293 | 0.773087 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.319261 |
106e4b0ccf7d2a2518a1959d9ed235098c74fcea | 97 | py | Python | getKey.py | cychiang/spotify-lyrics | 78219ea2e9c8eacda7a8cb1cecbb7ecdd39d208e | [
"Apache-2.0"
] | null | null | null | getKey.py | cychiang/spotify-lyrics | 78219ea2e9c8eacda7a8cb1cecbb7ecdd39d208e | [
"Apache-2.0"
] | null | null | null | getKey.py | cychiang/spotify-lyrics | 78219ea2e9c8eacda7a8cb1cecbb7ecdd39d208e | [
"Apache-2.0"
] | null | null | null | def musixmatch():
with open('musixmatch.txt', 'r') as key:
return key.readline()
| 24.25 | 44 | 0.587629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.195876 |
106ea10390a127ae67a672fa8cfbea48593dd793 | 2,750 | py | Python | pylayers/antprop/examples/ex_meta.py | usmanwardag/pylayers | 2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a | [
"MIT"
] | 143 | 2015-01-09T07:50:20.000Z | 2022-03-02T11:26:53.000Z | pylayers/antprop/examples/ex_meta.py | usmanwardag/pylayers | 2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a | [
"MIT"
] | 148 | 2015-01-13T04:19:34.000Z | 2022-03-11T23:48:25.000Z | pylayers/antprop/examples/ex_meta.py | usmanwardag/pylayers | 2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a | [
"MIT"
] | 95 | 2015-05-01T13:22:42.000Z | 2022-03-15T11:22:28.000Z | from pylayers.gis.layout import *
from pylayers.antprop.signature import *
from pylayers.antprop.channel import *
import pylayers.signal.waveform as wvf
import networkx as nx
import numpy as np
import time
import logging
# NOTE: this example script uses Python 2 print statements.
# Build (or reload a cached copy of) the indoor layout used for ray-tracing.
L = Layout('WHERE1_clean.ini')
#L = Layout('defstr2.ini')
try:
    L.dumpr()
except:  # NOTE(review): bare except rebuilds on *any* failure and hides real errors
    L.build()
    L.dumpw()
#L.build()
#L.dumpw()
#L.buildGi()
nc1 = 6#5
nc2 = 25#37
# Place transmitter/receiver near the centroids of two Gt cycles at 1.5 m height.
poly1 = L.Gt.node[nc1]['polyg']
cp1 = poly1.centroid.xy
poly2 = L.Gt.node[nc2]['polyg']
cp2 = poly2.centroid.xy
ptx = np.array([cp1[0][0],cp1[1][0],1.5])
prx = np.array([cp2[0][0]+0.5,cp2[1][0]+0.5,1.5])
print ptx
print prx
# Straight-line distance and delay; 0.3 is the speed of light in m/ns.
d = np.sqrt(np.dot((ptx-prx),(ptx-prx)))
tau = d/0.3
print d,tau
logging.info('Signature')
S = Signatures(L,nc1,nc2)
a =time.time()
logging.info('Calculate signature')
#S.run2(cutoff=6,dcut=3)
S.run(cutoff=2)
b=time.time()
print b-a  # elapsed signature-computation time
# Print interaction nodes whose first element equals 354.
for i in L.Gi.nodes():
    ei = eval(i)  # NOTE(review): node labels are eval'd strings; assumes trusted input
    if type(ei)!= int:
        if ei[0] == 354:
            print i
#Gsi.add_node('Tx')
#Gsi.pos['Tx']=tuple(ptx[:2])
#for i in L.Gt.node[nc1]['inter']:
# if i in Gsi.nodes():
# Gsi.add_edge('Tx',i)
#Gsi.add_node('Rx')
#Gsi.pos['Rx']=tuple(prx[:2])
#for i in L.Gt.node[nc2]['inter']:
# if i in Gsi.nodes():
# Gsi.add_edge(i,'Rx')
#print 'signatures'
#co = nx.dijkstra_path_length(Gsi,'Tx','Rx')
#sig=list(nx.all_simple_paths(Gsi,'Tx','Rx',cutoff=co+2))
#b=time.time()
#print b-a
#f,ax=L.showG('t')
#nx.draw(Gsi,Gsi.pos,ax=ax)
#plt.show()
##S.run(L,metasig,cutoff=3)
#print "r = S.rays "
# Convert signatures to 2-D rays, lift them to 3-D, attach local bases and
# interaction data, then display them over the layout.
r = S.rays(ptx,prx)
print "r3 = r.to3D "
r3 = r.to3D()
print "r3.locbas "
r3.locbas(L)
#print "r3.fillinter "
r3.fillinter(L)
r3.show(L)
plt.show()
##
#config = ConfigParser.ConfigParser()
#_filesimul = 'default.ini'
#filesimul = pyu.getlong(_filesimul, "ini")
#config.read(filesimul)
#fGHz = np.linspace(eval(config.get("frequency", "fghzmin")),
# eval(config.get("frequency", "fghzmax")),
# eval(config.get("frequency", "nf")))
#
#Cn=r3.eval(fGHz)
#
#Cn.freq=Cn.fGHz
#sco=Cn.prop2tran(a='theta',b='theta')
#wav = wvf.Waveform()
#ciro = sco.applywavB(wav.sfg)
#
##raynumber = 4
#
##fig=plt.figure('Cpp')
##f,ax=Cn.Cpp.plot(fig=fig,iy=np.array(([raynumber])))
#
##r3d.info(raynumber)
## plt.show()
##
##
##
###
###c11 = r3d.Ctilde[:,:,0,0]
###c12 = r3d.Ctilde[:,:,0,1]
###c21 = r3d.Ctilde[:,:,1,0]
###c22 = r3d.Ctilde[:,:,1,1]
###
###
###
###Cn=Ctilde()
###Cn.Cpp = bs.FUsignal(r3d.I.f, c11)
###Cn.Ctp = bs.FUsignal(r3d.I.f, c12)
###Cn.Cpt = bs.FUsignal(r3d.I.f, c21)
###Cn.Ctt = bs.FUsignal(r3d.I.f, c22)
###Cn.nfreq = r3d.I.nf
###Cn.nray = r3d.nray
###Cn.tauk=r3d.delays
###
###raynumber = 4
###
###fig=plt.figure('Cpp')
###f,ax=Cn.Cpp.plot(fig=fig,iy=np.array(([raynumber])))
###
##
##
##
##
##
##
| 19.097222 | 61 | 0.607273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,755 | 0.638182 |
106f055608a9a9b954de165a95645804abf6cce8 | 2,261 | py | Python | code/dash_app/app.py | siwei-li/tweet_stock | 5c265a747f06388f303f314d812879cfff90f938 | [
"MIT"
] | null | null | null | code/dash_app/app.py | siwei-li/tweet_stock | 5c265a747f06388f303f314d812879cfff90f938 | [
"MIT"
] | null | null | null | code/dash_app/app.py | siwei-li/tweet_stock | 5c265a747f06388f303f314d812879cfff90f938 | [
"MIT"
] | null | null | null | import dash
from dash import Output, Input, dcc
from dash import html
from tabs import tab1, tab2
# from tab2_callbacks import tab2_out, upload_prediction, render_graph2
import flask
server = flask.Flask(__name__) # define flask app.server (lets WSGI servers target app.server)
external_stylesheets = [
    {
        # Google-font stylesheet used across the dashboard.
        "href": "https://fonts.googleapis.com/css2?"
                "family=Lato:wght@400;700&display=swap",
        "rel": "stylesheet",
    },
    # dbc.themes.BOOTSTRAP,
    # "https://cdn.jsdelivr.net/npm/bootstrap@5.1.0/dist/css/bootstrap.min.css"
]
app = dash.Dash(__name__,
                server=server,
                external_stylesheets=external_stylesheets)
app.title = "Tweet the Stocks"
# Page layout: header banner, the tab selector, then each tab's content div
# (visibility is toggled by the show_hide_tab callback below).
app.layout = html.Div(
    children=[
        html.Div(
            children=[
                html.H1(children="Tweet the Stocks", className="header-title"),
                html.P(
                    children="Explore the correlation of stock prices and the related tagged tweets in 2019",
                    className="header-description",
                ),
            ],
            className="header",
        ),
        html.Div(
            children=[
                html.Div(
                    dcc.Tabs(id="tabs", value='tab1', children=[
                        dcc.Tab(label='Historical records', value='tab1', ),
                        dcc.Tab(label='Prediction', value='tab2'),
                    ], colors={
                        "border": "white",
                        "primary": "#e36209",
                        "background": "#fafbfc"
                    })),
            ],
            className="tabs",
        ),
        tab1.layout,
        tab2.layout,
    ]
)
@app.callback(
    Output('tab1', 'style'), Output('tab2', 'style'),
    [Input('tabs', 'value')])
def show_hide_tab(tab):
    """Show the selected tab's container and hide the other one."""
    visible = {'display': 'block'}
    hidden = {'display': 'none'}
    if tab == 'tab1':
        return visible, hidden
    elif tab == 'tab2':
        return hidden, visible
@app.callback(Output('popover1', 'children'), Input('import', 'n_clicks'), Input('upload-data', 'contents'),Input('tabs', 'value'))
def hint(clicks, file_content, tab):
    """Show a progress message once the import button has been clicked on tab 1.

    Dash passes ``n_clicks=None`` before the first click, and ``None > 0``
    raises TypeError in Python 3, so test truthiness instead of comparing.
    """
    if clicks and file_content and tab == "tab1":
        return "Calculating tweet sentiment scores..."
    return ""
| 28.620253 | 131 | 0.534277 | 0 | 0 | 0 | 0 | 572 | 0.252985 | 0 | 0 | 800 | 0.353826 |
1070116f31c79416da1322b497edf3dc3c3e1f16 | 158 | py | Python | 18.py | christi-john/codechef-practice | 2f52cb72cbc5df4f4978fdd1cf77b1f01bc583b0 | [
"MIT"
] | null | null | null | 18.py | christi-john/codechef-practice | 2f52cb72cbc5df4f4978fdd1cf77b1f01bc583b0 | [
"MIT"
] | null | null | null | 18.py | christi-john/codechef-practice | 2f52cb72cbc5df4f4978fdd1cf77b1f01bc583b0 | [
"MIT"
] | null | null | null | # REMISS
for i in range(int(input())):
A,B = map(int,input().split())
if A>B: print(str(A) + " " + str(A+B))
else: print(str(B) + " " + str(A+B)) | 26.333333 | 42 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.088608 |
1070daf15ff202a5e3bcc5749503b19039cf94af | 2,260 | py | Python | pydemic/data/__init__.py | uiuc-covid19-modeling/pydemic | 3c0af60c2ac7e0dbf722584f61c45f9a2f993521 | [
"MIT"
] | 6 | 2020-05-29T22:52:30.000Z | 2020-11-08T23:27:07.000Z | pydemic/data/__init__.py | uiuc-covid19-modeling/pydemic | 3c0af60c2ac7e0dbf722584f61c45f9a2f993521 | [
"MIT"
] | null | null | null | pydemic/data/__init__.py | uiuc-covid19-modeling/pydemic | 3c0af60c2ac7e0dbf722584f61c45f9a2f993521 | [
"MIT"
] | 5 | 2020-06-12T01:47:18.000Z | 2022-03-29T13:26:09.000Z | __copyright__ = """
Copyright (C) 2020 George N Wong
Copyright (C) 2020 Zachary J Weiner
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import pandas as pd
__doc__ = """
Currently, two simple parsers are implemented to collect United States data.
More parsers can be added straightforwardly by subclassing
:class:`pydemic.data.DataParser`.
.. automodule:: pydemic.data.united_states
"""
def camel_to_snake(name):
    """Return *name* converted from CamelCase to snake_case."""
    import re
    # Insert an underscore before capitalized words, then before any
    # capital that follows a lowercase letter or digit, and lower-case.
    for pattern in ('(.)([A-Z][a-z]+)', '([a-z0-9])([A-Z])'):
        name = re.sub(pattern, r'\1_\2', name)
    return name.lower()
class DataParser:
    """Base class for case-count data sources.

    Subclasses set ``data_url`` (and optionally the column names and a
    ``translation`` mapping) and are called to obtain a DataFrame indexed
    by (region, date).
    """
    data_url = None          # CSV endpoint; must be set by subclasses
    date_column = 'date'     # column parsed as the date index level
    region_column = 'region' # column used as the region index level
    translation = {}         # snake_case column name -> canonical name
    def translate_columns(self, key):
        """Map a raw column name to its canonical snake_case name."""
        snake = camel_to_snake(key)
        return self.translation.get(snake, snake)
    def __call__(self, region=None):
        """Load the CSV; optionally restrict to one *region*."""
        frame = pd.read_csv(
            self.data_url,
            parse_dates=[self.date_column],
            index_col=[self.region_column, self.date_column],
        )
        # Drop columns whose canonical name already exists to avoid
        # duplicates after renaming.
        duplicates = set(self.translation.values()) & set(frame.columns)
        frame = frame.drop(columns=duplicates)
        frame = frame.rename(columns=self.translate_columns)
        if region is not None:
            frame = frame.sort_index().loc[region]
        return frame
__all__ = [
"camel_to_snake",
"DataParser",
]
| 32.753623 | 78 | 0.715487 | 649 | 0.287168 | 0 | 0 | 0 | 0 | 0 | 0 | 1,422 | 0.629204 |
1071230a1f43893cad26a92ac7143ffa8a21a932 | 47 | py | Python | Lesson5/srez.py | shinkai-tester/python_beginner | a934328c9a50241cc3f02a423060e16aab53b425 | [
"Apache-2.0"
] | 2 | 2021-06-01T13:24:04.000Z | 2021-06-01T13:27:47.000Z | Lesson5/srez.py | shinkai-tester/python_beginner | a934328c9a50241cc3f02a423060e16aab53b425 | [
"Apache-2.0"
] | null | null | null | Lesson5/srez.py | shinkai-tester/python_beginner | a934328c9a50241cc3f02a423060e16aab53b425 | [
"Apache-2.0"
] | null | null | null | a = [1, 2, 3, 4, 5]
b = a[:3]
print(b)
print(a) | 11.75 | 19 | 0.446809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1071d5d3c31d440ee16ee62b40826c1d771809f7 | 216 | py | Python | regularize_charsets.py | sys-bio/temp-biomodels | 596eebb590d72e74419773f4e9b829a62d7fff9a | [
"CC0-1.0"
] | null | null | null | regularize_charsets.py | sys-bio/temp-biomodels | 596eebb590d72e74419773f4e9b829a62d7fff9a | [
"CC0-1.0"
] | 5 | 2022-03-30T21:33:45.000Z | 2022-03-31T20:08:15.000Z | regularize_charsets.py | sys-bio/temp-biomodels | 596eebb590d72e74419773f4e9b829a62d7fff9a | [
"CC0-1.0"
] | null | null | null | from charset_normalizer import from_path, normalize
# Detect the best character set of the SBML file, then normalise it on disk.
# NOTE(review): these Windows-style paths are not raw strings; '\B' happens
# not to be an escape sequence, but r'...' or pathlib would be safer.
results = from_path('original\BIOMD0000000424\BIOMD0000000424_url.xml')
best = str(results.best())  # NOTE(review): computed but never used afterwards
normalize('original\BIOMD0000000424\BIOMD0000000424_url.xml')
| 27 | 71 | 0.828704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.462963 |
1073ef5093a5b8b166fa58df1b32f9b4c68bda68 | 9,812 | py | Python | accounts/forms.py | GDGSNF/My-Business | 792bb13a5b296260e5de7e03fba6445a13922851 | [
"MIT"
] | 21 | 2020-08-29T14:32:13.000Z | 2021-08-28T21:40:32.000Z | accounts/forms.py | GDGSNF/My-Business | 792bb13a5b296260e5de7e03fba6445a13922851 | [
"MIT"
] | 1 | 2020-10-11T21:56:15.000Z | 2020-10-11T21:56:15.000Z | accounts/forms.py | yezz123/My-Business | 792bb13a5b296260e5de7e03fba6445a13922851 | [
"MIT"
] | 5 | 2021-09-11T23:31:10.000Z | 2022-03-06T20:29:59.000Z | import datetime
import re
from configparser import ConfigParser
from smtplib import SMTPException
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth.tokens import default_token_generator
from django.core.mail import send_mail
from django.core.mail.backends.smtp import EmailBackend
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.utils.html import strip_tags
from django.utils.http import urlsafe_base64_encode
from accounts.models import Account, Shift
class LoginForm(forms.Form):
    """Authenticate a user by email/password and log them in."""
    email = forms.EmailField(label="Email")
    password = forms.CharField(
        label="Password", widget=forms.PasswordInput(render_value=False)
    )
    def clean_email(self):
        # Normalise the address so authentication is case-insensitive.
        return self.cleaned_data["email"].lower()
    def clean(self):
        if self._errors:
            return
        data = self.cleaned_data
        # Keep the authenticated account on the form for login().
        self.account = authenticate(email=data["email"], password=data["password"])
        if self.account is None:
            raise forms.ValidationError(
                "The email and/or password you entered are incorrect."
            )
        return data
    def login(self, request):
        """Validate and start a session; returns True on success."""
        if not self.is_valid():
            return False
        login(request, self.account)
        return True
class PasswordResetForm(forms.Form):
    """Request a password reset: look up the account, email a reset link."""
    email = forms.EmailField(label="Email")
    def clean_email(self):
        # Case-insensitive lookup; the matched account is kept on the form
        # so save() can build the token for it.
        try:
            self.account = Account.objects.get(email=self.cleaned_data["email"].lower())
        except Account.DoesNotExist:
            raise forms.ValidationError(
                "The email is not associated with any accounts."
            )
        return self.cleaned_data["email"].lower()
    def save(self, request):
        """Email a tokenised reset URL; returns False on SMTP failure."""
        # URL = base + urlsafe(uid) + "/" + token + "/"
        url = request.build_absolute_uri("/accounts/password/reset/")
        url += urlsafe_base64_encode(force_bytes(self.account.uid)) + "/"
        url += default_token_generator.make_token(self.account) + "/"
        html = render_to_string(
            "email.html",
            {
                "url": url,
                "message": "You requested a password reset.",
                "button": "Reset Password",
            },
        )
        # Plain-text fallback: strip markup and substitute the raw URL for
        # the button label.
        text = strip_tags(html).replace("Reset Password", url)
        # SMTP settings come from the external config file, not Django settings.
        config = ConfigParser(interpolation=None)
        config.read(settings.CONFIG_FILE)
        backend = EmailBackend(
            host=config.get("email", "EMAIL_HOST"),
            port=config.getint("email", "EMAIL_PORT"),
            username=config.get("email", "EMAIL_USER"),
            password=config.get("email", "EMAIL_PASSWORD"),
            use_tls=config.getboolean("email", "EMAIL_USE_TLS"),
        )
        try:
            send_mail(
                subject="Reset Password | Business Tracker",
                message=text,
                html_message=html,
                from_email=config.get("email", "EMAIL_USER"),
                recipient_list=(self.cleaned_data["email"],),
                connection=backend,
            )
        except SMTPException:
            return False
        return True
class PasswordResetConfirmForm(forms.Form):
    """Second step of a password reset: choose and confirm a new password.

    NOTE(review): identical to PasswordChangeForm in this module; a shared
    base class would remove the duplication.
    """
    new_password = forms.CharField(
        label="New Password", widget=forms.PasswordInput(render_value=False)
    )
    verify_new_password = forms.CharField(
        label="Verify New Password", widget=forms.PasswordInput(render_value=False)
    )
    def clean_new_password(self):
        # Enforce the project-wide password policy regex from settings.
        if not re.match(settings.PASSWORD_REGEX, self.cleaned_data["new_password"]):
            raise forms.ValidationError(
                "The password needs to have at least 8 characters, a letter, and a number."
            )
        return self.cleaned_data["new_password"]
    def clean(self):
        if self._errors:
            return
        if (
            self.cleaned_data["new_password"]
            != self.cleaned_data["verify_new_password"]
        ):
            raise forms.ValidationError("The passwords do not match.")
        return self.cleaned_data
    def save(self, account):
        """Set the new password on *account*, persist, and return it."""
        account.set_password(self.cleaned_data["new_password"])
        account.save()
        return account
class PasswordChangeForm(forms.Form):
    """Change the password of a logged-in account.

    NOTE(review): identical to PasswordResetConfirmForm in this module; a
    shared base class would remove the duplication.
    """
    new_password = forms.CharField(
        label="New Password", widget=forms.PasswordInput(render_value=False)
    )
    verify_new_password = forms.CharField(
        label="Verify New Password", widget=forms.PasswordInput(render_value=False)
    )
    def clean_new_password(self):
        # Enforce the project-wide password policy regex from settings.
        if not re.match(settings.PASSWORD_REGEX, self.cleaned_data["new_password"]):
            raise forms.ValidationError(
                "The password needs to have at least 8 characters, a letter, and a number."
            )
        return self.cleaned_data["new_password"]
    def clean(self):
        if self._errors:
            return
        if (
            self.cleaned_data["new_password"]
            != self.cleaned_data["verify_new_password"]
        ):
            raise forms.ValidationError("The passwords do not match.")
        return self.cleaned_data
    def save(self, account):
        """Set the new password on *account*, persist, and return it."""
        account.set_password(self.cleaned_data["new_password"])
        account.save()
        return account
class AccountForm(forms.ModelForm):
    """Create or edit an Account; new accounts are sent an activation email."""
    verify_email = forms.EmailField(label="Verify Email")
    class Meta:
        model = Account
        exclude = ("last_login", "password", "is_superuser")
        labels = {
            "first_name": "First Name",
            "last_name": "Last Name",
            "address1": "Address Line 1",
            "address2": "Address Line 2",
            "state": "State / Region / Province",
            "zipcode": "ZIP / Postal Code",
        }
    def clean_email(self):
        return self.cleaned_data["email"].lower()
    def clean_verify_email(self):
        return self.cleaned_data["verify_email"].lower()
    def clean_first_name(self):
        if not re.match(settings.NAME_REGEX, self.cleaned_data["first_name"]):
            raise forms.ValidationError("Enter a valid first name.")
        return self.cleaned_data["first_name"]
    def clean_last_name(self):
        if not re.match(settings.NAME_REGEX, self.cleaned_data["last_name"]):
            raise forms.ValidationError("Enter a valid last name.")
        return self.cleaned_data["last_name"]
    def clean(self):
        self.cleaned_data = super().clean()
        if self._errors:
            return
        if self.cleaned_data["email"] != self.cleaned_data["verify_email"]:
            raise forms.ValidationError("The emails do not match.")
        return self.cleaned_data
    def save(self, request=None):
        """Persist the account.

        Existing accounts are simply saved. New accounts are saved as
        non-superusers and emailed a tokenised activation link (reusing the
        password-reset flow); returns False if the email could not be sent.
        """
        account = super().save(commit=False)
        # Editing an existing account: just persist the changes.
        if not account._state.adding:
            account.save()
            return account
        account.is_superuser = False
        account.save()
        # URL = base + urlsafe(uid) + "/" + token + "/"
        url = request.build_absolute_uri("/accounts/password/reset/")
        url += urlsafe_base64_encode(force_bytes(account.uid)) + "/"
        url += default_token_generator.make_token(account) + "/"
        html = render_to_string(
            "email.html",
            {
                "url": url,
                "message": f"{account.first_name}, activate your new account using the link below.",
                "button": "Activate Account",
            },
        )
        text = strip_tags(html).replace("Activate Account", url)
        # SMTP settings come from the external config file, not Django settings.
        config = ConfigParser(interpolation=None)
        config.read(settings.CONFIG_FILE)
        backend = EmailBackend(
            host=config.get("email", "EMAIL_HOST"),
            port=config.getint("email", "EMAIL_PORT"),
            username=config.get("email", "EMAIL_USER"),
            password=config.get("email", "EMAIL_PASSWORD"),
            use_tls=config.getboolean("email", "EMAIL_USE_TLS"),
        )
        try:
            send_mail(
                subject="Activate Account | Business Tracker",
                message=text,
                html_message=html,
                from_email=config.get("email", "EMAIL_USER"),
                recipient_list=(self.cleaned_data["email"],),
                connection=backend,
            )
            return account
        except SMTPException:
            return False
class ShiftForm(forms.ModelForm):
    """Create or edit a Shift; duration is entered as "HH:MM" and stored
    as an integer number of seconds (1 minute .. 16 hours)."""
    duration = forms.CharField(
        max_length=5, help_text="Must be formatted as HH:MM (00:00 - 16:00)"
    )
    class Meta:
        model = Shift
        exclude = ("worker",)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields["project"].empty_label = ""
        if not self.initial.get("duration", None):
            self.initial["duration"] = "00:00"
        if not self.initial.get("date", None):
            self.initial["date"] = datetime.date.today()
        instance = getattr(self, "instance", None)
        # Editing an existing shift: the stored duration is an integer
        # number of seconds; render it back as "HH:MM" for the text input.
        if instance and instance.pk and self.initial["duration"]:
            hours = self.initial["duration"] // 3600
            minutes = self.initial["duration"] % 3600 // 60
            self.initial["duration"] = f"{hours:02d}:{minutes:02d}"
    def clean_duration(self):
        """Validate "HH:MM" strictly and convert it to seconds.

        The previous implementation only checked total length and that both
        halves were decimal, so malformed inputs such as "1:300" or "00:99"
        were accepted; the shape is now enforced with a regex and the
        minutes must be below 60.
        """
        match = re.fullmatch(r"(\d{2}):(\d{2})", self.cleaned_data["duration"])
        if match is None:
            raise forms.ValidationError("Enter a valid duration.")
        hours, minutes = int(match.group(1)), int(match.group(2))
        if minutes > 59:
            raise forms.ValidationError("Enter a valid duration.")
        self.cleaned_data["duration"] = hours * 3600 + minutes * 60
        # Durations must be at least one minute and at most 16 hours.
        if not 60 <= self.cleaned_data["duration"] <= 57600:
            raise forms.ValidationError("Enter a valid duration.")
        return self.cleaned_data["duration"]
| 34.918149 | 100 | 0.610171 | 9,192 | 0.936812 | 0 | 0 | 0 | 0 | 0 | 0 | 1,916 | 0.195271 |
1074779ac430e926e8880572bf45c1deb9b0db82 | 3,300 | py | Python | tests/st/model_zoo_tests/DeepFM/test_deepfm.py | HappyKL/mindspore | 479cb89e8b5c9d859130891567038bb849a30bce | [
"Apache-2.0"
] | 1 | 2020-10-18T12:27:45.000Z | 2020-10-18T12:27:45.000Z | tests/st/model_zoo_tests/DeepFM/test_deepfm.py | ReIadnSan/mindspore | c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5 | [
"Apache-2.0"
] | null | null | null | tests/st/model_zoo_tests/DeepFM/test_deepfm.py | ReIadnSan/mindspore | c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train_criteo."""
import os
import pytest
from mindspore import context
from mindspore.train.model import Model
from mindspore.common import set_seed
from src.deepfm import ModelBuilder, AUCMetric
from src.config import DataConfig, ModelConfig, TrainConfig
from src.dataset import create_dataset, DataType
from src.callback import EvalCallBack, LossCallBack, TimeMonitor
set_seed(1)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_deepfm():
    """Smoke-train DeepFM for one pass on the criteo_h5 dataset and assert
    that the final loss and per-step time stay under fixed thresholds."""
    cfg_data = DataConfig()
    cfg_train = TrainConfig()
    # Ascend device id comes from the CI environment.
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
                        device_id=int(os.getenv('DEVICE_ID')))
    dataset_path = "/home/workspace/mindspore_dataset/criteo_data/criteo_h5/"
    print("dataset_path:", dataset_path)
    train_ds = create_dataset(dataset_path,
                              train_mode=True,
                              epochs=1,
                              batch_size=cfg_train.batch_size,
                              data_type=DataType(cfg_data.data_format),
                              rank_size=None,
                              rank_id=None)

    builder = ModelBuilder(ModelConfig, TrainConfig)
    net_train, net_eval = builder.get_train_eval_net()
    metric = AUCMetric()
    model = Model(net_train, eval_network=net_eval, metrics={"auc": metric})

    # Callbacks: wall-clock per step, loss logging, and AUC evaluation.
    monitor = TimeMonitor(data_size=train_ds.get_dataset_size())
    loss_cb = LossCallBack(loss_file_path='./loss.log')
    eval_ds = create_dataset(dataset_path, train_mode=False,
                             epochs=1,
                             batch_size=cfg_train.batch_size,
                             data_type=DataType(cfg_data.data_format))
    eval_cb = EvalCallBack(model, eval_ds, metric,
                           eval_file_path='./auc.log')
    callbacks = [monitor, loss_cb, eval_cb]

    print("train_config.train_epochs:", cfg_train.train_epochs)
    model.train(cfg_train.train_epochs, train_ds, callbacks=callbacks)

    # Regression thresholds for loss and per-step latency.
    print("loss_callback.loss:", loss_cb.loss)
    assert loss_cb.loss < 0.51
    print("time_callback:", monitor.per_step_time)
    assert monitor.per_step_time < 10.4
    print("*******test case pass!********")
| 40.740741 | 93 | 0.688485 | 0 | 0 | 0 | 0 | 2,243 | 0.679697 | 0 | 0 | 889 | 0.269394 |
1077969251a2cc5472238f5a65083a0f8624450e | 2,588 | py | Python | meterer/s3.py | dacut/meterer | 441e7f021e3302597f56876948a40fe8799a3375 | [
"Apache-2.0"
] | null | null | null | meterer/s3.py | dacut/meterer | 441e7f021e3302597f56876948a40fe8799a3375 | [
"Apache-2.0"
] | null | null | null | meterer/s3.py | dacut/meterer | 441e7f021e3302597f56876948a40fe8799a3375 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3.6
# pylint: disable=C0103
"""
Metering client for S3.
"""
from .client import Meterer
class S3Meterer(Meterer):
"""
Meter an S3 bucket.
"""
s3_url_prefix = "s3://"
def __init__(self, cache, boto_session=None, cloudwatch_namespace=None):
"""
S3Meterer(cache, boto_session=None, cloudwatch_namespace=None)
-> S3Meterer
Create a new S3Meterer, using the specified cache for recording access
patterns and retrieving limits. If boto_session is not None, it is
used to generate new S3 Boto clients. Otherwise, the default Boto3
session is used.
If cloudwatch_namespace is not None, CloudWatch metrics will be emitted
to the specified namespace.
"""
super(S3Meterer, self).__init__(cache)
if boto_session is None:
import boto3
self.boto_session = boto3
else:
self.boto_session = boto_session
self.cloudwatch_namespace = cloudwatch_namespace
return
def pool_for_resource(self, resource_name):
"""
s3meterer.pool_for_resource(resource_name) -> str
Returns the bucket for the given S3 resource, to be used as the
metering pool. The S3 resource must be in the form 's3://bucket/key'.
"""
return S3Meterer.resource_to_bucket_and_key(resource_name)[0]
def get_actual_resource_size(self, resource_name):
s3 = self.boto_session.resource("s3")
bucket_name, key_name = S3Meterer.resource_to_bucket_and_key(
resource_name)
summary = s3.ObjectSummary(bucket_name, key_name)
return summary.size
@staticmethod
def resource_to_bucket_and_key(resource_name):
"""
S3Meterer.resource_to_bucket_and_key(resource_name) -> (str, str)
Convert an S3 resource name in the form 's3://bucket/key' to a tuple
of ('bucket', 'key').
"""
if not resource_name.startswith(S3Meterer.s3_url_prefix):
raise ValueError("Expected resource_name to start with %s" %
S3Meterer.s3_url_prefix)
s3_bucket_and_key = resource_name[len(S3Meterer.s3_url_prefix):]
try:
bucket, key = s3_bucket_and_key.split("/", 1)
if not bucket:
raise ValueError("Bucket name cannot be empty in URL %r" %
resource_name)
return (bucket, key)
except ValueError:
raise ValueError("No key specified in URL %r" % resource_name)
| 32.759494 | 79 | 0.633308 | 2,476 | 0.956723 | 0 | 0 | 895 | 0.345827 | 0 | 0 | 1,137 | 0.439335 |
1077e543e91d42e1a23cc4585cc1adc1e252701a | 984 | py | Python | Python/3 kyu/Calculator/test_evaluate.py | newtonsspawn/codewars_challenges | 62b20d4e729c8ba79eac7cae6a179af57abd45d4 | [
"MIT"
] | 3 | 2020-05-29T23:29:35.000Z | 2021-08-12T03:16:44.000Z | Python/3 kyu/Calculator/test_evaluate.py | newtonsspawn/codewars_challenges | 62b20d4e729c8ba79eac7cae6a179af57abd45d4 | [
"MIT"
] | null | null | null | Python/3 kyu/Calculator/test_evaluate.py | newtonsspawn/codewars_challenges | 62b20d4e729c8ba79eac7cae6a179af57abd45d4 | [
"MIT"
] | 3 | 2020-05-22T12:14:55.000Z | 2021-04-15T12:52:42.000Z | from unittest import TestCase
from evaluate import Calculator
calc = Calculator()
class TestCalculator(TestCase):
    """Unit tests for Calculator.evaluate: single numbers, left-to-right
    addition/subtraction, operator precedence, and float expressions."""

    def test_evaluate_01(self):
        # A bare number evaluates to itself.
        result = calc.evaluate(string='127')
        self.assertEqual(result, 127)

    def test_evaluate_02(self):
        result = calc.evaluate(string='2 + 3')
        self.assertEqual(result, 5)

    def test_evaluate_03(self):
        # Subtraction associates left to right.
        result = calc.evaluate(string='2 - 3 - 4')
        self.assertEqual(result, -5)

    def test_evaluate_04(self):
        result = calc.evaluate(string='10 * 5 / 2')
        self.assertEqual(result, 25)

    def test_evaluate_05(self):
        # Multiplication/division bind tighter than addition/subtraction.
        result = calc.evaluate(string='2 / 2 + 3 * 4 - 6')
        self.assertEqual(result, 7)

    def test_evaluate_06(self):
        result = calc.evaluate(string='2 + 3 * 4 / 3 - 6 / 3 * 3 + 8')
        self.assertEqual(result, 8)

    def test_evaluate_07(self):
        result = calc.evaluate(string='1.1 + 2.2 + 3.3')
        self.assertEqual(result, 6.6)

    def test_evaluate_08(self):
        result = calc.evaluate(string='1.1 * 2.2 * 3.3')
        self.assertEqual(result, 7.986)
| 28.941176 | 79 | 0.604675 | 897 | 0.911585 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.120935 |
1078131895be80dd05cd641cc3a84d9d01ba5a0a | 272 | py | Python | gfapy/line/__init__.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 44 | 2017-03-18T08:08:04.000Z | 2021-11-10T16:11:15.000Z | gfapy/line/__init__.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 22 | 2017-04-04T21:20:31.000Z | 2022-03-09T19:05:30.000Z | gfapy/line/__init__.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 5 | 2017-07-07T02:56:56.000Z | 2020-09-30T20:10:49.000Z | from .comment import Comment
from .line import Line
from .unknown import Unknown
from .edge import Edge
from .gap import Gap
from .custom_record import CustomRecord
from .fragment import Fragment
from .header import Header
from .segment import Segment
from . import group
| 24.727273 | 39 | 0.816176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
107e914f1ae2666a80b799c84f120d5ecb4a420a | 21,546 | py | Python | preprocessing/parsing.py | bradhackinen/frdocs | c846eef4bb6e09b49a4a63a69d3d16c8bd505e4e | [
"Apache-2.0"
] | null | null | null | preprocessing/parsing.py | bradhackinen/frdocs | c846eef4bb6e09b49a4a63a69d3d16c8bd505e4e | [
"Apache-2.0"
] | null | null | null | preprocessing/parsing.py | bradhackinen/frdocs | c846eef4bb6e09b49a4a63a69d3d16c8bd505e4e | [
"Apache-2.0"
] | null | null | null | import os
import lxml.etree as et
import pandas as pd
import numpy as np
import regex as re
def get_element_text(element):
    """
    Recursively collect the visible text of an XML element.

    Footnote-number elements (tag 'SU') contribute no head text, and the
    pieces (head text, child texts, tail text) are joined with single
    spaces so emphasized spans stay separated from surrounding words.
    """
    parts = []
    if element.tag != 'SU' and element.text:
        parts.append(element.text)
    for sub in element.getchildren():
        sub_text = get_element_text(sub)
        if sub_text:
            parts.append(sub_text)
    if element.tail:
        parts.append(element.tail)
    return ' '.join(parts)
def update_header(header, element):
    """
    Maintain the hierarchical heading trail while walking the document.

    The heading depth comes from the SOURCE attribute: 'HED' (the default)
    and 'HD' map to level 1, 'HD<k>' maps to level k+1.  A heading at a
    shallower level truncates the trail; a deeper one pads it with blanks.
    Returns the updated header list.
    """
    text = ''.join(element.itertext())
    source = element.get('SOURCE', 'HED')
    # Last char is either a digit or 'D'; 'D' counts as level 0 (+1 below).
    level = int(source[-1].replace('D', '0')) + 1

    if level < len(header):
        # Shallower heading: drop deeper entries, append the new one.
        header = header[:level - 1] + [text]
    else:
        # Deeper (or equal) heading: pad to depth, overwrite the last slot.
        header += [''] * (level - len(header))
        header[-1] = text
    return header
def get_ancestor(element, tag):
    """Return the nearest ancestor of *element* with the given tag, or None."""
    return next((a for a in element.iterancestors() if a.tag == tag), None)
def parse_cfr_string(s,verbose=False):
    """
    Yield (title, part) tuples parsed from a CFR citation string such as
    "12 CFR 204".

    The first whitespace token of *s* is taken as the CFR title; every
    repetition of the named ``part`` group after "CFR" (digits, the "_"
    placeholder, or a Roman numeral) is yielded with that title.  On any
    failure a single (nan, nan) tuple is yielded instead of raising.

    NOTE: ``.captures`` returns every repetition of a group and is a
    feature of the third-party ``regex`` module (imported as ``re`` in
    this file); the stdlib ``re`` would only expose the last repetition.
    """
    try:
        title = s.split()[0]
        for part in re.search(r'CFR(.*?(?P<part>(_|\d+|\b[IVXCL]+\b)))+',s,re.IGNORECASE).captures('part'):
            yield (title,part)
    except Exception:
        # Covers both a failed regex search (None.captures) and empty input.
        if verbose: print('Warning: Failed to parse CFR string "%s"' % s)
        yield (np.nan,np.nan)
def build_lstsub_cfr_map(root, verbose=False):
    """
    Map PART elements to CFR (title, part) tuples using the "List of
    Subjects" (LSTSUB) tables that precede regulation parts.

    PART elements are grouped under the most recent preceding LSTSUB in
    document order; a group is mapped only when the number of CFR
    citations in the LSTSUB equals the number of PART elements, in which
    case they are paired positionally.
    """
    # Group each PART under the LSTSUB that most recently preceded it.
    parts_by_lstsub = {lst: [] for lst in root.xpath('.//LSTSUB')}
    current = None
    for node in root.xpath('.//LSTSUB|.//PART'):
        if node.tag == 'LSTSUB':
            current = node
        elif current is not None:
            parts_by_lstsub[current].append(node)

    # Pair citations with PART elements only when the counts line up.
    mapping = {}
    for lst, part_nodes in parts_by_lstsub.items():
        citations = [c for cfr_el in lst.xpath('./CFR')
                     for c in parse_cfr_string(get_element_text(cfr_el), verbose)]
        if len(citations) == len(part_nodes):
            mapping.update(zip(part_nodes, citations))
    return mapping
def build_header_part_map(root):
    """
    Map each PART element to the part number found in its first heading.

    Scans the first descendant <HD> of every <PART> for "part <number>"
    (or the "_" placeholder), case-insensitively.  If a heading contains
    several matches, the last one wins (matching the original behavior).
    """
    mapping = {}
    for part_node in root.xpath('.//PART'):
        for heading in part_node.xpath(r'descendant::HD[1]'):
            heading_text = get_element_text(heading)
            found = re.findall(r'part\s+(\d+|_)', heading_text, re.IGNORECASE)
            if found:
                mapping[part_node] = found[-1]
    return mapping
def build_part_cfr_map(root,verbose=False):
    """
    Build a mapping from part elements to CFR (title,part) tuples.

    If document has one CFR reference and one part element then the mapping
    is trivial.

    If document has multiple CFR references and/or multiple part elements,
    *try* to infer the correct CFR for each part element using the
    "List of Subjects" table that comes before a CFR reg part
    (build_lstsub_cfr_map) and part headers (build_header_part_map).
    Elements that cannot be resolved map to (nan, nan).
    """
    # All CFR citations declared in the preamble of the document.
    doc_cfrs = [cfr for e in root.xpath('.//PREAMB/CFR')
                for cfr in parse_cfr_string(get_element_text(e),verbose)]
    part_elements = root.xpath('.//PART')

    if not doc_cfrs and not part_elements:
        return {}
    elif len(doc_cfrs) == 1 and len(part_elements) == 1:
        # Trivial case with one CFR part and no ambiguity
        return {part_elements[0]:doc_cfrs[0]}
    else:
        # Multiple sections case. Use lstsub and header information to infer CFR.
        lstsub_cfr_map = build_lstsub_cfr_map(root)
        header_part_map = build_header_part_map(root)

        cfr_map = {}
        for e in part_elements:
            if e in lstsub_cfr_map:
                # Prefer the List-of-Subjects pairing; cross-check the header.
                cfr_map[e] = lstsub_cfr_map[e]
                if e in header_part_map and lstsub_cfr_map[e][1] != header_part_map[e]:
                    if verbose: print('Warning: lstsub and header cfr do not agree for %s\nlstsub_cfr_map=%s\nheader_part_map=%s' % (str(e),str(lstsub_cfr_map),str(header_part_map)))
            elif e in header_part_map:
                # Header gives the part; infer the title from preamble CFRs.
                part = header_part_map[e]
                potential_titles = set(cfr[0] for cfr in doc_cfrs if cfr[1] == part)
                if part == '_':
                    title = '_'
                elif len(potential_titles) == 1:
                    title = list(potential_titles)[0]
                else:
                    # Zero or several candidate titles: leave unresolved.
                    title = np.nan
                    if verbose: print('Warning: Could not infer title for part element %s\npotential_titles=%s' % (str(e),str(potential_titles)))
                cfr_map[e] = (title,part)
            else:
                cfr_map[e] = (np.nan,np.nan)
                if verbose: print('Warning: Could not infer CFR for part element %s' % str(e))

        return cfr_map
def split_numbering(s):
    """
    Split leading paragraph numbering off a string.

    Recognizes bracketed numbering such as "(a) " / "(1)(i) " and dotted
    numbering such as "1. " or "A. ".  Returns (numbering, remainder);
    numbering is np.nan when no prefix is found.  Non-string input
    returns None (preserving the original implicit behavior).
    """
    if type(s) is not str:
        return None
    patterns = (
        # One or more bracketed markers: (1), (a), (IV), each optionally spaced.
        r'\s*?((\(\s*(\d+|[A-Z]{1,3}|[a-z]{1,3})\s*\))\s*)+',
        # Dotted markers: "1." or "A." followed by whitespace.
        r'\s*((\d+|[A-Z]+)\.)\s',
    )
    for pattern in patterns:
        match = re.match(pattern, s)
        if match:
            prefix = match.group(0)
            return prefix, s[len(prefix):]
    return np.nan, s
def clean_paragraph_text(s):
    """
    Normalize paragraph text so tokenizers (e.g. spaCy) split it better.

    Inserts a space before an opening parenthesis that is glued to a
    preceding token, and pads em-dashes/hyphens sitting between two
    non-space characters with spaces on both sides.
    """
    with_paren_space = re.sub(r'(\S)\(', r'\g<1> (', s)
    return re.sub(r'(\S)([\u2014-])(\S)', r'\g<1> \g<2> \g<3>', with_paren_space)
def parse_xml_file(xmlFile, **args):
    """Parse a Federal Register XML file into a paragraph DataFrame.

    Extra keyword arguments are forwarded to parse_reg_xml_tree.
    """
    with open(xmlFile, 'rb') as handle:
        parsed_tree = et.parse(handle)
    return parse_reg_xml_tree(parsed_tree, **args)
def parse_reg_xml_tree(tree,nlp=None,extract_numbering=True,verbose=False,
                       split_paragraphs=True):
    """
    Walk a parsed Federal Register XML tree and return one row per
    paragraph as a pandas DataFrame.

    Parameters
    ----------
    tree : parsed XML tree (lxml) with getroot()/getpath() available
    nlp : optional callable (e.g. a spaCy pipeline) applied to each
        paragraph text; results stored in a 'doc' column
    extract_numbering : split leading "(a)"/"1." numbering into its own column
    verbose : print warnings for missing section/footnote metadata
    split_paragraphs : split element text on newlines into separate rows

    Each row records the element's xml path, heading trail, tag, inferred
    CFR title/part, section number/subject, and footnote references.
    """
    root = tree.getroot()

    # Map PART elements to (title, part) before walking the document.
    part_cfr_map = build_part_cfr_map(root,verbose)

    paragraphs = []
    header = []
    part_element = None # Need to track part elements because they are not hierarchical
    for element in root.iter():

        if element.tag == 'PART':
            part_element = element

        elif element.tag == 'HD':
            # Keep the hierarchical heading trail current.
            header = update_header(header,element)

        if element.tag in ['P','AMDPAR','HD','EXTRACT','GPOTABLE','SECTION']:

            text = get_element_text(element)

            paragraph = {
                'xml_path':tree.getpath(element),
                'header':tuple(header),
                'legal':False,
                'tag':element.tag
            }

            # Paragraphs inside REGTEXT carry their CFR reference directly.
            reg_element = get_ancestor(element,'REGTEXT')
            if reg_element is not None:
                paragraph['legal'] = True
                paragraph['cfr_title'] = reg_element.get('TITLE')
                paragraph['cfr_part'] = reg_element.get('PART')

            elif part_element is not None and re.search(r'(SECTION|AMDPAR|EXTRACT|SUBPART)',paragraph['xml_path']):
                # Otherwise fall back to the CFR inferred for the enclosing PART.
                paragraph['legal'] = True
                paragraph['cfr_title'],paragraph['cfr_part'] = part_cfr_map[part_element]

            section_element = get_ancestor(element,'SECTION')
            if section_element is not None:
                try:
                    # SECTNO text looks like "§ 204.2"; keep the number only.
                    paragraph['cfr_section'] = section_element.xpath('./SECTNO/text()')[0].split()[1]
                except Exception:
                    if verbose: print('Warning: Failed to get section number for section %s' % section_element)
                try:
                    paragraph['section_subject'] = section_element.xpath('.//SUBJECT/text()')[0]
                except Exception:
                    if verbose: print('Warning: Section %s has no subject information' % section_element)

            # Footnote bodies (inside FTNT) get 'footnote'; other paragraphs
            # get the list of footnote numbers they reference ('footnotes').
            ftnt_element = get_ancestor(element,'FTNT')
            if ftnt_element is not None:
                try:
                    paragraph['footnote'] = ftnt_element.xpath('.//SU/text()')[0]
                except Exception:
                    paragraph['footnote'] = 0
                    if verbose: print('Warning: Footnote %s has no numbering information' % ftnt_element)
            else:
                paragraph['footnotes'] = element.xpath('./SU/text()')

            """
            Agencies are inconsistent about how they use paragraph formatting:
                -In some documents, XML <p> items correspond to paragraphs
                -In some documents, <p> items contain multiple paragraphs split by newline characters

            For the sake of consistentency, split all paragraphs on newlines by default,
            keeping trailing whitespace with each paragraph
            """
            if split_paragraphs:
                par_texts = [m.group(0) for m in re.finditer(r'\s*.*\n?\s*',text)]
            else:
                par_texts = [text]

            for par_text in par_texts:
                if extract_numbering:
                    paragraph['numbering'],par_text = split_numbering(par_text)

                paragraph['text'] = par_text
                # Copy so each row is independent of later mutations.
                paragraphs.append(paragraph.copy())

    paragraph_df = pd.DataFrame(paragraphs)

    # Ensure dataframe has all columns
    for c in ['cfr_title','cfr_part','section','section_subject','footnote','footnotes']:
        if c not in paragraph_df.columns:
            paragraph_df[c] = np.nan

    # paragraph_df['text'] = paragraph_df['text'].apply(clean_paragraph_text)

    if nlp is not None:
        paragraph_df['doc'] = paragraph_df['text'].apply(nlp)

    return paragraph_df
def clean_html_text(text):
    """
    Clean raw Federal Register HTML-derived text before paragraph parsing.

    Applies an ordered sequence of regex substitutions; the order matters
    (e.g. line-break joining must come after page markers are removed).

    NOTE: ``re.REVERSE`` and the variable-width lookbehind used below are
    features of the third-party ``regex`` module (imported as ``re``).
    """
    # Strip header and footer
    text = re.sub(r'.+(?=AGENCY)','',text,flags=re.DOTALL)
    # re.REVERSE searches from the end, so only the last "[FR Doc..." block
    # (and everything after it) is removed.
    text = re.sub(r'\[(FR )?Doc.+?$','',text,flags=re.DOTALL | re.REVERSE)

    # Replace bullets with bullet char
    text = re.sub(r'<bullet>','•',text)

    # Replace double-dash with em-dash
    text = re.sub(r'(?<=[^\s-])--(?=[^\s-])','—',text)

    # Replace 'Sec.' with §
    text = re.sub(r'(?<=\s)Sec\.','§',text)

    # Remove html tags (not worth parsing)
    text = re.sub(r'<\\?.+?>','',text)

    # #Remove dashed horizontal lines
    # text = re.sub(r'\n-{5,}\n','\n',text)

    # Delete page markers
    text = re.sub(r'\n\s*\[\[Page.+\]\]\s*\n',' ',text,flags=re.IGNORECASE)

    # Replace in-paragraph line-breaks with spaces
    text = re.sub(r'[ -]\n(?=\S)',' ',text)

    # Convert inline titles to their own lines (starting next line with tab)
    text = re.sub(r'(?<=(^|\n)[^a-z]+:\s)','\n    ',text)

    return text
def tag_html_paragraph(s):
    """
    Classify a paragraph string extracted from FR HTML.

    Returns one of 'P' (marked paragraph), 'AMDPAR' (amendment paragraph,
    leading "0" indicator), 'SECTION' ("§ <num>" heading), 'STARS'
    (asterisk placeholder line), 'EXTRACT' (multi-line block), 'HD'
    (single-line heading), or None for whitespace-only input.
    """
    if s.startswith('<P>'):
        return 'P'
    if re.match(r'\d\n\S', s):
        return 'AMDPAR'
    if re.match(r'§\s+\d', s):
        return 'SECTION'
    if re.match(r'\*+\s*', s):
        return 'STARS'
    if re.match(r'\S', s):
        return 'EXTRACT' if '\n' in s.strip() else 'HD'
    return None
def parse_footnotes(s):
    r"""
    Extract footnote markers of the form ``\12\`` from a paragraph.

    If the paragraph *starts* with a marker, it is the footnote body
    itself: its number is returned as ``footnote`` and no inline list is
    collected.  Otherwise all inline markers are gathered into
    ``footnotes``.  Markers are stripped from the returned text.

    NOTE: the variable-width lookbehind in the pattern requires the
    third-party ``regex`` module (imported as ``re`` in this file).

    Returns (footnote, footnotes, cleaned_text).
    """
    marker = r'(?<!\d\s*)\\(?P<i>\d+)\\'

    leading = re.match(marker, s)
    if leading:
        footnote = leading.group('i')
        inline = []
    else:
        footnote = np.nan
        inline = [m.group('i') for m in re.finditer(marker, s)]

    return footnote, inline, re.sub(marker, '', s)
def line_code(line):
    """
    Classify one text line for table detection.

    Codes: ' ' blank, '-' all dashes (horizontal rule), 'c' continuation
    (indented), '*' mostly asterisks, 'a' mostly alphabetic, '.' other
    (digit/punctuation-heavy, typical of table body rows).
    """
    if not line.strip():
        return ' '
    if line.count('-') == len(line):
        return '-'
    if line.startswith('  '):
        return 'c'
    n_stars = line.count('*')
    half = 0.5 * len(line)
    if n_stars >= half:
        return '*'
    n_alpha = sum(1 for ch in line if ch.isalpha())
    return 'a' if n_alpha + n_stars > half else '.'
# ''.join(map(line_code,text.split('\n')))
# print('\n'.join(map(lambda line: line_code(line)+' '+line,text.split('\n'))))
def split_tables(text):
    '''
    Identify tables using pseudo-likelyhood approach.
    Want to create table partitions (start line, end line) such that:
        -Tables are at least 3 lines long
        -Tables start and end with a horizontal line
        -Tables sort common table chars {'-',' ','.'} from other chars
        as completely as possible

    Yields (text, table) pairs: each detected table paired with the text
    preceding it, plus a final (remaining_text, '') chunk.  If no table is
    found, yields (text, '') once.

    NOTE(review): each line is reduced to a one-char code via line_code,
    so the table regex runs over a string with one char per line and
    match offsets are line indices.  When more than one table matches,
    the pre_table slice and the trailing-remainder yield (both computed
    per match inside the loop) overlap earlier tables — looks like a
    latent bug for multi-table documents; confirm against callers.
    '''
    lines = text.split('\n')
    line_codes = ''.join(map(line_code,lines))

    # A table: any line, then 2+ dashed rules each followed by
    # continuation/blank/table-body lines.
    table_matches = list(re.finditer(r'.(-[ c\.]*){2,}',line_codes))
    if table_matches:
        for table_match in table_matches:
            pre_table = '\n'.join(lines[:table_match.start()])
            table = '\n'.join(lines[table_match.start():table_match.end()])

            yield pre_table,table

            if table_match.end() < len(lines):
                yield '\n'.join(lines[table_match.end():]),''

    else:
        yield text,''
def extract_html_paragraphs(text,extract_numbering=True):
    """
    Generate paragraph-info dicts from cleaned FR HTML text.

    Splits out tables first (yielded as 'GPOTABLE' rows), marks 4-space
    indented lines as paragraph starts, then chunks the text into
    paragraphs and classifies each with tag_html_paragraph.  Heading
    state ('header', 'legal') persists across paragraphs so body
    paragraphs inherit the most recent heading context.

    Each yielded dict has keys: tag, text, footnote, footnotes, header,
    legal, and (if extract_numbering) numbering.
    """
    header = None
    legal_header = False
    legal = False

    # Split tables
    for text,table in split_tables(text):

        # Mark paragraphs indicated by leading tabs (4 spaces)
        text = re.sub(r'(?<=\n) {4}(?!\s)',' <P>',text)

        # Chunk into paragraphs: runs of text ending at a blank line,
        # end-of-string, or the next <P> marker.
        for s in (m.group(0) for m in re.finditer(r'.+?[^\n]($|\n{2}\s*|(?=<P>))',text,flags=re.DOTALL)):

            # Skip dashed lines
            if not re.match(r'^-{5,}\s*$',s):

                tag = tag_html_paragraph(s)
                footnote = np.nan
                footnotes = []

                # Drop dashed lines
                s = re.sub(r'\n-{5,}\n','\n',s)

                if s:
                    if tag == 'P':
                        # Trim tab indentation
                        s = s[3:]
                        # Parse and remove footnote numbers
                        footnote,footnotes,s = parse_footnotes(s)
                    elif tag == 'AMDPAR':
                        # Trim amendment indicator ("0")
                        s = s[2:]
                    elif tag == 'HD':
                        header = s.strip()
                        # Headings naming a part/subpart/section mark the
                        # start of legal (regulatory) text.
                        legal_header = bool(re.match(r'(Part\s\d+|Subpart|§|Appendix)',header,re.IGNORECASE))
                    elif tag == 'SECTION':
                        header = s.strip()
                        legal_header = True

                    legal = legal_header or tag in {'AMDPAR','SECTION','STARS'}

                    paragraph_info = {'tag':tag,'text':s,'footnote':footnote,'footnotes':footnotes,'header':header,'legal':legal}

                    if extract_numbering:
                        paragraph_info['numbering'],paragraph_info['text'] = split_numbering(s)

                    yield paragraph_info

        if table:
            # Table text is kept verbatim under its preceding heading.
            yield {'tag':'GPOTABLE','text':table,'header':header,'legal':legal}
def parse_html(s, extract_numbering=True):
    """Clean raw FR HTML text and return a DataFrame of parsed paragraphs."""
    cleaned = clean_html_text(s)
    rows = list(extract_html_paragraphs(cleaned, extract_numbering=extract_numbering))
    return pd.DataFrame(rows)
def parse_html_file(html_filename, extract_numbering=True):
    """Read an FR HTML file (UTF-8) and parse it into a paragraph DataFrame."""
    with open(html_filename, encoding='utf8') as handle:
        raw = handle.read()
    return parse_html(raw, extract_numbering=extract_numbering)
def extract_frdoc_number(s):
    '''
    This function extracts the document from an FRDOC string. The standard
    format is something like:
        "[FR Doc. 12-1149 Filed 1-18-12; 8:45 am]"

    Where "12-1149" is the document number. However, contents are clearly
    manually entered, because there can be a variety of deviations from this
    format. For example:
        "[FR Doc.12-1149; Filed 1-18-12; 8:45 am]"
        "[FR Doc. 12-1149 1-18-12; 8:45 am]"
        "[JR Doc. 12-1149 Filed 1-18-12; 8:45 am]"
        "[FR Doc. 12- 1149 Filed 1-18-12; 8:45 am]"
        "[FR Doc. 1149 Filed 1-18-12; 8:45 am]"

    Many document numbers also start with "E" or "Z" instead of the first digits
    of the year.

    Reprints and corrections are also sometimes labeled by prepending to the
    document number with something like "C1-", "R1-", or "X"

    This function assumes the document number is located either immediately
    following "FR Doc.", or immediately preceeding "Filed". Document numbers are
    allowed to start with E or Z (as in "E7-1592").

    Finally, the document number is standardized by converting to ascii,
    removing all whitespace, and making letters uppercase.

    If the function cannot parse the document number it prints a warning and
    returns None.

    NOTE(review): `unidecode` is called below but is not imported in this
    file's visible import block — presumably imported elsewhere; confirm.
    '''
    # Define the general fr doc pattern with up to three parts
    fr_pattern = r'''((?P<part1>[CRX]\d*)[\s-]*)?   # Optional prepended part
                    (?P<part2>[EZ]?\d+)             # Required initial or middle number
                    (\s?-+\s?(?P<part3>\d*))?       # Optional third number
                    '''

    s = unidecode(s)

    # Case 1: Format is "[FR Doc. #####..."
    m = re.search(fr'FR\sDoc\.?\s*{fr_pattern}',s,flags=re.IGNORECASE | re.VERBOSE)

    if not m or len(m.group(0)) < 3:
        # Case 2: Format is "...###### Filed..."
        m = re.search(fr'[.\s]{fr_pattern};?\s*Fil',s,flags=re.IGNORECASE | re.VERBOSE)

    if m:
        # Rebuild the document number from parts
        d = m.group('part2')
        if m.group('part1'):
            d = m.group('part1') + '-' + d
        if m.group('part3'):
            d = d + '-' + m.group('part3')

        d = unidecode(d)
        d = d.upper()

        return d
    else:
        print(f'Warning: Could not parse document number in "{s}"')
        return None
def standardize_frdoc_number(d):
    """
    The document numbers used on federalregister.gov are also parsed from
    raw data, and can include errors such as small pieces of text appended
    to the end of the string.

    Document numbers also sometimes have multiple representations. For
    example:
        2005-0034
        05-0034
        5-0034
        E5-0034
        2005-34
    would presumably all refer to the same document.

    This function standardizes document numbers by:
        1) Removing any trailing non-numeric characters
        2) Dropping the leading digits of a 4-digit year component
        3) Dropping leading zeros from the last number

    Any failure (e.g. non-string input) returns the input unchanged.
    """
    try:
        # Remove trailing non-numeric chars
        d = re.sub(r'[^0-9]+$', '', d)

        # Remove "E" prefix characters
        d = d.replace('E', '')

        # Split into parts
        parts = d.rsplit('-')

        # Clean year. Could be in any part except the last.
        # BUG FIX: the original read `for i in range(len(parts)-1) in parts[:-1]:`
        # which iterates over a boolean, always raising TypeError, so the
        # broad `except` below returned every input unchanged and the
        # documented normalization never happened.
        for i in range(len(parts) - 1):
            if re.match(r'(19|20?)\d\d', parts[i]):
                # NOTE: '200?' strips '200' from e.g. '2005' leaving '5';
                # behavior kept as originally written — confirm intent.
                parts[i] = re.sub(r'(19|200?)', '', parts[i])
                break

        # Drop leading zeros from the final (sequence) number.
        try:
            parts[-1] = str(int(parts[-1]))
        except Exception:
            pass

        return '-'.join(parts)
    except Exception:
        # Unparseable input (e.g. None): return as-is.
        return d
class FrdocResolver():
    """
    Resolve a document's canonical frdoc_number from scraped identity info.

    Resolution strategy, in order: exact match of the number extracted
    from the FRDOC string; match on the standardized number; match on the
    FR citation (publication date + volume + page range); finally a
    whitespace-insensitive substring match of candidates against the raw
    FRDOC string.  Returns None (after printing diagnostics) when no
    unique candidate is found.

    NOTE(review): `load_info_df` is not defined or imported in this file's
    visible code — presumably provided by the package; confirm.
    """
    def __init__(self):
        # Reference table of all known documents and their identifiers.
        self.info_df = load_info_df(fields=['frdoc_number','publication_date','volume','start_page','end_page'])
        self.info_df['standardized_frdoc'] = self.info_df['frdoc_number'].apply(standardize_frdoc_number)
        self.all_frdocs = set(d for d in self.info_df['frdoc_number'].dropna() if d.strip())

    def __call__(self,doc_info):
        # doc_info: dict-like with keys 'frdoc_string', 'publication_date',
        # 'volume', 'start_page', 'end_page'.
        if doc_info['frdoc_string']:
            frdoc_number = extract_frdoc_number(doc_info['frdoc_string'])

            # Search based on extracted frdoc number
            if frdoc_number in self.all_frdocs:
                return frdoc_number

            # Search based on the standardized frdoc number
            standardized_frdoc = standardize_frdoc_number(frdoc_number)
            candidates_df = self.info_df[self.info_df['standardized_frdoc'] == standardized_frdoc]
            if len(candidates_df) == 1:
                return candidates_df['frdoc_number'].values[0]

        # Search based on the publication date, volume and pages (FR citation)
        candidates_df = self.info_df[(self.info_df['publication_date'] == doc_info['publication_date']) \
                                    & (self.info_df['volume'] == doc_info['volume']) \
                                    & (self.info_df['start_page'] == doc_info['start_page']) \
                                    & (self.info_df['end_page'] == doc_info['end_page'])]
        if len(candidates_df) == 1:
            return candidates_df['frdoc_number'].values[0]

        if doc_info['frdoc_string']:
            # Try to refine search by seeing if frdoc is within frdoc_string (need to strip whitespace)
            # NOTE(review): assigning columns on this filtered frame may
            # trigger pandas SettingWithCopyWarning; consider .copy().
            frdoc_string_nospace = re.sub(r'\s','',doc_info['frdoc_string'])
            candidates_df['frdoc_number_nospace'] = [re.sub(r'\s','',d) for d in candidates_df['frdoc_number']]
            candidates_df['frdoc_match'] = [(d in frdoc_string_nospace) for d in candidates_df['frdoc_number_nospace']]
            candidates_df = candidates_df[candidates_df['frdoc_match']]
            if len(candidates_df) == 1:
                return candidates_df['frdoc_number'].values[0]

        # No unique match: print diagnostics and give up.
        print('Warning: Could not resolve frdoc for document with the following identify info:')
        print(doc_info)
        if len(candidates_df):
            print('Candidates:')
            print(candidates_df)
        else:
            print('Candidates: None')

        return None
return None
| 34.751613 | 182 | 0.578808 | 2,463 | 0.114271 | 3,123 | 0.144892 | 0 | 0 | 0 | 0 | 8,306 | 0.385358 |
107ee9ab6b9b9047573689de88a1c06760a9cb86 | 3,960 | py | Python | src/stopwords/create_stopword_list.py | prrao87/topic-modelling | b7bceef8711edb097c3afec95c30474ae0789e1f | [
"MIT"
] | 3 | 2020-11-22T14:55:58.000Z | 2021-03-13T17:59:26.000Z | src/stopwords/create_stopword_list.py | prrao87/topic-modelling | b7bceef8711edb097c3afec95c30474ae0789e1f | [
"MIT"
] | null | null | null | src/stopwords/create_stopword_list.py | prrao87/topic-modelling | b7bceef8711edb097c3afec95c30474ae0789e1f | [
"MIT"
] | null | null | null | """
Script to generate a custom list of stopwords that extend upon existing lists.
"""
import json
import spacy
from urllib.request import urlopen
from itertools import chain
def combine(*lists):
    """Concatenate any number of iterables into one flat list."""
    return list(chain.from_iterable(lists))
def get_spacy_lemmas():
    """Download spaCy's English lemma lookup table and return it as a dict
    mapping inflected form -> lemma."""
    spacy_lemma_url = "https://raw.githubusercontent.com/explosion/spacy-lookups-data/master/spacy_lookups_data/data/en_lemma_lookup.json"
    with urlopen(spacy_lemma_url) as response:
        return json.loads(response.read())
def lookup_verbs(roots, spacy_lemmas):
    """
    Return a flat list of every inflected form of each root verb.

    For each root, collects all lemma-lookup keys whose lemma equals the
    root (in dictionary order), followed by the root itself.
    """
    expanded = []
    for root in roots:
        expanded.extend(form for form, lemma in spacy_lemmas.items() if lemma == root)
        expanded.append(root)
    return expanded
if __name__ == "__main__":
    # Build a custom stopword list: spaCy's defaults plus hand-curated
    # domain lists, written one word per line to custom_stopwords.txt.

    # We first get the default spaCy stopword list
    nlp = spacy.blank('en')
    spacy_stopwords = nlp.Defaults.stop_words
    spacy_lemmas = get_spacy_lemmas()

    # Create custom lists depending on the class of words seen in the data
    person_titles = ['mr', 'mrs', 'ms', 'dr', 'mr.', 'mrs.', 'ms.', 'dr.', 'e']
    broken_words = ['don', 'isn', 'mustn', 'shouldn', 'couldn', 'doesn', 'didn']
    numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '000']
    url_terms = ['http', 'https', 'ref', 'href', 'com', 'src']
    days_of_the_week = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
                        'saturday', 'sunday']
    months_of_the_year = ['january', 'february', 'march', 'april', 'may', 'june', 'july',
                          'august', 'september', 'october', 'november', 'december']
    time_periods = ['minute', 'minutes', 'hour', 'hours', 'day', 'days', 'week', 'weeks',
                    'month', 'months', 'year', 'years']
    time_related = ['yesterday', 'today', 'tomorrow', 'day', 'night', 'morning',
                    'afternoon', 'evening', 'edt', 'est', 'time', 'times']
    common_nouns = ['new', 'york', 'nytimes', 'press', 'news', 'report', 'page', 'user', 'file', 'video', 'pic',
                    'photo', 'online', 'social', 'media', 'group', 'inbox', 'item',
                    'advertisement', 'world', 'store', 'story', 'life', 'family',
                    'people', 'man', 'woman', 'friend', 'friends']
    social_media = ['twitter', 'facebook', 'google', 'gmail', 'video', 'photo', 'image',
                    'user', 'social', 'media', 'page', 'online', 'stream', 'post',
                    'app']
    light_verb_roots = [
        'ask', 'come', 'go', 'know', 'look', 'see', 'talk', 'try', 'use', 'want', 'call', 'click',
        'continue', 'comment', 'do', 'feel', 'find', 'give', 'get', 'have', 'include', 'like', 'live',
        'love', 'make', 'post', 'read', 'say', 'speak', 'send', 'share', 'show', 'sign', 'tag',
        'take', 'tell', 'think', 'update', 'work', 'write'
    ]

    # Convert light verb roots to all their forms using the lemma lookup
    light_verbs_full = lookup_verbs(light_verb_roots, spacy_lemmas)

    # Combine the custom lists into a single set of stopwords
    add_stopwords = set(
        combine(
            person_titles, broken_words, numbers, url_terms, days_of_the_week, months_of_the_year,
            time_periods, time_related, common_nouns, social_media, light_verbs_full
        )
    )

    # Combine all stopwords into one sorted list and export to text file
    combined_stopwords = spacy_stopwords.union(add_stopwords)
    stopword_list = sorted(list(combined_stopwords))

    # Write out stopwords to file, one word per line
    with open('custom_stopwords.txt', 'w') as f:
        for word in stopword_list:
            f.write(word + '\n')

    print(f"Exported {len(stopword_list)} words to stopword list.")
| 44.494382 | 138 | 0.600253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,827 | 0.461364 |
108240c1077fc8cf237931a298bf13f9490847ae | 10,081 | py | Python | main.py | carl-andersson/impest | d896923597b98180d3656efe05a4935b69d3e36e | [
"MIT"
] | null | null | null | main.py | carl-andersson/impest | d896923597b98180d3656efe05a4935b69d3e36e | [
"MIT"
] | null | null | null | main.py | carl-andersson/impest | d896923597b98180d3656efe05a4935b69d3e36e | [
"MIT"
] | 2 | 2018-09-13T19:40:49.000Z | 2021-04-04T07:57:32.000Z | '''
@author: Carl Andersson
'''
import sys
import numpy as np
import tensorflow as tf
import processdata
import time
import scipy.io
# Preformfs a custom matrix multiplication for all matrices in a batch
def batchMatMul(x: tf.Tensor, y: tf.Tensor, scope="batchMatMul"):
    """Apply tensordot(x_i, y_i, 1) to each pair of batch entries via tf.map_fn."""
    def _pair_dot(pair):
        a, b = pair
        return tf.tensordot(a, b, 1)
    return tf.map_fn(_pair_dot, [x, y], dtype=tf.float32)
# Preformfs a custom matrix inversion for all matrices in a batch
def batchMatInv(x: tf.Tensor, scope="batchMatInv"):
    """Invert every matrix in a batch.

    Maps ``tf.matrix_inverse`` over the leading (batch) axis of ``x``.

    :param x: float32 tensor of square matrices stacked along axis 0.
    :param scope: name hint; currently unused. Default corrected from the
        copy-pasted ``"batchMatMul"`` to this function's own name.
    :return: float32 tensor of the same shape with each matrix inverted.
    """
    # The lambda wrapper was redundant -- pass the op directly to map_fn.
    return tf.map_fn(tf.matrix_inverse, x, dtype=tf.float32)
if __name__ == '__main__':
    # Builds a TF1 graph that estimates FIR impulse responses (length n) from
    # input/output sequences (length N) three ways -- plain least squares, an
    # "optimal" regularization baseline, and a learned kernel prior -- then
    # trains the learned prior and exports results as .mat files.
    batchSize = 2500
    N = seq_len = 125
    n = resp_len = 50
    U = tf.placeholder(tf.float32, [None, N], 'U')
    Y = tf.placeholder(tf.float32, [None, N], 'Y')
    TH = tf.placeholder(tf.float32, [None, n], 'TH')
    layers = [600, 300, 200]
    nMats = 500
    trainingdata_file = "data_train.mat"
    validationdata_file = "data_test.mat"
    # Least squares calculations
    with tf.variable_scope('LSModel', reuse=None) as scope:
        # Phi is represented in matrix notation instead of a sum as in the proposal
        PH = []
        for t in range(0, N - n):
            ph = U[:, t:t + n] # 0 indexed , reversed at a later stage
            PH.append(ph)
        # reverse PH since it is defined that way
        PH = tf.reverse(tf.stack(PH, -1), [1]) # (? x n x N-n )
        R = batchMatMul(PH, tf.transpose(PH, [0, 2, 1])) # (? x n x n)
        Fn = tf.expand_dims(batchMatMul(PH, Y[:, n:N]), -1)
        eTHLS = tf.squeeze(tf.matrix_solve(R, Fn), -1)
    # Oracle baseline: regularization built from the true impulse response TH.
    with tf.name_scope('Optimal_Regularization'):
        Pn = batchMatMul(tf.expand_dims(TH, 2), tf.expand_dims(TH, 1));
        _, varEst = tf.nn.moments(Y, [1], keep_dims=True);
        SNR = tf.placeholder(tf.float32, [None, 1]);
        varEst = 1 / (1 + SNR) * varEst;
        RHS = batchMatMul(Pn, Fn);
        LHS = batchMatMul(Pn, R) + tf.expand_dims(varEst, -1) * tf.expand_dims(
            tf.constant(np.eye(n, n), dtype=tf.float32), 0)
        eTHOpt = tf.squeeze(tf.matrix_solve(LHS, RHS));
    # Learned prior: a dense net predicts mixture weights over nMats rank-1 kernels.
    with tf.variable_scope('KernelModel', reuse=None) as scope:
        currentBatchSize = tf.shape(U)[0];
        # Placeholders for mean and std of the impulse response and Y for the training data.
        THmean = tf.placeholder(tf.float32, [1, n], "THmean")
        THstd = tf.placeholder(tf.float32, [1, n], "THstd")
        Ymstd = tf.placeholder(tf.float32, [1, N], "Ystd")
        # Input to the network
        out = tf.concat([U, Y, (eTHLS - THmean) / THstd], -1)
        # Layers
        for i in range(1, len(layers)):
            out = tf.layers.dense(out, layers[i], tf.nn.relu, True)
        # Initialization of S-vectors
        S = tf.get_variable("S", [nMats, n, 1], tf.float32,
                            tf.random_normal_initializer(0, 1)) * \
            tf.expand_dims(THstd,-1) + tf.expand_dims( THmean, -1)
        # Drop out probability
        keep_prob = tf.placeholder(tf.float32)
        sigma = tf.nn.softmax(tf.nn.dropout(tf.layers.dense(out, nMats, None, True, name="FinalLayerSigma"), keep_prob))
        SQuads = batchMatMul(S, tf.transpose(S, [0, 2, 1]))
        SNRg = 5.5
        P = tf.tensordot(sigma, SQuads, ([1], [0])) * (SNRg + 1)
    with tf.variable_scope('PriorModel', reuse=None) as scope:
        eTHPrior = tf.squeeze(
            tf.matrix_solve(batchMatMul(P, R) + tf.expand_dims(tf.constant(np.eye(n), dtype=tf.float32), 0),
                            batchMatMul(P, Fn)), -1);
    with tf.name_scope('cost'):
        g0mean = tf.reduce_mean(TH, -1, keepdims=True);
        TH_MS = tf.reduce_mean(tf.square(TH - g0mean), -1);
        eTH_MSE = tf.reduce_mean(tf.square(TH - eTHPrior), -1)
        eTHopt_MSE = tf.reduce_mean(tf.square(TH - eTHOpt), -1)
        eTHls_MSE = tf.reduce_mean(tf.square(TH - eTHLS), -1)
        # improvefactor is equivalent to the measure S
        improvefactor = tf.reduce_mean(eTH_MSE / eTHls_MSE);
        improvefactoropt = tf.reduce_mean(eTHopt_MSE / eTHls_MSE);
        cost = tf.reduce_mean(eTH_MSE)
        lscost = tf.reduce_mean(eTHls_MSE)
        optcost = tf.reduce_mean(eTHopt_MSE)
        WRMSE = eTH_MSE / TH_MS;
        # W is equivalent to the 'fit' measure used in matlab
        W = tf.reduce_mean(100 * (1 - tf.sqrt(WRMSE)))
        WRMSEopt = eTHopt_MSE / TH_MS;
        WRMSEls = eTHls_MSE / TH_MS;
        Wopt = tf.reduce_mean(100 * (1 - tf.sqrt(WRMSEopt)))
        Wls = tf.reduce_mean(100 * (1 - tf.sqrt(WRMSEls)))
    with tf.name_scope('optimizer'):
        l_rate = tf.placeholder(tf.float32, name="l")
        optimizer = tf.train.AdamOptimizer(learning_rate=l_rate)
        opt = optimizer.minimize(improvefactor)
    with tf.name_scope('summary'):
        tf.summary.scalar('cost_LS', lscost)
        tf.summary.scalar('cost_Prior', cost)
        tf.summary.scalar('ImproveFactor', improvefactor)
        tf.summary.scalar('W', W);
    # NOTE: scope name 'Validaiton' (sic) is kept -- renaming it would change
    # op/summary paths and break existing TensorBoard logs and checkpoints.
    with tf.name_scope('Validaiton'):
        valSummaries = tf.get_variable('valSummaries', [3], tf.float32,
                                       initializer=tf.constant_initializer(0, tf.float32))
        valSummariesOnetime = tf.get_variable('valSummariesOntime', [4], tf.float32,
                                              initializer=tf.constant_initializer(0, tf.float32))
        valSummaries_incOp = tf.assign_add(valSummaries,
                                           tf.stack(tf.cast(currentBatchSize, tf.float32) * [W, improvefactor, 1]))
        valSummaries_clearOp = tf.assign(valSummaries, [0, 0, 0]);
        valSummariesOnetime_incOp = tf.assign_add(valSummariesOnetime, tf.stack(
            tf.cast(currentBatchSize, tf.float32) * [Wopt, Wls, improvefactoropt, 1]))
        tf.summary.scalar("W", valSummaries[0] / valSummaries[2], ["Validation"])
        tf.summary.scalar("ImproveFactor", valSummaries[1] / valSummaries[2], ["Validation"])
        tf.summary.scalar('W_Opt', valSummariesOnetime[0] / valSummariesOnetime[3], ["Validation"])
        tf.summary.scalar('W_LS', valSummariesOnetime[1] / valSummariesOnetime[3], ["Validation"])
        tf.summary.scalar('ImproveFactor_Opt', valSummariesOnetime[2] / valSummariesOnetime[3], ["Validation"])
    summary = tf.summary.merge_all()
    valid_summary = tf.summary.merge_all("Validation")
    # Data processing
    data = processdata.getData(trainingdata_file)
    Us = np.array(data["u"]);
    Ys = np.array(data["y"]);
    Gs = np.array(data["g"]);
    SNRs = np.array(data["SNR"])
    M = Us.shape[0];
    data_val = processdata.getData(validationdata_file)
    Us_val = np.array(data_val["u"]);
    Ys_val = np.array(data_val["y"]);
    Gs_val = np.array(data_val["g"]);
    # FIX: validation SNRs were read from the *training* dict ("data") -- a
    # copy-paste bug; they must come from data_val, matching Us_val/Ys_val/Gs_val.
    SNRs_val = np.array(data_val["SNR"])
    M_val = Us_val.shape[0];
    # Normalization statistics computed on the training set only.
    Gmean = np.mean(Gs, 0, keepdims=True);
    Gstd = np.std(Gs, 0, keepdims=True);
    Ystd = np.std(Ys, 0, keepdims=True);
    perm = [];
    saver = tf.train.Saver();
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        init = tf.global_variables_initializer();
        sess.run(init)
        it_i = 0
        logpath = 'log/train ' + time.strftime("%c")
        train_writer = tf.summary.FileWriter(logpath, sess.graph)
        # Training loop: sample without replacement until the epoch permutation
        # is exhausted, then reshuffle.
        while it_i <= 5000:
            if not len(perm):
                perm = np.random.permutation(M);
            idx = perm[:batchSize];
            perm = perm[batchSize:];
            l = 0.005
            [sum_t, cp, c, _] = sess.run([summary, cost, lscost, opt],
                                         feed_dict={TH: Gs[idx], U: Us[idx], Y: Ys[idx], l_rate: l, THmean: Gmean,
                                                    THstd: Gstd, Ymstd: Ystd, keep_prob: 0.7})
            print(it_i, cp, c, cp / c)
            # Every 10 iterations: log training summary and run a full
            # validation pass (dropout disabled via keep_prob=1.0).
            if it_i % 10 == 0:
                train_writer.add_summary(sum_t, it_i);
                val_perm = np.random.permutation(M_val);
                sess.run([valSummaries_clearOp]);
                while len(val_perm):
                    idx = val_perm[:batchSize];
                    val_perm = val_perm[batchSize:];
                    fdict = {TH: Gs_val[idx], U: Us_val[idx], Y: Ys_val[idx], THmean: Gmean, THstd: Gstd, Ymstd: Ystd,
                             keep_prob: 1.0, SNR: SNRs_val[idx]};
                    # The LS/opt baselines are fixed, so accumulate them once.
                    if (it_i == 0):
                        sess.run([valSummariesOnetime_incOp], feed_dict=fdict)
                    sess.run([valSummaries_incOp], feed_dict=fdict)
                print(sess.run(valSummaries))
                [sum_val] = sess.run([valid_summary])
                train_writer.add_summary(sum_val, it_i);
            sys.stdout.flush()
            it_i += 1
        # Final export: run the trained model over the whole validation set in
        # batches and concatenate the per-batch outputs.
        for i in range(int((M_val + batchSize - 1) / batchSize)):
            sigma_part, eTh_part = sess.run([sigma, eTHPrior], feed_dict={TH: Gs_val[i * batchSize:(i + 1) * batchSize],
                                                                          U: Us_val[i * batchSize:(i + 1) * batchSize],
                                                                          Y: Ys_val[i * batchSize:(i + 1) * batchSize],
                                                                          THmean: Gmean, THstd: Gstd, Ymstd: Ystd,
                                                                          keep_prob: 1.0});
            if i == 0:
                sigmaRes = sigma_part
                eTh = eTh_part;
            else:
                sigmaRes = np.concatenate((sigmaRes, sigma_part), 0)
                eTh = np.concatenate((eTh, eTh_part), 0)
        # remember to rescale the estimate with the variance and mean of input sequence and output sequence
        scipy.io.savemat(logpath + "/sigma.mat", dict(sigma=sigmaRes));
        scipy.io.savemat(logpath + "/D.mat", dict(D=sess.run(S, feed_dict={THmean: Gmean, THstd: Gstd, Ymstd: Ystd})));
        scipy.io.savemat(logpath + "/eTH.mat", dict(dlTh=eTh));
        save_path = saver.save(sess, logpath + "/model.ckpt")
| 37.898496 | 120 | 0.564924 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,272 | 0.126178 |
10846775ed810108f0674e3280e3bdb519f11227 | 586 | py | Python | setup.py | Lzejie/DynamicPool | 9a0d42738f55e97962e2a93a0db0b30a38fa1126 | [
"MIT"
] | 1 | 2019-01-16T03:00:18.000Z | 2019-01-16T03:00:18.000Z | setup.py | Lzejie/DynamicPool | 9a0d42738f55e97962e2a93a0db0b30a38fa1126 | [
"MIT"
] | null | null | null | setup.py | Lzejie/DynamicPool | 9a0d42738f55e97962e2a93a0db0b30a38fa1126 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 18/12/10 上午10:27
# @Author : L_zejie
# @Site :
# @File : setup.py.py
# @Software: PyCharm Community Edition
from setuptools import setup, find_packages
setup(
    name="DynamicPool",
    packages=find_packages(),  # auto-discover all packages in the source tree
    version='0.14',
    # Description is Chinese, roughly "dynamic task-blocking thread/process
    # pool"; kept verbatim because it is the string published to PyPI.
    description="动态任务阻塞线程/进程池",
    author="L_zejie",
    author_email='lzj_xuexi@163.com',
    url="https://github.com/Lzejie/DynamicPool",
    license="MIT Licence",
    keywords=["Thread Pool", "Dynamic Pool", "Dynamic Thread Pool", "Dynamic Process Pool"],
    classifiers=[],  # no trove classifiers declared
    install_requires=[]  # no runtime dependencies
)
| 25.478261 | 92 | 0.648464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 355 | 0.580065 |
10847fe0c9aff3581b7ae6656083f85f2e69bec1 | 28,738 | py | Python | testsuite/test_singly_linked_list.py | trycatchhorn/PyAlgDat | 85f8c7550630cf31b5e4472fd593956c9d96c078 | [
"MIT"
] | null | null | null | testsuite/test_singly_linked_list.py | trycatchhorn/PyAlgDat | 85f8c7550630cf31b5e4472fd593956c9d96c078 | [
"MIT"
] | null | null | null | testsuite/test_singly_linked_list.py | trycatchhorn/PyAlgDat | 85f8c7550630cf31b5e4472fd593956c9d96c078 | [
"MIT"
] | null | null | null | #!/usr/bin/env py.test
"""
Test SinglyLinkedList class.
"""
import copy
import unittest
from py_alg_dat import singly_linked_list
class TestSinglyLinkedList(unittest.TestCase):
"""
Test SinglyLinkedList class.
"""
    def setUp(self):
        # Fresh fixture rebuilt before every test: list1 = [b] -> [c] -> [d].
        self.list1 = singly_linked_list.SinglyLinkedList()
        self.list1.append('b')
        self.list1.append('c')
        self.list1.append('d')
### Begin test of local class SinglyLinkedListElement ###
def test_singly_linked_list_element_equal(self):
"""
Test operator (list element) "equal".
"""
a_list = singly_linked_list.SinglyLinkedList()
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
self.assertEqual(elem1, elem2)
def test_singly_linked_list_element_not_equal(self):
"""
Test operator (list element) "equal" - inverted.
"""
a_list = singly_linked_list.SinglyLinkedList()
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', None)
self.assertNotEqual(elem1, elem2)
def test_singly_linked_list_element_copy_equal(self):
"""
Test operator (list element) "copy".
"""
a_list = singly_linked_list.SinglyLinkedList()
elem = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
e_copy = copy.copy(elem)
self.assertEqual(elem, e_copy)
def test_singly_linked_list_element_copy_not_equal(self):
"""
Test operator (list element) "copy" - inverted.
"""
a_list = singly_linked_list.SinglyLinkedList()
elem = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
e_copy = copy.copy(elem)
elem.data = 'aa'
self.assertNotEqual(elem, e_copy)
def test_singly_linked_list_element_get_data(self):
"""
Test method (list element) "get_data".
"""
a_list = singly_linked_list.SinglyLinkedList()
elem = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
self.assertEqual('a', elem.get_data())
def test_singly_linked_list_element_get_next(self):
"""
Test method (list element) "get_next".
"""
a_list = singly_linked_list.SinglyLinkedList()
elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', None)
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem2)
self.assertEqual(elem2, elem1.get_next())
def test_singly_linked_list_element_insert_empty(self):
"""
Test method (list element) "insert".
"""
a_list = singly_linked_list.SinglyLinkedList()
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
elem1.insert('b')
test1 = a_list.get_head() == a_list[0]
test2 = a_list.get_tail() == a_list[0]
test3 = a_list[0].get_next() is None
test4 = len(a_list) == 1
test = test1 and test2 and test3 and test4
self.assertTrue(test)
def test_singly_linked_list_element_insert_head(self):
"""
Test method (list element) "insert".
"""
a_list = singly_linked_list.SinglyLinkedList()
elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'c', None)
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elem2)
elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
a_list.append(elem0.get_data())
a_list.append(elem1.get_data())
a_list.append(elem2.get_data())
elem0.insert('aa')
test1 = a_list.get_head() == a_list[0]
test2 = a_list.get_tail() == a_list[len(a_list) - 1]
test3 = a_list[0].get_next() == elem1
test4 = len(a_list) == 3
test = test1 and test2 and test3 and test4
self.assertTrue(test)
def test_singly_linked_list_element_insert_middle(self):
"""
Test method (list element) "insert".
"""
a_list = singly_linked_list.SinglyLinkedList()
elem5 = singly_linked_list.SinglyLinkedListElement(a_list, 'e', None)
elem4 = singly_linked_list.SinglyLinkedListElement(a_list, 'd', elem5)
elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'cc', elem4)
elem3 = singly_linked_list.SinglyLinkedListElement(a_list, 'c', elem4)
elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elem3)
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem2)
a_list.append(elem1.get_data())
a_list.append(elem2.get_data())
a_list.append(elem3.get_data())
a_list.append(elem4.get_data())
a_list.append(elem5.get_data())
elem3.insert('cc')
res = []
res.append(a_list[0])
res.append(a_list[1])
res.append(a_list[2])
res.append(a_list[3])
res.append(a_list[4])
ref = []
ref.append(elem1)
ref.append(elem2)
ref.append(elemx)
ref.append(elem4)
ref.append(elem5)
t_1 = a_list.get_head() == a_list[0]
t_2 = a_list.get_tail() == a_list[4]
t_3 = a_list[0].get_next() == a_list[1]
t_4 = a_list[1].get_next() == a_list[2]
t_5 = a_list[2].get_next() == a_list[3]
t_6 = a_list[3].get_next() == a_list[4]
t_7 = a_list[4].get_next() is None
t_8 = len(a_list) == 5
t_9 = ref == res
test = t_1 and t_2 and t_3 and t_4 and t_5 and t_6 and t_7 and t_8 and t_9
self.assertTrue(test)
def test_singly_linked_list_element_insert_tail(self):
"""
Test method (list element) "insert".
"""
a_list = singly_linked_list.SinglyLinkedList()
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
a_list.append(elem1.get_data())
elem1.insert('aa')
test1 = a_list.get_head() == a_list[0]
test2 = a_list.get_tail() == a_list[0]
test3 = a_list[0].get_next() is None
test4 = len(a_list) == 1
test = test1 and test2 and test3 and test4
self.assertTrue(test)
def test_singly_linked_list_element_insert_before_first_one(self):
"""
Testing inserting a linked list element into a linked
list. In this test the linked list contains a single
element prior to the insertion of the second element
and the new element is inserted before the first element.
Before inserting:
list = [b]
After inserting:
list = [a] -> [b]
"""
a_list = singly_linked_list.SinglyLinkedList()
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', None)
elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
a_list.append(elem1.get_data())
elem1.insert_before('a')
res = []
res.append(a_list[0])
res.append(a_list[1])
ref = []
ref.append(elemx)
ref.append(elem1)
t_1 = a_list.get_head() == a_list[0]
t_2 = a_list.get_tail() == a_list[1]
t_3 = a_list[0].get_next() == a_list[1]
t_4 = a_list[1].get_next() is None
t_5 = len(a_list) == 2
t_6 = ref == res
test = t_1 and t_2 and t_3 and t_4 and t_5 and t_6
self.assertTrue(test)
def test_singly_linked_list_element_insert_before_first_two(self):
"""
Testing inserting a linked list element into a linked
list. In this test the linked list contains two elements
prior to the insertion of the third element and the new
element is inserted before the first element.
Before inserting:
list = [b] -> [c]
After inserting:
list = [a] -> [b] -> [c]
"""
a_list = singly_linked_list.SinglyLinkedList()
elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'c', None)
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elem2)
elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
a_list.append(elem1.get_data())
a_list.append(elem2.get_data())
elem1.insert_before('a')
res = []
res.append(a_list[0])
res.append(a_list[1])
res.append(a_list[2])
ref = []
ref.append(elemx)
ref.append(elem1)
ref.append(elem2)
t_1 = a_list.get_head() == a_list[0]
t_2 = a_list.get_tail() == a_list[2]
t_3 = a_list[0].get_next() == a_list[1]
t_4 = a_list[1].get_next() == a_list[2]
t_5 = len(a_list) == 3
t_6 = ref == res
test = t_1 and t_2 and t_3 and t_4 and t_5 and t_6
self.assertTrue(test)
def test_singly_linked_list_element_insert_before_middle(self):
"""
Testing inserting a linked list element into a linked
list. In this test the linked list contains three elements
prior to the insertion of the fourh element and the new
element is inserted before the third element.
Before inserting:
list = [a] -> [b] -> [d]
After inserting:
list = [a] -> [b] -> [c] -> [d]
"""
a_list = singly_linked_list.SinglyLinkedList()
elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'd', None)
elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'c', elem2)
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elemx)
elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
a_list.append(elem0.get_data())
a_list.append(elem1.get_data())
a_list.append(elem2.get_data())
elem2.insert_before('c')
res = []
res.append(a_list[0])
res.append(a_list[1])
res.append(a_list[2])
res.append(a_list[3])
ref = []
ref.append(elem0)
ref.append(elem1)
ref.append(elemx)
ref.append(elem2)
t_1 = a_list.get_head() == a_list[0]
t_2 = a_list.get_tail() == a_list[3]
t_3 = a_list[0].get_next() == a_list[1]
t_4 = a_list[1].get_next() == a_list[2]
t_5 = a_list[2].get_next() == a_list[3]
t_6 = a_list[3].get_next() is None
t_7 = len(a_list) == 4
t_8 = ref == res
test = t_1 and t_2 and t_3 and t_4 and t_5 and t_6 and t_7 and t_8
self.assertTrue(test)
def test_singly_linked_list_element_insert_after_first_one(self):
"""
Testing inserting a linked list element into a linked
list. In this test the linked list contains a single
element prior to the insertion of the second element
and the new element is inserted after this element.
Before inserting:
list = [a]
After inserting:
list = [a] -> [b]
"""
a_list = singly_linked_list.SinglyLinkedList()
elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'b', None)
a_list.append(elem0.get_data())
elem0.insert_after('b')
res = []
res.append(a_list[0])
res.append(a_list[1])
ref = []
ref.append(elem0)
ref.append(elemx)
t_1 = a_list.get_head() == a_list[0]
t_2 = a_list.get_tail() == a_list[1]
t_3 = a_list[0].get_next() == a_list[1]
t_4 = a_list[1].get_next() is None
t_5 = len(a_list) == 2
t_6 = ref == res
test = t_1 and t_2 and t_3 and t_4 and t_5 and t_6
self.assertTrue(test)
def test_singly_linked_list_element_insert_after_first_two(self):
"""
Testing inserting a linked list element into a linked
list. In this test the linked list contains two elements
prior to the insertion of the third element and the new
element is inserted after the second element.
Before inserting:
list = [a] -> [b]
After inserting:
list = [a] -> [b] -> [c]
"""
a_list = singly_linked_list.SinglyLinkedList()
elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'c', None)
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elemx)
elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
a_list.append(elem0.get_data())
a_list.append(elem1.get_data())
elem1.insert_after('c')
res = []
res.append(a_list[0])
res.append(a_list[1])
res.append(a_list[2])
ref = []
ref.append(elem0)
ref.append(elem1)
ref.append(elemx)
t_1 = a_list.get_head() == a_list[0]
t_2 = a_list.get_tail() == a_list[2]
t_3 = a_list[0].get_next() == a_list[1]
t_4 = a_list[1].get_next() == a_list[2]
t_5 = a_list[2].get_next() is None
t_6 = len(a_list) == 3
t_7 = ref == res
test = t_1 and t_2 and t_3 and t_4 and t_5 and t_6 and t_7
self.assertTrue(test)
def test_singly_linked_list_element_insert_after_middle(self):
"""
Testing inserting a linked list element into a linked
list. In this test the linked list contains three elements
prior to the insertion of the fourh element and the new
element is inserted after the third element.
Before inserting:
list = [a] -> [b] -> [c]
After inserting:
list = [a] -> [b] -> [c] -> [d]
"""
a_list = singly_linked_list.SinglyLinkedList()
elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'd', None)
elemx = singly_linked_list.SinglyLinkedListElement(a_list, 'c', elem2)
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elemx)
elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
a_list.append(elem0.get_data())
a_list.append(elem1.get_data())
a_list.append(elem2.get_data())
elem1.insert_after('c')
res = []
res.append(a_list[0])
res.append(a_list[1])
res.append(a_list[2])
res.append(a_list[3])
ref = []
ref.append(elem0)
ref.append(elem1)
ref.append(elemx)
ref.append(elem2)
t_1 = a_list.get_head() == a_list[0]
t_2 = a_list.get_tail() == a_list[3]
t_3 = a_list[0].get_next() == a_list[1]
t_4 = a_list[1].get_next() == a_list[2]
t_5 = a_list[2].get_next() == a_list[3]
t_6 = len(a_list) == 4
t_7 = ref == res
test = t_1 and t_2 and t_3 and t_4 and t_5 and t_6 and t_7
self.assertTrue(test)
def test_singly_linked_list_element_remove_first_one(self):
"""
Testing removing a linked list element from a linked
list. In this test the linked list contains a single
element prior to removing this element.
Before removing:
list = [a]
After removing:
list = [None]
"""
a_list = singly_linked_list.SinglyLinkedList()
elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', None)
a_list.append(elem0.get_data())
elem0.remove()
res = []
ref = []
t_1 = len(a_list) == 0
t_2 = ref == res
test = t_1 and t_2
self.assertTrue(test)
def test_singly_linked_list_element_remove_first_two(self):
"""
Testing removing a linked list element from a linked
list. In this test the linked list contains two
elements prior to removing the first element.
Before removing:
list = [a] -> [b]
After removing:
list = [b]
"""
a_list = singly_linked_list.SinglyLinkedList()
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', None)
elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
a_list.append(elem0.get_data())
a_list.append(elem1.get_data())
elem0.remove()
res = []
res.append(a_list[0])
ref = []
ref.append(elem1)
t_1 = a_list.get_head() == a_list[0]
t_2 = a_list.get_tail() == a_list[0]
t_3 = elem1.get_next() is None
t_4 = len(a_list) == 1
t_5 = ref == res
test = t_1 and t_2 and t_3 and t_4 and t_5
self.assertTrue(test)
def test_singly_linked_list_element_remove_middle(self):
"""
Testing removing a linked list element from a linked
list. In this test the linked list contains five
elements prior to removing. The element being removed
is the third element.
Before removing:
list = [a] -> [b] -> [c] -> [d] -> [e]
After removing:
list = [a] -> [b] -> [d] -> [e]
"""
a_list = singly_linked_list.SinglyLinkedList()
elem5 = singly_linked_list.SinglyLinkedListElement(a_list, 'e', None)
elem4 = singly_linked_list.SinglyLinkedListElement(a_list, 'd', elem5)
elem3 = singly_linked_list.SinglyLinkedListElement(a_list, 'c', elem4)
elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elem3)
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem2)
a_list.append(elem1.get_data())
a_list.append(elem2.get_data())
a_list.append(elem3.get_data())
a_list.append(elem4.get_data())
a_list.append(elem5.get_data())
elem3.remove()
res = []
res.append(a_list[0])
res.append(a_list[1])
res.append(a_list[2])
res.append(a_list[3])
ref = []
ref.append(elem1)
ref.append(elem2)
ref.append(elem4)
ref.append(elem5)
t_1 = a_list.get_head() == a_list[0]
t_2 = a_list.get_tail() == a_list[3]
t_3 = a_list[0].get_next() == a_list[1]
t_4 = a_list[1].get_next() == a_list[2]
t_5 = a_list[2].get_next() == a_list[3]
t_6 = a_list[3].get_next() is None
t_7 = len(a_list) == 4
t_8 = ref == res
test = t_1 and t_2 and t_3 and t_4 and t_5 and t_6 and t_7 and t_8
self.assertTrue(test)
def test_singly_linked_list_element_remove_end(self):
"""
Testing removing a linked list element from a linked
list. In this test the linked list contains five
elements prior to removing. The element being removed
is the last element.
Before removing:
list = [a] -> [b] -> [c] -> [d] -> [e]
After removing:
list = [a] -> [b] -> [c] -> [d]
"""
a_list = singly_linked_list.SinglyLinkedList()
elem4 = singly_linked_list.SinglyLinkedListElement(a_list, 'e', None)
elem3 = singly_linked_list.SinglyLinkedListElement(a_list, 'd', elem4)
elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'c', elem3)
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elem2)
elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
a_list.append(elem0.get_data())
a_list.append(elem1.get_data())
a_list.append(elem2.get_data())
a_list.append(elem3.get_data())
a_list.append(elem4.get_data())
elem4.remove()
res = []
res.append(a_list[0])
res.append(a_list[1])
res.append(a_list[2])
res.append(a_list[3])
ref = []
ref.append(elem0)
ref.append(elem1)
ref.append(elem2)
ref.append(elem3)
t_1 = a_list.get_head() == a_list[0]
t_2 = a_list.get_tail() == a_list[3]
t_3 = a_list[0].get_next() == a_list[1]
t_4 = a_list[1].get_next() == a_list[2]
t_5 = a_list[2].get_next() == a_list[3]
t_6 = a_list[3].get_next() is None
t_7 = len(a_list) == 4
t_8 = ref == res
test = t_1 and t_2 and t_3 and t_4 and t_5 and t_6 and t_7 and t_8
self.assertTrue(test)
def test_singly_linked_list_element_remove_not_present(self):
"""
Testing removing a linked list element from a linked
list. In this test the linked list contains two
elements prior to removing. The element which should
be removed is not in the list, so nothing should be
removed and the pointers should be intact.
Before removing:
list = [a] -> [b]
After removing:
list = [a] -> [b]
"""
a_list = singly_linked_list.SinglyLinkedList()
elem2 = singly_linked_list.SinglyLinkedListElement(a_list, 'c', None)
elem1 = singly_linked_list.SinglyLinkedListElement(a_list, 'b', elem2)
elem0 = singly_linked_list.SinglyLinkedListElement(a_list, 'a', elem1)
a_list.append(elem0.get_data())
a_list.append(elem1.get_data())
elem2.remove()
res = []
res.append(a_list[0])
res.append(a_list[1])
ref = []
ref.append(elem0)
ref.append(elem1)
t_1 = a_list.get_head() == a_list[0]
t_2 = a_list.get_tail() == a_list[1]
t_3 = a_list[0].get_next() == a_list[1]
t_4 = a_list[1].get_next() is None
t_5 = len(a_list) == 2
t_6 = ref == res
test = t_1 and t_2 and t_3 and t_4 and t_5 and t_6
self.assertTrue(test)
### End test of local class SinglyLinkedListElement ###
### Begin test of class SinglyLinkedList ###
def test_singly_linked_list_len(self):
"""
Test operator "len".
"""
self.assertEqual(3, len(self.list1))
def test_singly_linked_list_equal(self):
"""
Test operator "equal".
"""
a_list1 = singly_linked_list.SinglyLinkedList()
a_list2 = singly_linked_list.SinglyLinkedList()
a_list1.append('a')
a_list1.append('b')
a_list1.append('c')
a_list2.append('a')
a_list2.append('b')
a_list2.append('c')
self.assertEqual(a_list1, a_list2)
def test_singly_linked_list_not_equal(self):
"""
Test operator "equal" - inverted.
"""
a_list1 = singly_linked_list.SinglyLinkedList()
a_list2 = singly_linked_list.SinglyLinkedList()
a_list1.append('a')
a_list1.append('b')
a_list1.append('c')
a_list2.append('a')
a_list2.append('b')
a_list2.append('d')
self.assertNotEqual(a_list1, a_list2)
def test_singly_linked_list_copy_not_equal(self):
"""
Test operator "copy" - inverted.
"""
a_list1 = singly_linked_list.SinglyLinkedList()
a_list1.append('a')
a_list1.append('b')
a_list1.append('c')
a_list2 = copy.copy(a_list1)
a_list1[len(a_list1) - 1] = 'cc'
self.assertNotEqual(a_list1, a_list2)
def test_singly_linked_list_copy_equal(self):
"""
Test operator "copy".
"""
a_list1 = singly_linked_list.SinglyLinkedList()
a_list1.append('a')
a_list1.append('b')
a_list1.append('c')
a_list2 = copy.copy(a_list1)
# print ""
# print l1
# print l2
# print len( l1 )
# print len( l2 )
# print l1[0]
# print l2[0]
# print l1[1]
# print l2[1]
# print l1[2]
# print l2[2]
# print l1.get_head()
# print l2.get_head()
# print l1.get_tail()
# # NOTE: it appears that the tail is different!!!
# print l2.get_tail()
self.assertEqual(a_list1, a_list2)
def test_singly_linked_list_contains(self):
"""
Test operator "contains".
"""
self.assertTrue('b' in self.list1)
def test_singly_linked_list_contains_not(self):
"""
Test operator "contains" - inverted.
"""
self.assertFalse('bb' in self.list1)
def test_singly_linked_list_get_item(self):
"""
Test operator "get_item".
"""
elem = singly_linked_list.SinglyLinkedListElement(
self.list1, 'b', None)
self.assertEqual(elem, self.list1[0])
def test_singly_linked_list_get_item_raise(self):
"""
Test operator "get_item" - raises exception.
"""
self.assertRaises(IndexError, lambda: self.list1[10])
def test_singly_linked_list_get_head(self):
"""
Test method "get_head".
"""
self.assertEqual('b', self.list1.get_head().get_data())
def test_singly_linked_list_get_tail(self):
"""
Test method "get_tail".
"""
self.assertEqual('d', self.list1.get_tail().get_data())
def test_singly_linked_list_is_empty(self):
"""
Test method "is_empty".
"""
a_list = singly_linked_list.SinglyLinkedList()
self.assertTrue(a_list.is_empty())
def test_singly_linked_list_is_empty_not(self):
"""
Test method "is_empty" - inverted.
"""
self.assertFalse(self.list1.is_empty())
def test_singly_linked_list_clear(self):
"""
Test method "clear".
"""
a_list = singly_linked_list.SinglyLinkedList()
a_list.append('a')
a_list.clear()
self.assertTrue(a_list.is_empty())
def test_singly_linked_list_get_first(self):
"""
Test method "get_first".
"""
self.assertEqual('b', self.list1.get_first())
def test_singly_linked_list_get_last(self):
"""
Test method "get_last".
"""
self.assertEqual('d', self.list1.get_last())
def test_singly_linked_list_prepend(self):
"""
Test method "get_prepend".
"""
self.list1.prepend('a')
self.assertEqual('a', self.list1.get_first())
def test_singly_linked_list_insert_at_empty(self):
"""
Test method "insert_at".
"""
a_list = singly_linked_list.SinglyLinkedList()
a_list.insert_at(0, 'b')
self.assertEqual('b', a_list[0].get_data())
def test_singly_linked_list_insert_at_head(self):
"""
Test method "insert_at".
"""
a_list = singly_linked_list.SinglyLinkedList()
a_list.append('a')
a_list.append('b')
a_list.append('c')
a_list.insert_at(0, 'aa')
t_1 = 'aa' == a_list.get_head().data
t_2 = a_list[0] == a_list.get_head()
t_3 = a_list[len(a_list) - 1] == a_list.get_tail()
test = t_1 and t_2 and t_3
self.assertTrue(test)
def test_singly_linked_list_insert_at_middle(self):
"""
Test method "insert_at".
"""
a_list = singly_linked_list.SinglyLinkedList()
a_list.append('a')
a_list.append('b')
a_list.append('c')
a_list.append('d')
a_list.append('e')
a_list.insert_at(2, 'cc')
self.assertEqual('cc', a_list[2].get_data())
def test_singly_linked_list_insert_at_tail(self):
"""
Test method "insert_at".
"""
a_list = singly_linked_list.SinglyLinkedList()
a_list.append('a')
a_list.append('b')
a_list.append('c')
a_list.insert_at(len(a_list) - 1, 'cc')
t_1 = a_list[0] == a_list.get_head()
t_2 = a_list[len(a_list) - 1] == a_list.get_tail()
t_3 = 'cc' == a_list.get_tail().data
t_4 = len(a_list) == 3
test = t_1 and t_2 and t_3 and t_4
self.assertTrue(test)
def test_singly_linked_list_insert_before_element(self):
"""
Test method "insert_before_element".
"""
a_list = singly_linked_list.SinglyLinkedList()
a_list.append('a')
a_list.append('b')
a_list.append('c')
elem2 = a_list[2]
a_list.insert_before_element('cc', elem2)
self.assertEqual('cc', a_list[2].get_data())
def test_singly_linked_list_insert_after_element(self):
"""
Test method "insert_after_element".
"""
a_list = singly_linked_list.SinglyLinkedList()
a_list.append('a')
a_list.append('b')
a_list.append('c')
elem2 = a_list[2]
a_list.insert_after_element('cc', elem2)
self.assertEqual('cc', a_list[3].get_data())
def test_singly_linked_list_remove(self):
    """
    Test method "remove": after removing 'd', 'c' must be the last
    element (assumes the setUp fixture self.list1 ends with ..., 'c', 'd'
    — fixture not visible here).
    """
    self.list1.remove('d')
    self.assertEqual('c', self.list1.get_last())
### End test of class SinglyLinkedList ###
| 33.070196 | 82 | 0.598163 | 28,602 | 0.995268 | 0 | 0 | 0 | 0 | 0 | 0 | 6,873 | 0.239161 |
1084d7175183cbca0c6049d853c6dc625cb88022 | 243 | py | Python | package/awesome_panel/application/services/message_service.py | Jhsmit/awesome-panel | 53f7754f7c505a2666f6724df26c851ae942ec40 | [
"Apache-2.0"
] | 179 | 2019-12-04T14:54:53.000Z | 2022-03-30T09:08:38.000Z | package/awesome_panel/application/services/message_service.py | Jhsmit/awesome-panel | 53f7754f7c505a2666f6724df26c851ae942ec40 | [
"Apache-2.0"
] | 62 | 2019-12-14T16:51:28.000Z | 2022-03-19T18:47:12.000Z | package/awesome_panel/application/services/message_service.py | Jhsmit/awesome-panel | 53f7754f7c505a2666f6724df26c851ae942ec40 | [
"Apache-2.0"
] | 35 | 2019-12-08T13:19:53.000Z | 2022-03-25T10:33:02.000Z | """This module implements the MessageService
The MessageService enables sending and receiving messages
"""
import param
class MessageService(param.Parameterized):
    """The MessageService enables sending and receiving messages.

    Currently a marker class with no behaviour of its own; it only
    inherits from ``param.Parameterized``.
    """
| 24.3 | 68 | 0.769547 | 111 | 0.45679 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.711934 |
1085b6a16b899345091e7cc84cb90bde70ab693d | 2,448 | py | Python | BitMEXAPIKeyAuthenticator.py | SaarasM/trading-algos | 79d1cdd84df71b5a6fca19ffa98bd039b4bc89bc | [
"MIT"
] | null | null | null | BitMEXAPIKeyAuthenticator.py | SaarasM/trading-algos | 79d1cdd84df71b5a6fca19ffa98bd039b4bc89bc | [
"MIT"
] | null | null | null | BitMEXAPIKeyAuthenticator.py | SaarasM/trading-algos | 79d1cdd84df71b5a6fca19ffa98bd039b4bc89bc | [
"MIT"
] | null | null | null | import urllib.parse
import time
import hashlib
import hmac
from bravado.requests_client import Authenticator
class APIKeyAuthenticator(Authenticator):
    """?api_key authenticator.

    This authenticator adds BitMEX API key support via header.

    :param host: Host to authenticate for.
    :param api_key: API key.
    :param api_secret: API secret.
    """

    def __init__(self, host, api_key, api_secret):
        super(APIKeyAuthenticator, self).__init__(host)
        self.api_key = api_key
        self.api_secret = api_secret

    def matches(self, url):
        # Sign every request except the swagger spec download.
        return "swagger.json" not in url

    def apply(self, r):
        # 5s grace period in case of clock skew.
        expires = int(round(time.time()) + 5)
        r.headers['api-expires'] = str(expires)
        r.headers['api-key'] = self.api_key
        prepared = r.prepare()
        r.headers['api-signature'] = self.generate_signature(
            self.api_secret, r.method, prepared.path_url, expires,
            prepared.body or '')
        return r

    def generate_signature(self, secret, verb, url, nonce, data):
        """Generate a request signature compatible with BitMEX.

        The signature is HEX(HMAC_SHA256(secret, verb + path + nonce + data)):
        the verb is uppercased, the url is relative, the nonce is an
        increasing 64-bit integer and the data, if present, is JSON without
        whitespace between keys.
        """
        # Only the relative path (plus query string, when present) is signed.
        parts = urllib.parse.urlparse(url)
        path = parts.path
        if parts.query:
            path = path + '?' + parts.query
        message = bytes(verb + path + str(nonce) + data, 'utf-8')
        digest = hmac.new(
            bytes(secret, 'utf-8'), message, digestmod=hashlib.sha256)
        return digest.hexdigest()
1085fa70c9dbc377238e6a8020eb054c9de9d0f5 | 382 | py | Python | herokuapp/project_template/manage.py | urkonn/django-herokuapp | 1d3ec10f6e83b7556a443150aa0658bbf341f6d1 | [
"BSD-3-Clause"
] | 262 | 2015-01-03T18:34:11.000Z | 2021-12-25T21:16:31.000Z | herokuapp/project_template/manage.py | urkonn/django-herokuapp | 1d3ec10f6e83b7556a443150aa0658bbf341f6d1 | [
"BSD-3-Clause"
] | 13 | 2015-01-14T04:02:34.000Z | 2021-10-05T13:59:46.000Z | herokuapp/project_template/manage.py | urkonn/django-herokuapp | 1d3ec10f6e83b7556a443150aa0658bbf341f6d1 | [
"BSD-3-Clause"
] | 52 | 2015-01-28T20:16:01.000Z | 2022-02-11T20:22:39.000Z | #!/usr/bin/env python
import os
import sys
# NOTE: this is a Django project *template* file; "{{ app_name }}" and
# "{{ project_name }}" are placeholders substituted when the project is
# generated — do not treat them as literal Python.
if __name__ == "__main__":
    # Load the Heroku environment.
    # (presumably reads env vars / .env settings for the generated app —
    # confirm against herokuapp.env.load_env)
    from herokuapp.env import load_env
    load_env(__file__, "{{ app_name }}")
    # Point Django at the generated project's settings unless the caller
    # has already configured a settings module.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ project_name }}.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 23.875 | 82 | 0.722513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.340314 |
1086c5f31cb6487877c19a733713381c6f9031e7 | 477 | py | Python | packages/indextools/tests/test_cases/test_primes.py | zhihanyang2022/drqn | ac2482e3b42094e6242c042583dbbd9c98e4750b | [
"MIT"
] | 5 | 2021-03-28T14:12:40.000Z | 2021-11-19T20:46:10.000Z | packages/indextools/tests/test_cases/test_primes.py | zhihanyang2022/drqn | ac2482e3b42094e6242c042583dbbd9c98e4750b | [
"MIT"
] | null | null | null | packages/indextools/tests/test_cases/test_primes.py | zhihanyang2022/drqn | ac2482e3b42094e6242c042583dbbd9c98e4750b | [
"MIT"
] | null | null | null | import unittest
import indextools
def is_prime(n):
    """Return True iff *n* is a prime number.

    Trial division only needs to test divisors up to floor(sqrt(n)): any
    factor above the square root pairs with one below it, so this is
    O(sqrt(n)) instead of the O(n) full-range scan.
    """
    return n > 1 and all(n % i != 0 for i in range(2, math.isqrt(n) + 1))
class PrimeTest(unittest.TestCase):

    def test_primes(self):
        """A SubSpace filtered by is_prime holds exactly the primes < 50."""
        base_space = indextools.RangeSpace(50)
        prime_space = indextools.SubSpace(base_space, is_prime)
        expected = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47)
        self.assertEqual(prime_space.nelems, len(expected))
        self.assertCountEqual(prime_space.values, expected)
| 26.5 | 73 | 0.656184 | 362 | 0.75891 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
108711ba01a7e1bdb1abf96274dfe881e5311d41 | 2,522 | py | Python | lascli/parser/users.py | LucidtechAI/las-cli | bf9ca7d016141059c543527c6654cebb2e2f1178 | [
"Apache-2.0"
] | null | null | null | lascli/parser/users.py | LucidtechAI/las-cli | bf9ca7d016141059c543527c6654cebb2e2f1178 | [
"Apache-2.0"
] | 10 | 2021-04-12T06:42:04.000Z | 2022-03-30T10:21:33.000Z | lascli/parser/users.py | LucidtechAI/las-cli | bf9ca7d016141059c543527c6654cebb2e2f1178 | [
"Apache-2.0"
] | null | null | null | import base64
import pathlib
from las import Client
from lascli.util import nullable, NotProvided
def encode_avatar(avatar):
    """Read the image file at *avatar* and return it base64-encoded (str)."""
    raw = pathlib.Path(avatar).read_bytes()
    return base64.b64encode(raw).decode()
def list_users(las_client: Client, max_results, next_token):
    """Return one page of users from the las client (thin delegator)."""
    return las_client.list_users(max_results=max_results, next_token=next_token)
def get_user(las_client: Client, user_id):
    """Fetch a single user by id (thin delegator)."""
    return las_client.get_user(user_id)
def create_user(las_client: Client, email, app_client_id, **optional_args):
    """Create a user, base64-encoding the avatar image path if one is given."""
    if optional_args.get('avatar'):
        optional_args['avatar'] = encode_avatar(optional_args['avatar'])
    return las_client.create_user(email, app_client_id=app_client_id, **optional_args)
def update_user(las_client: Client, user_id, **optional_args):
    """Update a user, base64-encoding the avatar image path if one is given."""
    if optional_args.get('avatar'):
        optional_args['avatar'] = encode_avatar(optional_args['avatar'])
    return las_client.update_user(user_id, **optional_args)
def delete_user(las_client: Client, user_id):
return las_client.delete_user(user_id)
def create_users_parser(subparsers):
    """Register the `users` sub-command tree: create/update/list/get/delete."""
    parser = subparsers.add_parser('users')
    subparsers = parser.add_subparsers()

    create_parser = subparsers.add_parser('create')
    create_parser.add_argument('email')
    create_parser.add_argument('app_client_id')
    create_parser.add_argument('--name')
    create_parser.add_argument('--avatar', help='Path to avatar JPEG image.')
    create_parser.set_defaults(cmd=create_user)

    update_parser = subparsers.add_parser('update')
    update_parser.add_argument('user_id')
    # NotProvided distinguishes "flag omitted" from an explicit "null".
    update_parser.add_argument('--name', type=nullable, default=NotProvided)
    update_parser.add_argument(
        '--avatar',
        help='Path to avatar JPEG image or "null" to remove avatar from user.',
        type=nullable,
        default=NotProvided
    )
    update_parser.set_defaults(cmd=update_user)

    list_parser = subparsers.add_parser('list')
    list_parser.add_argument('--max-results', '-m', type=int, default=None)
    list_parser.add_argument('--next-token', '-n', type=str, default=None)
    list_parser.set_defaults(cmd=list_users)

    get_parser = subparsers.add_parser('get')
    get_parser.add_argument('user_id')
    get_parser.set_defaults(cmd=get_user)

    delete_parser = subparsers.add_parser('delete')
    delete_parser.add_argument('user_id')
    delete_parser.set_defaults(cmd=delete_user)

    return parser
| 32.753247 | 86 | 0.752181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 289 | 0.114592 |
10876b8adb313d894238da00cbc965a6ae323a52 | 114 | py | Python | labs/tony-monday-15-jg103/hello_hello.py | TonyJenkins/lbu-python-code | d02d843290e887d016cdb05ddc1a8639874f2e06 | [
"Unlicense"
] | 2 | 2021-08-20T13:02:45.000Z | 2021-10-03T20:34:45.000Z | labs/tony-monday-15-jg103/hello_hello.py | TonyJenkins/lbu-python-code | d02d843290e887d016cdb05ddc1a8639874f2e06 | [
"Unlicense"
] | null | null | null | labs/tony-monday-15-jg103/hello_hello.py | TonyJenkins/lbu-python-code | d02d843290e887d016cdb05ddc1a8639874f2e06 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
def hello(**kwargs):
    """Print a greeting; keyword arguments are accepted but ignored."""
    # Plain string literal: the original used an f-string with no
    # placeholders, which is pointless (ruff F541).
    print('Hello')
if __name__ == '__main__':
    # BUGFIX: the original called `hello(hello())`, which passes the inner
    # call's None return value as a positional argument; a **kwargs-only
    # signature rejects positional arguments with TypeError. Call once.
    hello()
| 11.4 | 26 | 0.605263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.350877 |
10879ad312bd0d2e0eb47f8d066224e8607d0fb3 | 619 | py | Python | example/example/handlers.py | comynli/m | 245f4091f0e1bddc4cff26ad89df03122987d549 | [
"Apache-2.0"
] | 11 | 2016-09-25T01:35:09.000Z | 2020-12-30T03:14:35.000Z | example/example/handlers.py | EscapeLife/mini-SQLAlchemy | c5e4d08349c6469884c5668f4c9dc86cd631b257 | [
"Apache-2.0"
] | null | null | null | example/example/handlers.py | EscapeLife/mini-SQLAlchemy | c5e4d08349c6469884c5668f4c9dc86cd631b257 | [
"Apache-2.0"
] | 21 | 2016-09-24T09:56:31.000Z | 2020-02-18T05:57:21.000Z | from .models import db, User
from m import Router
from m.utils import jsonify
router = Router(prefix='')
@router.route('/', methods=['POST'])
def home(ctx, request):
    """Create a user from the POSTed JSON body and persist it."""
    user = User(name=request.json().get('name'))
    db.session.add(user)
    try:
        db.session.commit()
    except Exception as e:
        print(e)
        # Roll back so the session stays usable after the failed commit.
        db.session.rollback()
@router.route('/{name}', methods=['GET'])
def get(ctx, request):
    """Look up a user by name and return it as JSON, or a 404-style error."""
    # NOTE(review): the route declares a `{name}` path parameter but the
    # value is read from the query string — confirm which one callers use.
    name = request.args.get('name')
    query = User.query.filter(User.name == name)
    user = query.first_or_404('user {} not exist'.format(name))
    return jsonify(code=200, user=user.dictify())
108807ac7d6193c42b40df14783acb29975e2f3a | 4,609 | py | Python | scripts/touchup_for_web.py | BennZoll/roboto | cb3cde1a3069f28b9a66f3d104f51fd6c0734be1 | [
"Apache-2.0"
] | 3,933 | 2015-05-26T17:02:24.000Z | 2020-04-20T12:09:09.000Z | scripts/touchup_for_web.py | BennZoll/roboto | cb3cde1a3069f28b9a66f3d104f51fd6c0734be1 | [
"Apache-2.0"
] | 274 | 2015-05-26T20:05:46.000Z | 2020-04-16T01:00:03.000Z | scripts/touchup_for_web.py | BennZoll/roboto | cb3cde1a3069f28b9a66f3d104f51fd6c0734be1 | [
"Apache-2.0"
] | 416 | 2015-05-26T18:06:06.000Z | 2020-03-27T06:33:47.000Z | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Post-build web fonts changes for Roboto."""
import sys
from fontTools import ttLib
from nototools import font_data
import temporary_touchups
def apply_web_specific_fixes(font, unhinted, family_name):
    """Apply fixes needed for web fonts."""
    # Restore the legacy vertical metrics expected by the web builds.
    hhea = font['hhea']
    hhea.ascent = 1900
    hhea.descent = -500

    os2 = font['OS/2']
    os2.sTypoAscender = 1536
    os2.sTypoDescender = -512
    os2.sTypoLineGap = 102
    os2.usWinAscent = 1946
    os2.usWinDescent = 512

    # Then apply everything shared between the web and Chrome OS targets.
    apply_web_cros_common_fixes(font, unhinted, family_name)
def apply_web_cros_common_fixes(font, unhinted, family_name):
    """Apply fixes needed for web and CrOS targets"""
    # NOTE(review): Python 2 code — encode('ASCII') yields a byte string
    # that compares equal to the str literals below; under Python 3 the
    # assert would always fail. Confirm before porting.
    subfamily_name = font_data.get_name_records(font)[2].encode('ASCII')
    assert(subfamily_name in
           ['Thin', 'Thin Italic',
            'Light', 'Light Italic',
            'Regular', 'Italic',
            'Medium', 'Medium Italic',
            'Bold', 'Bold Italic',
            'Black', 'Black Italic'])
    # Name record 1 is the (legacy) family name; fold "Condensed" into it.
    if 'Condensed' in font_data.get_name_records(font)[1]:
        family_name += ' Condensed'
    full_name = family_name
    if subfamily_name != 'Regular':
        full_name += ' ' + subfamily_name
    # Family, subfamily names
    font_data.set_name_record(font, 16, family_name)
    style_map = ['Regular', 'Bold', 'Italic', 'Bold Italic']
    if subfamily_name in style_map:
        font_data.set_name_record(font, 1, family_name)
    else:
        # Non-RIBBI weights: move the weight into the family name so that
        # legacy consumers see only Regular/Bold/Italic/Bold Italic styles.
        weight = subfamily_name.split()[0]
        new_family_name = family_name
        if weight != 'Regular':
            new_family_name += ' ' + weight
        font_data.set_name_record(font, 1, new_family_name)
        # all weights outside regular and bold should only have subfamily
        # "Regular" or "Italic"
        # italic << 1 selects style_map[2] ('Italic') when italic, else
        # style_map[0] ('Regular').
        italic = subfamily_name.endswith('Italic')
        font_data.set_name_record(font, 2, style_map[italic << 1])
    # Unique identifier and full name
    font_data.set_name_record(font, 3, full_name)
    font_data.set_name_record(font, 4, full_name)
    font_data.set_name_record(font, 18, None)
    # PostScript name
    font_data.set_name_record(
        font, 6, (family_name+'-'+subfamily_name).replace(' ', ''))
    # Copyright message
    font_data.set_name_record(
        font, 0, 'Copyright 2011 Google Inc. All Rights Reserved.')
    # hotpatch glyphs by swapping
    # https://github.com/google/roboto/issues/18
    glyf = font['glyf']
    glyf['chi'], glyf['chi.alt'] = glyf['chi.alt'], glyf['chi']
    # make glyph orders consistent for feature copying
    # https://github.com/google/roboto/issues/71
    # Rename .lnum glyphs to .pnum in place; the old name is appended to the
    # order list first because deleting the glyf entry requires its name to
    # still be present in the glyph order.
    glyph_order = font.getGlyphOrder()
    for i, glyph_name in enumerate(glyph_order):
        if glyph_name.endswith('.lnum'):
            new_name = glyph_name.replace('.lnum', '.pnum')
            glyph_order[i] = new_name
            font['glyf'][new_name] = font['glyf'][glyph_name]
            # append old name to glyph order so del succeeds
            glyph_order.append(glyph_name)
            del font['glyf'][glyph_name]
    # copy features from unhinted
    # https://github.com/google/roboto/pull/163
    for table in ['GDEF', 'GPOS', 'GSUB']:
        font[table] = unhinted[table]
def correct_font(source_name, unhinted_name, target_font_name, family_name):
    """Corrects metrics and other meta information.

    Loads the hinted source font and its unhinted counterpart, applies the
    web-specific and temporary fixes, and saves the result to
    target_font_name.
    """
    font = ttLib.TTFont(source_name)
    unhinted = ttLib.TTFont(unhinted_name)
    # apply web-specific fixes before shared, so that sub/family names are
    # correct for black weights and their bold bits will be set
    apply_web_specific_fixes(font, unhinted, family_name)
    temporary_touchups.apply_temporary_fixes(font, is_for_web=True)
    temporary_touchups.update_version_and_revision(font)
    font.save(target_font_name)
def main(argv):
    """Correct the font specified in the command line.

    Expects: source font, unhinted font, output path, family name
    (forwarded positionally to correct_font).
    """
    correct_font(*argv[1:])


if __name__ == "__main__":
    main(sys.argv)
| 33.398551 | 76 | 0.681493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,917 | 0.415925 |
1088443ead915e8ccd2320fd64ac1f164ea8d25b | 389 | py | Python | fact/looping2d.py | BicycleWalrus/slop | 300f994cb7d7a58a330fdac1afcd36ffd1da80ff | [
"MIT"
] | null | null | null | fact/looping2d.py | BicycleWalrus/slop | 300f994cb7d7a58a330fdac1afcd36ffd1da80ff | [
"MIT"
] | null | null | null | fact/looping2d.py | BicycleWalrus/slop | 300f994cb7d7a58a330fdac1afcd36ffd1da80ff | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
with open("dnsservers.txt", "r") as dnsfile:
for svr in dnsfile:
svr = svr.rstrip('\n')
if svr.endswith('org'):
with open("org-domain.txt", "a") as srvfile:
srvfile.write(svr + "\n")
elif svr.endswith('com'):
with open("com-domain.txt", "a") as srvfile:
srvfile.write(svr + "\n")
| 29.923077 | 56 | 0.51671 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.25964 |
10894b46907ea75299b8184798d4baa3caa348c3 | 322 | py | Python | src/controller/tower_controller.py | DockerTower/Tower | c44528007ef566a3e2e461a16ff426f326f9004e | [
"MIT"
] | 1 | 2016-10-28T10:03:08.000Z | 2016-10-28T10:03:08.000Z | src/controller/tower_controller.py | DockerTower/Tower | c44528007ef566a3e2e461a16ff426f326f9004e | [
"MIT"
] | null | null | null | src/controller/tower_controller.py | DockerTower/Tower | c44528007ef566a3e2e461a16ff426f326f9004e | [
"MIT"
] | null | null | null | from cement.core.controller import CementBaseController, expose
class TowerController(CementBaseController):
    """Base CLI controller for Tower.

    Purely declarative: Meta stacks this controller nested on the 'base'
    handler, and the default action only prints the generated help text.
    """
    class Meta:
        label = 'base'
        stacked_on = 'base'
        stacked_type = 'nested'
        description = "Tower"
    @expose(hide=True)
    def default(self):
        # No default behaviour beyond showing usage/help.
        self.app.args.print_help()
| 23 | 63 | 0.65528 | 255 | 0.791925 | 0 | 0 | 76 | 0.236025 | 0 | 0 | 27 | 0.083851 |
1089f5b4102ec85af3cbcbee9d9a0843f6277c92 | 1,113 | py | Python | download.py | reed-hackathon-2022/mc-bot | d770fbb61b6d6bb3c1fc4055bb9e8bc6d45412c6 | [
"MIT"
] | 1 | 2022-01-23T19:10:48.000Z | 2022-01-23T19:10:48.000Z | download.py | reed-hackathon-2022/mc-bot | d770fbb61b6d6bb3c1fc4055bb9e8bc6d45412c6 | [
"MIT"
] | null | null | null | download.py | reed-hackathon-2022/mc-bot | d770fbb61b6d6bb3c1fc4055bb9e8bc6d45412c6 | [
"MIT"
] | 1 | 2022-02-27T20:44:38.000Z | 2022-02-27T20:44:38.000Z | import io
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload
from google.oauth2 import service_account
from oauth2client.service_account import ServiceAccountCredentials
SCOPES = ['https://www.googleapis.com/auth/drive.metadata.readonly']
def get_drive_service():
    """Build a Drive v3 API client.

    Tries the stored user credentials first; if they are missing or
    invalid, falls back to the service-account key file.
    """
    creds = Credentials.from_authorized_user_file('googletoken.json')
    if not creds or not creds.valid:
        # BUGFIX: `from_json_keyfile_dict` expects an already-parsed dict;
        # for a key file path on disk the correct constructor is
        # `from_json_keyfile_name`.
        creds = ServiceAccountCredentials.from_json_keyfile_name(
            'serviceaccountcreds.json', scopes=SCOPES)
    return build('drive', 'v3', credentials=creds)
def fetch_document():
    """Export a fixed Google Doc as plain text and return its contents."""
    drive_service = get_drive_service()
    file_id = "1s0S4oau1GuJSrzaFhYHfLLq0KRliO4rG2uerNHpXRCk"
    request = drive_service.files().export_media(
        fileId=file_id, mimeType='text/plain'
    )
    fh = io.StringIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while not done:
        status, done = downloader.next_chunk()
        # BUGFIX: `status` is a MediaDownloadProgress object; applying the
        # float format spec `:%` to it raises TypeError. Format its
        # fractional progress() value instead.
        print(f"Download status: {status.progress():.0%}")
    drive_service.close()
    return fh.getvalue()
| 33.727273 | 107 | 0.75292 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.181491 |
108a668911310c69b94dbdbbb3311bbc3c59a242 | 5,471 | py | Python | lumbermill/output/Zabbix.py | dstore-dbap/LumberMill | b7cbadc209a83386871735b8ad88b61da917a6ab | [
"Apache-2.0"
] | 15 | 2015-12-14T19:07:28.000Z | 2022-02-28T13:32:11.000Z | lumbermill/output/Zabbix.py | dstore-dbap/LumberMill | b7cbadc209a83386871735b8ad88b61da917a6ab | [
"Apache-2.0"
] | null | null | null | lumbermill/output/Zabbix.py | dstore-dbap/LumberMill | b7cbadc209a83386871735b8ad88b61da917a6ab | [
"Apache-2.0"
] | 4 | 2017-02-08T10:49:55.000Z | 2019-03-19T18:47:46.000Z | # -*- coding: utf-8 -*-
import os
import sys
from pyzabbix import ZabbixMetric, ZabbixSender
from lumbermill.BaseThreadedModule import BaseThreadedModule
from lumbermill.utils.Buffers import Buffer
from lumbermill.utils.Decorators import ModuleDocstringParser
from lumbermill.utils.DynamicValues import mapDynamicValue
@ModuleDocstringParser
class Zabbix(BaseThreadedModule):
    """
    Send events to zabbix.
    hostname: Hostname for which the metrics should be stored.
    fields: Event fields to send.
    field_prefix: Prefix to prepend to field names. For e.g. cpu_count field with default prefix, the Zabbix key is lumbermill_cpu_count.
    timestamp_field: Field to provide timestamp. If not provided, current timestamp is used.
    agent_conf: Path to zabbix_agent configuration file. If set to True defaults to /etc/zabbix/zabbix_agentd.conf.
    server: Address of zabbix server. If port differs from default it can be set by appending it, e.g. 127.0.0.1:10052.
    store_interval_in_secs: sending data to es in x seconds intervals.
    batch_size: sending data to es if event count is above, even if store_interval_in_secs is not reached.
    backlog_size: maximum count of events waiting for transmission. Events above count will be dropped.
    Configuration template:
    - output.Zabbix:
       hostname:                        # <type: string; is: required>
       fields:                          # <type: list; is: required>
       field_prefix:                    # <default: "lumbermill_"; type: string; is: optional>
       timestamp_field:                 # <default: "timestamp"; type: string; is: optional>
       agent_conf:                      # <default: True; type: boolean||string; is: optional>
       server:                          # <default: False; type: boolean||string; is: required if agent_conf is False else optional>
       store_interval_in_secs:          # <default: 10; type: integer; is: optional>
       batch_size:                      # <default: 500; type: integer; is: optional>
       backlog_size:                    # <default: 500; type: integer; is: optional>
    """

    module_type = "output"
    """Set module type"""

    def configure(self, configuration):
        """Read the module configuration and construct the ZabbixSender."""
        BaseThreadedModule.configure(self, configuration)
        self.hostname = self.getConfigurationValue("hostname")
        self.fields = self.getConfigurationValue("fields")
        self.field_prefix = self.getConfigurationValue("field_prefix")
        self.timestamp_field = self.getConfigurationValue("timestamp_field")
        self.batch_size = self.getConfigurationValue('batch_size')
        self.backlog_size = self.getConfigurationValue('backlog_size')
        self.agent_conf = self.getConfigurationValue("agent_conf")
        if self.agent_conf:
            if self.agent_conf is True:
                self.agent_conf = "/etc/zabbix/zabbix_agentd.conf"
            if not os.path.isfile(self.agent_conf):
                self.logger.error("%s does not point to an existing file." % self.agent_conf)
                self.lumbermill.shutDown()
            self.zabbix_sender = ZabbixSender(use_config=self.agent_conf)
        else:
            # BUGFIX: the original checked `self.server`, which was never
            # assigned (only the local `server` was), and logged a stray
            # leftover debug message ("asdads") in this branch.
            server = self.getConfigurationValue("server")
            port = 10051
            if ":" in server:
                server, port = server.split(":")
                # split() yields a string; the sender needs a numeric port.
                port = int(port)
            self.zabbix_sender = ZabbixSender(zabbix_server=server, port=port)
        self.buffer = Buffer(self.getConfigurationValue('batch_size'), self.storeData,
                             self.getConfigurationValue('store_interval_in_secs'),
                             maxsize=self.getConfigurationValue('backlog_size'))

    def getStartMessage(self):
        """Return a human-readable startup summary for logging."""
        if self.agent_conf:
            return "Config: %s. Max buffer size: %d" % (self.agent_conf, self.getConfigurationValue('backlog_size'))
        else:
            return "Server: %s. Max buffer size: %d" % (self.getConfigurationValue("server"), self.getConfigurationValue('backlog_size'))

    def initAfterFork(self):
        """Recreate the buffer in the worker process after forking."""
        BaseThreadedModule.initAfterFork(self)
        self.buffer = Buffer(self.getConfigurationValue('batch_size'), self.storeData,
                             self.getConfigurationValue('store_interval_in_secs'),
                             maxsize=self.getConfigurationValue('backlog_size'))

    def handleEvent(self, event):
        """Queue the event; the buffer flushes batches via storeData()."""
        self.buffer.append(event)
        yield None

    def storeData(self, events):
        """Convert buffered events into ZabbixMetrics and send them."""
        packet = []
        for event in events:
            # BUGFIX: initialize timestamp per event so a falsy
            # timestamp_field cannot leave it undefined (NameError) below.
            timestamp = None
            if self.timestamp_field:
                try:
                    timestamp = event[self.timestamp_field]
                except KeyError:
                    timestamp = None
            hostname = mapDynamicValue(self.hostname, mapping_dict=event, use_strftime=True)
            for field_name in self.fields:
                try:
                    packet.append(ZabbixMetric(hostname, "%s%s" % (self.field_prefix, field_name), event[field_name], timestamp))
                except KeyError:
                    # Events are not required to carry every configured field.
                    pass
        response = self.zabbix_sender.send(packet)
        if response.failed != 0:
            self.logger.warning("%d of %d metrics were not processed correctly." % (response.total-response.processed, response.total))

    def shutDown(self):
        """Flush any buffered events before the module stops."""
        self.buffer.flush()
| 50.192661 | 138 | 0.641565 | 5,125 | 0.936757 | 82 | 0.014988 | 5,148 | 0.940961 | 0 | 0 | 2,338 | 0.427344 |
108ab9c9df5204ab2c5130ba939418e47c9f9a0b | 1,771 | py | Python | communicate.py | IloveKanade/k3cgrouparch | e59ca503b076ddfcdd30bea26604f9fbf5509a2c | [
"MIT"
] | null | null | null | communicate.py | IloveKanade/k3cgrouparch | e59ca503b076ddfcdd30bea26604f9fbf5509a2c | [
"MIT"
] | 2 | 2021-11-11T07:14:50.000Z | 2022-03-23T06:49:40.000Z | communicate.py | IloveKanade/k3cgrouparch | e59ca503b076ddfcdd30bea26604f9fbf5509a2c | [
"MIT"
] | 1 | 2021-08-30T08:54:19.000Z | 2021-08-30T08:54:19.000Z | #!/usr/bin/env python2
# coding: utf-8
import logging
from collections import OrderedDict
from geventwebsocket import Resource
from geventwebsocket import WebSocketApplication
from geventwebsocket import WebSocketServer
import k3utfjson
from k3cgrouparch import account
global_value = {}
logger = logging.getLogger(__name__)
class CgroupArchWebSocketApplication(WebSocketApplication):
    """Websocket endpoint: dispatches JSON commands and replies with JSON."""

    def on_open(self):
        logger.info('on open')

    def on_close(self, reason):
        logger.info('on close')

    def on_message(self, message_str):
        """Handle one incoming message; report any failure to the client."""
        if message_str is None:
            return
        try:
            self.process_message(message_str)
        except Exception as e:
            logger.exception('failed to process message: ' + repr(e))
            self.send_json({'error': repr(e)})

    def process_message(self, message_str):
        """Parse the JSON request and send back the command result."""
        request = k3utfjson.load(message_str)
        args = request.get('args')
        if args is None:
            args = {}
        self.send_json(self.do_cmd(request['cmd'], args))

    def do_cmd(self, cmd, args):
        """Route a command name to its implementation."""
        if cmd == 'show_account':
            return self.show_account(args)
        if cmd == 'get_conf':
            return global_value['context']['arch_conf']
        return {'error': 'invalid cmd: %s' % cmd}

    def show_account(self, args):
        return account.show(global_value['context'], args)

    def send_json(self, value):
        self.ws.send(k3utfjson.dump(value))
def run(context, ip='0.0.0.0', port=22348):
    """Serve the cgroup-arch websocket API forever on (ip, port)."""
    global_value['context'] = context
    resource = Resource(OrderedDict({'/': CgroupArchWebSocketApplication}))
    WebSocketServer((ip, port), resource).serve_forever()
| 25.3 | 69 | 0.636928 | 1,221 | 0.689441 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.113495 |
108b0cf815da5ee0679b962a78a681d92caec0a9 | 1,171 | py | Python | example/BEC.py | zhaofeng-shu33/ace_cream | 7c1d853f42b5451b656615474d4c6d561f9e168c | [
"Apache-1.1"
] | 6 | 2018-06-25T02:02:33.000Z | 2020-06-16T19:39:24.000Z | example/BEC.py | zhaofeng-shu33/ace_cream | 7c1d853f42b5451b656615474d4c6d561f9e168c | [
"Apache-1.1"
] | null | null | null | example/BEC.py | zhaofeng-shu33/ace_cream | 7c1d853f42b5451b656615474d4c6d561f9e168c | [
"Apache-1.1"
] | 2 | 2018-06-25T01:58:10.000Z | 2020-12-06T04:07:57.000Z | #!/usr/bin/python
#author: zhaofeng-shu33
import numpy as np
from ace_cream import ace_cream
def pearson_correlation(X, Y):
    """Sample Pearson correlation of X and Y along axis 0 (biased moments)."""
    covariance = np.mean(X * Y, axis=0) - np.mean(X, axis=0) * np.mean(Y, axis=0)
    return covariance / (np.std(X, axis=0) * np.std(Y, axis=0))
if __name__ == '__main__':
    N_SIZE = 1000
    ERROR_PROBABILITY = 0.1
    # Binary input through an erasure-like channel: with probability
    # ERROR_PROBABILITY the output becomes the erasure symbol 2, otherwise
    # the input bit passes through unchanged.
    x = np.random.choice([0, 1], size=N_SIZE)
    y = np.random.uniform(size=N_SIZE)
    for i in range(N_SIZE):
        y[i] = 2 if y[i] < ERROR_PROBABILITY else x[i]
    # Relabel the symbols with arbitrary values.
    dic_Y = {0: 6, 1: 8, 2: 3}
    dic_X = {0: 7, 1: 9}
    for i in range(N_SIZE):
        y[i] = dic_Y[y[i]]
        x[i] = dic_X[x[i]]
    print('rho(x,y)', pearson_correlation(x, y))
    # use fortran ace by 1985 article author
    tx, ty = ace_cream(x, y, cat=[-1, 0])
    print('mapped X symbol list: ')
    print(np.unique(tx))
    print('mapped Y symbol list: ')
    print(np.unique(ty))
    print('mean(tx) = %f, std(tx) = %f' % (np.mean(tx), np.std(tx)))
    print('mean(ty) = %f, std(ty) = %f' % (np.mean(ty), np.std(ty)))
    print('rho(tx,ty)', pearson_correlation(tx, ty))
    # matches theoretical result: np.sqrt(1-ERROR_PROBABILITY)
| 31.648649 | 124 | 0.571307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.235696 |
108c705719ada1461412a2bd07e90e466ee244b5 | 446 | py | Python | src/main.py | snaka0213/dot_converter | 2851246ef3d454e74ac3135c1a15c0a9d2d22db3 | [
"MIT"
] | null | null | null | src/main.py | snaka0213/dot_converter | 2851246ef3d454e74ac3135c1a15c0a9d2d22db3 | [
"MIT"
] | null | null | null | src/main.py | snaka0213/dot_converter | 2851246ef3d454e74ac3135c1a15c0a9d2d22db3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
from converter import DotConverter
filter_size = int(input("Filter size? >> "))
colors = int(input("Number of colors? >> "))
dtcv = DotConverter(filter_size=filter_size, colors=colors)
path = input("File name? >> ")
dtcv.load(path)
dtcv.convert()
dtcv.show()
select = input("Save this file? [y/n] >> ")
if select != 'y':
sys.exit()
else:
new_path = input("New file name? >> ")
dtcv.save(new_path)
| 22.3 | 59 | 0.663677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.289238 |
108d2f5a8e400cef7db3b4c71125296528340c61 | 5,148 | py | Python | mahiru/rest/internal_client.py | SecConNet/proof_of_concept | 80f6b27ff6b97796803e554387ca2881a792be79 | [
"Apache-2.0"
] | 4 | 2021-03-26T09:17:51.000Z | 2021-05-17T10:31:59.000Z | mahiru/rest/internal_client.py | SecConNet/proof_of_concept | 80f6b27ff6b97796803e554387ca2881a792be79 | [
"Apache-2.0"
] | 58 | 2020-03-02T10:02:51.000Z | 2021-07-09T09:23:49.000Z | mahiru/rest/internal_client.py | SecConNet/proof_of_concept | 80f6b27ff6b97796803e554387ca2881a792be79 | [
"Apache-2.0"
] | null | null | null | """Client for internal REST APIs."""
from copy import copy
from pathlib import Path
from urllib.parse import quote, urlparse
import time
import requests
from mahiru.definitions.assets import Asset
from mahiru.definitions.execution import JobResult
from mahiru.definitions.policy import Rule
from mahiru.definitions.workflows import Job
from mahiru.rest.serialization import deserialize, serialize
from mahiru.rest.validation import validate_json
_CHUNK_SIZE = 1024 * 1024
_JOB_RESULT_WAIT_TIME = 0.5 # seconds
_STANDARD_PORTS = {'http': 80, 'https': 443}
class InternalSiteRestClient:
"""Handles connections to a local site."""
def __init__(self, site: str, endpoint: str) -> None:
"""Create an InternalSiteRestClient.
Args:
site: Site this client is at.
endpoint: Network location of the site's internal endpoint.
"""
self._site = site
self._endpoint = endpoint
def store_asset(self, asset: Asset) -> None:
"""Stores an asset in the site's asset store.
Args:
asset: The asset to store.
"""
stripped_asset = copy(asset)
stripped_asset.image_location = None
r = requests.post(f'{self._endpoint}/assets', json=serialize(
stripped_asset))
if r.status_code != 201:
raise RuntimeError('Error uploading asset to site')
if asset.image_location is not None:
with Path(asset.image_location).open('rb') as f:
r = requests.put(
f'{self._endpoint}/assets/{quote(asset.id)}/image',
headers={'Content-Type': 'application/octet-stream'},
data=f)
if r.status_code != 201:
raise RuntimeError('Error uploading asset image to site')
def add_rule(self, rule: Rule) -> None:
"""Adds a rule to the site's policy store.
Args:
rule: The rule to add.
"""
r = requests.post(f'{self._endpoint}/rules', json=serialize(rule))
if r.status_code != 201:
raise RuntimeError(f'Error adding rule to site: {r.text}')
def submit_job(self, job: Job) -> str:
"""Submits a job to the DDM via the local site.
Args:
job: The job to execute.
Returns:
The new job's id.
"""
r = requests.post(
f'{self._endpoint}/jobs', json=serialize(job),
params={'requester': self._site}, allow_redirects=False)
if r.status_code != 303:
raise RuntimeError(f'Error submitting job: {r.text}')
if 'location' not in r.headers:
raise RuntimeError('Invalid server response when submitting job')
# Protect against malicious servers redirecting us elsewhere
job_uri = r.headers['location']
job_uri_parts = urlparse(job_uri)
job_uri_port = job_uri_parts.port
if job_uri_port is None:
job_uri_port = _STANDARD_PORTS.get(job_uri_parts.scheme)
prefix = f'{self._endpoint}/jobs/'
prefix_parts = urlparse(prefix)
prefix_port = prefix_parts.port
if prefix_port is None:
prefix_port = _STANDARD_PORTS.get(prefix_parts.scheme)
if (
job_uri_parts.scheme != prefix_parts.scheme or
job_uri_parts.netloc != prefix_parts.netloc or
not job_uri_parts.path.startswith(prefix_parts.path) or
job_uri_port != prefix_port):
raise RuntimeError(
f'Unexpected server response {job_uri} when'
' submitting job')
return job_uri
def is_job_done(self, job_id: str) -> bool:
    """Check whether a previously submitted job has finished.

    Args:
        job_id: The job's id from :func:`submit_job`.

    Returns:
        True iff the job is done.

    Raises:
        KeyError: if the job id does not exist.
    """
    result = self._get_job_result(job_id)
    return result.is_done
def get_job_result(self, job_id: str) -> JobResult:
    """Get the results of a submitted job, blocking until the job is done.

    Polls the server every ``_JOB_RESULT_WAIT_TIME`` seconds.

    Args:
        job_id: The job's id from :func:`submit_job`.

    Returns:
        The job's results.

    Raises:
        KeyError: If the job id does not exist.
        RuntimeError: If there was an error communicating with the
            server.
    """
    result = self._get_job_result(job_id)
    while not result.is_done:
        time.sleep(_JOB_RESULT_WAIT_TIME)
        result = self._get_job_result(job_id)
    return result
def _get_job_result(self, job_id: str) -> JobResult:
    """Fetch the job's current result object from the server.

    The job id is itself the job resource URI, so it is fetched directly.
    """
    response = requests.get(job_id)
    if response.status_code == 404:
        raise KeyError('Job not found')
    if response.status_code != 200:
        raise RuntimeError(f'Error getting job status: {response.text}')
    body = response.json()
    # Validate against the JobResult schema before deserializing.
    validate_json('JobResult', body)
    return deserialize(JobResult, body)
| 32.582278 | 77 | 0.596348 | 4,581 | 0.88986 | 0 | 0 | 0 | 0 | 0 | 0 | 1,943 | 0.377428 |
108d7f449381a3192027b88e7c4525cac6f4b668 | 4,266 | py | Python | image_dataset.py | samlaf/self-ensemble-visual-domain-adapt-photo | 77fe1060a0fd05be9219470ff0c1e8ad220fbb73 | [
"MIT"
] | 76 | 2018-02-19T21:51:02.000Z | 2021-08-17T08:31:57.000Z | image_dataset.py | imkhubaibraza/self-ensemble-visual-domain-adapt-photo | 77fe1060a0fd05be9219470ff0c1e8ad220fbb73 | [
"MIT"
] | 6 | 2018-05-29T13:44:28.000Z | 2021-08-24T17:08:26.000Z | image_dataset.py | imkhubaibraza/self-ensemble-visual-domain-adapt-photo | 77fe1060a0fd05be9219470ff0c1e8ad220fbb73 | [
"MIT"
] | 16 | 2018-02-20T18:41:12.000Z | 2022-01-22T20:45:06.000Z | import numpy as np
import cv2
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
class ImageDataset (object):
    """A lazily-loaded image collection with optional ground-truth labels.

    Images are read from disk on demand through the ``images`` accessor;
    when ``dummy`` is True, random noise images of ``img_size`` are
    generated instead (useful for tests without image files).
    """

    class ImageAccessor (object):
        """Indexable view over the dataset's images.

        Supports integer indexing (returns one image), slices and numpy
        index arrays (both return a list of images).
        """

        def __init__(self, dataset):
            self.dataset = dataset

        def __len__(self):
            return len(self.dataset.paths)

        def __getitem__(self, item):
            if isinstance(item, int):
                return self.dataset.load_image(self.dataset.paths[item])
            if isinstance(item, slice):
                selection = range(*item.indices(len(self)))
            elif isinstance(item, np.ndarray):
                selection = item
            else:
                raise TypeError('item should be an int/long, a slice or an array, not a {}'.format(
                    type(item)
                ))
            return [self.dataset.load_image(self.dataset.paths[i])
                    for i in selection]

    def __init__(self, img_size, range01, rgb_order, class_names, n_classes, names, paths, y,
                 dummy=False):
        self.img_size = img_size
        self.range01 = range01
        self.rgb_order = rgb_order
        self.dummy = dummy
        self.images = self.ImageAccessor(self)
        self.class_names = class_names
        self.n_classes = n_classes
        self.names = names
        self.paths = paths
        if y is None:
            self.has_ground_truth = False
        else:
            self.y = np.array(y, dtype=np.int32)
            self.has_ground_truth = True

    def load_image(self, path):
        """Load one image as an HxWx3 uint8 array (random noise in dummy mode)."""
        if self.dummy:
            return np.random.randint(0, 256, size=self.img_size + (3,)).astype(np.uint8)
        img = cv2.imread(path)
        if self.rgb_order:
            # cv2 loads BGR; reverse the channel axis to get RGB.
            img = img[:, :, ::-1]
        return img

    def prediction_evaluator(self, sample_indices=None):
        """Build a PredictionEvaluator over all samples or the given subset."""
        if not self.has_ground_truth:
            raise ValueError('Cannot create evaluator; dataset has no ground truth')
        labels = self.y if sample_indices is None else self.y[sample_indices]
        return PredictionEvaluator(labels, self.n_classes, self.class_names)
class PredictionEvaluator (object):
    """Computes per-class and mean per-class accuracy of predictions."""

    def __init__(self, y, n_classes, class_names):
        self.y = y
        self.n_classes = n_classes
        self.class_names = class_names
        # Per-class ground-truth counts; the denominator for accuracy.
        self.hist = np.bincount(y, minlength=self.n_classes)

    def evaluate(self, tgt_pred_prob_y):
        """Evaluate class-probability predictions against ground truth.

        Args:
            tgt_pred_prob_y: (n_samples, n_classes) array of class scores.

        Returns:
            (mean per-class accuracy, human-readable per-class summary).
        """
        predicted = np.argmax(tgt_pred_prob_y, axis=1)
        true_pos = np.zeros((self.n_classes,))
        for cls in range(self.n_classes):
            true_pos[cls] = ((self.y == cls) & (predicted == cls)).sum()
        # max(count, 1) guards against division by zero for empty classes.
        per_class_acc = true_pos.astype(float) / np.maximum(self.hist.astype(float), 1.0)
        summary = ', '.join('{}: {:.3%}'.format(self.class_names[cls], per_class_acc[cls])
                            for cls in range(self.n_classes))
        return per_class_acc.mean(), summary
def subset_indices(d_source, d_target, subsetsize, subsetseed):
    """Pick `subsetsize` sample indices from source and target datasets.

    Source indices are drawn with stratified sampling on the labels; target
    indices are stratified when ground truth exists, otherwise uniformly.
    When `subsetsize <= 0` no subsetting is done and None indices are
    returned with the full dataset sizes.

    Returns:
        (source_indices, target_indices, n_src, n_tgt)
    """
    if subsetsize > 0:
        if subsetseed != 0:
            subset_rng = np.random.RandomState(subsetseed)
        else:
            # NOTE(review): this passes the np.random *module* (not a
            # RandomState instance) as random_state; sklearn expects
            # None/int/RandomState — confirm this branch actually works.
            subset_rng = np.random
        strat = StratifiedShuffleSplit(n_splits=1, test_size=subsetsize, random_state=subset_rng)
        shuf = ShuffleSplit(n_splits=1, test_size=subsetsize, random_state=subset_rng)
        # The "test" side of the single split is the selected subset.
        _, source_indices = next(strat.split(d_source.y, d_source.y))
        n_src = source_indices.shape[0]
        if d_target.has_ground_truth:
            _, target_indices = next(strat.split(d_target.y, d_target.y))
        else:
            _, target_indices = next(shuf.split(np.arange(len(d_target.images))))
        n_tgt = target_indices.shape[0]
    else:
        source_indices = None
        target_indices = None
        n_src = len(d_source.images)
        n_tgt = len(d_target.images)
    return source_indices, target_indices, n_src, n_tgt
| 34.682927 | 103 | 0.599391 | 3,188 | 0.747304 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.037037 |
10927e8a7c88b7a095ecc1c95079d91466988ee1 | 2,106 | py | Python | tests/test_losses_config.py | blazejdolicki/vissl | 9c10748a19fb1c637f32687142c8cd685f2410ff | [
"MIT"
] | 2,512 | 2021-01-27T18:44:44.000Z | 2022-03-31T19:33:49.000Z | tests/test_losses_config.py | blazejdolicki/vissl | 9c10748a19fb1c637f32687142c8cd685f2410ff | [
"MIT"
] | 361 | 2021-01-27T20:12:09.000Z | 2022-03-31T12:39:34.000Z | tests/test_losses_config.py | blazejdolicki/vissl | 9c10748a19fb1c637f32687142c8cd685f2410ff | [
"MIT"
] | 277 | 2021-01-29T08:09:02.000Z | 2022-03-31T07:57:35.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import unittest
from collections import namedtuple
from classy_vision.generic.distributed_util import set_cpu_device
from parameterized import parameterized
from utils import ROOT_LOSS_CONFIGS, SSLHydraConfig
from vissl.trainer.train_task import SelfSupervisionTask
from vissl.utils.hydra_config import convert_to_attrdict
# NOTE(review): getLogger receives the literal string "__name__", not the
# module's __name__ variable — presumably unintentional; confirm intent.
logger = logging.getLogger("__name__")
# These config-build tests run entirely on CPU.
set_cpu_device()

# Nominal dimensions used to size loss buffers for the checked configs.
BATCH_SIZE = 2048
EMBEDDING_DIM = 128
NUM_CROPS = 2
BUFFER_PARAMS_STRUCT = namedtuple(
    "BUFFER_PARAMS_STRUCT", ["effective_batch_size", "world_size", "embedding_dim"]
)
# Single-node default: world_size = 1.
BUFFER_PARAMS = BUFFER_PARAMS_STRUCT(BATCH_SIZE, 1, EMBEDDING_DIM)
class TestRootConfigsLossesBuild(unittest.TestCase):
    """Checks that every root loss config (and a plain PyTorch loss) builds."""

    @parameterized.expand(ROOT_LOSS_CONFIGS)
    def test_loss_build(self, filepath):
        """Each shipped loss config must produce a buildable loss object."""
        logger.info(f"Loading {filepath}")
        cfg = SSLHydraConfig.from_configs(
            [
                filepath,
                # Synthetic data sources avoid needing real datasets on disk.
                "config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
                "config.DATA.TEST.DATA_SOURCES=[synthetic]",
            ]
        )
        _, config = convert_to_attrdict(cfg.default_cfg)
        task = SelfSupervisionTask.from_config(config)
        task.datasets, _ = task.build_datasets()
        self.assertTrue(task._build_loss(), "failed to build loss")

    def test_pytorch_loss(self):
        """A torch-native criterion (CosineEmbeddingLoss) must also build."""
        cfg = SSLHydraConfig.from_configs(
            [
                "config=test/integration_test/quick_simclr",
                "config.LOSS.name=CosineEmbeddingLoss",
                # '+' prefix adds a key that is absent from the base config.
                "+config.LOSS.CosineEmbeddingLoss.margin=1.0",
                "config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
                "config.DATA.TEST.DATA_SOURCES=[synthetic]",
            ]
        )
        _, config = convert_to_attrdict(cfg.default_cfg)
        task = SelfSupervisionTask.from_config(config)
        task.datasets, _ = task.build_datasets()
        self.assertTrue(task._build_loss(), "failed to build loss")
10937391f6e8ece3c5f8b3a5b4d0211aebbf84c3 | 9,915 | py | Python | LossAug/OpticalFlowLoss.py | gexahedron/pytti | 9b8508537f0e8f10780c0027b2c9b308917e7889 | [
"MIT"
] | null | null | null | LossAug/OpticalFlowLoss.py | gexahedron/pytti | 9b8508537f0e8f10780c0027b2c9b308917e7889 | [
"MIT"
] | null | null | null | LossAug/OpticalFlowLoss.py | gexahedron/pytti | 9b8508537f0e8f10780c0027b2c9b308917e7889 | [
"MIT"
] | null | null | null | from pytti.LossAug import MSELoss, LatentLoss
import sys, os, gc
import argparse
import os
import cv2
import glob
import math, copy
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from PIL import Image
import imageio
import matplotlib.pyplot as plt
from pytti.Notebook import Rotoscoper
from torchvision.transforms import functional as TF
# GMA's modules import relative to its own checkout; temporarily chdir into
# it so 'core' resolves, and always restore the working directory after.
os.chdir('GMA')
try:
    sys.path.append('core')
    from network import RAFTGMA
    from utils import flow_viz
    from utils.utils import InputPadder
finally:
    os.chdir('..')

from pytti.Transforms import apply_flow
from pytti import fetch, to_pil, DEVICE, vram_usage_mode
from pytti.Image.RGBImage import RGBImage

# Lazily-initialized global optical-flow model; populated by init_GMA().
GMA = None
def init_GMA(checkpoint_path):
    """Load the GMA optical-flow model into the global ``GMA`` once.

    Subsequent calls are no-ops. The model is wrapped in DataParallel,
    moved to DEVICE and put in eval mode.
    """
    global GMA
    if GMA is None:
        with vram_usage_mode('GMA'):
            # RAFTGMA reads its settings from an argparse namespace, so
            # build one with the defaults it expects and parse no argv.
            parser = argparse.ArgumentParser()
            parser.add_argument('--model', help="restore checkpoint", default=checkpoint_path)
            parser.add_argument('--model_name', help="define model name", default="GMA")
            parser.add_argument('--path', help="dataset for evaluation")
            parser.add_argument('--num_heads', default=1, type=int,
                                help='number of heads in attention and aggregation')
            parser.add_argument('--position_only', default=False, action='store_true',
                                help='only use position-wise attention')
            parser.add_argument('--position_and_content', default=False, action='store_true',
                                help='use position and content-wise attention')
            parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
            args = parser.parse_args([])
            GMA = torch.nn.DataParallel(RAFTGMA(args))
            GMA.load_state_dict(torch.load(checkpoint_path))
            GMA.to(DEVICE)
            GMA.eval()
def sample(tensor, uv, device=DEVICE):
    """Bilinearly sample `tensor` (C,H,W) at pixel coordinates `uv` (2,H',W')."""
    rows, cols = tensor.shape[-2:]
    # Largest valid pixel coordinate per axis, shaped (2,1,1) to broadcast.
    extent = torch.tensor([cols - 1, rows - 1], device=device).view(2, 1, 1)
    # Rescale pixel coordinates into grid_sample's [-1, 1] range and move
    # the coordinate axis last, as (N, H', W', 2).
    grid = uv.div(extent / 2).sub(1)
    grid = grid.movedim(0, -1).unsqueeze(0)
    sampled = F.grid_sample(tensor.unsqueeze(0), grid, align_corners=True)
    return sampled.squeeze(0)
class TargetFlowLoss(MSELoss):
    """MSE loss between a target flow field and the flow GMA estimates
    between the last rendered step and the current image."""

    def __init__(self, comp, weight = 0.5, stop = -math.inf, name = "direct target loss", image_shape = None):
        super().__init__(comp, weight, stop, name, image_shape)
        with torch.no_grad():
            # Snapshot of the previous frame; updated via set_last_step().
            self.register_buffer('last_step', comp.clone())
            # Mean squared magnitude of the target flow, used to normalize
            # the loss; 1 until set_target_flow() is called.
            self.mag = 1

    @torch.no_grad()
    def set_target_flow(self, flow, device=DEVICE):
        """Store the target flow (H,W,2 -> N,2,H,W) and cache its magnitude."""
        self.comp.set_(flow.movedim(-1,1).to(device, memory_format = torch.channels_last))
        self.mag = float(torch.linalg.norm(self.comp, dim = 1).square().mean())

    @torch.no_grad()
    def set_last_step(self, last_step_pil, device = DEVICE):
        """Record a PIL image of the previous step as the flow reference frame."""
        last_step = TF.to_tensor(last_step_pil).unsqueeze(0).to(device, memory_format = torch.channels_last)
        self.last_step.set_(last_step)

    def get_loss(self, input, img, device=DEVICE):
        """Estimate flow(last_step -> input) with GMA and compare it to the
        target flow, normalized by the target's mean squared magnitude."""
        # GMA resolves its checkpoint path relative to its checkout.
        os.chdir('GMA')
        try:
            init_GMA('checkpoints/gma-sintel.pth')
            image1 = self.last_step
            image2 = input
            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1, image2)
            # Few iterations (3) for speed; test_mode returns (low, up) flows.
            _, flow = GMA(image1, image2, iters=3, test_mode=True)
            flow = flow.to(device, memory_format = torch.channels_last)
        finally:
            os.chdir('..')
        return super().get_loss(TF.resize(flow, self.comp.shape[-2:]), img)/self.mag
class OpticalFlowLoss(MSELoss):
    """Temporal-consistency loss for video stylization: warps the previous
    frame's image along the estimated optical flow and penalizes deviation,
    masking out pixels where the forward/backward flows disagree."""

    @staticmethod
    @torch.no_grad()
    def motion_edge_map(flow_forward, flow_backward, img, border_mode = 'smear', sampling_mode = 'bilinear',device=DEVICE):
        """Build a per-pixel reliability mask from forward/backward flows.

        Pixels are down-weighted at motion boundaries, where the round-trip
        flow check fails ("missed"), or where the flow points off-image.
        Returns a (1,H,W) mask blurred and clipped to [0, 1].
        """
        # algorithm based on https://github.com/manuelruder/artistic-videos/blob/master/consistencyChecker/consistencyChecker.cpp
        # reimplemented in pytorch by Henry Rachootin
        # // consistencyChecker
        # // Check consistency of forward flow via backward flow.
        # //
        # // (c) Manuel Ruder, Alexey Dosovitskiy, Thomas Brox 2016
        # Central-difference derivative kernels for the 2-channel flow field.
        dx_ker = torch.tensor([[[[0,0,0],[1,0,-1],[0, 0,0]]]], device = device).float().div(2).repeat(2,2,1,1)
        dy_ker = torch.tensor([[[[0,1,0],[0,0, 0],[0,-1,0]]]], device = device).float().div(2).repeat(2,2,1,1)
        f_x = nn.functional.conv2d(flow_backward, dx_ker, padding='same')
        f_y = nn.functional.conv2d(flow_backward, dy_ker, padding='same')
        # Squared gradient magnitude of the backward flow = motion edges.
        motionedge = torch.cat([f_x,f_y]).square().sum(dim=(0,1))
        height, width = flow_forward.shape[-2:]
        y,x = torch.meshgrid([torch.arange(0,height), torch.arange(0,width)])
        x = x.to(device)
        y = y.to(device)
        # p1: pixel coordinates in the current frame; p0: where each pixel
        # came from per the backward flow; v0: forward flow sampled at p0.
        p1 = torch.stack([x,y])
        v1 = flow_forward.squeeze(0)
        p0 = p1 + flow_backward.squeeze()
        v0 = sample(v1, p0)
        # Round trip: following forward flow from p0 should land back at p1.
        p1_back = p0 + v0
        v1_back = flow_backward.squeeze(0)
        r1 = torch.floor(p0)
        r2 = r1 + 1
        max_pos = torch.tensor([width-1,height-1], device=device).view(2,1,1)
        min_pos = torch.tensor([0, 0], device=device).view(2,1,1)
        # Source position falls outside the image bounds.
        overshoot = torch.logical_or(r1.lt(min_pos),r2.gt(max_pos))
        overshoot = torch.logical_or(overshoot[0],overshoot[1])
        # Round-trip error large relative to the flow magnitudes (occlusion).
        missed = (p1_back - p1).square().sum(dim=0).ge(torch.stack([v1_back,v0]).square().sum(dim=(0,1)).mul(0.01).add(0.5))
        motion_boundary = motionedge.ge(v1_back.square().sum(dim=0).mul(0.01).add(0.002))
        reliable = torch.ones((height, width), device=device)
        reliable[motion_boundary] = 0
        reliable[missed] = -1
        reliable[overshoot] = 0
        # Soften the mask edges; negative "missed" values clip to 0.
        mask = TF.gaussian_blur(reliable.unsqueeze(0), 3).clip(0,1)
        return mask

    @staticmethod
    @torch.no_grad()
    def get_flow(image1, image2, device=DEVICE):
        """Estimate optical flow image1 -> image2 with GMA.

        Accepts PIL images or (N,C,H,W) tensors; returns the full-res flow.
        """
        # GMA resolves its checkpoint path relative to its checkout.
        os.chdir('GMA')
        try:
            init_GMA('checkpoints/gma-sintel.pth')
            if isinstance(image1, Image.Image):
                image1 = TF.to_tensor(image1).unsqueeze(0).to(device)
            if isinstance(image2, Image.Image):
                image2 = TF.to_tensor(image2).unsqueeze(0).to(device)
            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1, image2)
            flow_low, flow_up = GMA(image1, image2, iters=12, test_mode=True)
        finally:
            os.chdir('..')
        return flow_up

    def __init__(self, comp, weight = 0.5, stop = -math.inf, name = "direct target loss", image_shape = None):
        super().__init__(comp, weight, stop, name, image_shape)
        with torch.no_grad():
            # Companion loss in latent space; its comp is filled by set_flow().
            self.latent_loss = MSELoss(comp.new_zeros((1,1,1,1)), weight, stop, name, image_shape)
            # Background mask (1 = background); set via set_mask().
            self.register_buffer('bg_mask',comp.new_zeros((1,1,1,1)))

    @torch.no_grad()
    def set_flow(self, frame_prev, frame_next, img, path, border_mode = 'smear', sampling_mode = 'bilinear', device = DEVICE):
        """Compute flow between two video frames, warp `img` along it and set
        it (plus the latent target and a reliability mask) as loss targets.

        Returns:
            (warped target as a PIL image, the reliability mask).
        """
        if path is not None:
            # Restore the image parameterization saved for the previous frame.
            img = img.clone()
            state_dict = torch.load(path)
            img.load_state_dict(state_dict)
        gc.collect()
        torch.cuda.empty_cache()
        image1 = TF.to_tensor(frame_prev).unsqueeze(0).to(device)
        image2 = TF.to_tensor(frame_next).unsqueeze(0).to(device)
        if self.bg_mask.shape[-2:] != image1.shape[-2:]:
            bg_mask = TF.resize(self.bg_mask, image1.shape[-2:])
            self.bg_mask.set_(bg_mask)
        noise = torch.empty_like(image2)
        noise.normal_(mean = 0, std = 0.05)
        noise.mul_(self.bg_mask)
        #adding the same noise vectors to both images forces
        #the flow model to match those parts of the frame, effectively
        #disabling the flow in those areas.
        image1.add_(noise)
        image2.add_(noise)
        # bdy = image2.clone().squeeze(0).mean(dim = 0)
        # h, w = bdy.shape
        # s = 4
        # bdy[s:-s,s:-s] = 0
        # mean = bdy.sum().div(w*h - (w-2*s)*(h-s*2))
        # overlay = image2.gt(0.5) if mean > 0.5 else image2.lt(0.5)
        # noise = torch.empty_like(image2)
        # noise.normal_(mean = 0, std = 0.05)
        # noise[torch.logical_not(overlay)] = 0
        # image1.add_(noise)
        # image2.add_(noise)
        flow_forward = OpticalFlowLoss.get_flow(image1, image2)
        flow_backward = OpticalFlowLoss.get_flow(image2, image1)
        unwarped_target_direct = img.decode_tensor()
        # Warp the image along the (negated) backward flow into the next frame.
        flow_target_direct = apply_flow(img, -flow_backward, border_mode = border_mode, sampling_mode = sampling_mode)
        fancy_mask = OpticalFlowLoss.motion_edge_map(flow_forward, flow_backward, img, border_mode, sampling_mode)
        target_direct = flow_target_direct
        target_latent = img.get_latent_tensor(detach = True)
        mask = fancy_mask.unsqueeze(0)
        self.comp.set_(target_direct)
        self.latent_loss.comp.set_(target_latent)
        self.set_flow_mask(mask)
        array = flow_target_direct.squeeze(0).movedim(0,-1).mul(255).clamp(0, 255).cpu().detach().numpy().astype(np.uint8)[:,:,:]
        return Image.fromarray(array), fancy_mask

    @torch.no_grad()
    def set_flow_mask(self,mask):
        """Apply `mask` (resized) to both the direct and latent losses.

        NOTE(review): a None mask reaches TF.resize before the None branch
        below — that branch looks unreachable without an error; confirm.
        """
        super().set_mask(TF.resize(mask, self.comp.shape[-2:]))
        if mask is not None:
            self.latent_loss.set_mask(TF.resize(mask, self.latent_loss.comp.shape[-2:]))
        else:
            self.latent_loss.set_mask(None)

    @torch.no_grad()
    def set_mask(self, mask, inverted = False, device = DEVICE):
        """Set the background mask from a path/URL, PIL image or tensor.

        A leading '-' on a path inverts the mask; an .mp4 path hands the
        mask off to a Rotoscoper for per-frame updates instead.
        """
        if isinstance(mask, str) and mask != '':
            if mask[0] == '-':
                mask = mask[1:]
                inverted = True
            if mask.strip()[-4:] == '.mp4':
                r = Rotoscoper(mask,self)
                r.update(0)
                return
            mask = Image.open(fetch(mask)).convert('L')
        if isinstance(mask, Image.Image):
            with vram_usage_mode('Masks'):
                mask = TF.to_tensor(mask).unsqueeze(0).to(device, memory_format = torch.channels_last)
        if mask not in ['',None]:
            #this is where the inversion is. This mask is naturally inverted :)
            #since it selects the background
            self.bg_mask.set_(mask if inverted else (1-mask))

    def get_loss(self, input, img):
        """Direct (pixel) loss plus latent loss scaled by img.latent_strength."""
        l1 = super().get_loss(input, img)
        l2 = self.latent_loss.get_loss(img.get_latent_tensor(), img)
        #print(float(l1),float(l2))
        return l1+l2*img.latent_strength
| 39.66 | 126 | 0.65648 | 7,743 | 0.780938 | 0 | 0 | 6,267 | 0.632073 | 0 | 0 | 1,527 | 0.154009 |
109422f948a8a29c875dcfdb424e7f71d2bd02c6 | 394 | py | Python | apps/track/migrations/0022_auto_20210319_1551.py | martinlehoux/django_bike | 05373d2649647fe8ebadb0aad54b9a7ec1900fe7 | [
"MIT"
] | 1 | 2020-08-12T17:53:37.000Z | 2020-08-12T17:53:37.000Z | apps/track/migrations/0022_auto_20210319_1551.py | martinlehoux/django_bike | 05373d2649647fe8ebadb0aad54b9a7ec1900fe7 | [
"MIT"
] | 12 | 2020-07-03T03:52:00.000Z | 2021-09-22T18:00:44.000Z | apps/track/migrations/0022_auto_20210319_1551.py | martinlehoux/django_bike | 05373d2649647fe8ebadb0aad54b9a7ec1900fe7 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-03-19 15:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("track", "0021_auto_20200915_1528"),
]
operations = [
migrations.RemoveField(
model_name="track",
name="parser",
),
migrations.DeleteModel(
name="Point",
),
]
| 18.761905 | 47 | 0.560914 | 309 | 0.784264 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.256345 |
1095eaaa785c226cf93b3695060c9c47f5aa16a8 | 451 | py | Python | solutions/404_sum_of_left_leaves.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | solutions/404_sum_of_left_leaves.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | solutions/404_sum_of_left_leaves.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | class Solution:
def sumOfLeftLeaves(self, root: TreeNode) -> int:
    """Return the sum of the values of all left leaves in the tree."""
    if not root:
        return 0
    total = 0
    stack = [root]
    # Iterative DFS; visit order doesn't matter for a sum.
    while stack:
        node = stack.pop()
        left = node.left
        if left:
            # A left child with no children of its own is a left leaf.
            if not left.left and not left.right:
                total += left.val
            stack.append(left)
        if node.right:
            stack.append(node.right)
    return total
| 28.1875 | 62 | 0.463415 | 450 | 0.997783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
10978ae397aee92267c1e48eddfbe94578ffc55a | 6,123 | py | Python | dev_code/color_kmeans_vis.py | Computational-Plant-Science/plant_image_analysis | 321eaae9531cd5f8eaebf3ee6c68b99eb53e420c | [
"BSD-3-Clause"
] | 3 | 2020-11-23T18:41:21.000Z | 2020-11-24T22:13:06.000Z | dev_code/color_kmeans_vis.py | Computational-Plant-Science/plant_image_analysis | 321eaae9531cd5f8eaebf3ee6c68b99eb53e420c | [
"BSD-3-Clause"
] | 3 | 2020-11-23T17:03:31.000Z | 2021-04-29T20:07:27.000Z | dev_code/color_kmeans_vis.py | Computational-Plant-Science/plant_image_analysis | 321eaae9531cd5f8eaebf3ee6c68b99eb53e420c | [
"BSD-3-Clause"
] | null | null | null | '''
Name: color_segmentation.py
Version: 1.0
Summary: Extract plant traits (leaf area, width, height) by parallel processing
Author: suxing liu
Author-email: suxingliu@gmail.com
Created: 2018-09-29
USAGE:
python3 color_kmeans_vis.py -p /home/suxingliu/plant-image-analysis/sample_test/ -i 01.jpg -m 01_seg.jpg -c 5
'''
#!/usr/bin/python
# import the necessary packages
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
import matplotlib.pyplot as plt
import argparse
import utils
import cv2
import numpy as np
import matplotlib.image as mpimg
import pylab as P
import os
def mkdir(path):
    """Create directory `path` (and any missing parents) if absent.

    Leading/trailing whitespace and trailing backslashes are stripped
    from the path first.

    Returns:
        True if the directory was created, False if it already existed.
    """
    # remove surrounding whitespace and any trailing backslash
    path = path.strip().rstrip("\\")
    try:
        # makedirs raises FileExistsError when the leaf already exists,
        # which closes the check-then-create race present in the original
        # os.path.exists() + os.makedirs() sequence.
        os.makedirs(path)
    except FileExistsError:
        # if exists, return
        print (path+'path exists!')
        return False
    print (path+'folder constructed!')
    return True
def color_quantization(image, mask):
    """Cluster the masked plant pixels into args["clusters"] colors and save
    the quantized image, the cluster histogram and a color bar.

    Relies on module-level globals set in __main__: `args` (CLI options)
    and `save_path` (output directory).
    """
    #grab image width and height
    (h, w) = image.shape[:2]
    #change the color storage order
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    #apply the mask to get the segmentation of plant
    masked_image = cv2.bitwise_and(image, image, mask = mask)
    # reshape the image to be a list of pixels
    pixels = masked_image.reshape((masked_image.shape[0] * masked_image.shape[1], 3))
    ############################################################
    #Clustering process
    ###############################################################
    # cluster the pixel intensities
    clt = MiniBatchKMeans(n_clusters = args["clusters"])
    #clt = KMeans(n_clusters = args["clusters"])
    # NOTE(review): fit() followed by fit_predict() clusters the data twice;
    # fit_predict alone would suffice — confirm before changing.
    clt.fit(pixels)
    #assign labels to each cluster
    labels = clt.fit_predict(pixels)
    #obtain the quantized clusters using each label
    quant = clt.cluster_centers_.astype("uint8")[labels]
    # reshape the feature vectors to images
    quant = quant.reshape((h, w, 3))
    image_rec = pixels.reshape((h, w, 3))
    # convert from L*a*b* to RGB
    quant = cv2.cvtColor(quant, cv2.COLOR_RGB2BGR)
    image_rec = cv2.cvtColor(image_rec, cv2.COLOR_RGB2BGR)
    # display the images and wait for a keypress
    #cv2.imshow("image", np.hstack([image_rec, quant]))
    #cv2.waitKey(0)
    #define result path for labeled images
    result_img_path = save_path + 'cluster_out.png'
    # save color_quantization results
    cv2.imwrite(result_img_path,quant)
    # build a histogram of clusters and then create a figure representing the number of pixels labeled to each color
    hist = utils.centroid_histogram(clt)
    # remove the background color cluster
    # NOTE(review): both branches below are identical — the mask/no-mask
    # distinction has no effect here; confirm intended behavior.
    if (args["mask"] == "None"):
        clt.cluster_centers_ = clt.cluster_centers_[1: len(clt.cluster_centers_)]
    else:
        clt.cluster_centers_ = clt.cluster_centers_[1: len(clt.cluster_centers_)]
    #build a histogram of clusters using center lables
    numLabels = utils.plot_centroid_histogram(save_path,clt)
    #create a figure representing the distribution of each color
    bar = utils.plot_colors(hist, clt.cluster_centers_)
    #save a figure of color bar
    utils.plot_color_bar(save_path, bar)
if __name__ == '__main__':
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--path", required=True, help="Current directory for image files.")
    ap.add_argument("-i", "--image", required = True, help = "Path to the image")
    # NOTE(review): -m is marked required yet also has a default ("None");
    # the default can never apply — confirm which was intended.
    ap.add_argument("-m", "--mask", required = True, help = "Path to the mask image", default = "None")
    ap.add_argument("-c", "--clusters", required = True, type = int, help = "# of clusters")
    args = vars(ap.parse_args())
    # setting path for results storage
    current_path = args["path"]
    filename = args["image"]
    image_path = current_path + filename
    # construct result folder (named after the image without its extension)
    mkpath = current_path + str(filename[0:-4])
    mkdir(mkpath)
    # NOTE(review): 'global' at module level is a no-op; save_path is simply
    # a module-level name read by color_quantization().
    global save_path
    save_path = mkpath + '/'
    print ("results_folder: " + save_path)
    # load the image
    image = cv2.imread(image_path)
    # set mask path
    mask_path = current_path + args["mask"]
    # load mask image as grayscale
    im_gray = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
    #extract the binary mask (Otsu picks the threshold automatically)
    (thresh, mask) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    color_quantization(image,mask)
'''
#save mask image
if (args["mask"] == "None"):
#read mask image as gray scale
im_gray = cv2.imread(fig_path_save, cv2.CV_LOAD_IMAGE_GRAYSCALE)
(thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
#fill samll holes and area along edge of image
from skimage.segmentation import clear_border
# remove artifacts connected to image border
cleared = im_bw.copy()
im_bw_cleared = clear_border(cleared)
#from skimage import morphology
#im_bw_cleared = morphology.remove_small_objects(im_bw, 20000, connectivity=2)
#remove small holes and objects
from scipy import ndimage as ndi
label_objects, num_labels = ndi.label(im_bw_cleared)
#print num_labels
sizes = np.bincount(label_objects.ravel())
mask_sizes = sizes > 500
mask_sizes[0] = 0
img_cleaned = mask_sizes[label_objects]
#change output image type
from skimage import img_as_ubyte
img_cleaned = img_as_ubyte(img_cleaned)
#save output mask image
fig_name = (str(filename[0:-4]) + '_' +'mask.png')
fig_path_mask = current_path + fig_name
cv2.imwrite(fig_path_mask, img_cleaned)
fig_path_mask = save_path + fig_name
cv2.imwrite(fig_path_mask, img_cleaned)
'''
| 30.311881 | 116 | 0.645109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,444 | 0.562469 |
1097973ec1380219467a3cca784a3a5967f37e86 | 120 | py | Python | results/results_eval.py | eym55/power_grid_sim | 35822a2d2f09c00a38841e0bf3395bc5ea0ddbaa | [
"MIT"
] | 1 | 2022-01-26T15:19:15.000Z | 2022-01-26T15:19:15.000Z | results/results_eval.py | eym55/power_grid_sim | 35822a2d2f09c00a38841e0bf3395bc5ea0ddbaa | [
"MIT"
] | null | null | null | results/results_eval.py | eym55/power_grid_sim | 35822a2d2f09c00a38841e0bf3395bc5ea0ddbaa | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
import pickle
# Load newline-delimited JSON results into a DataFrame for inspection.
df = pd.read_json('result.json',lines=True)
print(df) | 20 | 43 | 0.783333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.108333 |
109969c2ebef94313329e4c92a1901ae1f014cd6 | 77 | py | Python | fabfile/validation.py | b-cube/Response-Identification-Info | d2fa24c9f0d7db7d8bbf5cda937e1a9dd29a8f6e | [
"MIT"
] | null | null | null | fabfile/validation.py | b-cube/Response-Identification-Info | d2fa24c9f0d7db7d8bbf5cda937e1a9dd29a8f6e | [
"MIT"
] | 1 | 2015-09-23T16:30:34.000Z | 2015-09-23T16:30:34.000Z | fabfile/validation.py | b-cube/Response-Identification-Info | d2fa24c9f0d7db7d8bbf5cda937e1a9dd29a8f6e | [
"MIT"
] | 1 | 2020-03-25T09:41:03.000Z | 2020-03-25T09:41:03.000Z | # to run the metadata validation process via some ec2 and an rds connection
| 25.666667 | 75 | 0.792208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.974026 |
109a679e147b4f56e0888419c137399febf66634 | 5,432 | py | Python | papers_clf/tfidf_2_sentence.py | KellyShao/Writing-robots-vs.-Human | 86d0a2a0f4ca773417f231a2d6796b429f182e38 | [
"CECILL-B"
] | 4 | 2018-10-18T23:02:41.000Z | 2019-10-21T14:44:54.000Z | papers_clf/tfidf_2_sentence.py | KellyShao/Writing-robots-vs.-Human | 86d0a2a0f4ca773417f231a2d6796b429f182e38 | [
"CECILL-B"
] | null | null | null | papers_clf/tfidf_2_sentence.py | KellyShao/Writing-robots-vs.-Human | 86d0a2a0f4ca773417f231a2d6796b429f182e38 | [
"CECILL-B"
] | null | null | null | import csv
import math
import pandas as pd
import numpy as np
from sklearn import svm
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction import text
from sklearn import metrics
from sklearn.metrics import roc_curve,auc,f1_score
import matplotlib.pyplot as plt
from gensim.models import word2vec
from gensim import corpora
from gensim.parsing.preprocessing import strip_numeric
from gensim.parsing.preprocessing import remove_stopwords
from gensim.parsing.preprocessing import strip_short
from gensim.parsing.preprocessing import strip_non_alphanum
# English stop words plus a few hand-picked corpus-specific tokens.
stop_words = text.ENGLISH_STOP_WORDS.union([u'apr',u'archetypr',u'aug',u'configuration',u'conference',u'continuing'])#estimate
# Pre-filtered CSV corpora: sci (real papers) and scigen (generated papers).
sci_file = "cs_papers/sci_after_filter.csv"
scigen_file = "cs_papers/scigen_after_filter.csv"
def import_data(file, row_content, x):
    """Read column `row_content` of a CSV file and clean each cell's text.

    Cleaning removes stop words, digits and tokens shorter than 3 chars
    (gensim preprocessing helpers).

    Args:
        file: Path to the CSV file.
        row_content: Index of the column holding the text.
        x: Label assigned to every row.

    Returns:
        (content, label): list of cleaned strings and a parallel list of `x`.
    """
    content = []
    # `with` guarantees the handle is closed; the original left it open.
    with open(file, 'r') as handle:
        for row in csv.reader(handle):
            cleaned = remove_stopwords(row[row_content])
            cleaned = strip_numeric(cleaned)
            # strip_non_alphanum was disabled in the original; kept off.
            cleaned = strip_short(cleaned, minsize=3)
            content.append(cleaned)
    label = [x] * len(content)
    return content, label
# Column 1 holds the paper text; sci rows are labeled 1, scigen rows 0.
sci_content, sci_label = import_data(sci_file,1,1)
scigen_content, scigen_label = import_data(scigen_file,1,0)
len1=len(sci_content)
len2=len(scigen_content)
# Combined corpus: sci documents first, then scigen documents.
data = sci_content+scigen_content
label = sci_label+scigen_label
def extract_sentence(content, percent):
    """For each document, keep roughly the first `percent` of its sentences.

    Sentences are the '.'-separated fragments longer than 15 characters;
    the kept count is ceil((n_sentences + 1) * percent). The kept
    fragments are concatenated without separators.

    Args:
        content: List of document strings.
        percent: Fraction of sentences to keep (0..1].

    Returns:
        List of truncated document strings, same length as `content`.
    """
    trimmed = []
    for document in content:
        sentences = [frag for frag in document.split('.') if len(frag) > 15]
        keep = int(math.ceil((len(sentences) + 1) * percent))
        trimmed.append(''.join(sentences[:keep]))
    return trimmed
def auc(content, label,cross_fold):
    """Cross-validated TF-IDF + linear-SVM evaluation of sentence-prefix
    classification; plots and appends mean F1 per prefix length to CSV.

    NOTE(review): despite the name, this now reports F1 (the AUC code is
    commented out below). This file uses Python 2 print statements, and
    the (percent-1)/5 index below relies on Python 2 integer division.
    """
    f1_mean = np.zeros(20)
    for i in range(0,cross_fold):
        print 'cross_v'+str(i)
        # NOTE(review): 928/1836 hard-code the corpus sizes (928 auto +
        # 908 human rows) — confirm they match the loaded CSVs.
        content_auto = content[0:928]
        content_human = content[928:1836]
        label_auto = label[0:928]
        label_human = label[928:1836]
        random_num = np.random.randint(low=0, high=100)
        print 'random_num_auto:' +str(random_num)
        content_train_auto,content_test_auto,label_train_auto,label_test_auto = train_test_split(content_auto, label_auto, test_size=0.2,random_state=random_num)
        random_num = np.random.randint(low=0, high=100)
        print 'random_num_human:' +str(random_num)
        content_train_human,content_test_human,label_train_human,label_test_human = train_test_split(content_human, label_human, test_size=0.2,random_state=random_num)
        content_train = content_train_auto+content_train_human
        content_test = content_test_auto+content_test_human
        label_train = label_train_auto+label_train_human
        label_test = label_test_auto+label_test_human
        # Fit the training vocabulary (top 100 terms) ...
        vectorizer_train=TfidfVectorizer(encoding='utf-8', decode_error='ignore', strip_accents='unicode',
                                         token_pattern=u'(?ui)\\b\\w*[a-z]+\\w*\\b', stop_words=stop_words,
                                         lowercase=True, analyzer='word',max_features=100)# ngram_range=(1,2),
        tfidf_train = vectorizer_train.fit_transform(content_train)
        word_train = vectorizer_train.get_feature_names()
        tfidf_metric_train = tfidf_train.toarray()
        # ... and reuse that vocabulary for the test side.
        vectorizer_test=TfidfVectorizer(encoding='utf-8', decode_error='ignore', strip_accents='unicode',
                                        token_pattern=u'(?ui)\\b\\w*[a-z]+\\w*\\b', stop_words=stop_words,
                                        lowercase=True, analyzer='word',vocabulary=vectorizer_train.vocabulary_)
        #build clf
        clf = svm.SVC(kernel='linear')#, probability=True)
        clf_res = clf.fit(tfidf_train, label_train)
        #input sentence
        # Evaluate on progressively longer sentence prefixes: 1%..96%.
        for percent in range(1,101,5):
            print 'sentence'+str(percent*0.01)
            new_content_test = extract_sentence(content_test,percent*0.01)
            tfidf_test = vectorizer_test.fit_transform(new_content_test)
            word_test = vectorizer_test.get_feature_names()
            pred = clf_res.predict(tfidf_test)
            score_micro = f1_score(label_test, pred, average='micro')
            score_macro = f1_score(label_test, pred, average='macro')
            f1=(score_macro+score_micro)/2
            f1_mean[(percent-1)/5]+=f1
    f1_mean = f1_mean/cross_fold
    #pred = clf_res.predict(tfidf_test)
    ##predict_prob = clf_res.predict_proba(tfidf_test)[:,1]
    #auc = metrics.roc_auc_score(label_test,pred)
    #print 'auc: %0.20f'%auc
    #auc_mean = auc_mean+auc
    #auc_mean = auc_mean/cross_fold
    # Plot mean F1 vs. prefix-length bucket (plt.show() blocks until closed).
    x_axis = range(1,21)
    x=np.array(x_axis)
    plt.plot(x,f1_mean)
    plt.show()
    print f1_mean
    f1_mean = list(f1_mean)
    f1_mean_csv=pd.DataFrame(f1_mean)
    # Appended (mode='a'), so repeated runs accumulate rows in the CSV.
    f1_mean_csv.to_csv('f1/f1_tfidf_sentence.csv',mode='a',header=False)
auc(data,label,10)
| 40.537313 | 168 | 0.660714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 707 | 0.130155 |
109b329d3c687011ac089097beb800b987a7d727 | 1,108 | py | Python | chat_manager_api/categories/account.py | lordralinc/chat_manager_api | 6c1d57f1d4942d53ee70099ea91f4cbb14b63aab | [
"MIT"
] | null | null | null | chat_manager_api/categories/account.py | lordralinc/chat_manager_api | 6c1d57f1d4942d53ee70099ea91f4cbb14b63aab | [
"MIT"
] | null | null | null | chat_manager_api/categories/account.py | lordralinc/chat_manager_api | 6c1d57f1d4942d53ee70099ea91f4cbb14b63aab | [
"MIT"
] | null | null | null | from chat_manager_api.categories.base import BaseAPICategory
from chat_manager_api.models import account
class AccountAPICategory(BaseAPICategory):
def get_web_hook_info(self) -> account.GetWebHookInfo:
return self.api.make_request("account.getWebHookInfo", dataclass=account.GetWebHookInfo)
async def get_web_hook_info_async(self) -> account.GetWebHookInfo:
return await self.api.make_request_async("account.getWebHookInfo", dataclass=account.GetWebHookInfo)
def set_web_hook(self, url: str) -> account.SetWebHook:
return self.api.make_request("account.setWebHook", data=dict(url=url), dataclass=account.SetWebHook)
async def set_web_hook_async(self, url: str) -> account.SetWebHook:
return await self.api.make_request_async("account.setWebHook", data=dict(url=url), dataclass=account.SetWebHook)
def remove_web_hook(self) -> str:
return self.api.make_request("account.removeWebHook", dataclass=str)
async def remove_web_hook_async(self) -> str:
return await self.api.make_request_async("account.removeWebHook", dataclass=str)
| 46.166667 | 120 | 0.768953 | 1,000 | 0.902527 | 0 | 0 | 0 | 0 | 497 | 0.448556 | 134 | 0.120939 |
109ec4c3520aaa8f1a607267ca7bf3c73ab92e09 | 1,674 | py | Python | apps/health/views.py | dtisza1/bluebutton-web-server | 6322f28d75bd9e00f8dc4b5988a0cd5f7c6c80cb | [
"Apache-2.0"
] | null | null | null | apps/health/views.py | dtisza1/bluebutton-web-server | 6322f28d75bd9e00f8dc4b5988a0cd5f7c6c80cb | [
"Apache-2.0"
] | null | null | null | apps/health/views.py | dtisza1/bluebutton-web-server | 6322f28d75bd9e00f8dc4b5988a0cd5f7c6c80cb | [
"Apache-2.0"
] | null | null | null | import logging
from django.core.exceptions import ImproperlyConfigured
from rest_framework.exceptions import APIException
from rest_framework.views import APIView
from rest_framework.response import Response
from .checks import (
internal_services,
external_services,
)
# Module logger, namespaced under the project's 'hhs_server' logging hierarchy.
logger = logging.getLogger('hhs_server.%s' % __name__)
class ServiceUnavailable(APIException):
    """Raised when a health check fails; rendered by DRF as an HTTP 503.

    503 tells clients and load balancers the outage is expected to be
    temporary and the request may be retried later.
    """
    status_code = 503
    default_detail = 'Service temporarily unavailable, try again later.'
    default_code = 'service_unavailable'
class Check(APIView):
    """Base health-check view.

    Subclasses define a ``services`` attribute: a sequence of callables that
    each take a boolean "v2" flag and return truthy when their service is
    healthy. Any failure is surfaced to the client as an HTTP 503
    (``ServiceUnavailable``).
    """

    def get(self, request, format=None):
        """Run every configured service check.

        Returns a 200 response when all checks pass; raises
        ``ServiceUnavailable`` (503) when a check fails or raises.
        """
        check = None  # BUGFIX: previously unbound if get_services() raised,
                      # so the generic handler below crashed with
                      # UnboundLocalError instead of reporting a 503.
        try:
            for check in self.get_services():
                # v2 endpoints are routed through paths ending in 'external_v2'.
                v2 = request.path.endswith('external_v2')
                if not check(v2):
                    raise ServiceUnavailable()
        except ServiceUnavailable:
            raise
        except Exception as e:
            svc = getattr(check, '__name__', 'unknown')
            logger.exception("health check raised exception. {reason}".format(reason=e))
            raise ServiceUnavailable(detail="Service temporarily unavailable, try again later. There is an issue with the - {svc}"
                                     " - service check. Reason: {reason}".format(svc=svc, reason=e))
        return Response({'message': 'all\'s well'})

    def get_services(self):
        """Return the configured check callables.

        Raises ImproperlyConfigured when a subclass did not define a
        non-empty ``services`` attribute.
        """
        if not hasattr(self, "services"):
            raise ImproperlyConfigured
        if len(self.services) < 1:
            raise ImproperlyConfigured(
                "please specify at least one service to check")
        return self.services
class CheckInternal(Check):
    """Health-check endpoint covering the internal service checks."""
    services = internal_services
class CheckExternal(Check):
    """Health-check endpoint covering the external service checks."""
    services = external_services
| 32.823529 | 130 | 0.664875 | 1,328 | 0.793309 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.203704 |
10a1b996b725d39d3e7091726d568372d4862d43 | 9,623 | py | Python | tripleohelper/ovb_baremetal.py | redhat-openstack/python-tripleo-helper | bfa165538335edb1088170c7a92f097167225c81 | [
"Apache-2.0"
] | 2 | 2016-12-20T11:19:20.000Z | 2017-03-17T22:39:32.000Z | tripleohelper/ovb_baremetal.py | redhat-openstack/python-tripleo-helper | bfa165538335edb1088170c7a92f097167225c81 | [
"Apache-2.0"
] | null | null | null | tripleohelper/ovb_baremetal.py | redhat-openstack/python-tripleo-helper | bfa165538335edb1088170c7a92f097167225c81 | [
"Apache-2.0"
] | 1 | 2020-08-07T21:55:37.000Z | 2020-08-07T21:55:37.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import concurrent.futures
import logging
import tripleohelper.baremetal
from tripleohelper import ovb_bmc
import tripleohelper.provisioners.openstack.provisioner as os_provisioner
from tripleohelper.provisioners.openstack import utils as os_utils
import tripleohelper.server as server
LOG = logging.getLogger('tripleohelper')
class Baremetal(server.Server):
    """A baremetal node.

    In OVB the "baremetal" node is actually a Nova VM that is later driven
    through a simulated BMC as if it were physical hardware.
    """
    def __init__(self, nova_api, neutron, keypair, key_filename, security_groups, name):
        """Prepare a (not yet deployed) baremetal node.

        :param nova_api: Nova client used to create/inspect the backing VM
        :param neutron: Neutron client used to manage the provision port
        :param keypair: name of the Nova keypair used to boot the VM
        :param key_filename: path of the SSH private key
        :param security_groups: extra security groups to attach to the VM
        :param name: name of the Nova instance to create
        """
        # 192.0.2.240 is the undercloud address; SSH access to the node is
        # proxied through it.
        server.Server.__init__(self, None, via_ip='192.0.2.240', key_filename=key_filename)
        self.nova_api = nova_api
        self.neutron = neutron
        self.mac = None                  # MAC of the provision port, set by deploy()
        self._os_instance = None         # backing Nova server object
        self._provision_port_id = None   # Neutron port id on the provision network
        self._keypair = keypair
        self._key_filename = key_filename
        self._security_groups = security_groups
        self.name = name
        self.flavor = None               # Ironic profile name, restored later
        self.status = None
        # NOTE(review): server.Server.__init__ is already invoked above with
        # different arguments; this second call resets hostname — confirm both
        # calls are intentional.
        super(Baremetal, self).__init__(hostname=None)
    def deploy(self, image_name, ip, flavor='m1.small'):
        """Create the node.

        This method should only be called by the BaremetalFactory.

        :param image_name: Glance image to boot (e.g. an iPXE image)
        :param ip: fixed IP to assign on the provision network
        :param flavor: Nova flavor name for the backing VM
        """
        # Create the provision-network port first so the VM gets its DHCP
        # lease on that network.
        body_value = {
            "port": {
                "admin_state_up": True,
                "name": self.name + '_provision',
                "network_id": os_utils.get_network_id(self.nova_api, 'provision_bob'),
                'fixed_ips': [{'ip_address': ip}]}}
        response = self.neutron.create_port(body=body_value)
        self._provision_port_id = response['port']['id']
        self.mac = response['port']['mac_address']
        image_id_to_boot_from = os_utils.get_image_id(self.nova_api, image_name)
        flavor_id = os_utils.get_flavor_id(self.nova_api, flavor)
        # TODO(Gonéri): We don't need keypair for the BM nodes
        keypair_id = os_utils.get_keypair_id(self.nova_api, self._keypair)
        # Ensure with get DHCP lease on the provision network first
        nics = [{'port-id': self._provision_port_id}]
        self._os_instance = os_provisioner.build_openstack_instance(
            self.nova_api,
            self.name,
            image_id_to_boot_from,
            flavor_id,
            keypair_id,
            nics)
        if not self._os_instance:
            LOG.error("deployment has failed")
            # NOTE(review): a bare Exception() carries no context — consider a
            # dedicated error type.
            raise Exception()
        os_provisioner.add_provision_security_group(self.nova_api)
        os_utils.add_security_groups(self._os_instance, ['provision'])
        os_utils.add_security_groups(self._os_instance, self._security_groups)
        LOG.info("add security groups '%s'" % self._security_groups)
        LOG.info("instance '%s' ready to use" % self.name)
        # the instance should be off for Ironic
        self._os_instance.stop()
    def admin_state_up(self, state):
        """Turns up/down the network connection."""
        self.neutron.update_port(self._provision_port_id, {'port': {'admin_state_up': state}})
    def pxe_netboot(self, filename):
        """Specify which file ipxe should load during the netboot."""
        # The DHCP options point the node's iPXE at the HTTP/TFTP services
        # hosted on the undercloud (192.0.2.240).
        new_port = {
            'extra_dhcp_opts': [
                {'opt_name': 'bootfile-name', 'opt_value': 'http://192.0.2.240:8088/' + filename, 'ip_version': 4, },
                {'opt_name': 'tftp-server', 'opt_value': '192.0.2.240', 'ip_version': '4'},
                {'opt_name': 'server-ip-address', 'opt_value': '192.0.2.240', 'ip_version': '4'}
            ]
        }
        self.neutron.update_port(self._provision_port_id, {'port': new_port})
    def refresh_nova_instance(self):
        # Re-fetch the Nova server so cached fields (e.g. status) are current.
        self._os_instance = self.nova_api.servers.get(self._os_instance.id)
    def shutdown(self):
        # Stop the backing VM if it is currently running.
        self.refresh_nova_instance()
        if self._os_instance.status == 'ACTIVE':
            self._os_instance.stop()
    def refresh_status(self, undercloud):
        """Refresh hostname/port information and enable root SSH on the node.

        :param undercloud: undercloud server used as SSH jump host
        """
        self.refresh_nova_instance()
        ports = self.neutron.list_ports(name='%s_provision' % self.name)
        self.hostname = ports['ports'][0]['fixed_ips'][0]['ip_address']
        self.via_ip = undercloud.hostname
        self._provision_port_id = ports['ports'][0]['id']
        # Nothing more to do while the VM is powered off.
        if self._os_instance.status == 'SHUTOFF':
            return
        # Copy the stack user's authorized_keys to root on the node so later
        # steps can SSH in directly as root.
        command = """cat .ssh/authorized_keys | ssh -o UserKnownHostsFile=/dev/null -o PasswordAuthentication=no -o stricthostkeychecking=no heat-admin@{node_ip} 'sudo bash -c "cat >> ~root/.ssh/authorized_keys"'"""
        # The VM may be blocked because of ipxe
        undercloud.run(command.format(node_ip=self.hostname), user='stack', success_status=(0, 255,))
class BaremetalFactory(tripleohelper.baremetal.BaremetalFactory):
    """Creates and manages the pool of OVB "baremetal" nodes (Nova VMs)."""
    def __init__(self, nova_api, neutron, keypair, key_filename, security_groups,
                 os_params={}):
        # NOTE(review): mutable default argument; harmless here since
        # os_params is only read, never mutated — but worth confirming.
        self.instackenv = []            # Ironic enrollment entries, one per node
        self.nova_api = nova_api
        self.neutron = neutron
        self._idx = 100                 # next provision IP suffix: 192.0.2.<idx>
        self._keypair = keypair
        self._key_filename = key_filename
        self._security_groups = security_groups
        self.nodes = []
        # Only deploy a BMC when OpenStack credentials were provided.
        if os_params:
            self.bmc = self.create_bmc(**os_params)
    def initialize(self, size=2):
        """Populate the node poll.

        :param size: the number of node to create.
        """
        # The IP should be in this range, this is the default DHCP range used by the introspection.
        # inspection_iprange = 192.0.2.100,192.0.2.120
        for i in range(0, size):
            self.nodes.append(
                Baremetal(
                    self.nova_api,
                    self.neutron,
                    self._keypair,
                    self._key_filename,
                    self._security_groups,
                    name='baremetal_%d' % i))
        # Deploy all the nodes concurrently (at most 5 at a time).
        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
            for bm_node in self.nodes:
                future = executor.submit(
                    bm_node.deploy,
                    'ipxe.usb',
                    '192.0.2.%d' % self._idx,
                    flavor='m1.large')
                self._idx += 1
                bm_node._future = future
        for bm_node in self.nodes:
            # Propagate any deployment exception, then register the node on
            # the BMC and record its Ironic enrollment data.
            bm_node._future.result()
            pm_addr = self.bmc.register_host(bm_node.name)
            self.instackenv.append({
                "pm_type": "pxe_ipmitool",
                "mac": [bm_node.mac],
                # TODO(Gonéri): We should get these informations from the baremetal node's flavor
                "cpu": "4",
                "memory": "8196",
                "disk": "80",
                "arch": "x86_64",
                "pm_user": "admin",
                "pm_password": "password",
                "pm_addr": pm_addr
            })
        self.bmc.ssh_pool.stop_all()
    def reload_environment(self, undercloud):
        """Rebuild the node list from an already-deployed environment.

        :param undercloud: undercloud server used to read the instackenv
            content and reach the nodes
        """
        servers = {}
        for s in self.nova_api.servers.list():
            if s.name.startswith('baremetal_'):
                servers[s.name] = s
        for name, s in sorted(servers.items()):
            node = Baremetal(
                self.nova_api,
                self.neutron,
                keypair=self._keypair,
                key_filename=self._key_filename,
                security_groups=self._security_groups,
                name=s.name)
            node._os_instance = s
            self.nodes.append(node)
        # Pair each node with an instackenv entry; assumes both sequences are
        # in the same (name-sorted) order — TODO confirm.
        i = iter(self.nodes)
        for instack_node in self.load_instackenv_content(undercloud):
            node = next(i)
            node.mac = instack_node['mac'][0]
            node.refresh_status(undercloud)
        # restore the flavor
        undercloud.add_environment_file(user='stack', filename='stackrc')
        command = """ironic node-list --fields properties|sed -n 's/.*profile:\([-_a-z]*\),.*/\\1/p'"""
        flavor_list = undercloud.run(command, user='stack')[0].split()
        if flavor_list:
            i = iter(flavor_list)
            for node in self.nodes:
                node.flavor = next(i)
    def create_bmc(self, os_username, os_password, os_project_id, os_auth_url):
        """Deploy the BMC machine.

        This machine hosts the ipmi servers, each ipmi server is associated to a baremetal
        node and has its own IP.
        """
        bmc = ovb_bmc.OvbBmc(
            nova_api=self.nova_api,
            neutron=self.neutron,
            keypair=self._keypair,
            key_filename=self._key_filename,
            security_groups=self._security_groups,
            image_name='Fedora 23 x86_64',
            ip='192.0.2.254',
            os_username=os_username,
            os_password=os_password,
            os_project_id=os_project_id,
            os_auth_url=os_auth_url)
        return bmc
    def pxe_netboot(self, filename='boot.ipxe'):
        """Configure the OVB underlying Neutron to do a network boot

        :param filename: the name of the IPXE script to boot on. Default
            is boot.ipxe.
        """
        for bm_node in self.nodes:
            bm_node.pxe_netboot(filename)
| 39.929461 | 215 | 0.602827 | 8,666 | 0.900364 | 0 | 0 | 0 | 0 | 0 | 0 | 2,800 | 0.290909 |
10a276b2286ab552ab578380c0270c9f71f18d28 | 524 | py | Python | main.py | soup-bowl/lu-jubilee-btn | 8791044acd7e4d6a2dc21d71781f6365d2e7ce14 | [
"MIT"
] | null | null | null | main.py | soup-bowl/lu-jubilee-btn | 8791044acd7e4d6a2dc21d71781f6365d2e7ce14 | [
"MIT"
] | null | null | null | main.py | soup-bowl/lu-jubilee-btn | 8791044acd7e4d6a2dc21d71781f6365d2e7ce14 | [
"MIT"
] | null | null | null | from machine import Pin
import time
from led import LED
# GPIO setup: button on pin 21 (active-low with internal pull-up),
# status LED on pin 25.
tube_btn = Pin(21, Pin.IN, Pin.PULL_UP)
sys_led = Pin(25, Pin.OUT)

# Power-on self-test: blink so the operator can confirm the LEDs work.
print('Blinking LED to power check (no LED? Check LED batteries and/or script).')
LED.led_blink(5)
print('Blink code finish - Listening for presses.')

while True:
    # Sample the button twice, 10 ms apart, to detect level transitions.
    before = tube_btn.value()
    time.sleep(0.01)
    after = tube_btn.value()
    went_down = before and not after   # high -> low: button pressed (pull-up)
    went_up = after and not before     # low -> high: button released
    if went_down:
        print('Button pressed.')
        LED.led_display(2)
    elif went_up:
        print('Button released.')
| 24.952381 | 81 | 0.677481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.291985 |
10a2a06189cfd607200f594682ebb88158b3ddb8 | 2,285 | py | Python | trampolino/workflows/dsi_trk.py | matteomancini/trampolino | af904844404f97c49e0452527b01fd9f415677ba | [
"MIT"
] | 3 | 2020-06-17T09:25:22.000Z | 2021-06-22T23:16:53.000Z | trampolino/workflows/dsi_trk.py | matteomancini/trampolino | af904844404f97c49e0452527b01fd9f415677ba | [
"MIT"
] | 1 | 2020-06-26T08:47:01.000Z | 2020-07-21T14:02:40.000Z | trampolino/workflows/dsi_trk.py | matteomancini/trampolino | af904844404f97c49e0452527b01fd9f415677ba | [
"MIT"
] | 3 | 2020-06-16T11:55:50.000Z | 2020-11-25T01:37:17.000Z | from nipype.interfaces import utility as util
from nipype.pipeline import engine as pe
from .interfaces import dsi_studio as dsi
import nipype.interfaces.diffusion_toolkit as dtk
from nipype.algorithms.misc import Gunzip
import os.path
def create_pipeline(name="dsi_track", opt="", ensemble=""):
    """Build a DSI-Studio fiber-tracking Nipype workflow.

    :param name: workflow name
    :param opt: comma-separated "key:value" overrides (currently only 'nos',
        the number of streamlines, is used); malformed entries are reported
        and skipped
    :param ensemble: parameter to iterate over for ensemble tracking
        ('angle' or 'min_length'); an empty string disables ensembling
    :return: the configured nipype Workflow
    """
    # Tracking defaults; may be overridden through `opt`.
    parameters = {'nos': 5000}
    # Maps the public ensemble names onto FiberTrack input-field names.
    ensemble_dict = {'angle': 'angle_thres',
                     'min_length': 'min_length'}

    inputnode = pe.Node(
        interface=util.IdentityInterface(
            fields=["odf", "seed", "angle", "algorithm", "min_length"]),
        name="inputnode")

    # BUGFIX: the previous check was `if opt is not None`, so the default
    # value "" was split into [''] and produced a spurious
    # "irregular format, skipping" warning on every default call.
    if opt:
        opt_list = opt.split(',')
        for o in opt_list:
            try:
                key, value = o.split(':')
                parameters[key] = value
            except ValueError:
                print(o + ': irregular format, skipping')

    if ensemble:
        # Ensemble mode: one FiberTrack run per value of the chosen parameter.
        tckgen = pe.MapNode(dsi.FiberTrack(),
                            name='track', iterfield=ensemble_dict[ensemble])
        gunzip = pe.MapNode(interface=Gunzip(), name="gunzip",
                            iterfield='in_file')
    else:
        tckgen = pe.Node(dsi.FiberTrack(), name='track')
        gunzip = pe.Node(interface=Gunzip(), name="gunzip")
    tckgen.inputs.nos = int(parameters['nos'])

    tckmerge = pe.Node(interface=dtk.TrackMerge(), name="merge")

    output_fields = ["tck"]
    outputnode = pe.Node(
        interface=util.IdentityInterface(fields=output_fields),
        name="outputnode")

    workflow = pe.Workflow(name=name)
    workflow.base_output_dir = name

    workflow.connect([(inputnode, tckgen, [("odf", "in_file"),
                                           ("angle", "angle_thres"),
                                           ("min_length", "min_length")]),
                      (tckgen, gunzip, [("out_file", "in_file")])])
    if inputnode.inputs.seed:
        workflow.connect([(inputnode, tckgen, [("seed", "seed_image")])])
    if ensemble:
        # Merge the per-parameter track files into a single output.
        workflow.connect([
            (gunzip, tckmerge, [("out_file", "track_files")]),
            (tckmerge, outputnode, [("track_file", "tck")])
        ])
    else:
        workflow.connect([(gunzip, outputnode, [("out_file", "tck")])])
    return workflow
def get_parent():
    """Name of the upstream workflow stage this tracking step consumes."""
    return "dsi_rec"
| 32.642857 | 76 | 0.569803 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 379 | 0.165864 |
10a3272e6ee4e0b4d86ddd253e763f6e83ab8cca | 780 | py | Python | sponge-examples-projects/sponge-examples-project-spring-boot/sponge/remote_api_security.py | mnpas/sponge | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | [
"Apache-2.0"
] | 9 | 2017-12-16T21:48:57.000Z | 2022-01-06T12:22:24.000Z | sponge-examples-projects/sponge-examples-project-spring-boot/sponge/remote_api_security.py | mnpas/sponge | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | [
"Apache-2.0"
] | 3 | 2020-12-18T11:56:46.000Z | 2022-03-31T18:37:10.000Z | sponge-examples-projects/sponge-examples-project-spring-boot/sponge/remote_api_security.py | mnpas/sponge | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | [
"Apache-2.0"
] | 2 | 2019-12-29T16:08:32.000Z | 2020-06-15T14:05:34.000Z | """
Sponge Knowledge Base
Remote API security
"""
def configureAccessService():
    """Configure the RoleBasedAccessService: role -> regexp permissions."""
    access = remoteApiServer.accessService
    # Knowledge bases each role may access (regexps on kb names).
    kb_roles = {"ROLE_ADMIN": [".*"], "ROLE_ANONYMOUS": ["boot", "python"]}
    access.addRolesToKb(kb_roles)
    # Events each role may send (regexps on event names).
    send_roles = {"ROLE_ADMIN": [".*"], "ROLE_ANONYMOUS": []}
    access.addRolesToSendEvent(send_roles)
    # Events each role may subscribe to.
    subscribe_roles = {"ROLE_ADMIN": [".*"], "ROLE_ANONYMOUS": [".*"]}
    access.addRolesToSubscribeEvent(subscribe_roles)
def onStartup():
    """Sponge lifecycle hook: runs once when the knowledge base starts."""
    # Configure the access service on startup.
    configureAccessService()
def onAfterReload():
    """Sponge lifecycle hook: runs after each knowledge-base reload."""
    # Reconfigure the access service after each reload.
    configureAccessService()
| 33.913043 | 107 | 0.723077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 422 | 0.541026 |
10a4265ffb9a4bb38670b4dbc1ea1167b07ffcc8 | 753 | py | Python | ConsecutiveCharacters.py | vanigupta20024/Programming-Challenges | 578dba33e9f6b04052a503bcb5de9b32f33494a5 | [
"MIT"
] | 14 | 2020-10-15T21:47:18.000Z | 2021-12-01T06:06:51.000Z | ConsecutiveCharacters.py | vanigupta20024/Programming-Challenges | 578dba33e9f6b04052a503bcb5de9b32f33494a5 | [
"MIT"
] | null | null | null | ConsecutiveCharacters.py | vanigupta20024/Programming-Challenges | 578dba33e9f6b04052a503bcb5de9b32f33494a5 | [
"MIT"
] | 4 | 2020-06-15T14:40:45.000Z | 2021-06-15T06:22:03.000Z | '''
Given a string s, the power of the string is the maximum length of a non-empty substring that contains only one unique character.
Return the power of the string.
Example 1:
Input: s = "leetcode"
Output: 2
Explanation: The substring "ee" is of length 2 with the character 'e' only.
Example 2:
Input: s = "abbcccddddeeeeedcba"
Output: 5
Explanation: The substring "eeeee" is of length 5 with the character 'e' only.
'''
class Solution:
    def maxPower(self, s: str) -> int:
        """Return the length of the longest run of a single repeated character.

        Single left-to-right scan tracking the current run length and the
        best completed run. O(len(s)) time, O(1) space.

        :param s: input string (may be empty)
        :return: longest run length; 0 for an empty string
        """
        # Robustness fix: the original returned 1 for "" because `current`
        # started at 1 unconditionally.
        if not s:
            return 0
        current = 1    # length of the run ending at the current index
        max_freq = 0   # best completed run seen so far
        for i in range(1, len(s)):
            if s[i] == s[i - 1]:
                current += 1
            else:
                max_freq = max(current, max_freq)
                current = 1
        # The final run is never closed inside the loop; account for it here.
        return max(max_freq, current)
| 26.892857 | 129 | 0.61089 | 326 | 0.432935 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.563081 |
10a46c5b25f3ebf58bddf11dc9dfc17dcf31469d | 8,540 | py | Python | tests/ocr/test_suggestion_medication_administration.py | lifeomic/phc-sdk-py | 51709c1c2f129a3fbe336a44e4d501ae0249859e | [
"MIT"
] | 1 | 2020-07-22T12:46:58.000Z | 2020-07-22T12:46:58.000Z | tests/ocr/test_suggestion_medication_administration.py | lifeomic/phc-sdk-py | 51709c1c2f129a3fbe336a44e4d501ae0249859e | [
"MIT"
] | 54 | 2019-10-09T16:19:04.000Z | 2022-01-19T20:28:59.000Z | tests/ocr/test_suggestion_medication_administration.py | lifeomic/phc-sdk-py | 51709c1c2f129a3fbe336a44e4d501ae0249859e | [
"MIT"
] | 2 | 2019-10-30T19:54:43.000Z | 2020-12-03T18:57:15.000Z | import pandas as pd
from phc.easy.ocr.suggestion import (expand_array_column,
expand_medication_administrations,
frame_for_type)
# Fixture: a single OCR suggestion row (one medicationAdministration with one
# medication code, three status candidates and one dosage), expanded so each
# element of the 'suggestions' array becomes its own row.
# NOTE: the key "frequencey" is a (preserved) typo in the upstream schema.
sample = expand_array_column(
    pd.DataFrame(
        [
            {
                "suggestions": [
                    {
                        "id": "728e79cd-6cd2-421f-9e38-3181200c301",
                        "condition": {
                            "conditionCode": [],
                            "onsetDate": [],
                            "abatementDate": [],
                            "bodySite": [],
                        },
                        "observation": {},
                        "medicationAdministration": {
                            "medicationCode": [
                                {
                                    "value": {
                                        "system": "http://www.nlm.nih.gov/research/umls/rxnorm",
                                        "code": "3640",
                                        "display": "doxycycline",
                                    },
                                    "dataSource": {"source": "comprehend"},
                                    "confidence": 0.996650755405426,
                                    "sourceText": {
                                        "text": "doxycycline",
                                        "location": {
                                            "startIndex": 11,
                                            "endIndex": 22,
                                        },
                                    },
                                }
                            ],
                            "date": [],
                            "endDate": [],
                            "status": [
                                {
                                    "value": "unknown",
                                    "dataSource": {"source": "comprehend"},
                                    "confidence": 0.9,
                                },
                                {
                                    "value": "completed",
                                    "dataSource": {"source": "comprehend"},
                                    "confidence": 0.9,
                                },
                                {
                                    "value": "in-progress",
                                    "dataSource": {"source": "comprehend"},
                                    "confidence": 0.9,
                                },
                            ],
                            "dosage": [
                                {
                                    "value": {
                                        "id": "0",
                                        "strength": None,
                                        "dosage": None,
                                        "duration": None,
                                        "form": None,
                                        "frequencey": None,
                                        "rate": None,
                                        "route": "po",
                                    },
                                    "dataSource": {"source": "comprehend"},
                                    "confidence": 0.996650755405426,
                                    "sourceText": {
                                        "text": "po",
                                        "location": {
                                            "startIndex": 23,
                                            "endIndex": 25,
                                        },
                                    },
                                }
                            ],
                        },
                    }
                ],
                "anchorDate": "2021-02-24T12:58:32.058Z",
                "version": 4,
                "suggestionId": "00022-00007-00001",
            }
        ]
    ),
    key="suggestions",
)
def test_medication_administration_expansion():
    """Each `status` entry in the fixture yields its own expanded row, while
    every code/dosage/meta column is identical across the three rows."""
    df = expand_medication_administrations(
        frame_for_type(sample, "medicationAdministration")
    )

    # Columns shared by every expected row. "status_value" is a placeholder
    # so the original hand-written column order is preserved when it is
    # overridden below (overriding an existing dict key keeps its position).
    base_row = {
        "anchorDate": "2021-02-24T12:58:32.058Z",
        "version": 4,
        "suggestionId": "00022-00007-00001",
        "id": "728e79cd-6cd2-421f-9e38-3181200c301",
        "status_value": None,
        "status_confidence": 0.9,
        "status_dataSource_source": "comprehend",
        "dosage_confidence": 0.996650755405426,
        "dosage_dataSource_source": "comprehend",
        "dosage_value_id": "0",
        "dosage_value_strength": None,
        "dosage_value_dosage": None,
        "dosage_value_duration": None,
        "dosage_value_form": None,
        "dosage_value_frequencey": None,
        "dosage_value_rate": None,
        "dosage_value_route": "po",
        "code_confidence": 0.996650755405426,
        "code_dataSource_source": "comprehend",
        "code_value_system": "http://www.nlm.nih.gov/research/umls/rxnorm",
        "code_value_code": "3640",
        "code_value_display": "doxycycline",
        "dosage_sourceText": "po",
        "code_sourceText": "doxycycline",
        "type": "medicationAdministration",
    }
    expected = pd.DataFrame(
        [{**base_row, "status_value": status}
         for status in ("unknown", "completed", "in-progress")]
    )

    pd.testing.assert_frame_equal(df, expected)
| 45.668449 | 96 | 0.353396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,959 | 0.346487 |
10a4e5fa0983c2458d0d23edcfdb3d0d1a09e3d6 | 482 | py | Python | spacing/json_load.py | Pixir/Pixir | 63a6fc0728403af92eadf188f532f9f41cd9f912 | [
"MIT"
] | null | null | null | spacing/json_load.py | Pixir/Pixir | 63a6fc0728403af92eadf188f532f9f41cd9f912 | [
"MIT"
] | 1 | 2020-02-10T08:11:23.000Z | 2020-02-10T08:11:23.000Z | spacing/json_load.py | Pixir/Pixir | 63a6fc0728403af92eadf188f532f9f41cd9f912 | [
"MIT"
] | 3 | 2020-02-09T11:14:33.000Z | 2020-04-11T16:10:17.000Z | import json
from bs4 import BeautifulSoup
import re
# Strip the KorQuAD 2.1 training articles down to bare Korean text and
# collect everything into a single corpus file.
all_texts = []
for file_index in range(0, 39):
    with open(f'./KorQuAD/korquad2.1_train_{file_index}.json', 'r', encoding='utf-8') as f:
        js = json.load(f)
    for article in js['data']:
        # Each article stores the raw page markup; strip the tags first...
        soup = BeautifulSoup(article['raw_html'], 'html5lib')
        text = soup.get_text()
        # ...then drop every character that is not Hangul or whitespace.
        text = re.sub(r'[^가-힣ㄱ-ㅣ\s]', '', text)
        all_texts.append(text)
# BUGFIX: the output file used to be reopened in 'w' mode per input file, so
# only the text of the last processed file survived. Write the corpus once.
# (Also: the inner loop no longer shadows the outer loop variable.)
with open('korquad.txt', 'w', encoding='utf-8') as f:
    f.writelines(all_texts)
| 25.368421 | 82 | 0.609959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.257143 |
10a59a24a85be6682daa683fce4544a51ae31094 | 312 | py | Python | auction/admin.py | AnthonyNicklin/newage-auctions | f829c9761e4fef0c084cf0244a4617a4bda8e0c2 | [
"FSFAP"
] | 1 | 2021-07-29T07:47:10.000Z | 2021-07-29T07:47:10.000Z | auction/admin.py | AnthonyNicklin/newage-auctions | f829c9761e4fef0c084cf0244a4617a4bda8e0c2 | [
"FSFAP"
] | 9 | 2019-12-19T21:27:23.000Z | 2022-01-13T01:59:10.000Z | auction/admin.py | AnthonyNicklin/newage-auctions | f829c9761e4fef0c084cf0244a4617a4bda8e0c2 | [
"FSFAP"
] | 1 | 2020-02-11T19:50:45.000Z | 2020-02-11T19:50:45.000Z | from django.contrib import admin
from .models import Auction, Lot, Bid
class BidAdmin(admin.ModelAdmin):
    """Admin options for Bid: every bid field is displayed read-only."""
    readonly_fields = ('user', 'auction', 'bid_amount', 'bid_time')
# Expose the auction models in the Django admin; Bid uses the read-only
# BidAdmin options, the others use the default ModelAdmin.
admin.site.register(Auction)
admin.site.register(Lot)
admin.site.register(Bid, BidAdmin)
| 16.421053 | 37 | 0.644231 | 142 | 0.455128 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.11859 |
10a637ed134f858c509d1512e086fa81036f6f19 | 922 | py | Python | scale/source/apps.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 121 | 2015-11-18T18:15:33.000Z | 2022-03-10T01:55:00.000Z | scale/source/apps.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 1,415 | 2015-12-23T23:36:04.000Z | 2022-01-07T14:10:09.000Z | scale/source/apps.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 66 | 2015-12-03T20:38:56.000Z | 2020-07-27T15:28:11.000Z | """Defines the application configuration for the source application"""
from __future__ import unicode_literals
from django.apps import AppConfig
class SourceConfig(AppConfig):
"""Configuration for the source app
"""
name = 'source'
label = 'source'
verbose_name = 'Source'
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
# Register source file parse saver
from job.configuration.data.data_file import DATA_FILE_PARSE_SAVER
from source.configuration.source_data_file import SourceDataFileParseSaver
DATA_FILE_PARSE_SAVER['DATA_FILE_PARSE_SAVER'] = SourceDataFileParseSaver()
# Register source message types
from messaging.messages.factory import add_message_type
from source.messages.purge_source_file import PurgeSourceFile
add_message_type(PurgeSourceFile)
| 30.733333 | 83 | 0.724512 | 773 | 0.838395 | 0 | 0 | 0 | 0 | 0 | 0 | 315 | 0.341649 |
10a6aa884d2b424b836a35683369b782a937c1ff | 1,556 | py | Python | aim/web/api/views.py | admariner/aim | 4c143ea40acf3531abfa69f66503428d73d9fedc | [
"Apache-2.0"
] | 1 | 2021-07-19T19:21:30.000Z | 2021-07-19T19:21:30.000Z | aim/web/api/views.py | admariner/aim | 4c143ea40acf3531abfa69f66503428d73d9fedc | [
"Apache-2.0"
] | null | null | null | aim/web/api/views.py | admariner/aim | 4c143ea40acf3531abfa69f66503428d73d9fedc | [
"Apache-2.0"
] | null | null | null | import os
from aim.web.api.utils import APIRouter # wrapper for fastapi.APIRouter
from fastapi.responses import FileResponse
from aim.web.api.projects.project import Project
# Router collecting the static-file, image and index endpoints defined below.
general_router = APIRouter()
@general_router.get('/static-files/{path:path}/')
async def serve_static_files(path):
    """Serve a file from the bundled UI build, preferring a pre-gzipped copy."""
    from aim import web
    ui_build_dir = os.path.join(os.path.dirname(web.__file__), 'ui', 'build')
    static_file_name = os.path.join(ui_build_dir, path)
    compressed_file_name = '{}.gz'.format(static_file_name)
    if not os.path.exists(compressed_file_name):
        return FileResponse(static_file_name)
    # A .gz sibling exists: send it and let the browser decompress.
    return FileResponse(compressed_file_name, headers={'Content-Encoding': 'gzip'})
@general_router.get('/static/{exp_name}/{commit_hash}/media/images/{path}/')
async def serve_images(exp_name, commit_hash, path):
    """Serve a stored image artifact of a run from the local repository."""
    project = Project()
    media_dir = os.path.join(project.repo_path, exp_name, commit_hash,
                             'objects', 'media', 'images')
    image_file = os.path.join(media_dir, path)
    return FileResponse(image_file)
# do not change the placement of this method
# as it also serves as a fallback for wrong url routes
@general_router.get('/{path:path}/')
async def serve_index_html():
    """Serve the SPA entry point (index.html), gzipped when available."""
    from aim import web
    static_file_name = os.path.join(os.path.dirname(web.__file__), 'ui', 'build', 'index.html')
    compressed_file_name = '{}.gz'.format(static_file_name)
    if not os.path.exists(compressed_file_name):
        return FileResponse(static_file_name)
    return FileResponse(compressed_file_name, headers={'Content-Encoding': 'gzip'})
| 37.95122 | 95 | 0.701157 | 0 | 0 | 0 | 0 | 1,240 | 0.796915 | 1,076 | 0.691517 | 347 | 0.223008 |
10a73cc97171571950fa706f1f5a0031f3cdc1f9 | 38,441 | py | Python | merceedge/core.py | merceedge/MerceEdge | b1448fbaed912e04e1c71d3f2101f2f297eca794 | [
"Apache-2.0"
] | 6 | 2019-01-25T09:03:27.000Z | 2021-02-17T14:30:06.000Z | merceedge/core.py | merceedge/MerceEdge | b1448fbaed912e04e1c71d3f2101f2f297eca794 | [
"Apache-2.0"
] | null | null | null | merceedge/core.py | merceedge/MerceEdge | b1448fbaed912e04e1c71d3f2101f2f297eca794 | [
"Apache-2.0"
] | null | null | null | import threading
import enum
import os
import sys
import copy
import json
import asyncio
import attr
import uuid
import functools
import datetime
import multiprocessing
from time import monotonic
import time
import copy
from collections import deque
from concurrent.futures import ThreadPoolExecutor
from async_timeout import timeout
from collections import namedtuple
from typing import ( # noqa: F401 pylint: disable=unused-import
Optional, Any, Callable, List, TypeVar, Dict, Coroutine, Set,
TYPE_CHECKING, Awaitable, Iterator)
from os.path import join
dir_path = os.path.dirname(os.path.realpath(__file__))
import merceedge.util as util
import merceedge.util.dt as dt_util
import merceedge.util.id as id_util
import merceedge.util.yaml as yaml_util
import merceedge.util.module as module_util
from merceedge.util.async_util import (
Context,
callback,
is_callback,
run_callback_threadsafe,
run_coroutine_threadsafe,
fire_coroutine_threadsafe,
CALLBACK_TYPE,
T
)
from merceedge.util.signal import async_register_signal_handling
from merceedge.exceptions import (
MerceEdgeError,
ComponentTemplateNotFound
)
from merceedge.const import (
MATCH_ALL,
EVENT_TIME_CHANGED,
EVENT_SERVICE_EXECUTED,
EVENT_CALL_SERVICE,
EVENT_STATE_CHANGED,
EVENT_TIMER_OUT_OF_SYNC,
EVENT_EDGE_STOP,
ATTR_NOW,
ATTR_DATE,
ATTR_TIME,
ATTR_SECONDS,
)
from merceedge.service import ServiceRegistry
from merceedge.providers import ServiceProviderFactory
from merceedge.api_server.models import (
ComponentDBModel,
WireDBModel
)
from merceedge.settings import (
logger_access,
logger_code,
logger_console
)
# Domain name identifying this core module on the event bus.
DOMAIN = "merceedge"

# Module logger (aliased from the project's configured code logger).
_LOGGER = logger_code
class MerceEdge(object):
"""Root object of Merce Edge node"""
    def __init__(self, user_config):
        """Initialize the edge node core.

        :param user_config: parsed user configuration, also handed to the
            WireLoadFactory.
        """
        self.user_config = user_config
        self.loop = asyncio.get_event_loop()
        executor_opts = {'max_workers': None}  # type: Dict[str, Any]
        # thread_name_prefix is only supported from Python 3.6 on.
        if sys.version_info[:2] >= (3, 6):
            executor_opts['thread_name_prefix'] = 'SyncWorker'
        self.executor = ThreadPoolExecutor(**executor_opts)
        # Blocking calls scheduled on the loop run in this thread pool.
        self.loop.set_default_executor(self.executor)
        self._pending_tasks = []  # type: list
        self._track_task = True
        self.exit_code = 0
        # _async_stop will set this instead of stopping the loop
        # self._stopped = asyncio.Event()
        self.bus = EventBus(self)
        self.services = ServiceRegistry(self)
        self.component_templates = {}  # key: component template name
        self.components = {}  # key: component id
        self.wires = {}  # key: wire id
        self.wireload_factory = WireLoadFactory(user_config)
    def dyload_component(self, component_config):
        """Dynamically load a new component at runtime.

        Placeholder — not implemented yet.
        """
        # TODO
def start(self):
"""Start.
Note: This function is only used for testing.
For regular use, use "await edge.run()".
"""
# Register the async start
fire_coroutine_threadsafe(self.async_start(), self.loop)
# Run forever
try:
# Block until stopped
_LOGGER.info("Starting MerceEdge core loop")
self.loop.run_forever()
except KeyboardInterrupt:
# Optionally show a message if the shutdown may take a while
_LOGGER.info("Attempting graceful shutdown, press Ctrl+C again to exit…", flush=True)
# Do not show `asyncio.CancelledError` exceptions during shutdown
# (a lot of these may be generated, skip this if you prefer to see them)
def shutdown_exception_handler(loop, context):
if "exception" not in context \
or not isinstance(context["exception"], asyncio.CancelledError):
loop.default_exception_handler(context)
self.loop.set_exception_handler(shutdown_exception_handler)
# Handle shutdown gracefully by waiting for all tasks to be cancelled
tasks = asyncio.gather(*asyncio.Task.all_tasks(loop=self.loop), loop=self.loop, return_exceptions=True)
tasks.add_done_callback(lambda t: self.loop.stop())
tasks.cancel()
# Keep the event loop running until it is either destroyed or all
# tasks have really terminated
while not tasks.done() and not self.loop.is_closed():
self.loop.run_forever()
finally:
self.loop.close()
return self.exit_code
    def stop(self):
        """Request shutdown: schedule async_stop() on the loop (thread-safe)."""
        fire_coroutine_threadsafe(self.async_stop(), self.loop)
def load_local_component_templates(self, config_yml_dict):
# 1. Absolute path 2. MERCE_EDGE_HOME path
try:
component_template_paths = config_yml_dict['component_template']['paths']
for path in component_template_paths:
ab_path = ''
if path.startswith('/') or path[1]==":":
ab_path = path
else:
ab_path = os.path.join(os.environ['MERCE_EDGE_HOME'], 'merceedge', path)
self._load_local_component_templates(ab_path)
except KeyError:
raise MerceEdgeError('config.yaml foramt invalide')
def _load_local_component_templates(self, component_template_path):
"""Read local component templates path, generate component template objects
"""
template_configs = []
template_configs += [each for each in os.listdir(component_template_path) if each.endswith('.yaml')]
for template_config in template_configs:
com_tmp_yaml = yaml_util.load_yaml(join(component_template_path, template_config))
# new_com_tmp = Component(com_tmp_yaml)
self.component_templates[com_tmp_yaml['component']['name']] = com_tmp_yaml
def _generate_component_instance(self, component_template_name, id=None, init_params=None):
"""Deepcopy component from component template
"""
com_tmp_yaml = self.component_templates.get(component_template_name, None)
if com_tmp_yaml:
if com_tmp_yaml['component'].get('virtual', False):
new_com_cls = self.wireload_factory.get_class(com_tmp_yaml['component']['name'])
new_com = new_com_cls(self, com_tmp_yaml, id, init_params)
else:
new_com = Component(self, com_tmp_yaml, id, init_params)
self.components[new_com.id] = new_com
return new_com
else:
# TODO logger warn no such name component compnent
pass
return None
def generate_component_instance(self, component_template_name, component_id, init_params=None):
""" Get component from self.components dict by id, if not exit, create new one, and
save into self.components
"""
component = self.components.get(component_id, None)
if component is None:
component = self._generate_component_instance(component_template_name, component_id, init_params)
if component:
return component
raise ComponentTemplateNotFound
async def connect_interface(self,
output_component_id, output_name,
input_component_id, input_name,
output_params={}, input_params={},
wire_id=None):
""" connenct wire
"""
output_sink = self.components[output_component_id].outputs[output_name]
input_slot = self.components[input_component_id].inputs[input_name]
wire = Wire(edge=self, output_sink=output_sink, input_slot=input_slot, id=wire_id)
wire.set_input_params(output_params)
wire.set_output_params(input_params)
# print(wire.output_sink.name, wire.output_sink, output_params, wire.output_sink.attrs)
# print(wire.input_slot.name, wire.input_slot, input_params, wire.input_slot.attrs)
self.wires[wire.id] = wire
await self.components[output_component_id].outputs[output_name].conn_output_sink(output_wire_params=output_params)
await self.components[input_component_id].inputs[input_name].conn_input_slot(input_wire_params=input_params)
wire.connect()
return wire
def delete_wire(self, wire_id):
"""Disconnect wire
"""
try:
wire = self.wires[wire_id]
wire.disconnect()
del self.wires[wire.id]
return wire
except KeyError:
return None
def stop_wireload_exec(self):
# for wireid, wire in self.wires.items():
# if wire.wire_load:
# wire.wire_load.is_stop = True
# TODO
pass
def restore_entities_from_db(self):
"""Restore components / wires from local db when edge start.
1. 获取所有的组件信息, 根据组件类型名称创建组件对象, 注意:组件的uuid从记录读取
2. 获取所有的连线信息,连接相关接口
"""
# TODO
# Restruct components
component_db_list = ComponentDBModel.query.all()
for component_db_record in component_db_list:
self._generate_component_instance(component_db_record.template_name,
component_db_record.uuid)
# Restruct wires
wire_db_list = WireDBModel.query.all()
for wire_db_record in wire_db_list:
try:
output_component_uuid = wire_db_record.output_component_uuid
input_component_uuid = wire_db_record.input_component_uuid
output_name = wire_db_record.output_name
input_name = wire_db_record.input_name
wire_id = wire_db_record.id
# TODO need modify
self.connect_interface(output_component_uuid, output_name,
input_component_uuid, input_name,
wire_id)
except KeyError:
# TODO logger warn
continue
async def load_formula(self, formula_path):
formula_yaml = yaml_util.load_yaml(formula_path)
try:
components = formula_yaml['components']
wires = formula_yaml['wires']
for component in components:
# TODO init component parameters
self.generate_component_instance(component['template'],
component['id'],
component.get('parameters', None))
for wire in wires:
# struct components
output_com = self.components[wire['output_sink']['component_id']]
input_com = self.components[wire['input_slot']['component_id']]
# struct wire
output_name = wire['output_sink']['output']
input_name = wire['input_slot']['input']
# wire interface paramaters
output_params = wire['output_sink'].get('parameters', {})
input_params = wire['input_slot'].get('parameters', {})
await self.connect_interface(output_com.id, output_name,
input_com.id, input_name,
output_params, input_params)
except KeyError as e:
_LOGGER.error("Load formula error, program exit!: {}".format(e))
sys.exit(-1)
except ComponentTemplateNotFound:
_LOGGER.error(ComponentTemplateNotFound.__str__)
def add_job(self, target: Callable[..., None], *args: Any) -> None:
"""Add job to the executor pool.
target: target to call.
args: parameters for method to call.
"""
if target is None:
raise ValueError("Don't call add_job with None")
self.loop.call_soon_threadsafe(self.async_add_job, target, *args)
@callback
def async_add_job(
self,
target: Callable[..., Any],
*args: Any) -> Optional[asyncio.Future]:
"""Add a job from within the event loop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
task = None
# Check for partials to properly determine if coroutine function
check_target = target
while isinstance(check_target, functools.partial):
check_target = check_target.func
if asyncio.iscoroutine(check_target):
task = self.loop.create_task(target) # type: ignore
elif is_callback(check_target):
self.loop.call_soon(target, *args)
elif asyncio.iscoroutinefunction(check_target):
# print('iscoroutinefunction {}'.format(check_target.__name__))
task = self.loop.create_task(target(*args))
else:
task = self.loop.run_in_executor( # type: ignore
None, target, *args)
# If a task is scheduled
if self._track_task and task is not None:
# print("5!!!")
self._pending_tasks.append(task)
return task
@callback
def async_run_job(self, target: Callable[..., None], *args: Any) -> None:
"""Run a job from within the event loop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
if not asyncio.iscoroutine(target) and is_callback(target):
target(*args)
else:
self.async_add_job(target, *args)
@callback
def async_create_task(self, target: Coroutine) -> asyncio.tasks.Task:
"""Create a task from within the eventloop.
This method must be run in the event loop.
target: target to call.
"""
task = self.loop.create_task(target) # type: asyncio.tasks.Task
if self._track_task:
self._pending_tasks.append(task)
return task
@callback
def async_add_executor_job(
self,
target: Callable[..., T],
*args: Any) -> Awaitable[T]:
"""Add an executor job from within the event loop."""
task = self.loop.run_in_executor(
None, target, *args)
# If a task is scheduled
if self._track_task:
self._pending_tasks.append(task)
return task
@callback
def async_track_tasks(self) -> None:
"""Track tasks so you can wait for all tasks to be done."""
self._track_task = True
@callback
def async_stop_track_tasks(self) -> None:
"""Stop track tasks so you can't wait for all tasks to be done."""
self._track_task = False
def block_till_done(self) -> None:
"""Block till all pending work is done."""
run_coroutine_threadsafe(
self.async_block_till_done(), loop=self.loop).result()
async def async_block_till_done(self) -> None:
"""Block till all pending work is done."""
# To flush out any call_soon_threadsafe
await asyncio.sleep(0)
while self._pending_tasks:
_LOGGER.debug("async_block_till_done -----")
pending = [task for task in self._pending_tasks
if not task.done()]
self._pending_tasks.clear()
_LOGGER.debug(pending)
if pending:
_LOGGER.debug('pending')
await asyncio.wait(pending)
else:
_LOGGER.debug('no pending')
await asyncio.sleep(0)
async def async_run(self) -> int:
""" MerceEdge main entry point.
Start and block until stopped.
This method is a coroutine.
"""
# _async_stop will set this instead of stopping the loop
self._stopped = asyncio.Event()
await self.async_start()
async_register_signal_handling(self)
_LOGGER.debug("self._stopped.wait() start")
print(self._stopped)
await self._stopped.wait()
_LOGGER.debug("self._stopped.wait() stop")
return self.exit_code
async def async_start(self) -> None:
"""Finalize startup from inside the event loop.
This method is a coroutine.
"""
# _LOGGER.info("Starting Merce Edge")
setattr(self.loop, '_thread_ident', threading.get_ident())
# self.bus.async_fire(EVENT_HOMEASSISTANT_START)
try:
# Only block for EVENT_HOMEASSISTANT_START listener
self.async_stop_track_tasks()
with timeout(15):
await self.async_block_till_done()
except asyncio.TimeoutError:
# TODO warning
pass
# _LOGGER.warning(
# 'Something is blocking Home Assistant from wrapping up the '
# 'start up phase. We\'re going to continue anyway. Please '
# 'report the following info at http://bit.ly/2ogP58T : %s',
# ', '.join(self.config.components))
# Allow automations to set up the start triggers before changing state
await asyncio.sleep(0)
# if self.state != CoreState.starting:
# _LOGGER.warning(
# 'Home Assistant startup has been interrupted. '
# 'Its state may be inconsistent.')
# return
# self.state = CoreState.running
_async_create_timer(self)
async def async_stop(self, exit_code: int = 0, *,
force: bool = False) -> None:
"""Stop MerceEdge and shuts down all threads.
The "force" flag commands async_stop to proceed regardless of
Home Assistan't current state. You should not set this flag
unless you're testing.
This method is a coroutine.
"""
_LOGGER.debug("Stop all wire load execution...")
self.stop_wireload_exec()
self.async_track_tasks()
self.bus.async_fire(EVENT_EDGE_STOP)
await self.async_block_till_done()
self.executor.shutdown()
_LOGGER.debug('MerceEdge loop stop...')
self.loop.stop()
def wireload_emit_output_payload(self, output_name, emit_call, payload):
self.add_job(emit_call)
class Entity(object):
"""ABC for Merce Edge entity(Component, Interface, etc.)"""
id = id_util.generte_unique_id()
attrs = {}
def load_attrs(self, config):
# TODO
raise NotImplementedError
def get_attrs(self, attr_key):
try:
return self.attrs.get(attr_key)
except KeyError as e:
_LOGGER.error(str(e))
return None
def set_attrs(self, _attrs):
self.attrs.update(_attrs)
class Component(Entity):
"""ABC for Merce Edge components"""
def __init__(self, edge, model_template_config, id=None, init_params=None):
"""
model_template_config: yaml object
"""
self.edge = edge
self.model_template_config = model_template_config
self.id = id or id_util.generte_unique_id()
self.inputs = {}
self.outputs = {}
self.init_params = init_params or {}
# self.components = {}
# init interfaces
self._init_interfaces()
@property
def parameters(self):
return self.init_params
@parameters.setter
def parameters(self, params):
self.init_params = params
def _init_interfaces(self):
"""initiate inputs & outputs
"""
inputs = self.model_template_config['component'].get('inputs', None)
if inputs:
for _input in inputs:
self.inputs[_input['name']] = Input(edge=self.edge,
name=_input['name'],
component=self,
attrs=_input['protocol'],
propreties=_input.get('propreties', None))
outputs = self.model_template_config['component'].get('outputs', None)
if outputs:
for _output in outputs:
self.outputs[_output['name']] = Output(edge=self.edge,
name=_output['name'],
component=self,
attrs=_output['protocol'],
propreties=_output.get('propreties', None))
def get_start_wires_info(self):
""" Get wires infomation that start from component
"""
wires = []
for output in self.outputs:
for wire in output.output_wires:
# TODO
pass
return wires
class Interface(Entity):
"""Interface ABC
1. Read configuration file and load interface using service(eg: mqtt service).
2. Listen message from EventBus, or call fire event provide by service(eg: mqtt service).
"""
def __init__(self, edge, name, component,
attrs=None, propreties=None):
self.edge = edge
self.name = name
self.component = component
self.propreties = propreties or {}
self.attrs = attrs or {}
self._set_protocol()
def _set_protocol(self):
self.protocol = self.attrs.get('name', 'virtual_interface')
class Output(Interface):
"""Virtual output interface, receive data from real world
"""
def __init__(self, edge, name, component, attrs=None, propreties=None):
super(Output, self).__init__(edge, name, component, attrs, propreties)
self.output_wires = {}
self.data = {}
# read output configuration
# print("init output {} {}".format(name, protocol))
self._init_provider()
def wires_info(self):
info = {}
for wire_id, wire in self.output_wires.items():
info[wire_id] = wire.__repr__()
return info
def add_wire(self, wire):
"""Add new wire"""
self.output_wires[wire.id] = wire
def del_wire(self, wire_id):
"""Remove wire
"""
self.provider.disconn_output_sink(self)
del self.output_wires[wire_id]
def _init_provider(self):
try:
self.provider = ServiceProviderFactory.get_provider(self.protocol)
_LOGGER.debug("Output {} load provider {}".format(self.name, self.provider))
# if self.provider:
# self.provider.new_instance_setup(self.name, self.attrs, True)
# self.edge.add_job(self.provider.async_setup, self.edge, self.attrs)
except KeyError as e:
# log no such provider key error
_LOGGER.error("Cannot load {} provider".format(self.protocol))
raise
async def conn_output_sink(self, output_wire_params={}):
""" register EventBus listener"""
self.edge.add_job(self.provider.async_setup, self.edge, self.attrs)
await self.provider.conn_output_sink(output=self,
output_wire_params=output_wire_params,
callback=self.output_sink_callback)
def output_sink_callback(self, event):
"""Send output Event"""
# 发送wirefire Event(连线的时候Wire的Output需要注册Input的wirefire事件)
wirefire_event_type = "wirefire_{}_{}".format(self.component.id, self.name)
self.edge.bus.fire(wirefire_event_type, event.data)
class Input(Interface):
"""Input"""
def __init__(self, edge, name, component, attrs=None, propreties=None):
super(Input, self).__init__(edge, name, component, attrs, propreties)
self.input_wires = {}
self._init_provider()
def wires_info(self):
info = {}
for wire_id, wire in self.input_wires.items():
info[wire_id] = wire.__repr__()
return json.dumps(info)
def add_wire(self, wire):
"""Add new wire"""
self.input_wires[wire.id] = wire
def del_wire(self, wire_id):
"""Remove wire
"""
del self.input_wires[wire_id]
def _init_provider(self):
try:
self.provider = ServiceProviderFactory.get_provider(self.protocol)
# self.edge.add_job(self.provider.async_setup, self.edge, self.attrs)
except KeyError:
# TODO log no such provider key error
raise
async def conn_input_slot(self, input_wire_params={}):
self.edge.add_job(self.provider.async_setup, self.edge, self.attrs)
await self.provider.conn_input_slot(self, input_wire_params)
async def emit_data_to_input(self, event):
# Emit data to EventBus and invoke configuration service send data function.
await self.provider.emit_input_slot(self, event.data)
class State(object):
"""Component State"""
# raise NotImplementedError
# TODO
pass
class Wire(Entity):
"""Wire """
def __init__(self, edge: MerceEdge, output_sink: Output, input_slot: Input, id=None):
self.edge = edge
self.id = id or id_util.generte_unique_id()
self.input = output_sink
self.output = input_slot
self.input_params = dict()
self.output_params = dict()
self.input.add_wire(self)
self.output.add_wire(self)
def connect(self):
outcom_id = self.output_sink.component.id
out_name = self.output_sink.name
wirefire_event_type = "wirefire_{}_{}".format(outcom_id, out_name)
self.edge.bus.async_listen(wirefire_event_type, self.input_slot.emit_data_to_input)
def _add_input(self, output_sink: Output):
output_sink.add_wire(self)
def _add_output(self, input_slot: Input):
input_slot.add_wire(self)
@property
def output_sink(self):
return self.input
@property
def input_slot(self):
return self.output
def __repr__(self):
wire_info = {}
wire_info["input"] = {"component_id": self.input.component.id,
"name": self.input.name}
wire_info["output"] = {"component_id": self.output.component.id,
"name": self.output.name}
return wire_info
def set_input_params(self, parameters):
self.input_params = parameters
self.input.set_attrs(parameters)
def set_output_params(self, parameters):
self.output_params = parameters
self.output.set_attrs(parameters)
def disconnect(self):
self.input.del_wire(self.id)
self.output.del_wire(self.id)
class WireLoadFactory:
def __init__(self, config):
"""
config: user configuration
"""
self._classes = {}
paths = config['wireload']['paths']
self._load(paths)
def _load(self, paths):
"""Walk throuth path and load WireLoad subclass
"""
classes = {}
for path in paths:
path = os.path.join(dir_path, path)
classes = module_util.load_modules(path, WireLoad)
self._classes.update(classes)
_LOGGER.debug("Load wireloads modules: {}".format(self._classes))
def get_class(self, wireload_name):
return self._classes.get(wireload_name, None)
class WireLoad(Component):
"""Wire load abstract class. Mounted on wire, processing data through wire.
Filter, Analiysis, Process, etc.
"""
name = ''
def __init__(self, edge, model_template_config, component_id=None, init_params=None):
super(WireLoad, self).__init__(edge, model_template_config, id=component_id, init_params=init_params)
self.input_q = asyncio.Queue(maxsize=3, loop=self.edge.loop)
self.output_q = asyncio.Queue(maxsize=3, loop=self.edge.loop)
self.is_stop = False
self.emit_output_call = self.emit_output_payload
def before_run_setup(self):
"""Need implemented"""
raise NotImplementedError
async def put_input_payload(self, payload):
await self.input_q.put(payload)
self.edge.add_job(self.run)
async def put_output_payload(self, output_name, payload):
await self.output_q.put((output_name, payload))
self.edge.wireload_emit_output_payload(output_name, self.emit_output_call, payload)
def process(self, input_payload):
"""Need implemented"""
raise NotImplementedError
async def run(self):
while True:
if self.is_stop:
_LOGGER.debug("stop wireload------------")
break
input_payload = await self.input_q.get()
await self.process(input_payload)
del input_payload
# if result:
# await self.output_q.put(result)
# self.edge.add_job(self.emit_output_payload)
async def emit_output_payload(self):
output_payload = await self.output_q.get()
try:
if output_payload:
# self.outputs[output_payload[0]].output_sink_callback(output_payload[1])
event_type = "{}_{}_{}".format("virtual_wire_event", self.id, output_payload[0])
self.edge.bus.async_fire(event_type, output_payload[1])
except KeyError as e:
_LOGGER.warn("Cannot find output: {}".format(e))
class Event(object):
# pylint: disable=too-few-public-methods
"""Represents an event within the Bus."""
__slots__ = ['event_type', 'data', 'time_fired', 'context']
def __init__(self, event_type: str, data: Optional[Dict] = None,
time_fired: Optional[int] = None,
context: Optional[Context] = None) -> None:
"""Initialize a new event."""
self.event_type = event_type
# TODO
self.data = data
self.time_fired = time_fired or dt_util.utcnow()
self.context = context or Context()
def as_dict(self) -> Dict:
"""Create a dict representation of this Event."""
return {
'event_type': self.event_type,
'data': dict(self.data),
'time_fired': self.time_fired,
'context': self.context.as_dict()
}
def __repr__(self) -> str:
# pylint: disable=maybe-no-member
"""Return the representation."""
# pylint: disable=maybe-no-member
if self.data:
return "<Event {}: {}>".format(
self.event_type,
util.repr_helper(self.data))
return "<Event {}>".format(self.event_type)
def __eq__(self, other: Any) -> bool:
"""Return the comparison."""
return (self.__class__ == other.__class__ and # type: ignore
self.event_type == other.event_type and
self.data == other.data and
self.time_fired == other.time_fired and
self.context == other.context)
class EventBus(object):
"""Allows firing of and listening for events.
NOTE: This part of code references home-assistant and chage a little.
"""
def __init__(self, edge: MerceEdge) -> None:
"""Initialize a new event bus."""
self._listeners = {} # type: Dict[str, List[Callable]]
self.edge = edge
@callback
def async_listeners(self) -> Dict[str, int]:
"""Dict with events and the number of listeners."""
return {key: len(self._listeners[key])
for key in self._listeners}
@property
def listeners(self) -> Dict[str, int]:
"""Dict with events and the number of listeners.
"""
return run_callback_threadsafe( # type: ignore
self.edge.loop, self.async_listeners
).result()
def fire(self, event_type: str, event_data: Optional[Dict] = None,
context: Optional[Context] = None) -> None:
"""Fire an event."""
self.edge.loop.call_soon_threadsafe(
self.async_fire, event_type, event_data, context)
@callback
def async_fire(self, event_type: str, event_data: Optional[Dict] = None,
context: Optional[Context] = None) -> None:
"""Fire an event.
This method must be run in the event loop
"""
# _LOGGER.info("async_fire: {}".format(event_type))
listeners = self._listeners.get(event_type, [])
# EVENT_HOMEASSISTANT_CLOSE should go only to his listeners
match_all_listeners = self._listeners.get(MATCH_ALL)
if (match_all_listeners is not None):
listeners = match_all_listeners + listeners
event = Event(event_type, event_data, None, context)
# if event_type != EVENT_TIME_CHANGED:
# _LOGGER.debug("Bus:Handling %s", event)
if not listeners:
return
for func in listeners:
self.edge.async_add_job(func, event)
def listen(
self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
"""
async_remove_listener = run_callback_threadsafe(
self.edge.loop, self.async_listen, event_type, listener).result()
def remove_listener() -> None:
"""Remove the listener."""
run_callback_threadsafe(
self.edge.loop, async_remove_listener).result()
return remove_listener
@callback
def async_listen(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
This method must be run in the event loop.
"""
if event_type in self._listeners:
self._listeners[event_type].append(listener)
else:
self._listeners[event_type] = [listener]
def remove_listener() -> None:
"""Remove the listener."""
self._async_remove_listener(event_type, listener)
return remove_listener
def listen_once(
self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns function to unsubscribe the listener.
"""
async_remove_listener = run_callback_threadsafe(
self.edge.loop, self.async_listen_once, event_type, listener,
).result()
def remove_listener() -> None:
"""Remove the listener."""
run_callback_threadsafe(
self.edge.loop, async_remove_listener).result()
return remove_listener
@callback
def async_listen_once(
self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns registered listener that can be used with remove_listener.
This method must be run in the event loop.
"""
@callback
def onetime_listener(event: Event) -> None:
"""Remove listener from event bus and then fire listener."""
if hasattr(onetime_listener, 'run'):
return
# Set variable so that we will never run twice.
# Because the event bus loop might have async_fire queued multiple
# times, its possible this listener may already be lined up
# multiple times as well.
# This will make sure the second time it does nothing.
setattr(onetime_listener, 'run', True)
self._async_remove_listener(event_type, onetime_listener)
self.edge.async_run_job(listener, event)
return self.async_listen(event_type, onetime_listener)
@callback
def _async_remove_listener(
self, event_type: str, listener: Callable) -> None:
"""Remove a listener of a specific event_type.
This method must be run in the event loop.
"""
try:
self._listeners[event_type].remove(listener)
# delete event_type list if empty
if not self._listeners[event_type]:
self._listeners.pop(event_type)
except (KeyError, ValueError):
# KeyError is key event_type listener did not exist
# ValueError if listener did not exist within event_type
_LOGGER.warning("Unable to remove unknown listener %s", listener)
def _async_create_timer(edge) -> None:
"""Create a timer that will start on EVENT_EDGE_START."""
handle = None
def schedule_tick(now: datetime.datetime) -> None:
"""Schedule a timer tick when the next second rolls around."""
nonlocal handle
slp_seconds = 1 - (now.microsecond / 10**6)
target = monotonic() + slp_seconds
handle = edge.loop.call_later(slp_seconds, fire_time_event, target)
@callback
def fire_time_event(target: float) -> None:
"""Fire next time event."""
now = dt_util.utcnow()
edge.bus.async_fire(EVENT_TIME_CHANGED,
{ATTR_NOW: now})
# If we are more than a second late, a tick was missed
late = monotonic() - target
if late > 1:
edge.bus.async_fire(EVENT_TIMER_OUT_OF_SYNC,
{ATTR_SECONDS: late})
schedule_tick(now)
@callback
def stop_timer(_: Event) -> None:
"""Stop the timer."""
if handle is not None:
handle.cancel()
edge.bus.async_listen_once(EVENT_EDGE_STOP, stop_timer)
_LOGGER.info("Timer:starting")
schedule_tick(dt_util.utcnow()) | 35.429493 | 122 | 0.603782 | 35,575 | 0.922134 | 0 | 0 | 7,513 | 0.194743 | 8,025 | 0.208015 | 10,141 | 0.262863 |
10a97bd58b8abf9c9e02efdadaee1fda99449912 | 549 | py | Python | trinitee/manage.py | chaosk/trinitee | 30190c7e8ff2164c1127e26e733e0b65a9cd1f57 | [
"BSD-3-Clause"
] | 1 | 2017-04-20T18:39:48.000Z | 2017-04-20T18:39:48.000Z | trinitee/manage.py | chaosk/trinitee | 30190c7e8ff2164c1127e26e733e0b65a9cd1f57 | [
"BSD-3-Clause"
] | null | null | null | trinitee/manage.py | chaosk/trinitee | 30190c7e8ff2164c1127e26e733e0b65a9cd1f57 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory"
" containing %r. It appears you've customized things.\n"
"You'll have to run django-admin.py, passing it your settings module.\n"
"(If the file settings.py does indeed exist,"
"it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings) | 36.6 | 77 | 0.744991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 342 | 0.622951 |
10aab83e6e464c961b41023a7a95e718594c6eb3 | 3,981 | py | Python | src/storage.py | stivenramireza/images-resizer-app | 1dcce0bb9de6f1bd131f3d7d2bdd0d56b539ffa9 | [
"MIT"
] | null | null | null | src/storage.py | stivenramireza/images-resizer-app | 1dcce0bb9de6f1bd131f3d7d2bdd0d56b539ffa9 | [
"MIT"
] | null | null | null | src/storage.py | stivenramireza/images-resizer-app | 1dcce0bb9de6f1bd131f3d7d2bdd0d56b539ffa9 | [
"MIT"
] | 1 | 2022-01-14T07:42:17.000Z | 2022-01-14T07:42:17.000Z | import os
import sys
import shutil
import asyncio
import aioboto3
from glob import glob
from PIL import Image
from fnmatch import fnmatch
from src.secrets import (
SPACES_REGION,
SPACES_BUCKET,
SPACES_PREFIX,
SPACES_ENDPOINT_URL,
SPACES_ACCESS_KEY,
SPACES_SECRET_KEY
)
from src.format import (
get_filename,
get_image_id
)
from src.logger import logger
LOCAL_IMAGES_PATH = sys.path[0]
async def download_file(key, bucket):
if not key.endswith('/'):
await bucket.download_file(key, key)
elif not os.path.exists(key):
os.makedirs(key)
async def download_files(bucket, prefix):
async with aioboto3.resource('s3',
region_name=SPACES_REGION,
endpoint_url=SPACES_ENDPOINT_URL,
aws_access_key_id=SPACES_ACCESS_KEY,
aws_secret_access_key=SPACES_SECRET_KEY) as resource:
bucket = await resource.Bucket(bucket)
tasks = [asyncio.ensure_future(download_file(s3_obj.key, bucket)) async for s3_obj in
bucket.objects.filter(Prefix=prefix)]
await asyncio.gather(*tasks)
async def download_images():
try:
await download_files(SPACES_BUCKET, SPACES_PREFIX)
logger.info(f'Images from S3 have been downloaded successfully')
except Exception as error:
logger.error(f'Error to download images from S3: {error}')
raise
async def upload_file(subdir, file, image, bucket):
if fnmatch(file, f'{image.height}*.jpg'):
full_path = os.path.join(subdir, file)
with open(full_path, 'rb') as data:
await bucket.put_object(ACL='public-read', Key=full_path[len(LOCAL_IMAGES_PATH) + 1:], Body=data,
ContentType='image/jpg')
async def upload_files(bucket, prefix, image):
tasks = []
async with aioboto3.resource('s3',
region_name=SPACES_REGION,
endpoint_url=SPACES_ENDPOINT_URL,
aws_access_key_id=SPACES_ACCESS_KEY,
aws_secret_access_key=SPACES_SECRET_KEY) as resource:
bucket = await resource.Bucket(bucket)
for subdir, dirs, files in os.walk(LOCAL_IMAGES_PATH + f'/{prefix}'):
for file in files:
tasks.append(asyncio.ensure_future(upload_file(subdir, file, image, bucket)))
await asyncio.gather(*tasks)
async def upload_images(image):
try:
await upload_files(SPACES_BUCKET, SPACES_PREFIX, image)
logger.info('Images have been uploaded successfully into S3')
except Exception as error:
logger.error(f'Error to upload new images sizes to S3: {error}')
raise
async def get_local_images():
images = []
for filename in glob(LOCAL_IMAGES_PATH + f'/{SPACES_PREFIX}/*/720*.jpg'):
img = Image.open(filename)
image = {
"content": img,
"image_id": get_image_id(filename),
"filename": get_filename(filename)
}
images.append(image)
return images
async def save_local_images(resized_images):
try:
for (i, new_image) in enumerate(resized_images):
new_image['content'].save('{}/{}{}/{}{}'.format(LOCAL_IMAGES_PATH, SPACES_PREFIX, new_image['image_id'],
new_image['content'].height, new_image['filename']))
except Exception as error:
logger.error(f'Error to save images in local directories: {error}')
raise
async def remove_local_images():
path = os.path.join(LOCAL_IMAGES_PATH, 'test')
try:
if os.path.exists(path):
shutil.rmtree(path)
logger.info('Local images directory has been removed successfully')
except shutil.Error as error:
logger.error(f'Error to remove local images directory: {error}')
raise | 35.864865 | 116 | 0.626225 | 0 | 0 | 0 | 0 | 0 | 0 | 3,542 | 0.889726 | 540 | 0.135644 |
10ab0866f0c98cd274e552476ba7f587e6c76b7b | 17,599 | py | Python | sdbms/core/_parser.py | xSkyripper/simple-fs-dbms | 731621540eebcf36e7edb230c56304b9674bdd45 | [
"Apache-2.0"
] | null | null | null | sdbms/core/_parser.py | xSkyripper/simple-fs-dbms | 731621540eebcf36e7edb230c56304b9674bdd45 | [
"Apache-2.0"
] | null | null | null | sdbms/core/_parser.py | xSkyripper/simple-fs-dbms | 731621540eebcf36e7edb230c56304b9674bdd45 | [
"Apache-2.0"
] | null | null | null | import re
import operator
from collections import namedtuple
SCHEMA_TYPES = {'str', 'int', 'bool'}
ROWID_KEY = '_rowid'
class Literal(namedtuple('Literal', 'value')):
@classmethod
def eval_value(cls, value):
if not isinstance(value, str):
raise ValueError(f"Parameter {value} must be a str")
if value in ('True', 'False'):
return eval(value)
try:
return int(value)
except Exception:
pass
try:
return eval(value)
except Exception:
pass
raise ValueError(f'Paramater {value} is not valid')
def __new__(cls, value):
evaled_value = cls.eval_value(value)
return super().__new__(cls, evaled_value)
class Column(namedtuple('Column', 'name')):
pass
class Comparison(namedtuple('Comparison', 'left, op, right')):
ops = {
'=': operator.eq,
'!=': operator.ne,
'>': operator.gt,
'<': operator.lt,
'<=': operator.le,
'>=': operator.ge
}
def match(self, row):
if type(self.left) is Column:
left = Literal(row[self.left.name]).value
elif type(self.left) is Literal:
left = self.left.value
else:
raise ValueError(f'Invalid left value type; {self.left}')
if type(self.right) is Column:
right = Literal(row[self.right.name]).value
elif type(self.right) is Literal:
right = self.right.value
else:
raise ValueError(f'Invalid right value type; {self.left}')
return self.ops[self.op](left, right)
class ConditionList(namedtuple('ConditionList', 'comp_type, comparisons')):
    """A WHERE clause: comparisons combined with 'or' (any) or 'and' (all)."""

    types = {'or': any, 'and': all}

    def match(self, row):
        """Return True when the combined predicate accepts ``row``.

        An empty ``comp_type`` means "no WHERE clause": every row matches.
        """
        if not self.comp_type:
            return True
        combine = self.types[self.comp_type]
        return combine(predicate.match(row) for predicate in self.comparisons)
class CreateDbCmd(namedtuple('CreateDbCmd', 'name')):
    """Command: create a new database called ``name``."""

    def execute(self, db_manager):
        # Pure delegation to the database manager.
        db_manager.create_db(self.name)
class UseDbCmd(namedtuple('UseDbCmd', 'name')):
    """Command: switch the current database to ``name``."""

    def execute(self, db_manager):
        db_manager.use_db(self.name)
class DeleteDbCmd(namedtuple('DeleteDbCmd', 'name')):
    """Command: drop the database called ``name``."""

    def execute(self, db_manager):
        db_manager.delete_db(self.name)
class CreateTableCmd(namedtuple('CreateTableCmd', 'name, schema')):
    """Command: create table ``name`` with column schema ``schema``."""

    def validate(self):
        """Reject any declared column type outside SCHEMA_TYPES."""
        unknown_types = set(self.schema.values()) - SCHEMA_TYPES
        if unknown_types:
            raise CommandError(f'Only schema accepted types are {SCHEMA_TYPES}')

    def execute(self, db_manager):
        self.validate()
        db_manager.create_table(name=self.name, schema=self.schema)
class DeleteTableCmd(namedtuple('DeleteTableCmd', 'name')):
    """Command: drop the table called ``name``."""

    def execute(self, db_manager):
        db_manager.delete_table(name=self.name)
class AddColumnCmd(namedtuple('AddColumnCmd', 'name, col_type, col_name')):
    """Command: add column ``col_name`` of ``col_type`` to table ``name``."""

    def validate(self, db_manager):
        """Raise CommandError when the column already exists or the type is unsupported."""
        schema = db_manager.get_table_schema(table_name=self.name)
        if self.col_name in schema:
            raise CommandError(f'{self.col_name} col is already existing')
        if self.col_type not in SCHEMA_TYPES:
            raise CommandError(f'Only schema accepted types are {SCHEMA_TYPES}')

    def execute(self, db_manager):
        self.validate(db_manager)
        db_manager.add_column(name=self.name,
                              col_name=self.col_name, col_type=self.col_type)
class DelColumnCmd(namedtuple('DelColumnCmd', 'name, col_name')):
    """Command: remove column ``col_name`` from table ``name``."""

    def validate(self, db_manager):
        """The column must currently exist in the table schema."""
        schema = db_manager.get_table_schema(table_name=self.name)
        if self.col_name not in schema:
            raise CommandError(f'Col {self.col_name} does not exist')

    def execute(self, db_manager):
        self.validate(db_manager)
        db_manager.del_column(name=self.name, col_name=self.col_name)
def validate_cmd_row_values(schema=None, row=None):
    """Check every value of ``row`` against its declared type in ``schema``.

    ``schema`` maps column name -> type name ('int'/'str'/'bool'); ``row``
    maps column name -> raw string value.  Raises CommandError on the first
    column whose parsed value does not have the declared type.

    BUG FIX: mutable default arguments ({} shared across calls) replaced
    with the None sentinel idiom; behaviour for callers is unchanged.
    """
    schema = {} if schema is None else schema
    row = {} if row is None else row
    for col_name, col_val in row.items():
        lit_val = Literal(col_val)
        # Type names are stored as strings; eval maps them back to the
        # builtin type.  Safe only because SCHEMA_TYPES restricts the names.
        needed_col_type = eval(schema[col_name])
        if not isinstance(lit_val.value, needed_col_type):
            raise CommandError(f'Col\'s {col_name} value {col_val} has to be {schema[col_name]}')
class InsertCmd(namedtuple('InsertCmd', 'table, row')):
    """Command: insert ``row`` (dict of column -> raw value) into ``table``."""

    def validate(self, db_manager):
        """The row must supply exactly the schema's columns, with valid values."""
        schema = db_manager.get_table_schema(table_name=self.table)
        if self.row.keys() != schema.keys():
            raise CommandError(f'Schema {schema.keys()} is mandatory')
        validate_cmd_row_values(schema=schema, row=self.row)

    def execute(self, db_manager):
        self.validate(db_manager)
        db_manager.insert_row(table=self.table, row=self.row)
def validate_cmd_conditions_list(schema=None, conditions_list=None):
    """Check every comparison in ``conditions_list`` against ``schema``.

    Each comparison must reference an existing column on its left and carry
    a literal of the declared type on its right; raises CommandError
    otherwise.

    BUG FIX: the original evaluated ``schema[col.name]`` *before* checking
    membership, so an unknown column raised KeyError instead of the
    intended CommandError.  Also replaces mutable default arguments.
    """
    schema = {} if schema is None else schema
    for comparison in conditions_list.comparisons:
        col = comparison.left
        lit = comparison.right
        if col.name not in schema:
            raise CommandError(f'Col {col.name} in conditions does not exist in schema')
        # Only reached for known columns, so the lookup cannot KeyError.
        needed_col_type = eval(schema[col.name])
        if not isinstance(lit.value, needed_col_type):
            raise CommandError(f'Col\'s {col.name} value {lit.value} has to be {schema[col.name]}')
class QueryCmd(namedtuple('QueryCmd', 'table, projection, conditions_list')):
    """Command: yield matching rows of ``table`` projected onto the
    requested columns (``['*']`` selects every column)."""

    def validate(self, db_manager):
        """Projected columns must exist in the schema (unless projection is *)."""
        schema = db_manager.get_table_schema(table_name=self.table)
        if self.projection[0] != '*':
            if set(self.projection) - set(schema.keys()):
                raise CommandError(f'Query projection is enforced by schema; Only {schema.keys()} or * are allowed')
        validate_cmd_conditions_list(schema=schema,
                                     conditions_list=self.conditions_list)

    def execute(self, db_manager):
        """Generator of result rows; each row always carries ROWID_KEY."""
        self.validate(db_manager)
        star_proj = len(self.projection) == 1 and self.projection[0] == '*'
        for row in db_manager.scan_rows(table=self.table):
            if not self.conditions_list.match(row):
                continue
            result_row = {ROWID_KEY: row[ROWID_KEY]}
            del row[ROWID_KEY]
            # Single branch replaces the duplicated assignment that the
            # star/non-star arms used to carry.
            for key, val in row.items():
                if star_proj or key in self.projection:
                    result_row[key] = Literal(val).value
            yield result_row
class DeleteCmd(namedtuple('DeleteCmd', 'table, conditions_list')):
    """Command: delete every row of ``table`` matching ``conditions_list``."""

    def validate(self, db_manager):
        schema = db_manager.get_table_schema(table_name=self.table)
        validate_cmd_conditions_list(schema, self.conditions_list)

    def execute(self, db_manager):
        self.validate(db_manager)
        for row in db_manager.scan_rows(table=self.table):
            if self.conditions_list.match(row):
                # NOTE(review): '_rowid' duplicates ROWID_KEY — consider the constant.
                db_manager.delete_row(table=self.table, rowid=row['_rowid'])
class UpdateCmd(namedtuple('UpdateCmd', 'table, values, conditions_list')):
    """Command: apply ``values`` to every row of ``table`` matching the conditions."""

    def validate(self, db_manager):
        """New values must fit the schema, and the conditions must be valid."""
        schema = db_manager.get_table_schema(table_name=self.table)
        validate_cmd_row_values(schema=schema, row=self.values)
        validate_cmd_conditions_list(schema=schema,
                                     conditions_list=self.conditions_list)

    def execute(self, db_manager):
        self.validate(db_manager)
        for row in db_manager.scan_rows(table=self.table):
            if self.conditions_list.match(row):
                # NOTE(review): '_rowid' duplicates ROWID_KEY — consider the constant.
                db_manager.update_row(table=self.table,
                                      rowid=row['_rowid'], new_row=self.values)
class FromCsvCmd(namedtuple('FromCsvCmd', 'csv_path')):
    """Command: import data from the CSV file at ``csv_path``."""

    def execute(self, db_manager):
        db_manager.from_csv(csv_path=self.csv_path)
class ToCsvCmd(namedtuple('ToCsvCmd', 'csv_path')):
    """Command: export data to the CSV file at ``csv_path``."""

    def execute(self, db_manager):
        db_manager.to_csv(csv_path=self.csv_path)
class SchemaCmd(namedtuple('SchemaCmd', 'table_name')):
    """Command: fetch the column schema of ``table_name``.

    BUG FIX: the namedtuple typename was 'FromCsvCmd' (copy/paste slip);
    it only affected the generated base class's name, so fixing it is safe.
    """

    def execute(self, db_manager):
        """Return the schema dict for the table."""
        schema = db_manager.get_table_schema(self.table_name)
        return schema
class TablesCmd(namedtuple('TablesCmd', 'db_name')):
    """Command: yield the table names of database ``db_name``."""

    def execute(self, db_manager):
        yield from db_manager.get_tables(db_name=self.db_name)
class DbCmd(namedtuple('DbCmd', '')):
    """Command: report the currently selected database."""

    def validate(self, db_manager):
        # Nothing to check; kept for interface symmetry with other commands.
        pass

    def execute(self, db_manager):
        self.validate(db_manager)
        current_db = db_manager.get_current_db()
        return current_db
class CommandError(Exception):
    """Generic error raised for any invalid or unparseable command."""

    def __init__(self, message):
        # Modernized from the Python-2-style super(CommandError, self) call.
        super().__init__(message)
class QueryParser(object):
    """Regex-driven query parser.

    Each ``_parse_*`` method matches one statement form and returns the
    corresponding ``*Cmd`` object on a full match, or ``None`` so the next
    parser can try.  ``parse`` walks all parsers and raises CommandError
    when no pattern accepts the query.
    """

    # --- database-level statements -----------------------------------
    re_db_create = re.compile(r'^create\s+sdb\s+(?P<name>\w+);$')
    re_db_use = re.compile(r'^use\s+sdb\s+(?P<name>\w+);$')
    re_db_delete = re.compile(r'^delete\s+sdb\s+(?P<name>\w+);$')

    # --- table DDL: create (whole statement + per-column pattern), drop,
    # and column add/remove ------------------------------------------
    re_table_create_main = re.compile(r'^create\s+table\s+(?P<name>\w+)\s+columns\s+(?P<columns>((int|str|bool):(\w+)\s?)+);$')
    re_table_create_col = re.compile(r'(int|str|bool):(\w+)')
    re_table_delete = re.compile(r'^delete\s+table\s+(?P<name>\w+);$')
    re_table_add_column = re.compile(r'^change\s+table\s+(?P<name>\w+)\s+add\s+column\s+(?P<col_type>int|str|bool):(?P<col_name>\w+);$')
    re_table_del_column = re.compile(r'^change\s+table\s+(?P<name>\w+)\s+del\s+column\s+(?P<col_name>\w+);$')

    # --- DML: insert values, WHERE conditions, query/update/delete ----
    re_table_insert_main = re.compile(r'^insert\s+into\s+(?P<table_name>\w+)\s+values\s+(?P<values>(\w+=(True|False|\d+?|\"(\w|[\/\<\>:`~.,?!@;\'#$%\^&*\-_+=\[\{\]\}\\\|()\ ])*?\")\s?)+?);$')
    re_table_values = re.compile(r'(\w+)=(True|False|(\d+)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")')
    re_where_conditions = re.compile(r'(?P<col_name>\w+?)(?P<op>=|!=|<|>|<=|>=)(?P<value>(\d+)|(True|False)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")')
    re_table_scan_rows = re.compile(r'^query\s+(?P<projection>\*|(\w+\,?)+?)\s+(?P<table_name>\w+)(\s+where\s+op:(?P<op>or|and)\s+conditions\s+(?P<conditions>((\w+?)(=|!=|<|>|<=|>=)((\d+?)|(True|False)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")(\s+)?)+))?;$')
    re_table_update_rows = re.compile(r'^update\s+(?P<table_name>\w+)\s+set\s+(?P<setters>(((\w+)=(True|False|(\d+)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\"))\s?)+)(\s+where\s+op:(?P<op>or|and)\s+conditions\s+(?P<conditions>((\w+?)(=|!=|<|>|<=|>=)((\d+?)|(True|False)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")(\s+)?)+))?;$')
    re_table_delete_rows = re.compile(r'^delete\s+in\s+(?P<table_name>\w+)(\s+where\s+op:(?P<op>or|and)\s+conditions\s+(?P<conditions>((\w+?)(=|!=|<|>|<=|>=)((\d+?)|(True|False)|\"([A-Za-z0-9\/\<\>\:\`\~\.\,\?\!\@\;\'\#\$\%\^\&\*\-\_\+\=\[\{\]\}\\\|\(\)\ ])*?\")(\s+)?)+))?;$')

    # --- CSV import/export and introspection --------------------------
    re_from_csv = re.compile(r'^from\s+csv\s+(?P<csv_path>[^ ]+?\.csv)\s*?;$')
    re_to_csv = re.compile(r'^to\s+csv\s+(?P<csv_path>[^ ]+?\.csv)\s*?;$')
    re_schema = re.compile(r'^schema\s+(?P<table_name>\w+)\s*?;$')
    re_tables = re.compile(r'^tables\s+(?P<db_name>\w+)\s*?;$')
    re_db = re.compile(r'^db\s*?;$')

    def __init__(self):
        pass

    def _get_parse_methods(self):
        """Yield every ``_parse*`` method of the class.

        NOTE(review): dir() returns names alphabetically, so parser
        precedence is alphabetical rather than declaration order; the
        patterns are fully anchored, so this appears harmless — confirm.
        """
        for meth_name in dir(self.__class__):
            meth = getattr(self.__class__, meth_name)
            if meth_name.startswith('_parse') and callable(meth):
                yield meth

    def parse(self, query):
        """Return the command object for ``query`` or raise CommandError."""
        for meth in self._get_parse_methods():
            rv = meth(self, query)
            if rv is not None:
                return rv
        raise CommandError('No command matches; fix or retry (another) query')

    def _parse_db_create(self, query):
        result = self.re_db_create.fullmatch(query)
        if not result:
            return
        return CreateDbCmd(name=result.group('name'))

    def _parse_db_use(self, query):
        result = self.re_db_use.fullmatch(query)
        if not result:
            return
        return UseDbCmd(name=result.group('name'))

    def _parse_db_delete(self, query):
        result = self.re_db_delete.fullmatch(query)
        if not result:
            return
        return DeleteDbCmd(name=result.group('name'))

    def _parse_table_create(self, query):
        # Two-stage parse: whole statement first, then each type:name pair.
        result_main = self.re_table_create_main.fullmatch(query)
        if not result_main:
            return
        name = result_main.group('name')
        columns_str = result_main.group('columns')
        result_cols = self.re_table_create_col.findall(columns_str)
        if not result_cols:
            return
        schema = {col_name:col_type for col_type, col_name in result_cols}
        return CreateTableCmd(name=name, schema=schema)

    def _parse_table_delete(self, query):
        result = self.re_table_delete.fullmatch(query)
        if not result:
            return
        return DeleteTableCmd(name=result.group('name'))

    def _parse_add_column(self, query):
        result = self.re_table_add_column.fullmatch(query)
        if not result:
            return
        name = result.group('name')
        col_type = result.group('col_type')
        col_name = result.group('col_name')
        return AddColumnCmd(name=name, col_type=col_type, col_name=col_name)

    def _parse_del_column(self, query):
        result = self.re_table_del_column.fullmatch(query)
        if not result:
            return
        name = result.group('name')
        col_name = result.group('col_name')
        return DelColumnCmd(name=name, col_name=col_name)

    def _parse_insert_row(self, query):
        result_main = self.re_table_insert_main.fullmatch(query)
        if not result_main:
            return
        name = result_main.group('table_name')
        values_str = result_main.group('values')
        result_values = self.re_table_values.findall(values_str)
        if not result_values:
            return
        row = {col_name:col_value
               for col_name, col_value, _, _ in result_values}
        return InsertCmd(table=name, row=row)

    def _parse_scan_rows(self, query):
        result_main = self.re_table_scan_rows.fullmatch(query)
        if not result_main:
            return
        projection = result_main.group('projection').split(',')
        name = result_main.group('table_name')
        main_op = result_main.group('op')
        conditions_str = result_main.group('conditions')
        # An absent WHERE clause becomes an always-true ConditionList.
        conditions = ConditionList('', [])
        if conditions_str:
            result_conditions = self.re_where_conditions.findall(conditions_str)
            conditions = ConditionList(main_op,
                                       [Comparison(Column(left), op, Literal(right))
                                        for left, op, right, _, _, _ in result_conditions])
        return QueryCmd(table=name, projection=projection,
                        conditions_list=conditions)

    def _parse_table_update_rows(self, query):
        result_main = self.re_table_update_rows.fullmatch(query)
        if not result_main:
            return
        setters_str = result_main.group('setters')
        result_setters = self.re_table_values.findall(setters_str)
        if not result_setters:
            return
        name = result_main.group('table_name')
        main_op = result_main.group('op')
        conditions_str = result_main.group('conditions')
        conditions = ConditionList('', [])
        if conditions_str:
            result_conditions = self.re_where_conditions.findall(conditions_str)
            conditions = ConditionList(main_op,
                                       [Comparison(Column(left), op, Literal(right))
                                        for left, op, right, _, _, _ in result_conditions])
        new_values = {col_name: col_value for col_name, col_value, _, _ in result_setters}
        return UpdateCmd(table=name, values=new_values, conditions_list=conditions)

    def _parse_table_delete_rows(self, query):
        result_main = self.re_table_delete_rows.fullmatch(query)
        if not result_main:
            return
        name = result_main.group('table_name')
        main_op = result_main.group('op')
        conditions_str = result_main.group('conditions')
        conditions = ConditionList('', [])
        if conditions_str:
            result_conditions = self.re_where_conditions.findall(conditions_str)
            conditions = ConditionList(main_op,
                                       [Comparison(Column(left), op, Literal(right))
                                        for left, op, right, _, _, _ in result_conditions])
        return DeleteCmd(table=name, conditions_list=conditions)

    def _parse_tables(self, query):
        result = self.re_tables.fullmatch(query)
        if not result:
            return
        return TablesCmd(db_name=result.group('db_name'))

    def _parse_db(self, query):
        result = self.re_db.fullmatch(query)
        if not result:
            return
        return DbCmd()

    def _parse_from_csv(self, query):
        result = self.re_from_csv.fullmatch(query)
        if not result:
            return
        return FromCsvCmd(csv_path=result.group('csv_path'))

    def _parse_to_csv(self, query):
        result = self.re_to_csv.fullmatch(query)
        if not result:
            return
        return ToCsvCmd(csv_path=result.group('csv_path'))

    def _parse_schema(self, query):
        result = self.re_schema.fullmatch(query)
        if not result:
            return
        return SchemaCmd(table_name=result.group('table_name'))
10ab56398b4e896765f87507294e80c3f8b7fff7 | 6,318 | py | Python | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv4_telnet_mgmt_cfg.py | tkamata-test/ydk-py | b637e7853a8edbbd31fbc05afa3aa4110b31c5f9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv4_telnet_mgmt_cfg.py | tkamata-test/ydk-py | b637e7853a8edbbd31fbc05afa3aa4110b31c5f9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv4_telnet_mgmt_cfg.py | tkamata-test/ydk-py | b637e7853a8edbbd31fbc05afa3aa4110b31c5f9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """ Cisco_IOS_XR_ipv4_telnet_mgmt_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv4\-telnet\-mgmt package configuration.
This module contains definitions
for the following management objects\:
telnet\: Global Telnet configuration commands
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class Telnet(object):
    """Global Telnet configuration commands.

    Auto-generated YDK model for the Cisco-IOS-XR-ipv4-telnet-mgmt-cfg YANG
    module; the nested classes mirror the YANG container hierarchy.

    .. attribute:: vrfs
        VRF name for telnet service (:py:class:`Vrfs`)
    """

    _prefix = 'ipv4-telnet-mgmt-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.vrfs = Telnet.Vrfs()
        self.vrfs.parent = self

    class Vrfs(object):
        """VRF name for telnet service.

        .. attribute:: vrf
            List of per-VRF entries (:py:class:`Vrf`)
        """

        _prefix = 'ipv4-telnet-mgmt-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.vrf = YList()
            self.vrf.parent = self
            self.vrf.name = 'vrf'

        class Vrf(object):
            """A single VRF entry for the telnet service.

            .. attribute:: vrf_name <key>
                VRF name (str, pattern ``[\\w\\-\\.:,_@#%$\\+=\\|;]+``)
            .. attribute:: ipv4
                IPv4 configuration (:py:class:`Ipv4`)
            """

            _prefix = 'ipv4-telnet-mgmt-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.vrf_name = None
                self.ipv4 = Telnet.Vrfs.Vrf.Ipv4()
                self.ipv4.parent = self

            class Ipv4(object):
                """IPv4 configuration.

                .. attribute:: dscp
                    DSCP value (int, range 0..63)
                """

                _prefix = 'ipv4-telnet-mgmt-cfg'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.dscp = None

                @property
                def _common_path(self):
                    # XPath of this node; requires the parent Vrf to be set.
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')

                    return self.parent._common_path +'/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:ipv4'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return True

                def _has_data(self):
                    # True when any leaf under this container is populated.
                    if not self.is_config():
                        return False
                    if self.dscp is not None:
                        return True

                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_telnet_mgmt_cfg as meta
                    return meta._meta_table['Telnet.Vrfs.Vrf.Ipv4']['meta_info']

            @property
            def _common_path(self):
                # The list key must be set before the XPath can be built.
                if self.vrf_name is None:
                    raise YPYModelError('Key property vrf_name is None')

                return '/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:telnet/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:vrfs/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:vrf[Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:vrf-name = ' + str(self.vrf_name) + ']'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return True

            def _has_data(self):
                if not self.is_config():
                    return False
                if self.vrf_name is not None:
                    return True

                if self.ipv4 is not None and self.ipv4._has_data():
                    return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_telnet_mgmt_cfg as meta
                return meta._meta_table['Telnet.Vrfs.Vrf']['meta_info']

        @property
        def _common_path(self):

            return '/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:telnet/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:vrfs'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Populated when any child Vrf entry carries data.
            if not self.is_config():
                return False
            if self.vrf is not None:
                for child_ref in self.vrf:
                    if child_ref._has_data():
                        return True

            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_telnet_mgmt_cfg as meta
            return meta._meta_table['Telnet.Vrfs']['meta_info']

    @property
    def _common_path(self):

        return '/Cisco-IOS-XR-ipv4-telnet-mgmt-cfg:telnet'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.vrfs is not None and self.vrfs._has_data():
            return True

        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_telnet_mgmt_cfg as meta
        return meta._meta_table['Telnet']['meta_info']
| 28.459459 | 216 | 0.532922 | 5,778 | 0.91453 | 0 | 0 | 1,727 | 0.273346 | 0 | 0 | 2,607 | 0.412631 |
10ab5f4f80fbe79dcb3763a9fbb0a0d765cb29f5 | 7,027 | py | Python | projects/solutions/sa/distance.py | vibbits/gentle-hands-on-python | 32aadd1fa8ebee1089cdbb96a4c64ba06c4a8ced | [
"CC-BY-4.0"
] | null | null | null | projects/solutions/sa/distance.py | vibbits/gentle-hands-on-python | 32aadd1fa8ebee1089cdbb96a4c64ba06c4a8ced | [
"CC-BY-4.0"
] | 60 | 2020-06-05T13:22:21.000Z | 2022-03-17T08:43:59.000Z | projects/solutions/sa/distance.py | vibbits/gentle-hands-on-python | 32aadd1fa8ebee1089cdbb96a4c64ba06c4a8ced | [
"CC-BY-4.0"
] | 3 | 2020-04-17T09:46:50.000Z | 2021-09-10T09:41:06.000Z | """
A module for calculating the relationship (or distance) between 2 strings.
Namely:
- edit_distance()
- needleman_wunsch()
- align()
- coverage()
"""
from typing import Callable, Tuple, List
from enum import IntEnum
from operator import itemgetter
from functools import lru_cache
import unittest
class BackTrack(IntEnum):
    """Direction the optimal score in a DP cell came from; used by align()
    to reconstruct the alignment."""
    DELETE = 1       # from the cell above: consume a reference char, gap in target
    INSERT = 2       # from the cell to the left: gap in reference, consume a target char
    SUBSTITUTE = 3   # from the diagonal: align a reference char with a target char
    UNASSIGNED = 4   # initial/unfilled marker
# (Score, backtrack direction)
MatrixElement = Tuple[float, BackTrack]
# DP matrix indexed [row][col]: rows follow `reference`, columns follow `target`.
Matrix = List[List[MatrixElement]]
def print_matrix(mat: Matrix) -> None:
    """Pretty-print an alignment matrix: one text row per matrix row,
    each cell shown as '(score, direction-letter)'."""
    labels = {
        BackTrack.DELETE: "d",
        BackTrack.INSERT: "i",
        BackTrack.SUBSTITUTE: "m",
    }
    # `mat` is stored column-major for printing purposes: transpose into
    # printable rows while formatting each cell.
    printable: List[List[str]] = [[] for _ in mat[0]]
    for column in mat:
        for row_idx, (score, direction) in enumerate(column):
            cell = "(" + str(score) + ", " + labels.get(direction, "?") + ")"
            printable[row_idx].append(cell)
    print("\n".join(" ".join(cells) for cells in printable) + "\n")
@lru_cache(maxsize=None)
def edit_distance(reference: str, target: str) -> Matrix:
    """Compute the Levenshtein edit-distance DP matrix between
    `reference` (rows) and `target` (columns)."""
    n_rows = len(reference) + 1
    n_cols = len(target) + 1
    dp = [[(0.0, BackTrack.UNASSIGNED) for _ in range(n_cols)] for _ in range(n_rows)]

    # Boundary cells: pure deletions down the first column, pure
    # insertions along the first row.
    for r in range(1, n_rows):
        dp[r][0] = (r, BackTrack.DELETE)
    for c in range(1, n_cols):
        dp[0][c] = (c, BackTrack.INSERT)

    for c in range(1, n_cols):
        for r in range(1, n_rows):
            sub_cost = 0 if reference[r - 1] == target[c - 1] else 1
            dp[r][c] = min(
                [
                    (dp[r - 1][c - 1][0] + sub_cost, BackTrack.SUBSTITUTE),
                    (dp[r][c - 1][0] + 1, BackTrack.INSERT),
                    (dp[r - 1][c][0] + 1, BackTrack.DELETE),
                ],
                key=itemgetter(0),
            )
    return dp
@lru_cache(maxsize=None)
def needleman_wunsch(reference: str, target: str) -> Matrix:
    """Compute the Needleman-Wunsch score matrix between `reference` and
    `target`, with affine-style gap penalties (open 2, extend 0.1)."""
    gap_open = 2
    gap_extend = 0.1 # Expected length is 10

    def gap_penalty(gaps: List[MatrixElement], direction: BackTrack) -> float:
        # Cost of ending a gap at this cell: open cost plus an extension
        # charge for each trailing cell that already moved in `direction`,
        # then the score where the gap run started.
        penalty = float(gap_open)
        for gap in gaps[::-1]:
            if gap[1] == direction:
                penalty += gap_extend
            else:
                penalty += gap[0]
                break
        return penalty

    rows = len(reference) + 1
    cols = len(target) + 1
    dist = [[(0.0, BackTrack.UNASSIGNED) for _ in range(cols)] for _ in range(rows)]
    # Boundary cells: all-deletion column / all-insertion row; the initial
    # gap_open is subtracted so the first boundary gap is not double-charged.
    for i in range(1, rows):
        boundaryrow = [dist[r][0] for r in range(0, i)]
        dist[i][0] = (
            gap_penalty(boundaryrow, BackTrack.DELETE) - gap_open,
            BackTrack.DELETE,
        )
    for j in range(1, cols):
        dist[0][j] = (
            gap_penalty(dist[0][:j], BackTrack.INSERT) - gap_open,
            BackTrack.INSERT,
        )
    for col in range(1, cols):
        for row in range(1, rows):
            insert_penalty = gap_penalty(
                [dist[row][c] for c in range(col)], BackTrack.INSERT
            )
            delete_penalty = gap_penalty(
                [dist[r][col] for r in range(row)], BackTrack.DELETE
            )
            dist[row][col] = min(
                [
                    (insert_penalty, BackTrack.INSERT),
                    (delete_penalty, BackTrack.DELETE),
                    (
                        dist[row - 1][col - 1][0]
                        + (0 if reference[row - 1] == target[col - 1] else 1),
                        BackTrack.SUBSTITUTE,
                    ),
                ],
                key=itemgetter(0),
            )
    return dist
def align(
    reference: str, target: str, scoringfn: Callable[[str, str], Matrix]
) -> Tuple[str, str]:
    """Compute the alignment between `reference` and `target` using the
    backtrack directions stored in the matrix produced by `scoringfn`
    (edit_distance or needleman_wunsch).

    Returns the pair of gapped strings ('-' marks a gap), both the same length.
    """
    i = len(reference)
    j = len(target)
    matrix = scoringfn(reference, target)
    _reference = ""
    _target = ""
    # Walk from the bottom-right corner to (0, 0), emitting characters in
    # reverse; the boundary rows/columns carry INSERT/DELETE markers, so
    # the walk always terminates at the origin.
    while (i, j) != (0, 0):
        backtrack = matrix[i][j][1]
        if backtrack == BackTrack.SUBSTITUTE:
            i -= 1
            j -= 1
            _reference += reference[i]
            _target += target[j]
        elif backtrack == BackTrack.INSERT:
            j -= 1
            _reference += "-"
            _target += target[j]
        elif backtrack == BackTrack.DELETE:
            i -= 1
            _reference += reference[i]
            _target += "-"
    # Built back-to-front, so reverse both before returning.
    return (_reference[::-1], _target[::-1])
@lru_cache(maxsize=None)
def coverage(reference: str, target: str) -> int:
    """
    The number of substitutions in an alignment, i.e. positions where
    neither sequence has a gap.

    >>> coverage("---ATGGC", "GTTA-GGG")
    4
    """
    aligned_pairs = zip(reference, target)
    return sum(1 for ref, tgt in aligned_pairs if ref != "-" and tgt != "-")
####### TESTING ########
class TestDistance(unittest.TestCase):
    """Unit tests for coverage() and align() with both scoring functions."""

    def test_coverage(self):
        " Unit tests for the coverage() function. "
        self.assertEqual(coverage("", ""), 0)
        self.assertEqual(coverage("A", "A"), 1)
        self.assertEqual(coverage("A", "G"), 1)
        self.assertEqual(coverage("-A", "AA"), 1)
        self.assertEqual(coverage("A-", "AA"), 1)
        self.assertEqual(coverage("AA", "-A"), 1)
        self.assertEqual(coverage("AA", "A-"), 1)
        self.assertEqual(coverage("A-A", "AAA"), 2)
        self.assertEqual(coverage("AAA", "A-A"), 2)

    def test_align_edit_distance(self):
        " Unit tests for align() using edit_distance(). "
        self.assertEqual(align("", "", edit_distance), ("", ""))
        self.assertEqual(align("A", "A", edit_distance), ("A", "A"))
        self.assertEqual(align("AB", "A", edit_distance), ("AB", "A-"))
        self.assertEqual(align("AB", "B", edit_distance), ("AB", "-B"))
        self.assertEqual(align("A", "AB", edit_distance), ("A-", "AB"))
        self.assertEqual(align("B", "AB", edit_distance), ("-B", "AB"))
        self.assertEqual(align("AB", "CD", edit_distance), ("AB", "CD"))

    def test_align_needleman_wunsch(self):
        " Unit tests for align() using needleman_wunsch(). "
        # Note the gap placement differs from edit_distance for "AB"/"A".
        self.assertEqual(align("", "", needleman_wunsch), ("", ""))
        self.assertEqual(align("A", "A", needleman_wunsch), ("A", "A"))
        self.assertEqual(align("AB", "A", needleman_wunsch), ("AB", "-A"))
        self.assertEqual(align("AB", "B", needleman_wunsch), ("AB", "-B"))
        self.assertEqual(align("A", "AB", needleman_wunsch), ("-A", "AB"))
        self.assertEqual(align("B", "AB", needleman_wunsch), ("-B", "AB"))
        self.assertEqual(align("AB", "CD", needleman_wunsch), ("AB", "CD"))
if __name__ == "__main__":
    import doctest
    doctest.testmod()  # run the doctest embedded in coverage()
    unittest.main()    # then the TestDistance suite above
| 30.552174 | 84 | 0.539491 | 1,956 | 0.278355 | 0 | 0 | 3,047 | 0.433613 | 0 | 0 | 1,125 | 0.160097 |
10abfb12f6e2336b0704fd165a04ee71fb341a6a | 10,391 | py | Python | indicator17.py | nkzhengwt/Spyder_cta | 1cb5e8fd9d70da381ef198aec6431aca4feb24da | [
"MIT"
] | 13 | 2018-05-18T09:19:24.000Z | 2019-03-18T02:06:49.000Z | indicator17.py | nkuzhengwt/spyder_cta | 1cb5e8fd9d70da381ef198aec6431aca4feb24da | [
"MIT"
] | null | null | null | indicator17.py | nkuzhengwt/spyder_cta | 1cb5e8fd9d70da381ef198aec6431aca4feb24da | [
"MIT"
] | 2 | 2019-03-17T14:29:10.000Z | 2019-04-09T01:50:38.000Z | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import math
import datetime
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
class Indicators():
    def __init__(self, dataframe, params = []):
        """Prepare the trade dataframe and split it into per-year slices.

        `dataframe` must carry at least 'open' and 'tradeDate' columns —
        TODO confirm against the caller.
        NOTE(review): `params = []` is a mutable default argument — harmless
        here because it is only assigned, but fragile if callers mutate it.
        """
        self.dataframe = dataframe
        self.params = params
        # Daily simple return of the 'open' price.
        self.dataframe['return'] = 0
        for i in range(1,len(dataframe['return'])):
            #http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
            dataframe.loc[i,'return'] = (self.dataframe.loc[i,'open']-self.dataframe.loc[i-1,'open'])/self.dataframe.loc[i-1,'open']
        self.Return = dataframe['return']
        self.dataframe['time'] = dataframe['tradeDate']
        # Cumulative return normalised to the first 'open' price.
        self.dataframe['cumulative_return'] = self.dataframe['open']
        self.dataframe['cumulative_return'] = self.dataframe['cumulative_return']/self.dataframe.loc[0,'open']
        self.dataframe['cumulative_return'] = dataframe['cumulative_return']#*1000000
        # Datetime index enables time-based slicing/plotting.
        self.dataframe.index = pd.to_datetime(dataframe['tradeDate'])#!!!!!
        # Split the frame into per-year slices keyed by the year string.
        self.year_slice = {}
        i = 0
        y = time.strptime(self.dataframe['time'].iat[0],"%Y-%m-%d").tm_year
        for j in range(1,len(self.dataframe)):
            if y != time.strptime(self.dataframe['time'].iat[j],"%Y-%m-%d").tm_year:
                # NOTE(review): slice end is exclusive, so [i:j-1] drops the
                # last row of each year — probably should be [i:j]; confirm.
                self.year_slice[str(y)] = dataframe[i:j-1]
                y = time.strptime(self.dataframe['time'].iat[j],"%Y-%m-%d").tm_year
                i = j
        self.year_slice[str(y)] = dataframe[i:]
    ### Annualized return
def annual_return(self,asset,year):
R = self.year_slice[year][asset].iat[-1]/self.year_slice[year][asset].iat[0]
t1 = time.strptime(self.year_slice[year]['time'].iat[0],"%Y-%m-%d")
t2 = time.strptime(self.year_slice[year]['time'].iat[-1],"%Y-%m-%d")
d1 = datetime.datetime(t1.tm_year, t1.tm_mon, t1.tm_mday)
d2 = datetime.datetime(t1.tm_year, t2.tm_mon, t2.tm_mday)
n = (d2-d1).days
n = n/244
# print('The annual return for %s in %s is %f' %(asset,year,math.pow(R, 1/n)-1))
return math.pow(R, 1/n)-1
    ### Maximum drawdown
def max_draw(self,asset,year):
self.year_slice[year]['max'] = 0
self.year_slice[year].ix[0,'max'] = self.year_slice[year].ix[0,asset]#loc, iloc, and ix
for i in range(1, len(self.year_slice[year][asset])):
if self.year_slice[year].ix[i, asset] > self.year_slice[year].ix[i-1, 'max']:
self.year_slice[year].ix[i, 'max'] = self.year_slice[year].ix[i, asset]
else:
self.year_slice[year].ix[i, 'max'] = self.year_slice[year].ix[i-1, 'max']
self.year_slice[year]['retreat']=(self.year_slice[year][asset]- self.year_slice[year]['max'])/self.year_slice[year]['max']
print('The max draw for %s in %s is %f' %(asset,year,abs(min(self.year_slice[year]['retreat']))))
return abs(min(self.year_slice[year]['retreat']))
    ### Volatility
def volatility(self,asset,year):
print('The volatility for %s in %s is %f' %(asset,year,np.std(self.year_slice[year][asset])*math.sqrt(244/len(self.year_slice[year][asset]))))
return np.std(self.year_slice[year][asset])*math.sqrt(244/len(self.year_slice[year][asset]))
    ### Sharpe ratio
    def sharp(self, asset,no_risk_R,year):
        """Sharpe-style ratio: (annual return - risk-free rate) / scaled volatility.

        NOTE(review): the denominator multiplies volatility() — which is
        already scaled by sqrt(244/len) — by sqrt(244/len) again; confirm
        the intended scaling.  1e-10 guards against division by zero.
        The whole expression is computed twice (once for the log line).
        """
        print('The Sharp Ratio for %s in %s is %.7f' %(asset,year,(self.annual_return(asset,year)-no_risk_R)/(self.volatility(asset,year)*math.sqrt(244/len(self.year_slice[year][asset]))+1e-10)))
        return (self.annual_return(asset,year)-no_risk_R)/(self.volatility(asset,year)*math.sqrt(244/len(self.year_slice[year][asset]))+1e-10)
    ### Calmar ratio
    def calmar(self, asset,year):
        """Calmar ratio: annualized return divided by maximum drawdown.

        Note: annual_return() and max_draw() are each evaluated twice
        (once for the log line, once for the return value).
        """
        print('The Calmar Ratio for %s in %s is %f' %(asset,year,self.annual_return(asset,year)/self.max_draw(asset,year)))
        return self.annual_return(asset,year)/self.max_draw(asset,year)
    ### Daily win ratio
def daily_win_ratio(self,asset,year):
#df的条件选择不是self.dataframe[asset][self.dataframe[asset] > 0]而是self.dataframe[self.dataframe[asset] > 0][asset]
#!!
pnl = asset.replace('asset','pnl')
n1 = len(self.year_slice[year][self.year_slice[year][pnl] > 0][pnl])
n2 = len(self.year_slice[year][pnl])
print('The daily win ratio for %s in %s is %f' %(asset,year,n1/n2))
return n1/n2
    ### Daily gain/loss ratio
def win_lose_ratio(self,asset,year):
self.year_slice[year]['dif'] = self.year_slice[year][asset] - self.year_slice[year][asset].shift(1)
print('The win lose ratio for %s in %s is %f' %(asset,year,abs(min(self.year_slice[year]['retreat']))))
return abs(sum(self.year_slice[year][self.year_slice[year]['dif']>0]['dif']))/abs(sum(self.year_slice[year][self.year_slice[year]['dif']<0]['dif']))
    ### Worst drawdown interval
    def worst_draw_interval(self,asset,year):
        """Locate the worst drawdown: returns (trough date(s), peak date(s)).

        Tracks for each row the running peak and the date that peak was
        set (strict `>`, so ties keep the earliest peak date), then picks
        the row with the deepest retreat.
        NOTE(review): DataFrame.ix was removed in pandas 1.0 — this method
        only runs on old pandas; port to .iloc/.loc when upgrading.
        """
        self.year_slice[year]['max'] = 0
        self.year_slice[year].ix[0,'max'] = self.year_slice[year].ix[0,asset]
        self.year_slice[year]['max_time'] = self.year_slice[year]['time']
        for i in range(1, len(self.year_slice[year][asset])):
            if self.year_slice[year].ix[i, asset] > self.year_slice[year].ix[i-1, 'max']:
                self.year_slice[year].ix[i, 'max'] = self.year_slice[year].ix[i, asset]
            else:
                self.year_slice[year].ix[i, 'max'] = self.year_slice[year].ix[i-1, 'max']
                self.year_slice[year].ix[i, 'max_time'] = self.year_slice[year].ix[i-1, 'max_time']
        self.year_slice[year]['retreat']=(self.year_slice[year][asset]- self.year_slice[year]['max'])/self.year_slice[year]['max']
        max_draw = min(self.year_slice[year]['retreat'])
        data = self.year_slice[year][self.year_slice[year]['retreat'] == max_draw]
        # t1: date(s) of the trough; t2: date(s) of the preceding peak.
        t1 = data['tradeDate']#
        t2 = data['max_time']
        #print('The worst draw interval for %s in %s is %s %s' %(asset,year,str(t1),str(t2)))
        return t1,t2
    ### Total turnover
def total_turnover(self,asset,year):
turnover = asset.replace('asset','turnover')
print('The total turnover for %s in %s is %f' %(asset,year,sum(self.year_slice[year][turnover])))
return sum(self.year_slice[year][turnover])
    ### Average daily turnover
def average_daily_turnover(self,asset,year):
t1 = time.strptime(self.year_slice[year]['time'].iat[0],"%Y-%m-%d")
t2 = time.strptime(self.year_slice[year]['time'].iat[-1],"%Y-%m-%d")
d1 = datetime.datetime(t1.tm_year, t1.tm_mon, t1.tm_mday)
d2 = datetime.datetime(t1.tm_year, t2.tm_mon, t2.tm_mday)
n = (d2-d1).days
print('The average daily turnover for %s in %s is %f' %(asset,year,self.total_turnover(asset,year)/n))
return self.total_turnover(asset,year)/n
    ### Average daily position
def average_daily_position(self,asset,year):
position = asset.replace('asset','position')
print('The average daily position for %s in %s is %f' %(asset,year,self.year_slice[year][position].mean()))
return self.year_slice[year][position].mean()
    ### Average per-trade value
def minor_average_return(self,asset,year):
position = asset.replace('asset','position')
sum_pos = sum(self.year_slice[year][self.year_slice[year][position]!=0][position])
num = len(self.year_slice[year][self.year_slice[year][position]!=0][position])
print('The minor average return for %s in %s is %f' %(asset,year,sum_pos/num))
return sum_pos/num
def write_indicators_concat(self,path):
frames = []
for items in self.year_slice:
temp_data = []
temp_index = []
for k in self.params:
x = [items,
self.annual_return('asset'+ str(k),items),
self.max_draw('asset'+ str(k),items),
self.volatility('asset'+ str(k),items),
self.sharp('asset'+ str(k),0,items),
self.calmar('asset'+ str(k),items),
self.daily_win_ratio('asset'+ str(k),items),
self.win_lose_ratio('asset'+ str(k),items),
self.total_turnover('asset'+ str(k),items),
self.average_daily_turnover('asset'+ str(k),items),
self.average_daily_position('asset'+ str(k),items),
self.minor_average_return('asset'+ str(k),items)]
temp_data.append(x)
temp_index.append('asset'+ str(k))
DataFrame = pd.DataFrame(temp_data,index=temp_index,columns=['year','annual_return', 'max_draw', 'volatility', 'sharp','calmar','daily_win_ratio','win_lose_ratio','total_turnover','average_daily_turnover','average_daily_position','minor_average_return'])
frames.append(DataFrame)
DataFrame = pd.concat(frames)
DataFrame.to_csv(path_or_buf=path)
def plot_figure(self,asset_num):
t1 = time.strptime(self.dataframe['time'].iat[0],"%Y-%m-%d")
t2 = time.strptime(self.dataframe['time'].iat[-1],"%Y-%m-%d")
d1 = datetime.datetime(t1.tm_year, t1.tm_mon, t1.tm_mday)
d2 = datetime.datetime(t1.tm_year, t2.tm_mon, t2.tm_mday)
plt.figure()
plt.subplots_adjust(hspace=1, wspace=1)
plt.subplot(3,1,1)
self.dataframe['asset'+ str(asset_num)].plot(legend = True)
self.dataframe['cumulative_return'].plot(x=None, y=None, kind='line', ax=None, subplots=False, sharex=None, sharey=False, layout=None, figsize=None, use_index=True, title=None, grid=None, legend=True, style=None, logx=False, logy=False, loglog=False, xticks=None, yticks=None, xlim=None, ylim=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, secondary_y=False, sort_columns=False)
plt.subplot(3,1,2)
f2 = plt.bar(range(len(self.dataframe['transaction'+ str(asset_num)])), self.dataframe['transaction'+ str(asset_num)].tolist(),tick_label= None,label='transaction'+ str(asset_num))
plt.legend((f2,),('transaction'+ str(asset_num),))
plt.subplot(3,1,3)
f3 = plt.bar(range(len(self.dataframe['pnl'+ str(asset_num)])),self.dataframe['pnl'+ str(asset_num)].tolist(),label='pnl'+ str(asset_num))
plt.legend((f3,),('pnl'+ str(asset_num),))
plt.show()
# Script entry point: load the local trade log and show plots for asset 10.
if __name__=='__main__':
    # Hard-coded absolute CSV path and the parameter list [5, 10, 20].
    indicators = Indicators('/Users/zhubaobao/Documents/Quant/ZXJT/total3.csv', [5,10,20])
    #indicators.write_indicators_concat('/Users/zhubaobao/Documents/Quant/ZXJT/write_indicators.csv')
    indicators.plot_figure(10)
| 53.287179 | 420 | 0.630161 | 10,076 | 0.958432 | 0 | 0 | 0 | 0 | 0 | 0 | 2,180 | 0.207362 |
10ad5bf59feb60ba24a561f3cbe2b398632d6514 | 1,785 | py | Python | colour/examples/io/examples_ies_tm2714.py | BPearlstine/colour | 40f0281295496774d2a19eee017d50fd0c265bd8 | [
"Cube",
"BSD-3-Clause"
] | 2 | 2020-05-03T20:15:42.000Z | 2021-04-09T18:19:06.000Z | colour/examples/io/examples_ies_tm2714.py | BPearlstine/colour | 40f0281295496774d2a19eee017d50fd0c265bd8 | [
"Cube",
"BSD-3-Clause"
] | null | null | null | colour/examples/io/examples_ies_tm2714.py | BPearlstine/colour | 40f0281295496774d2a19eee017d50fd0c265bd8 | [
"Cube",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Showcases input / output *IES TM-27-14* spectral data XML files related
examples.
"""
import os
import colour
from colour.utilities import message_box
RESOURCES_DIRECTORY = os.path.join(os.path.dirname(__file__), 'resources')
message_box('"IES TM-27-14" Spectral Data "XML" File IO')
message_box('Reading spectral data from "IES TM-27-14" "XML" file.')
sd = colour.SpectralDistribution_IESTM2714(
os.path.join(RESOURCES_DIRECTORY, 'TM27 Sample Spectral Data.spdx'))
sd.read()
print(sd)
print('\n')
message_box('"IES TM-27-14" spectral data "XML" file header:')
print('Manufacturer: {0}'.format(sd.header.manufacturer))
print('Catalog Number: {0}'.format(sd.header.catalog_number))
print('Description: {0}'.format(sd.header.description))
print('Document Creator: {0}'.format(sd.header.document_creator))
print('Unique Identifier: {0}'.format(sd.header.unique_identifier))
print('Measurement Equipment: {0}'.format(sd.header.measurement_equipment))
print('Laboratory: {0}'.format(sd.header.laboratory))
print('Report Number: {0}'.format(sd.header.report_number))
print('Report Date: {0}'.format(sd.header.report_date))
print('Document Creation Date: {0}'.format(sd.header.document_creation_date))
print('Comments: {0}'.format(sd.header.comments))
print('\n')
message_box('"IES TM-27-14" spectral data "XML" file spectral distribution:')
print('Spectral Quantity: {0}'.format(sd.spectral_quantity))
print('Reflection Geometry: {0}'.format(sd.reflection_geometry))
print('Transmission Geometry: {0}'.format(sd.transmission_geometry))
print('Bandwidth FWHM: {0}'.format(sd.bandwidth_FWHM))
print('Bandwidth Corrected: {0}'.format(sd.bandwidth_corrected))
print('\n')
message_box('"IES TM-27-14" spectral data "XML" file spectral data:')
print(sd)
| 35.7 | 77 | 0.746779 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 792 | 0.443697 |
10ae75ed502594e92857e84f0c648a364231335c | 158 | py | Python | ISA/Util/__init__.py | tumido/FIT-VUT-projects | 0e23c38a11d0aee55921e15b8865256efddefc53 | [
"BSD-2-Clause"
] | null | null | null | ISA/Util/__init__.py | tumido/FIT-VUT-projects | 0e23c38a11d0aee55921e15b8865256efddefc53 | [
"BSD-2-Clause"
] | null | null | null | ISA/Util/__init__.py | tumido/FIT-VUT-projects | 0e23c38a11d0aee55921e15b8865256efddefc53 | [
"BSD-2-Clause"
] | 3 | 2015-05-16T00:29:59.000Z | 2021-02-03T00:31:16.000Z | from .Announce import get_announce, announce_to_txt
from .Torrent import get_torrent_file, parse_torrent_file
from .Tracker import get_peerlist, save_peerlist | 52.666667 | 57 | 0.873418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
10b0dfd51fb725245bdb9b998ac1c1c6f5c26b22 | 2,577 | py | Python | python-sdk/odi/client/storage/s3.py | Project-OpenBytes/odi | ab9dbcc4d29c80e21f1a8ca3be911eb755495c54 | [
"Apache-2.0"
] | 5 | 2021-12-09T09:32:36.000Z | 2022-01-31T20:21:35.000Z | python-sdk/odi/client/storage/s3.py | Project-OpenBytes/odi | ab9dbcc4d29c80e21f1a8ca3be911eb755495c54 | [
"Apache-2.0"
] | 17 | 2022-01-07T04:24:25.000Z | 2022-02-14T11:22:41.000Z | python-sdk/odi/client/storage/s3.py | Project-OpenBytes/odi | ab9dbcc4d29c80e21f1a8ca3be911eb755495c54 | [
"Apache-2.0"
] | 6 | 2021-12-09T09:29:22.000Z | 2021-12-31T11:00:24.000Z | # Copyright 2021 The OpenBytes Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from concurrent.futures import as_completed, ThreadPoolExecutor
from typing import Any, Dict, List
import requests
from tqdm import tqdm
from odi.client.storage.storage import Storage
class _S3Obj:
def __init__(self, name: str, path: str, size: int, url: str) -> None:
self._name = name
self._path = path
self._size = size
self._url = url
@property
def name(self) -> str:
return self._name
@property
def path(self) -> str:
return self._path
@property
def size(self) -> int:
return self._size
@property
def url(self) -> str:
return self._url
class S3(Storage):
    """Storage backend that downloads objects through plain HTTP URLs."""

    @classmethod
    def _load_obj(cls, data: List[Dict[str, Any]]) -> List[_S3Obj]:
        """Convert raw API records into ``_S3Obj`` instances."""
        return [
            _S3Obj(name=d["name"], path=d["fullPath"], size=d["size"], url=d["url"])
            for d in data
        ]

    def upload(self) -> Any:
        # Upload is not implemented for this backend.
        pass

    def download(self, data: List[Dict[str, Any]], path="") -> Any:
        """Download every object described in *data* concurrently.

        :param data: raw object records with ``name``/``fullPath``/``size``/``url``.
        :param path: unused; kept for interface compatibility.
        """
        objs = self._load_obj(data)
        size = self._size_convert(size=sum(o.size for o in objs), origin="b", target="mb")
        total = len(objs)
        print(f"Total: {total}, {size}MB.")
        with tqdm(total=total) as pbar:
            # Cap the pool: one thread per object is wasteful for large sets,
            # and ThreadPoolExecutor rejects max_workers=0 for an empty list.
            workers = max(1, min(32, total))
            with ThreadPoolExecutor(max_workers=workers) as executor:
                futures = [executor.submit(self._download_obj, obj) for obj in objs]
                for future in as_completed(futures):
                    future.result()  # re-raise any download error
                    pbar.update(1)

    @classmethod
    def _download_obj(cls, obj: _S3Obj) -> None:
        """Stream one object to ``obj.path``, creating parent directories."""
        directory = os.path.dirname(obj.path)
        # Guard: os.makedirs('') raises when the path has no directory part.
        if directory and not os.path.exists(directory):
            os.makedirs(directory)
        with requests.get(obj.url, stream=True) as r:
            with open(obj.path, "wb") as file:
                for chunk in r.iter_content(chunk_size=1024):
                    file.write(chunk)
| 31.814815 | 97 | 0.625146 | 1,759 | 0.682577 | 0 | 0 | 856 | 0.332169 | 0 | 0 | 686 | 0.266201 |
10b2cc5f512be5bb8241e30505ccfc29ff27302f | 1,505 | py | Python | Chapter14/c14_11_rainbow_callMaxOn2_viaSimulation.py | John-ye666/Python-for-Finance-Second-Edition | dabef09bcdd7b0ec2934774741bd0a7e1950de73 | [
"MIT"
] | 236 | 2017-07-02T03:06:54.000Z | 2022-03-31T03:15:33.000Z | Chapter14/c14_11_rainbow_callMaxOn2_viaSimulation.py | John-ye666/Python-for-Finance-Second-Edition | dabef09bcdd7b0ec2934774741bd0a7e1950de73 | [
"MIT"
] | null | null | null | Chapter14/c14_11_rainbow_callMaxOn2_viaSimulation.py | John-ye666/Python-for-Finance-Second-Edition | dabef09bcdd7b0ec2934774741bd0a7e1950de73 | [
"MIT"
] | 139 | 2017-06-30T10:28:16.000Z | 2022-01-19T19:43:34.000Z | """
Name : c14_11_rainbow_callMaxOn2_viaSimulation.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 6/6/2017
email : yany@canisius.edu
paulyxy@hotmail.com
"""
import numpy as np
from numpy import sqrt
# NOTE: this script originally used ``import scipy as sp`` together with
# ``from scipy import zeros, sqrt, shape``. Those top-level aliases were
# deprecated and removed from SciPy (they were plain re-exports of NumPy),
# so NumPy is used directly; the numerical results are identical.
#
np.random.seed(123)    # fix our random numbers (reproducible Monte Carlo)
s1=100.                # stock price 1
s2=95.                 # stock price 2
k=102.0                # exercise price
T=8./12.               # maturity in years
r=0.08                 # risk-free rate
rho=0.75               # correlation between the two assets
sigma1=0.15            # volatility for stock 1
sigma2=0.20            # volatility for stock 2 (comment fixed: was "stock 1")
nSteps=100.            # number of steps
nSimulation=1000       # number of simulations
#
# step 1: generate correlated random number
dt =T/nSteps
call = np.zeros([nSimulation], dtype=float)
x = range(0, int(nSteps), 1)
#
# step 2: simulate both price paths and record each payoff
for j in range(0, nSimulation):
    x1=np.random.normal(size=nSimulation)
    x2=np.random.normal(size=nSimulation)
    y1=x1
    # Cholesky-style mixing so corr(y1, y2) == rho.
    y2=rho*x1+np.sqrt(1-rho**2)*x2
    sT1=s1
    sT2=s2
    for i in x[:-1]:
        e1=y1[i]
        e2=y2[i]
        # Geometric Brownian motion step for each underlying.
        sT1*=np.exp((r-0.5*sigma1**2)*dt+sigma1*e1*sqrt(dt))
        sT2*=np.exp((r-0.5*sigma2**2)*dt+sigma2*e2*sqrt(dt))
    minOf2=min(sT1,sT2)
    call[j]=max(minOf2-k,0)
#
# Step 3: summation and discount back
call=np.mean(call)*np.exp(-r*T)
print('Rainbow call on minimum of 2 assets = ', round(call,3))
| 28.396226 | 62 | 0.598007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 622 | 0.413289 |
10b32ad162c00e0088fd2085f0ff60ca3be39e4b | 67 | py | Python | loudblog_just_for_git_purposes/yatraWebPageAutomation.py | hodlerfolyf/MobileSatyagraha_Backend_aws | 3a82689ae44867f93073581a27c22cd5d8390c52 | [
"MIT"
] | 2 | 2020-01-27T03:25:37.000Z | 2020-03-30T21:48:07.000Z | loudblog_just_for_git_purposes/yatraWebPageAutomation.py | hodlerfolyf/MobileSatyagraha_Backend_aws | 3a82689ae44867f93073581a27c22cd5d8390c52 | [
"MIT"
] | 3 | 2021-03-25T23:33:50.000Z | 2021-06-01T23:24:21.000Z | loudblog_just_for_git_purposes/yatraWebPageAutomation.py | hodlerfolyf/MobileSatyagraha_Backend_aws | 3a82689ae44867f93073581a27c22cd5d8390c52 | [
"MIT"
] | 3 | 2021-12-07T11:50:26.000Z | 2022-01-22T09:21:05.000Z | from db_repo import *
# Build the DB helper; ``database_flaskr`` comes from the star-import above.
mydb=database_flaskr()
rows=mydb.yatraWPA() | 22.333333 | 23 | 0.776119 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
10b3898c39cc445a1d7a9fa450e58ed1fdf4ed6d | 13,823 | py | Python | python/constants.py | cbilstra/FATE | 40f3962db471764e1cfcd39e295a13308b7d85fd | [
"MIT"
] | null | null | null | python/constants.py | cbilstra/FATE | 40f3962db471764e1cfcd39e295a13308b7d85fd | [
"MIT"
] | null | null | null | python/constants.py | cbilstra/FATE | 40f3962db471764e1cfcd39e295a13308b7d85fd | [
"MIT"
] | null | null | null | import os
from os.path import join
# --- Global experiment switches for the FATE fuzzing framework. ---
# NOTE: many flags below are combined with `and`/`or` so that one master
# switch (e.g. TEST_OUTSIDE_FUZZER) consistently overrides dependent options.
INVESTIGATE = False # Records coverages and saves them. Generates a plot in the end. Do not use with automate.
TEST_OUTSIDE_FUZZER = False # Runs FATE as standalone (1+1) EA
BLACKBOX = True and TEST_OUTSIDE_FUZZER # Disables white-box information such as thresholds and feat imp.
FORCE_DEFAULT_EPSILON = True or TEST_OUTSIDE_FUZZER # Runs all datasets with the default epsilon
FORCE_DEFAULT_MUTATION_CHANCE = False or TEST_OUTSIDE_FUZZER # Runs all datasets with the default mutation chance
LIMIT_TIME = True # If false, run 10 times as long
############ FATE Standalone ############
CROSSOVER_CHANCE = 0.001 # Chance that crossover occurs
CROSSOVER_RANDOM_CHANCE = 1.0 # Actual chance for crossover with random features is 0.001
# CROSSOVER_CHANCE * CROSSOVER_RANDOM_CHANCE
NUM_RUNS = 100000000 # Unlimited. Change for smaller amount of runs
POPULATION_SIZE = 1 # Population size.
############ RQ 1 defaults ############
MEASURE_EXEC_P_S = True # Parse the number of executions per second.
ALLOW_FLOAT_MIS_CLASSIFICATION = True # If True, do not filter mis-classifications from the produced AE
CONSISTENT_DRAWS = True # Seeds random with 0, to create consistent check-set draws
FUZZ_ONE_POINT_PER_INSTANCE = True # compile to generic fuzz target and fuzz per point
USE_CUSTOM_MUTATOR = True # If False, use the standard mutator of LibFuzzer
USE_CROSSOVER = True and USE_CUSTOM_MUTATOR # Combines mutation with crossover (split at random location)
USE_GAUSSIAN = True # Gaussian vs random uniform mutation
USE_PROBABILITY_STEPS_SPECIAL = True # Proba descent based on small proba diff between 2nd class predicted
PROBA_LIMIT_WITHIN_EPSILON = True # Only save seeds if within epsilon
WRITE_AE_ONLY_IF_BETTER_OUTSIDE_BRANCHES = True # Saves execution time
ALWAYS_OPTIMIZE = True # Otherwise only optimize small files
MUTATE_DEPTH = 7 if TEST_OUTSIDE_FUZZER else 5 # The maximum number of consecutive mutations per seed for LibFuzzer
DEFAULT_EPSILON = 0.1 if TEST_OUTSIDE_FUZZER else 0.2 # Default epsilon
DEFAULT_MUTATE_CHANCE = 0.5 if TEST_OUTSIDE_FUZZER else 0.1 # Chance that a single features is mutated
# Active fuzzer backend; alternatives are kept commented for quick switching.
FUZZER = 'libFuzzer'
# FUZZER = 'AFL++'
# FUZZER = 'honggfuzz'
# FUZZER = 'AFLGo'
FUZZERS = ['libFuzzer', 'AFL++', 'AFLGo', 'honggfuzz']
if FUZZER not in FUZZERS:
    raise ValueError(f'Fuzzer {FUZZER} not recognised, should be one of [{", ".join(FUZZERS)}]')
if FUZZER == 'honggfuzz' and USE_CUSTOM_MUTATOR:
    raise ValueError('Honggfuzz and custom mutator is not supported')
############ RQ 2 defaults ############
AE_MUTATE_TOWARDS_VICTIM = True # If AE, mutate values only towards victim point.
MUTATE_BIGGEST_CHANCE = 0.5 # When an AE is found, the chance to only mutate all biggest difference fs towards victim
ALSO_MUTATE_BIGGEST = True # Always mutate all features > the biggest l-inf distance - 0.01. Only with FUZZ_ONE
# These alter the chance that a feature is mutated
BIAS_MUTATE_BIG_DIFFS = True
USE_THRESHOLDS_FOR_MUTATION = True and not BLACKBOX # move to optimal boundary value after drawing from mutation dist
# Fuzzes for each datapoint with and without AE init
DOUBLE_FUZZ_WITH_AE = True and not (TEST_OUTSIDE_FUZZER or INVESTIGATE)
USE_FEATURE_IMPORTANCE = True and not BLACKBOX # prioritize more important features for mutation
INITIALIZE_WITH_POINT_IN_BETWEEN = True and DOUBLE_FUZZ_WITH_AE
INITIALIZE_WITH_EXTRA_POINTS_IN_BETWEEN = True and INITIALIZE_WITH_POINT_IN_BETWEEN
# Sanity checks: abort early on inconsistent flag combinations.
if TEST_OUTSIDE_FUZZER and (not FUZZ_ONE_POINT_PER_INSTANCE):
    raise ValueError('Test outside fuzzer conflicting options')
if TEST_OUTSIDE_FUZZER and DOUBLE_FUZZ_WITH_AE and (POPULATION_SIZE < 2 or CROSSOVER_RANDOM_CHANCE > 0.99):
    raise ValueError('Test outside fuzzer double fuzz configuration problem')
############ RQ 1.2 defaults ############
FILTER_BAD_AE = True # If True, discards all AE that are worse than FAILURE_THRES
FUZZ_ONLY_COV_FOR_FOREST = False # Only insert coverage-guidance for the lines that belong to the Forest
FUZZ_ONLY_COV_FOR_CHECK = True # Only insert coverage-guidance for the lines that belong to the objective function
FUZZ_WITHOUT_COVERAGE_GUIDANCE = False # If True, baseline: removes almost all coverage guidance (except TestOneInput)
if FUZZER == 'AFL++' and FUZZ_WITHOUT_COVERAGE_GUIDANCE:
    raise ValueError('AFL++ crashes because the fuzzer name cannot be set with the -n (no instrument) option')
############ Objective function settings ############
COMBINE_DISTANCE_AND_PROBABILITY = False # distance = distance + probability
USE_PROBABILITY_STEPS = False # probability steps in the check function ELSE branch
PROBA_SPECIAL_ALWAYS = False
PROBA_SPECIAL_START_STEP = 0.2
PROBA_SPECIAL_STEP_SIZE = 0.01
WRITE_AE_ALWAYS_IN_IF = False # Slower option for the objective function
if USE_PROBABILITY_STEPS and USE_PROBABILITY_STEPS_SPECIAL:
    raise ValueError('Select at most one type of probability step')
if WRITE_AE_ALWAYS_IN_IF and WRITE_AE_ONLY_IF_BETTER_OUTSIDE_BRANCHES:
    raise ValueError('Only one write_X can be used on the settings')
############ Fuzzer settings ############
NEVER_OPTIMIZE = False
FORCE_ENTROPIC = False # libfuzzer. Experimental. Enables entropic power schedule.
NO_ENTROPIC = False
FOCUS_FUNCTION = "0" # focus_function 0 Experimental. Fuzzing will focus on inputs that trigger calls
# # to this function. If -focus_function=auto and -data_flow_trace is used, libFuzzer will choose the
# focus functions automatically.
if sum([FUZZ_WITHOUT_COVERAGE_GUIDANCE, FUZZ_ONLY_COV_FOR_CHECK, FUZZ_ONLY_COV_FOR_FOREST]) > 1:
    raise ValueError('Only one coverage guidance option can be used at the same time')
if NEVER_OPTIMIZE and ALWAYS_OPTIMIZE:
    raise ValueError('Conflicting optimize options')
############ AFL settings ############
# TIME_NO_NEW_COV = 10
IS_AE_CHANCE = 0.5 # Because we cannot access the fuzzer logic in the mutator
NUM_CYCLES_IN_LOOP = 1000 # Number of consecutive iterations after which we start with a clean sheet
AFL_USE_DICT = True and not USE_CUSTOM_MUTATOR
AFL_USE_CMP_LOG = False and not USE_CUSTOM_MUTATOR
ENABLE_DETERMINISTIC = False
SKIP_DETERMINISTIC = False
# see docs/power_schedules.md
AFL_SCHEDULE = None # one of fast(default, use None), explore, exploit, seek, rare, mmopt, coe, lin, quad
# AFL generic
AFL_MUTATE_FILENAME = "afl_mutation.cc"
AFL_OUTPUT_DIR = "afl_out"
# AFL++
AFLPP_DICT_PATH = join(os.getcwd(), 'afl_dict')
AFLPP_TEMPLATE_PATH = "templates/aflpp.jinja2"
MUTATE_TEMPLATE_PATH = "templates/mutate.jinja2"
AFLPP_COMPILER_PATH = "afl-clang-lto++"
# AFLPP_COMPILER_PATH = "afl-clang-fast++"
# AFLGo
AFL_GO_COMPILER_PATH = "/home/cas/AFLGo/afl-clang-fast++"
AFL_GO_FUZZ_PATH = "/home/cas/AFLGo/afl-fuzz"
AFL_GO_GEN_DIST_PATH = "/home/cas/AFLGo/scripts/gen_distance_fast.py"
AFL_GO_TARGETS_FILE = 'BBtargets.txt'
AFLGO_TEMPLATE_PATH = "templates/aflgo.jinja2"
############ honggfuzz settings ############
HONG_COMPILER_PATH = "/home/cas/honggfuzz/hfuzz_cc/hfuzz-clang++"
HONG_FUZZER_PATH = "/home/cas/honggfuzz/honggfuzz"
HONG_OUTPUT_DIR = "hongg_out"
############ Mutation settings ############
MINIMIZE_THRESHOLD_LIST = False # Removes all thresholds within 0.0001 from each other
IS_AE_FAKE = False # Fakes the model query if the current input is an AE
USE_WAS_AE = False # Saves the result of the last known model query
STEEP_CURVE = False # If True, square the draw from the gaussian distribution, such that smaller draws are more likely
# feature importance is calculated by its occurrence
FEATURE_IMPORTANCE_BASED_ON_OCCURRENCE = False and USE_FEATURE_IMPORTANCE
MUTATE_LESS_WHEN_CLOSER = False # When True, multiplies mutation with largest diff between fuzzed and victim.
# as splitting threshold in the forest. Cannot be true together with AE_MUTATE_TOWARDS_VICTIM
# True when any mutate-time AE refinement needs a model query inside the mutator.
AE_CHECK_IN_MUTATE = (ALSO_MUTATE_BIGGEST or BIAS_MUTATE_BIG_DIFFS or USE_THRESHOLDS_FOR_MUTATION or
                      AE_MUTATE_TOWARDS_VICTIM or MUTATE_LESS_WHEN_CLOSER) and FUZZ_ONE_POINT_PER_INSTANCE \
                     and FUZZER != 'AFL++'
if MUTATE_LESS_WHEN_CLOSER and AE_MUTATE_TOWARDS_VICTIM:
    raise ValueError('Mutate less and AE mutate towards original cannot be used together')
############ AE init ############
# k-ANN structure
ANN_TREES = 10 # the amount of trees for the "annoy" lookup
K_ANN = 10 # how many nearest neighbours to find
NO_SEED_INIT = False # When True, each run is only seeded with all-0 features. No input is not possible, because
# The custom mutator would otherwise break.
INITIALIZE_WITH_AE = False # use ANN to seed with K_ANN closest data-points from other classes
INITIALIZE_WITH_AVG_OPPOSITE = False # For binary-classification: seed with average member of the other class
INITIALIZE_WITH_POINT_IN_BETWEEN = INITIALIZE_WITH_POINT_IN_BETWEEN or \
                                   (True and INITIALIZE_WITH_AE)
INITIALIZE_WITH_EXTRA_POINTS_IN_BETWEEN = INITIALIZE_WITH_EXTRA_POINTS_IN_BETWEEN or \
                                          (True and INITIALIZE_WITH_POINT_IN_BETWEEN)
INITIALIZE_WITH_FULL_TRAIN_SET = False # Put all instances of other class from test set in corpus.
if INITIALIZE_WITH_FULL_TRAIN_SET and (INITIALIZE_WITH_AE or DOUBLE_FUZZ_WITH_AE):
    raise ValueError('INITIALIZE_WITH_FULL_TRAIN_SET cannot be used with INITIALIZE_WITH_AE or DOUBLE_FUZZ_WITH_AE')
if sum([INITIALIZE_WITH_AE, INITIALIZE_WITH_AVG_OPPOSITE, INITIALIZE_WITH_FULL_TRAIN_SET]) > 1:
    raise ValueError('Conflicting initialize options')
############ Testing ############
DEBUG = False # If True, shows output and runs 1 sample with 1 thread only.
MEASURE_COVERAGE = False # Measure coverage through instrumentation, costs exec/s
SKIP_COMPILATION = False
COMPILE_ONLY = False
PRINT_NUMBER_OF_LEAVES = False # Estimate for model size
INVESTIGATE_WITH_SCATTER = False and INVESTIGATE # Shows a scatter plot instead of a line plot when INVESTIGATE
NUM_INVESTIGATE_RUNS = 5 # The number of repetitions for creating plots.
FAILURE_THRES = 0.9 # See FILTER_BAD_AE
SHOW_OUTPUT = False or DEBUG # Shows fuzzer output
CREATE_LOOKUP = False or INITIALIZE_WITH_AE or INITIALIZE_WITH_AVG_OPPOSITE or INVESTIGATE \
                or INITIALIZE_WITH_FULL_TRAIN_SET or DOUBLE_FUZZ_WITH_AE
if DEBUG and MEASURE_EXEC_P_S:
    raise ValueError('Debug and measure exec/s cannot be used at the same time')
if INVESTIGATE and DOUBLE_FUZZ_WITH_AE:
    raise ValueError('Double fuzz together with investigate should not be used.')
NUM_DEBUG = 1
NUM_THREADS = 10 if not DEBUG else NUM_DEBUG # Number of simultaneous fuzzing instances, but is also
# Used for training the ensembles, the MILP attack and the lt-attack (Zhang)
NUM_ADV_SUPER_QUICK = 10 # The number of victims to attack for runs with the -qq flag.
NUM_ADV_QUICK = 50 # The number of victims to attack for runs with the -q flag.
NUM_ADV_CHECKS = 500 if not DEBUG else NUM_DEBUG # number of adversarial victims
MAX_POINTS_LOOKUP = 5000 # The AE lookup will be created over this amount of training samples maximum
DEFAULT_TIME_PER_POINT = 1 # The default fuzzing time per datapoint
MODEL_TYPES = ['RF', 'GB'] # the identifiers of the model types (Random Forest, Gradient Boosting)
DISTANCE_NORMS = ['l_0', 'l_1', 'l_2', 'l_inf']
DISTANCE_NORM = 'l_inf' # the norm to calculate the distance in the fuzzer
if DISTANCE_NORM not in DISTANCE_NORMS:
    raise ValueError(f'Norm {DISTANCE_NORM} not recognised, should be one of [{", ".join(DISTANCE_NORMS)}]')
# Distance thresholds swept during fuzzing; alternatives kept for experiments.
DISTANCE_STEPS = [round(0.005 * i, 3) for i in reversed(range(1, 201))] # [1.0, 0.995, ..., 0.005]
# DISTANCE_STEPS = [round(0.001 * i, 3) for i in reversed(range(1, 1001))] # [1.0, 0.999, ..., 0.001]
# DISTANCE_STEPS = [round(0.01 * i, 2) for i in reversed(range(1, 101))] # [1.0, 0.99, ..., 0.01]
# DISTANCE_STEPS = [round(0.1 * i, 1) for i in reversed(range(1, 11))] # [1.0, 0.99, ..., 0.01]
# DISTANCE_STEPS = [0.8, 0.7, 0.6] \
#                  + [round(0.01 * i, 2) for i in reversed(range(11, 51))] \
#                  + [round(0.001 * i, 3) for i in reversed(range(1, 101))] # Decreasing
# DISTANCE_STEPS = [0.8, 0.7] \
#                  + [round(0.01 * i, 2) for i in reversed(range(25, 70))] \
#                  + [round(0.001 * i, 3) for i in reversed(range(20, 250))] \
#                  + [round(0.0001 * i, 4) for i in reversed(range(1, 200))] # Decreasing very small
DISTANCE_STEPS.append(0.000001)
PROBABILITY_STEPS = [round(0.01 * i, 2) for i in reversed(range(1, 51))] # [1.0, 0.99, ..., 0.01]
# PROBABILITY_STEPS = [0.8, 0.7, 0.6] \
#                     + [round(0.01 * i, 2) for i in reversed(range(1, 51))] # [0.8, 0.7, ..., 0.5, 0.49...]
# PROBABILITY_STEPS = [round(0.5 + 0.05 * i, 2) for i in reversed(range(1, 11))] \
#                     + [round(0.2 + 0.01 * i, 2) for i in reversed(range(1, 31))] \
#                     + [round(0.005 * i, 3) for i in reversed(range(1, 41))]
# Directories, all relative to main folder (code)
CHECK_DIR = "python/.CHECK"
IMAGE_DIR = 'python/img'
RESULTS_DIR = "python/.RESULTS"
COVERAGES_DIR = "python/.COVERAGES"
MODEL_DIR = "python/models"
JSON_DIR = join(MODEL_DIR, 'json')
DATA_DIR = "python/data"
LIB_SVM_DIR = join(DATA_DIR, 'libsvm')
OPEN_ML_DIR = join(DATA_DIR, 'openml')
ZHANG_DATA_DIR = join(DATA_DIR, 'zhang')
CORPUS_DIR = ".GENERATED_CORPUS"
ADV_DIR = ".ADVERSARIAL_EXAMPLES"
ZHANG_CONFIG_DIR = ".ZHANG_CONFIGS"
# Files
NUM_FEATURES_PATH = ".num_features"
LIBFUZZER_TEMPLATE_PATH = "templates/libfuzzer.jinja2"
OUTPUT_FILE = "fuzzme.cc"
# WARNING, run with --reload (once for each dataset) after changing these.
DEFAULT_LEARNING_RATE_GB = 0.1
TEST_FRACTION = 0.2
NUM_SAMPLES = 2500 # For synthetic datasets
# Better not change these
SMALL_PERTURBATION_THRESHOLD = 0.00001
THRESHOLD_DIGITS = 7
BYTES_PER_FEATURE = 8 # float = 4, double = 8
TIME_PRECISION = 4
def get_num_adv():
    """Return the configured number of adversarial victims (NUM_ADV_CHECKS)."""
    return NUM_ADV_CHECKS
| 56.651639 | 119 | 0.747667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,814 | 0.56529 |
10b3992140f6b71f5bf7a94b467357cdf363478d | 417 | py | Python | tests/test_cli.py | sthagen/python-xmllint_map_html | 23363cfe1c126bc72efddf8fea084283375e2204 | [
"MIT"
] | null | null | null | tests/test_cli.py | sthagen/python-xmllint_map_html | 23363cfe1c126bc72efddf8fea084283375e2204 | [
"MIT"
] | 16 | 2020-09-11T11:07:09.000Z | 2020-12-06T16:42:18.000Z | tests/test_cli.py | sthagen/python-xmllint_map_html | 23363cfe1c126bc72efddf8fea084283375e2204 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,unused-import,reimported
import io
import json
import pytest # type: ignore
import xmllint_map_html.cli as cli
import xmllint_map_html.xmllint_map_html as xmh
def test_main_ok_minimal(capsys):
    """main() with a single empty argument succeeds and prints nothing."""
    argv = ['']
    expected_report = ''
    assert cli.main(argv=argv) == 0
    captured_out, _err = capsys.readouterr()
    assert captured_out.strip() == expected_report.strip()
| 24.529412 | 60 | 0.714628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.242206 |
10b4c550d804f5bcd18eb2df2fb3a4ec068f3834 | 1,668 | py | Python | mat3json.py | tienhaophung/poseval | e78fd221835803895d693de5b30dd7002fd7991c | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | mat3json.py | tienhaophung/poseval | e78fd221835803895d693de5b30dd7002fd7991c | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | mat3json.py | tienhaophung/poseval | e78fd221835803895d693de5b30dd7002fd7991c | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import os
# import json
from scipy.io import loadmat
import argparse
import mat4py
import h5py
import json_tricks as json
# Command-line interface: a single option pointing at the directory that
# holds the .mat files to convert. Parsed eagerly at import time.
parser = argparse.ArgumentParser(description="Convert .mat to .json file")
parser.add_argument("-ddir", "--data_dir", type=str, default="",
                    help="Data directory of .mat files")
args = parser.parse_args()
def list_dir(data_dir, allowed_extensions=('.mat',)):
    """
    Recursively list files in a directory filtered by extension.

    Args:
        data_dir: data directory to walk.
        allowed_extensions: file extensions that are accepted
            (default: only ``.mat``). Accepts any container supporting ``in``.

    Returns:
        file_paths: list of matching file paths.
    """
    # Immutable tuple default replaces the original mutable-list default.
    file_paths = []
    for root, _dirs, files in os.walk(data_dir):
        for file_name in files:
            _stem, extension = os.path.splitext(file_name)
            if extension in allowed_extensions:
                file_paths.append(os.path.join(root, file_name))
    return file_paths
def makedir(path):
    """Create *path* (including parents) if needed, reporting the outcome."""
    try:
        os.makedirs(path, exist_ok=True)
    except OSError:
        print("Directory %s failed to create!" %path)
    else:
        print("Directory %s created successfully!" %path)
def main(args):
    """Convert every .mat file below ``args.data_dir`` to a .json file
    written next to it (same basename, ``.json`` suffix)."""
    paths = list_dir(args.data_dir)
    for path in paths:
        print(path)
        x = loadmat(path) # load mat file
        # x = h5py.File(path, 'r')
        # tables.openFile(path)
        # x = mat4py.loadmat(path)
        # NOTE(review): debug print — assumes the .mat file contains an
        # 'annolist' struct with 'annorect'; confirm for other inputs.
        print(x["annolist"]["annorect"])
        json_fn = os.path.splitext(path)[0] + ".json"
        with open(json_fn, 'wt') as json_f:
            json.dump(x, json_f)
        # break
if __name__ == "__main__":
    main(args)
| 26.47619 | 74 | 0.613909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 579 | 0.347122 |
10b59feccd43ac230e025390e6b432e7a9694f66 | 261 | py | Python | easy/calculate_distance.py | Amin-Abouee/code_eval | 8f0d53d2af128d7b57dd3fc8ff16616252f18a52 | [
"MIT"
] | null | null | null | easy/calculate_distance.py | Amin-Abouee/code_eval | 8f0d53d2af128d7b57dd3fc8ff16616252f18a52 | [
"MIT"
] | null | null | null | easy/calculate_distance.py | Amin-Abouee/code_eval | 8f0d53d2af128d7b57dd3fc8ff16616252f18a52 | [
"MIT"
] | null | null | null | import sys
import re
import math
# Python 2 script (note the print *statement*): each input line holds two
# integer points "(x1, y1) (x2, y2)"; output their truncated Euclidean distance.
with open(sys.argv[1]) as test_cases:
    for test in test_cases:
        # Pull every signed integer off the line, in order of appearance.
        t = re.findall("[+-]?\d+", test)
        nums = [int(x) for x in t]
        # int() truncates the square root toward zero.
        print int(math.sqrt((nums[0]-nums[2]) * (nums[0]-nums[2]) + (nums[1]-nums[3]) * (nums[1]-nums[3])))
10b61e4dc3d48f58d5fda316899ef0e8fdafca8b | 4,606 | py | Python | airflow_provider_kafka/operators/produce_to_topic.py | astronomer/airflow-provider-kafka | c836ff91a5a1cd3a5c1f635a0e3c6602322b579b | [
"Apache-2.0"
] | 3 | 2022-03-11T02:22:41.000Z | 2022-03-25T21:53:10.000Z | airflow_provider_kafka/operators/produce_to_topic.py | astronomer/airflow-provider-kafka | c836ff91a5a1cd3a5c1f635a0e3c6602322b579b | [
"Apache-2.0"
] | 1 | 2022-03-09T11:22:03.000Z | 2022-03-09T11:22:03.000Z | airflow_provider_kafka/operators/produce_to_topic.py | astronomer/airflow-provider-kafka | c836ff91a5a1cd3a5c1f635a0e3c6602322b579b | [
"Apache-2.0"
] | null | null | null | import logging
from functools import partial
from typing import Any, Callable, Dict, Optional, Sequence, Union
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow_provider_kafka.hooks.producer import KafkaProducerHook
from airflow_provider_kafka.shared_utils import get_callable
local_logger = logging.getLogger("airflow")
def acked(err, msg):
if err is not None:
local_logger.error(f"Failed to deliver message: {err}")
else:
local_logger.info(
f"Produced record to topic {msg.topic()} partition [{msg.partition()}] @ offset {msg.offset()}"
)
class ProduceToTopicOperator(BaseOperator):
    """An operator that produces messages to a Kafka topic.

    :param topic: The topic the producer should produce to, defaults to None
    :type topic: str, optional
    :param producer_function: The function that generates key/value pairs as messages for production,
        defaults to None
    :type producer_function: Union[str, Callable[..., Any]], optional
    :param producer_function_args: Additional arguments to be applied to the producer callable, defaults to None
    :type producer_function_args: Optional[Sequence[Any]], optional
    :param producer_function_kwargs: Additional keyword arguments to be applied to the producer callable,
        defaults to None
    :type producer_function_kwargs: Optional[Dict[Any, Any]], optional
    :param delivery_callback: The callback to apply after delivery(or failure) of a message, defaults to None
    :type delivery_callback: Optional[str], optional
    :param kafka_conn_id: The airflow connection to get brokers address from, defaults to None
    :type kafka_conn_id: Optional[str], optional
    :param synchronous: If writes to kafka should be fully synchronous, defaults to True
    :type synchronous: Optional[bool], optional
    :param kafka_config: the config dictionary for the kafka client (additional information available on the
        confluent-python-kafka documentation), defaults to None
    :type kafka_config: Optional[Dict[Any, Any]], optional
    :param poll_timeout: How long of a delay should be applied when calling poll after production to kafka,
        defaults to 0
    :type poll_timeout: float, optional
    :raises AirflowException: if either ``topic`` or ``producer_function`` is not provided
    """

    def __init__(
        self,
        topic: str = None,
        producer_function: Union[str, Callable[..., Any]] = None,
        producer_function_args: Optional[Sequence[Any]] = None,
        producer_function_kwargs: Optional[Dict[Any, Any]] = None,
        delivery_callback: Optional[str] = None,
        kafka_conn_id: Optional[str] = None,
        synchronous: Optional[bool] = True,
        kafka_config: Optional[Dict[Any, Any]] = None,
        poll_timeout: float = 0,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)

        # Resolve the delivery callback eagerly; fall back to the module-level
        # logging callback when none was configured.
        if delivery_callback:
            dc = get_callable(delivery_callback)
        else:
            dc = acked

        self.kafka_conn_id = kafka_conn_id
        self.kafka_config = kafka_config
        self.topic = topic
        self.producer_function = producer_function or ""
        self.producer_function_args = producer_function_args or ()
        self.producer_function_kwargs = producer_function_kwargs or {}
        self.delivery_callback = dc
        self.synchronous = synchronous
        self.poll_timeout = poll_timeout

        # Fail fast on an unusable configuration instead of at execute() time.
        if not (self.topic and self.producer_function):
            raise AirflowException(
                "topic and producer_function must be provided. Got topic="
                + f"{self.topic} and producer_function={self.producer_function}"
            )

    def execute(self, context) -> Any:
        """Produce every key/value pair yielded by the producer callable."""
        # Get producer and callable
        producer = KafkaProducerHook(
            kafka_conn_id=self.kafka_conn_id, config=self.kafka_config
        ).get_producer()

        # A dotted-path string is resolved to the actual callable lazily,
        # here at execute time, so the operator stays serializable.
        if isinstance(self.producer_function, str):
            self.producer_function = get_callable(self.producer_function)

        producer_callable = partial(
            self.producer_function, *self.producer_function_args, **self.producer_function_kwargs
        )

        # For each returned k/v in the callable: publish and flush if needed.
        for k, v in producer_callable():
            producer.produce(self.topic, key=k, value=v, on_delivery=self.delivery_callback)
            producer.poll(self.poll_timeout)
            if self.synchronous:
                producer.flush()

        producer.flush()
| 41.125 | 118 | 0.694746 | 3,962 | 0.860182 | 0 | 0 | 0 | 0 | 0 | 0 | 2,015 | 0.437473 |
10b7df52a7109b3cd059e5aa3d9c1aee9eb2218c | 2,171 | py | Python | tests/models/boundary/test_is_boundary_concave_to_y.py | EderVs/Voronoi-Diagrams | 6e69f9b6eb516dee12d66f187cf267a7b527da5f | [
"MIT"
] | 3 | 2021-11-12T17:43:08.000Z | 2022-01-03T02:47:34.000Z | tests/models/boundary/test_is_boundary_concave_to_y.py | EderVs/Voronoi-Diagrams | 6e69f9b6eb516dee12d66f187cf267a7b527da5f | [
"MIT"
] | 3 | 2021-11-19T20:12:31.000Z | 2021-11-19T20:14:39.000Z | tests/models/boundary/test_is_boundary_concave_to_y.py | EderVs/Voronoi-Diagrams | 6e69f9b6eb516dee12d66f187cf267a7b527da5f | [
"MIT"
] | null | null | null | """Test is_boundary_not_x_monotone method in WeightedPointBoundary."""
# Standard
from typing import List, Any
from random import randint
# Models
from voronoi_diagrams.models import (
WeightedSite,
WeightedPointBisector,
WeightedPointBoundary,
)
# Math
from decimal import Decimal
class TestWeightedPointBoundaryIsBoundaryConcaveToY:
"""Test formula."""
def test_with_concave_to_y_boundary(self):
"""Test with a boundary that is concave to y."""
p = WeightedSite(Decimal(-20), Decimal(10), Decimal(2))
# q is the one in the top.
q = WeightedSite(Decimal(-5), Decimal(10), Decimal(7))
bisector = WeightedPointBisector(sites=(p, q))
boundary_plus = WeightedPointBoundary(bisector=bisector, sign=True)
boundary_minus = WeightedPointBoundary(bisector=bisector, sign=False)
assert not boundary_plus.is_boundary_not_x_monotone()
assert boundary_minus.is_boundary_not_x_monotone()
def test_with_normal_boundary(self):
"""Test with a boundary that is not concave to y."""
p = WeightedSite(Decimal(-20), Decimal(10), Decimal(2))
# q is the one in the top.
q = WeightedSite(Decimal(-8), Decimal(18), Decimal(7))
bisector = WeightedPointBisector(sites=(p, q))
boundary_plus = WeightedPointBoundary(bisector=bisector, sign=True)
boundary_minus = WeightedPointBoundary(bisector=bisector, sign=False)
assert not boundary_plus.is_boundary_not_x_monotone()
assert not boundary_minus.is_boundary_not_x_monotone()
def test_with_stopped_boundary(self):
"""Test with a boundary that is not concave to y."""
p = WeightedSite(Decimal(-20), Decimal(10), Decimal(2))
# q is the one in the top.
q = WeightedSite(Decimal(-5), Decimal(15), Decimal(7))
bisector = WeightedPointBisector(sites=(p, q))
boundary_plus = WeightedPointBoundary(bisector=bisector, sign=True)
boundary_minus = WeightedPointBoundary(bisector=bisector, sign=False)
assert not boundary_plus.is_boundary_not_x_monotone()
assert not boundary_minus.is_boundary_not_x_monotone()
| 41.75 | 77 | 0.706587 | 1,872 | 0.862275 | 0 | 0 | 0 | 0 | 0 | 0 | 343 | 0.157992 |
10b999d930f89df35286892beee53b570eddc382 | 1,202 | py | Python | src/core/models/classsification.py | romanovacca/detectioncollection | 98f43ceb4ddd51af39fd6566685a8dc57a1380b4 | [
"MIT"
] | null | null | null | src/core/models/classsification.py | romanovacca/detectioncollection | 98f43ceb4ddd51af39fd6566685a8dc57a1380b4 | [
"MIT"
] | null | null | null | src/core/models/classsification.py | romanovacca/detectioncollection | 98f43ceb4ddd51af39fd6566685a8dc57a1380b4 | [
"MIT"
] | null | null | null | import os
import torchvision
import torchvision.models as models
from src.core.config import config
class Model():
def __init__(self, model=None, classes=None, device=None):
""" Initializes a model that is predefined or manually added.
Most models are taked from Pytorch's torchvision. These model
can be for classification/object detection purposes.
"""
self._determine_model(model)
if classes:
print("true")
else:
self._classes = config["classification"]["resnet_classes"]
self.classes_mapping = {label: index for index, label in enumerate(self._classes)}
def _determine_model(self, model):
available_models = {
"resnet18": models.resnet18(),
"alexnet": models.alexnet(),
"vgg16": models.vgg16()
}
if model in available_models:
self._model = available_models[model]
elif model == None:
self._model = available_models["resnet18"]
else:
raise ValueError("The model that you have chosen is not in the current library.")
#print(f"Model chosen: {self._model.__class__.__name__}")
| 30.820513 | 93 | 0.631448 | 1,097 | 0.912646 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.331115 |
10bb63ae565c938efcb0bb9a6dc0a388e76e7918 | 46,242 | py | Python | flash/core/data/io/input_transform.py | ar90n/lightning-flash | 61e1a2d3b72f8fbbffe6ace14fb5b5bb35c5f131 | [
"Apache-2.0"
] | null | null | null | flash/core/data/io/input_transform.py | ar90n/lightning-flash | 61e1a2d3b72f8fbbffe6ace14fb5b5bb35c5f131 | [
"Apache-2.0"
] | null | null | null | flash/core/data/io/input_transform.py | ar90n/lightning-flash | 61e1a2d3b72f8fbbffe6ace14fb5b5bb35c5f131 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.enums import LightningEnum
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from flash.core.data.callback import ControlFlow
from flash.core.data.transforms import ApplyToKeys
from flash.core.data.utilities.collate import default_collate
from flash.core.data.utils import _INPUT_TRANSFORM_FUNCS, _STAGES_PREFIX
from flash.core.utilities.stages import RunningStage
from flash.core.utilities.types import INPUT_TRANSFORM_TYPE
class InputTransformPlacement(LightningEnum):
    """Names of the hooks at which an ``InputTransform`` can apply a transform.

    The ``*_ON_DEVICE`` members run after the batch has been moved to the device; the
    other members run on cpu (inside the dataloader workers).
    """

    PER_SAMPLE_TRANSFORM = "per_sample_transform"
    PER_BATCH_TRANSFORM = "per_batch_transform"
    COLLATE = "collate"
    PER_SAMPLE_TRANSFORM_ON_DEVICE = "per_sample_transform_on_device"
    PER_BATCH_TRANSFORM_ON_DEVICE = "per_batch_transform_on_device"
class ApplyToKeyPrefix(LightningEnum):
    """Prefixes used to build key-specific hook names (e.g. ``input_per_sample_transform``),
    targeting the "input" or "target" entry of a sample dictionary."""

    INPUT = "input"
    TARGET = "target"
INVALID_STAGES_FOR_INPUT_TRANSFORMS = [RunningStage.SANITY_CHECKING, RunningStage.TUNING]
# Credit to Torchvision Team:
# https://pytorch.org/vision/stable/_modules/torchvision/transforms/transforms.html#Compose
class Compose:
    """Chain several transforms so that each one consumes the previous output.

    This transform does not support torchscript.
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, x):
        result = x
        for transform in self.transforms:
            result = transform(result)
        return result

    def __repr__(self):
        # One transform per line, matching torchvision's Compose representation.
        listing = "".join(f"\n{transform}" for transform in self.transforms)
        return f"{type(self).__name__}({listing}\n)"
@dataclass
class _InputTransformPerStage:
    # Whether collation should run inside the dataloader worker (``True``), on device
    # (``False``), or has not been determined yet (``None``).
    collate_in_worker_from_transform: Optional[bool] = None
    # Mapping from hook name (see ``InputTransformPlacement``) to the resolved callable.
    transforms: Optional[Dict[str, Callable]] = None
@dataclass
class InputTransform:
def __post_init__(self):
self.callbacks: Optional[List] = None
# used to keep track of provided transforms
self._transform: Dict[RunningStage, _InputTransformPerStage] = {}
# For all the stages possible, set/load the transforms.
for stage in RunningStage:
if stage not in INVALID_STAGES_FOR_INPUT_TRANSFORMS:
self._populate_transforms_for_stage(stage)
def current_transform(self, stage: RunningStage, current_fn: str) -> Callable:
if stage in [RunningStage.SANITY_CHECKING, RunningStage.TUNING]:
raise KeyError(
f"Transforms are only defined for stages:"
f"\t{[stage for stage in RunningStage if stage not in INVALID_STAGES_FOR_INPUT_TRANSFORMS]}"
f"But received {stage} instead."
)
# Check is transforms are present and the key is from the Enum defined above.
if InputTransformPlacement.from_str(current_fn) is None:
raise KeyError(
f"{[fn for fn in InputTransformPlacement]} are the only allowed keys to retreive the transform."
f"But received {current_fn} instead."
)
return self._transform[stage].transforms.get(current_fn, self._identity)
########################
# PER SAMPLE TRANSFORM #
########################
def per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on a single sample on cpu for all stages stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_sample_transform(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def input_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each sample on
device for all stages stage."""
pass
def target_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each sample on
device for all stages stage."""
pass
def train_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on a single sample on cpu for the training stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
"""
pass
def train_input_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on cpu for the training stage."""
pass
def train_target_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on cpu for the training stage."""
pass
def val_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on a single sample on cpu for the validating stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_sample_transform(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def val_input_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on cpu for the validating stage."""
pass
def val_target_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on cpu for the validating stage."""
pass
def test_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on a single sample on cpu for the testing stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
"""
pass
def test_input_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on cpu for the testing stage."""
pass
def test_target_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on cpu for the testing stage."""
pass
def predict_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on a single sample on cpu for the predicting stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_sample_transform(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def predict_input_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on cpu for the predicting stage."""
pass
def predict_target_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on cpu for the predicting stage."""
pass
def serve_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on a single sample on cpu for the serving stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_sample_transform(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def serve_input_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on cpu for the serving stage."""
pass
def serve_target_per_sample_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on cpu for the serving stage."""
pass
##################################
# PER SAMPLE TRANSFORM ON DEVICE #
##################################
def per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on a single sample on device for all stages stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_sample_transform_on_device(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def input_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each sample on
device for all stages stage."""
pass
def target_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each sample on
device for all stages stage."""
pass
def train_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on a single sample on device for the training stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
"""
pass
def train_input_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on device for the training stage."""
pass
def train_target_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on device for the training stage."""
pass
def val_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on a single sample on device for the validating stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_sample_transform_on_device(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def val_input_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on device for the validating stage."""
pass
def val_target_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on device for the validating stage."""
pass
def test_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on a single sample on device for the testing stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
"""
pass
def test_input_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on device for the testing stage."""
pass
def test_target_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on device for the testing stage."""
pass
def predict_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on a single sample on device for the predicting stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_sample_transform_on_device(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def predict_input_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on device for the predicting stage."""
pass
def predict_target_per_sample_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on device for the predicting stage."""
pass
#######################
# PER BATCH TRANSFORM #
#######################
def per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on a batch of data on cpu for all stages stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_batch_transform(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def input_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of batch on cpu for all
stages stage."""
pass
def target_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of batch on cpu for
all stages stage."""
pass
def train_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on a batch of data on cpu for the training stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
"""
pass
def train_input_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on cpu for the training stage."""
pass
def train_target_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on cpu for the training stage."""
pass
def val_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on a batch of data on cpu for the validating stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_batch_transform(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def val_input_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on cpu for the validating stage."""
pass
def val_target_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on cpu for the validating stage."""
pass
def test_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on a batch of data on cpu for the testing stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
"""
pass
def test_input_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on cpu for the testing stage."""
pass
def test_target_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on cpu for the testing stage."""
pass
def predict_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on a batch of data on cpu for the predicting stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_batch_transform(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def predict_input_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on cpu for the predicting stage."""
pass
def predict_target_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on cpu for the predicting stage."""
pass
def serve_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on a batch of data on cpu for the serving stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_batch_transform(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def serve_input_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on cpu for the serving stage."""
pass
def serve_target_per_batch_transform(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on cpu for the serving stage."""
pass
#################################
# PER BATCH TRANSFORM ON DEVICE #
#################################
def per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on a batch of data on device for all stages stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_batch_transform_on_device(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def input_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of batch on device for
all stages stage."""
pass
def target_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of batch on device for
all stages stage."""
pass
def train_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on a batch of data on device for the training stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
"""
pass
def train_input_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on device for the training stage."""
pass
def train_target_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on device for the training stage."""
pass
def val_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on a batch of data on device for the validating stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_batch_transform_on_device(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def val_input_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on device for the validating stage."""
pass
def val_target_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on device for the validating stage."""
pass
def test_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on a batch of data on device for the testing stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
"""
pass
def test_input_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on device for the testing stage."""
pass
def test_target_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on device for the testing stage."""
pass
def predict_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on a batch of data on device for the predicting stage.
The input data of the transform would have the following form::
{
DataKeys.INPUT: ...,
DataKeys.TARGET: ...,
DataKeys.METADATA: ...,
}
You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:
.. code-block:: python
from flash.core.data.transforms import ApplyToKeys
class MyInputTransform(InputTransform):
def per_batch_transform_on_device(self) -> Callable:
return ApplyToKeys("input", my_func)
"""
pass
def predict_input_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "input" key of each single sample
on device for the predicting stage."""
pass
def predict_target_per_batch_transform_on_device(self) -> Callable:
"""Defines the transform to be applied on the value associated with the "target" key of each single sample
on device for the predicting stage."""
pass
###########
# COLLATE #
###########
    def train_collate(self) -> Callable:
        """Defines the transform to be applied on a list of training samples to create a training batch."""
        return default_collate

    def val_collate(self) -> Callable:
        """Defines the transform to be applied on a list of validating samples to create a validating batch."""
        return default_collate

    def test_collate(self) -> Callable:
        """Defines the transform to be applied on a list of testing samples to create a testing batch."""
        return default_collate

    def predict_collate(self) -> Callable:
        """Defines the transform to be applied on a list of predicting samples to create a predicting batch."""
        return default_collate

    def serve_collate(self) -> Callable:
        """Defines the transform to be applied on a list of serving samples to create a serving batch."""
        return default_collate

    def collate(self) -> Callable:
        """Defines the transform to be applied on a list of samples to create a batch for all stages."""
        return default_collate
########################################
# HOOKS CALLED INTERNALLY WITHIN FLASH #
########################################
def _per_sample_transform(self, sample: Any, stage: RunningStage) -> Any:
fn = self.current_transform(stage=stage, current_fn="per_sample_transform")
if isinstance(sample, list):
return [fn(s) for s in sample]
return fn(sample)
def _per_batch_transform(self, batch: Any, stage: RunningStage) -> Any:
"""Transforms to apply to a whole batch (if possible use this for efficiency).
.. note:: This option is mutually exclusive with :meth:`per_sample_transform_on_device`, since if both are
specified, uncollation has to be applied.
"""
return self.current_transform(stage=stage, current_fn="per_batch_transform")(batch)
def _collate(self, samples: Sequence, stage: RunningStage) -> Any:
"""Transform to convert a sequence of samples to a collated batch."""
return self.current_transform(stage=stage, current_fn="collate")(samples)
def _per_sample_transform_on_device(self, sample: Any, stage: RunningStage) -> Any:
"""Transforms to apply to the data before the collation (per-sample basis).
.. note:: This option is mutually exclusive with :meth:`per_batch_transform`, since if both are
specified, uncollation has to be applied. .. note:: This function won't be called within the dataloader
workers, since to make that happen each of the workers would have to create it's own CUDA-context which
would pollute GPU memory (if on GPU).
"""
fn = self.current_transform(stage=stage, current_fn="per_sample_transform_on_device")
if isinstance(sample, list):
return [fn(s) for s in sample]
return fn(sample)
def _per_batch_transform_on_device(self, batch: Any, stage: RunningStage) -> Any:
"""Transforms to apply to a whole batch (if possible use this for efficiency).
.. note:: This function won't be called within the dataloader workers, since to make that happen each of
the workers would have to create it's own CUDA-context which would pollute GPU memory (if on GPU).
"""
return self.current_transform(stage=stage, current_fn="per_batch_transform_on_device")(batch)
#############
# UTILITIES #
#############
def inject_collate_fn(self, collate_fn: Callable):
# For all the stages possible, set collate function
if collate_fn is not default_collate:
for stage in RunningStage:
if stage not in [RunningStage.SANITY_CHECKING, RunningStage.TUNING]:
self._transform[stage].transforms[InputTransformPlacement.COLLATE.value] = collate_fn
def _populate_transforms_for_stage(self, running_stage: RunningStage):
transform, collate_in_worker = self.__check_transforms(
transform=self.__resolve_transforms(running_stage), stage=running_stage
)
if self._transform is None:
self._transform = {}
self._transform[running_stage] = _InputTransformPerStage(
collate_in_worker_from_transform=collate_in_worker,
transforms=transform,
)
    def __resolve_transforms(self, running_stage: RunningStage) -> Optional[Dict[str, Callable]]:
        """Collect the user-provided transform for every hook placement of ``running_stage``.

        For each placement (per_sample_transform, collate, ...) the plain hook and its
        ``ApplyToKeyPrefix``-specialised variants are resolved against the stage prefix;
        the winning callables are composed into a single function per placement.

        Raises:
            MisconfigurationException: if both the plain and the apply-to-key hook are
                overridden with the same degree of stage specialisation, or if a hook
                does not return a callable.
        """
        from flash.core.data.data_pipeline import DataPipeline

        transforms_out = {}
        stage = _STAGES_PREFIX[running_stage]

        # iterate over all transforms hook names
        for transform_name in InputTransformPlacement:

            transforms = {}
            transform_name = transform_name.value

            # iterate over all apply-to-key prefixes
            for key in ApplyToKeyPrefix:
                # get the resolved hook name based on the current stage
                resolved_name = DataPipeline._resolve_function_hierarchy(
                    transform_name, self, running_stage, InputTransform
                )
                # check if the hook name is specialized (carries the stage prefix)
                is_specialized_name = resolved_name.startswith(stage)

                # get the resolved hook name for apply-to-key on the current stage
                resolved_apply_to_key_name = DataPipeline._resolve_function_hierarchy(
                    f"{key}_{transform_name}", self, running_stage, InputTransform
                )
                # check if the resolved hook name for apply-to-key is specialized
                is_specialized_apply_to_key_name = resolved_apply_to_key_name.startswith(stage)

                # check if they are overridden by the user
                resolve_name_overridden = DataPipeline._is_overridden(resolved_name, self, InputTransform)
                resolved_apply_to_key_name_overridden = DataPipeline._is_overridden(
                    resolved_apply_to_key_name, self, InputTransform
                )

                if resolve_name_overridden and resolved_apply_to_key_name_overridden:
                    # When both hooks are overridden, exactly one of them must be stage-
                    # specialized; the specialized one wins. Otherwise it is ambiguous.
                    if not (is_specialized_name ^ is_specialized_apply_to_key_name):
                        raise MisconfigurationException(
                            f"Only one of {resolved_name} or {resolved_apply_to_key_name} can be overridden."
                        )
                    method_name = resolved_name if is_specialized_name else resolved_apply_to_key_name
                else:
                    method_name = resolved_apply_to_key_name if resolved_apply_to_key_name_overridden else resolved_name

                # get the associated transform (the hook returns the callable to apply)
                try:
                    fn = getattr(self, method_name)()
                except AttributeError as e:
                    raise AttributeError(str(e) + ". Hint: Call super().__init__(...) after setting all attributes.")

                if fn is None:
                    continue

                if not callable(fn):
                    raise MisconfigurationException(f"The hook {method_name} should return a function.")

                # wrap apply-to-key hooks into `ApplyToKeys` with the associated key.
                if method_name == resolved_apply_to_key_name:
                    fn = ApplyToKeys(key.value, fn)

                if method_name not in transforms:
                    transforms[method_name] = fn

            # store the transforms, composing when more than one hook applies.
            if transforms:
                transforms = list(transforms.values())
                transforms_out[transform_name] = Compose(transforms) if len(transforms) > 1 else transforms[0]

        return transforms_out
def __check_transforms(
self, transform: Optional[Dict[str, Callable]], stage: RunningStage
) -> Tuple[Optional[Dict[str, Callable]], Optional[bool]]:
if transform is None:
return transform
keys_diff = set(transform.keys()).difference([v.value for v in InputTransformPlacement])
if len(keys_diff) > 0:
raise MisconfigurationException(
f"{stage}_transform contains {keys_diff}. Only {_INPUT_TRANSFORM_FUNCS} keys are supported."
)
is_per_batch_transform_in = "per_batch_transform" in transform
is_per_sample_transform_on_device_in = "per_sample_transform_on_device" in transform
if is_per_batch_transform_in and is_per_sample_transform_on_device_in:
raise MisconfigurationException(
f"{transform}: `per_batch_transform` and `per_sample_transform_on_device` are mutually exclusive."
)
collate_in_worker: Optional[bool] = None
if is_per_batch_transform_in or (not is_per_batch_transform_in and not is_per_sample_transform_on_device_in):
collate_in_worker = True
elif is_per_sample_transform_on_device_in:
collate_in_worker = False
return transform, collate_in_worker
    @staticmethod
    def _identity(x: Any) -> Any:
        """Pass-through used wherever a stage has no real transform to apply."""
        return x
    def __str__(self) -> str:
        # Human-readable summary: the current running stage plus the per-stage transform table.
        return f"{self.__class__.__name__}(" + f"running_stage={self.running_stage}, transform={self._transform})"
    def __getitem__(self, placement: InputTransformPlacement) -> Callable:
        # NOTE(review): elsewhere in this class ``self._transform`` is keyed by
        # RunningStage, yet here it is indexed with an InputTransformPlacement —
        # confirm which key type is actually intended.
        return self._transform[placement]
@dataclass
class LambdaInputTransform(InputTransform):
    """``InputTransform`` wrapping a single callable applied as the per-sample transform."""

    # Callable applied to every sample; defaults to the identity (no-op).
    transform: Callable = InputTransform._identity

    def per_sample_transform(self) -> Callable:
        """Return the wrapped callable."""
        return self.transform
def create_or_configure_input_transform(
    transform: INPUT_TRANSFORM_TYPE,
    transform_kwargs: Optional[Dict] = None,
) -> Optional[InputTransform]:
    """Normalise the accepted ``transform`` spellings into an ``InputTransform`` instance.

    Accepts an ``InputTransform`` instance (returned as-is), an ``InputTransform``
    subclass (deprecated; instantiated with ``transform_kwargs``), a ``functools.partial``
    (called with ``transform_kwargs``), a plain callable (wrapped in a
    ``LambdaInputTransform``), or a falsy value (yields ``None``).

    Raises:
        MisconfigurationException: if ``transform`` matches none of the supported forms.
    """
    if not transform_kwargs:
        transform_kwargs = {}

    if isinstance(transform, InputTransform):
        return transform

    if inspect.isclass(transform) and issubclass(transform, InputTransform):
        # Passing the class plus kwargs is deprecated since v0.8.0 (removal in v0.9.0).
        rank_zero_warn(
            "Please pass an instantiated object of the `InputTransform` class. Passing the Class and keyword arguments"
            " separately has been deprecated since v0.8.0 and will be removed in v0.9.0.",
            stacklevel=8,
            category=FutureWarning,
        )
        return transform(**transform_kwargs)

    if isinstance(transform, partial):
        return transform(**transform_kwargs)

    # ``callable()`` is the idiomatic check; ``isinstance(x, typing.Callable)`` is discouraged.
    if callable(transform):
        return LambdaInputTransform(
            transform=transform,
            **transform_kwargs,
        )

    if not transform:
        return None

    raise MisconfigurationException(f"The format for the transform isn't correct. Found {transform}")
class _InputTransformProcessor:
    """Encapsulate the stage functions of an ``InputTransform`` object.

    Inside a worker:
        per_sample_transform: Function to transform an individual sample
        collate: Function to merge samples into a batch
        per_batch_transform: Function to transform an individual batch

    Inside main process:
        per_sample_transform_on_device: Function to transform an individual sample
        collate: Function to merge samples into a batch
        per_batch_transform_on_device: Function to transform an individual batch
    """

    def __init__(
        self,
        input_transform: InputTransform,
        collate_fn: Callable,
        per_sample_transform: Callable,
        per_batch_transform: Callable,
        stage: RunningStage,
        apply_per_sample_transform: bool = True,
        on_device: bool = False,
    ):
        super().__init__()
        self.input_transform = input_transform
        # ControlFlow fans each event out to every registered data callback.
        self.callback = ControlFlow(self.input_transform.callbacks or [])
        self.collate_fn = collate_fn
        self.per_sample_transform = per_sample_transform
        self.per_batch_transform = per_batch_transform
        self.apply_per_sample_transform = apply_per_sample_transform
        self.stage = stage
        self.on_device = on_device

    def __call__(self, samples: Sequence[Any]) -> Any:
        """Run per-sample transform(s), collate, then the per-batch transform on ``samples``."""
        if not self.on_device:
            # on_load_sample callbacks only fire on the worker side, before any transform.
            for sample in samples:
                self.callback.on_load_sample(sample, self.stage)

        if self.apply_per_sample_transform:
            # Normalise to a list so single samples and sample lists share one code path.
            if not isinstance(samples, list):
                list_samples = [samples]
            else:
                list_samples = samples

            transformed_samples = [self.per_sample_transform(sample, self.stage) for sample in list_samples]

            for sample in transformed_samples:
                if self.on_device:
                    self.callback.on_per_sample_transform_on_device(sample, self.stage)
                else:
                    self.callback.on_per_sample_transform(sample, self.stage)

            collated_samples = self.collate_fn(transformed_samples, self.stage)
            self.callback.on_collate(collated_samples, self.stage)
        else:
            # Collation already happened upstream; pass the batch through untouched.
            collated_samples = samples

        transformed_collated_samples = self.per_batch_transform(collated_samples, self.stage)
        if self.on_device:
            self.callback.on_per_batch_transform_on_device(transformed_collated_samples, self.stage)
        else:
            self.callback.on_per_batch_transform(transformed_collated_samples, self.stage)
        return transformed_collated_samples

    def __str__(self) -> str:
        # todo: define repr function which would take object and string attributes to be shown
        return (
            "_InputTransformProcessor:\n"
            f"\t(per_sample_transform): {str(self.per_sample_transform)}\n"
            f"\t(collate_fn): {str(self.collate_fn)}\n"
            f"\t(per_batch_transform): {str(self.per_batch_transform)}\n"
            f"\t(apply_per_sample_transform): {str(self.apply_per_sample_transform)}\n"
            f"\t(on_device): {str(self.on_device)}\n"
            f"\t(stage): {str(self.stage)}"
        )
def __make_collates(input_transform: InputTransform, on_device: bool, collate: Callable) -> Tuple[Callable, Callable]:
    """Split ``collate`` between worker and device.

    Returns ``(worker_collate, device_collate)``: the real collate runs on the device
    when ``on_device`` is True and in the DataLoader worker otherwise; the identity
    function fills the other slot.
    """
    identity = input_transform._identity
    return (identity, collate) if on_device else (collate, identity)
def __configure_worker_and_device_collate_fn(
    running_stage: RunningStage, input_transform: InputTransform
) -> Tuple[Callable, Callable]:
    """Decide where collation runs for ``running_stage`` and return ``(worker_collate_fn, device_collate_fn)``.

    Raises:
        MisconfigurationException: if both ``per_batch_transform`` and
            ``per_sample_transform_on_device`` are overridden while no explicit
            collate-in-worker flag was recorded for the stage.
    """
    from flash.core.data.data_pipeline import DataPipeline

    prefix: str = _STAGES_PREFIX[running_stage]
    transform_for_stage: _InputTransformPerStage = input_transform._transform[running_stage]

    per_batch_transform_overridden: bool = DataPipeline._is_overridden_recursive(
        "per_batch_transform", input_transform, InputTransform, prefix=prefix
    )

    per_sample_transform_on_device_overridden: bool = DataPipeline._is_overridden_recursive(
        "per_sample_transform_on_device", input_transform, InputTransform, prefix=prefix
    )

    is_per_overridden = per_batch_transform_overridden and per_sample_transform_on_device_overridden
    if transform_for_stage.collate_in_worker_from_transform is None and is_per_overridden:
        raise MisconfigurationException(
            f"{input_transform.__class__.__name__}: `per_batch_transform` and `per_sample_transform_on_device` "
            f"are mutually exclusive for stage {running_stage}"
        )

    if isinstance(transform_for_stage.collate_in_worker_from_transform, bool):
        # Explicit flag wins: collate on device iff the flag says "not in worker".
        worker_collate_fn, device_collate_fn = __make_collates(
            input_transform, not transform_for_stage.collate_in_worker_from_transform, input_transform._collate
        )
    else:
        # No explicit flag: collate on device only when a device-side per-sample transform exists.
        worker_collate_fn, device_collate_fn = __make_collates(
            input_transform, per_sample_transform_on_device_overridden, input_transform._collate
        )

    # Unwrap a previously-built processor so only the raw collate is chained, not the whole pipeline.
    worker_collate_fn = (
        worker_collate_fn.collate_fn if isinstance(worker_collate_fn, _InputTransformProcessor) else worker_collate_fn
    )

    return worker_collate_fn, device_collate_fn
def create_worker_input_transform_processor(
    running_stage: RunningStage, input_transform: InputTransform
) -> _InputTransformProcessor:
    """Build the ``_InputTransformProcessor`` used as the DataLoader ``collate_fn``."""
    worker_collate_fn, _unused_device_collate = __configure_worker_and_device_collate_fn(
        running_stage=running_stage, input_transform=input_transform
    )
    return _InputTransformProcessor(
        input_transform,
        worker_collate_fn,
        input_transform._per_sample_transform,
        input_transform._per_batch_transform,
        running_stage,
    )
def create_device_input_transform_processor(
    running_stage: RunningStage, input_transform: InputTransform
) -> _InputTransformProcessor:
    """Build the ``_InputTransformProcessor`` run by the DataModule ``on_after_batch_transfer`` hook."""
    _unused_worker_collate, device_collate_fn = __configure_worker_and_device_collate_fn(
        running_stage=running_stage, input_transform=input_transform
    )
    return _InputTransformProcessor(
        input_transform,
        device_collate_fn,
        input_transform._per_sample_transform_on_device,
        input_transform._per_batch_transform_on_device,
        running_stage,
        # A real device collate implies samples still need the per-sample pass here.
        apply_per_sample_transform=device_collate_fn != input_transform._identity,
        on_device=True,
    )
| 38.09061 | 120 | 0.648674 | 39,821 | 0.861144 | 0 | 0 | 35,682 | 0.771636 | 0 | 0 | 25,446 | 0.550279 |
10bc0a151128639eac20d774f3975e3ffce78b0c | 1,768 | py | Python | scripts/ranger/system_requests.py | scil/ansible-ambari-manager | ec6e8406e0475dac8627d2539bf2363c98ace399 | [
"Apache-2.0"
] | 10 | 2017-08-21T09:45:42.000Z | 2020-10-02T12:13:55.000Z | scripts/ranger/system_requests.py | scil/ansible-ambari-manager | ec6e8406e0475dac8627d2539bf2363c98ace399 | [
"Apache-2.0"
] | 1 | 2018-03-29T11:14:45.000Z | 2018-03-29T11:14:45.000Z | scripts/ranger/system_requests.py | scil/ansible-ambari-manager | ec6e8406e0475dac8627d2539bf2363c98ace399 | [
"Apache-2.0"
] | 5 | 2017-11-23T11:33:18.000Z | 2019-12-16T09:59:12.000Z | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import pwd
import subprocess
import sys
from audit_logger import AuditLogger
class SystemRequests:
    """Run shell commands as a given OS user, logging them through the Ranger audit logger."""

    def __init__(self, type):
        # Route all audit messages through a logger named after the request type.
        AuditLogger.initialize_logger(type)
        AuditLogger.info("Initializing Logger for Ranger Audits [" + type + "]")

    def execute_command(self, command, user, max_log_length=sys.maxsize):
        """Run ``command`` via ``su <user> -l -s /bin/bash -c ...`` and return ``(returncode, stdout)``.

        The logged command line is truncated to ``max_log_length`` characters.
        Bug fix: ``sys.maxint`` was removed in Python 3 (AttributeError at class-creation
        time); ``sys.maxsize`` exists on both Python 2 and 3 and serves the same
        "effectively unbounded" default.
        """
        env = os.environ.copy()
        subprocess_command = ["su", user, "-l", "-s", "/bin/bash", "-c", command]
        log_subprocess_command = " ".join(subprocess_command)
        if len(log_subprocess_command) >= max_log_length:
            log_subprocess_command = log_subprocess_command[:max_log_length] + '...'
        AuditLogger.info("RUNNING COMMAND: " + log_subprocess_command)
        # stderr is folded into stdout so callers receive a single combined stream.
        proc = subprocess.Popen(subprocess_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, cwd=None, env=env)
        result = proc.communicate()
        proc_stdout = result[0]
        proc_returncode = proc.returncode
        return proc_returncode, proc_stdout
10bd99ec8ccedb03c569281fb82814ef2e18a1af | 44 | py | Python | europython-2018/code/simple_bind/run.py | svenstaro/talks | 0462268a8c684dde65aceb2fb98644cb655c5013 | [
"MIT"
] | 5 | 2018-07-26T10:45:41.000Z | 2020-08-16T17:45:51.000Z | europython-2018/code/simple_bind/run.py | svenstaro/talks | 0462268a8c684dde65aceb2fb98644cb655c5013 | [
"MIT"
] | null | null | null | europython-2018/code/simple_bind/run.py | svenstaro/talks | 0462268a8c684dde65aceb2fb98644cb655c5013 | [
"MIT"
] | 1 | 2020-10-02T22:09:15.000Z | 2020-10-02T22:09:15.000Z | from europython import hello
hello("Alisa")
| 14.666667 | 28 | 0.795455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.159091 |
10bdd4d3723e55e8ab379a60437ccd0b2d7f972d | 8,587 | py | Python | describe.py | Antip003/logistic_regression | 5b80e56386d377c6d6c2d0d16dc7b31992b30ad8 | [
"MIT"
] | null | null | null | describe.py | Antip003/logistic_regression | 5b80e56386d377c6d6c2d0d16dc7b31992b30ad8 | [
"MIT"
] | null | null | null | describe.py | Antip003/logistic_regression | 5b80e56386d377c6d6c2d0d16dc7b31992b30ad8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import csv
import datetime
import math
from tabulate import tabulate
import scipy.stats as st
from tqdm import tqdm
import numpy as np
np.seterr(all='ignore')
def isfloat(val):
    """Return True if ``val`` converts to a non-NaN float (infinities count as floats)."""
    try:
        number = float(val)
    except (TypeError, ValueError):
        # Narrowed from a bare ``except``: only conversion failures mean "not a float".
        # A bare except would also swallow KeyboardInterrupt/SystemExit.
        return False
    return not math.isnan(number)
class Describe:
    """Compute pandas-``describe``-like statistics for the numeric columns of a CSV file."""

    def __init__(self, filename):
        self.filename = filename
        self.content = []   # list of per-row dicts holding only the numeric fields
        self.listed = {}    # column name -> list of raw values (sorted later)
        self.mean = {}
        self.count = {}
        self.columns = []   # names of the numeric columns, in sorted order after Print()
        self.min = {}
        self.max = {}
        self.std = {}
        self.Q25 = {}
        self.Q50 = {}
        self.Q75 = {}
        self.iqr = {}
        self.range = {}
        self.best_dist = {}    # column -> best-fitting scipy distribution name
        self.dist_params = {}  # column -> fitted parameters (see NOTE in get_best_distribution)
        self.dist_pval = {}    # column -> KS-test p-value of the best fit

    def ReadFile(self):
        """Load the CSV, keeping float-convertible fields and turning 'Birthday' into a timestamp."""
        with open(self.filename, 'r') as file:
            coco = csv.DictReader(file)
            for row in coco:
                del row['Index']
                newrow = {}
                for k, v in row.items():
                    if isfloat(v):
                        newrow[k] = float(v)
                        if k not in self.listed.keys():
                            self.listed[k] = [float(v)]
                        else:
                            self.listed[k] += [float(v)]
                    elif k == 'Birthday':
                        # 'YYYY-MM-DD' -> POSIX timestamp so dates can be treated numerically.
                        split = v.split('-')
                        year, month, day = int(split[0]), int(split[1]), int(split[2])
                        newrow[k] = datetime.datetime(year, month, day, 0, 0).timestamp()
                        if k not in self.listed.keys():
                            self.listed[k] = [newrow[k]]
                        else:
                            self.listed[k] += [newrow[k]]
                self.content += [newrow]

    def FilterNumerics(self):
        """Register the columns of the first parsed row and zero-initialise their stats."""
        for k, v in self.content[0].items():
            try:
                float(v)
                self.columns += [k]
                self.mean[k] = 0
                self.count[k] = 0
                self.std[k] = 0
                self.min[k] = 0
                self.max[k] = 0
            except:
                pass

    def GetCount(self):
        """Count the non-missing values per column."""
        for x in self.content:
            for k, v in x.items():
                self.count[k] += 1

    def GetMean(self):
        """Accumulate the arithmetic mean per column (incremental v / count sum)."""
        for x in self.content:
            for k, v in x.items():
                self.mean[k] += v / self.count[k]

    def GetStd(self):
        """Compute the population standard deviation (divides by count, not count - 1)."""
        for x in self.content:
            for k, v in x.items():
                self.std[k] += (v - self.mean[k]) ** 2 / self.count[k]
        for k, v in self.std.items():
            self.std[k] = math.sqrt(self.std[k])

    def GetQMinMax(self):
        """Sort each column and compute min/max/range plus linearly interpolated quartiles."""
        for k in self.listed.keys():
            self.listed[k] = sorted(self.listed[k])
            if self.listed[k] != []:
                self.min[k] = self.listed[k][0]
                self.max[k] = self.listed[k][-1]
                self.range[k] = self.max[k] - self.min[k]
            else:
                continue
            # 1-based percentile "L" positions for linear interpolation.
            L25 = (self.count[k] + 1) * 0.25
            L50 = (self.count[k] + 1) * 0.5
            L75 = (self.count[k] + 1) * 0.75
            try:
                P25 = self.listed[k][int(L25)] + (L25 - int(L25)) * (self.listed[k][int(L25) + 1] - self.listed[k][int(L25)])
                # NOTE(review): the next two lines use ``int(L25)`` as the lower index of the
                # interpolation delta instead of ``int(L50)`` / ``int(L75)`` — this looks like
                # a copy-paste bug; confirm against the intended percentile formula.
                P50 = self.listed[k][int(L50)] + (L50 - int(L50)) * (self.listed[k][int(L50) + 1] - self.listed[k][int(L25)])
                P75 = self.listed[k][int(L75)] + (L75 - int(L75)) * (self.listed[k][int(L75) + 1] - self.listed[k][int(L25)])
            except:
                # Index out of range on tiny columns: fall back to the first element.
                P25 = self.listed[k][0]
                P50 = self.listed[k][0]
                P75 = self.listed[k][0]
            self.Q25[k] = P25
            self.Q50[k] = P50
            self.Q75[k] = P75
            self.iqr[k] = P75 - P25

    def get_best_distribution(self):
        """Fit several scipy distributions per column and keep the best KS-test p-value.

        NOTE(review): ``dist_results`` is created once before the column loop, so results
        accumulate across columns and ``max(...)`` can pick a fit from an earlier column.
        NOTE(review): ``params[dist_name]`` below stores the *last* fitted distribution's
        parameters, not the best one's — presumably ``params[best_dist]`` was intended.
        """
        dist_names = ["norm", "exponweib", "weibull_max", "weibull_min", "pareto", "genextreme"]
        dist_results = []
        params = {}
        with tqdm(total=len(self.listed.keys()) * len(dist_names)) as tq:
            for k in self.listed.keys():
                for dist_name in dist_names:
                    dist = getattr(st, dist_name)
                    param = dist.fit(self.listed[k])
                    params[dist_name] = param
                    # Applying the Kolmogorov-Smirnov test
                    D, p = st.kstest(self.listed[k], dist_name, args=param)
                    dist_results.append((dist_name, p))
                    tq.update(1)
                # select the best fitted distribution
                best_dist, best_p = (max(dist_results, key=lambda item: item[1]))
                self.best_dist[k] = best_dist
                self.dist_params[k] = params[dist_name]
                self.dist_pval[k] = best_p

    def Describe(self):
        """Run all statistics passes; distribution fitting only with the ``-dist`` flag."""
        self.GetCount()
        self.GetMean()
        self.GetStd()
        self.GetQMinMax()
        if len(sys.argv) > 2 and sys.argv[2] == "-dist":
            self.get_best_distribution()

    def Print(self):
        """Sort all per-column stats alphabetically and print them as an aligned table."""
        self.columns = sorted(self.columns)
        if len(sys.argv) > 2 and sys.argv[2] == "-dist":
            # Append the fitted distribution name to each column header.
            i = 0
            for k, v in self.best_dist.items():
                self.columns[i] += '\n(' + v + ')'
                i += 1
        # Re-order every stats dict by column name so rows line up with the sorted headers.
        self.mean = {k: v for k, v in sorted(self.mean.items(), key=lambda item: item[0])}
        self.count = {k: v for k, v in sorted(self.count.items(), key=lambda item: item[0])}
        self.min = {k: v for k, v in sorted(self.min.items(), key=lambda item: item[0])}
        self.max = {k: v for k, v in sorted(self.max.items(), key=lambda item: item[0])}
        self.std = {k: v for k, v in sorted(self.std.items(), key=lambda item: item[0])}
        self.Q25 = {k: v for k, v in sorted(self.Q25.items(), key=lambda item: item[0])}
        self.Q50 = {k: v for k, v in sorted(self.Q50.items(), key=lambda item: item[0])}
        self.Q75 = {k: v for k, v in sorted(self.Q75.items(), key=lambda item: item[0])}
        self.iqr = {k: v for k, v in sorted(self.iqr.items(), key=lambda item: item[0])}
        self.range = {k: v for k, v in sorted(self.range.items(), key=lambda item: item[0])}
        self.best_dist = {k: v for k, v in sorted(self.best_dist.items(), key=lambda item: item[0])}
        columns = [''] + self.columns
        print(tabulate([
            ['Count'] + list(self.count.values()),
            ['Mean'] + list(self.mean.values()),
            ['Std'] + list(self.std.values()),
            ['Min'] + list(self.min.values()),
            ['25%'] + list(self.Q25.values()),
            ['50%'] + list(self.Q50.values()),
            ['75%'] + list(self.Q75.values()),
            ['Max'] + list(self.max.values()),
            ['IQR'] + list(self.iqr.values()),
            ['Range'] + list(self.range.values())], headers=columns, tablefmt='plain', floatfmt=".6f"))
        #print(tabulate([
        #    ['Distribution'] + list(self.best_dist.values())], headers=columns, tablefmt='plain', floatfmt=".6f"))

    def ConvertBirthday(self):
        """Render 'Birthday' stats as dates (point stats) or day counts (spread stats).

        NOTE(review): std/iqr/range are durations, yet they are converted through
        ``datetime.fromtimestamp`` (i.e. interpreted as offsets from the epoch) before
        taking ``.days`` — this is an approximation, not an exact day count.
        """
        start = datetime.datetime.fromtimestamp(0)
        self.mean['Birthday'] = datetime.datetime.fromtimestamp(self.mean['Birthday']).strftime('%Y-%m-%d')
        self.std['Birthday'] = str((datetime.datetime.fromtimestamp(self.std['Birthday']) - start).days) + '(d)'
        self.min['Birthday'] = datetime.datetime.fromtimestamp(self.min['Birthday']).strftime('%Y-%m-%d')
        self.max['Birthday'] = datetime.datetime.fromtimestamp(self.max['Birthday']).strftime('%Y-%m-%d')
        self.Q25['Birthday'] = datetime.datetime.fromtimestamp(self.Q25['Birthday']).strftime('%Y-%m-%d')
        self.Q50['Birthday'] = datetime.datetime.fromtimestamp(self.Q50['Birthday']).strftime('%Y-%m-%d')
        self.Q75['Birthday'] = datetime.datetime.fromtimestamp(self.Q75['Birthday']).strftime('%Y-%m-%d')
        self.iqr['Birthday'] = str((datetime.datetime.fromtimestamp(self.iqr['Birthday']) - start).days) + '(d)'
        self.range['Birthday'] = str((datetime.datetime.fromtimestamp(self.range['Birthday']) - start).days) + '(d)'
        pass

    def __call__(self):
        """Full pipeline: read, select numeric columns, compute stats, format, print."""
        self.ReadFile()
        self.FilterNumerics()
        self.Describe()
        self.ConvertBirthday()
        self.Print()
def main():
    # Describe the dataset named on the command line; __call__ runs the full pipeline.
    best_class = Describe(sys.argv[1])
    best_class()
def CheckArgs():
    """Exit with a usage message unless a dataset filename was supplied on the CLI."""
    if len(sys.argv) < 2:
        print(f"Usage: {__file__} <dataset_name.csv> <flags>")
        # ``sys.exit(1)`` instead of the bare ``exit()`` builtin: ``exit`` is meant for
        # interactive use (absent under ``python -S``), and a usage error should
        # report a nonzero status instead of the previous implicit 0.
        sys.exit(1)
if __name__ == '__main__':
    # Validate the CLI arguments before doing any work.
    CheckArgs()
    main()
| 37.17316 | 125 | 0.501572 | 7,977 | 0.928962 | 0 | 0 | 0 | 0 | 0 | 0 | 718 | 0.083615 |
10be6da56035fd9ae609938d9ac287a00e7f04d6 | 1,446 | py | Python | package.py | wolfogre/notify-github-release | d39b53b78438e242209e745b34d6615ae09875dc | [
"MIT"
] | 1 | 2018-08-12T09:16:23.000Z | 2018-08-12T09:16:23.000Z | package.py | wolfogre/notify-github-release | d39b53b78438e242209e745b34d6615ae09875dc | [
"MIT"
] | 1 | 2018-07-11T05:39:42.000Z | 2018-07-11T05:39:42.000Z | package.py | wolfogre/notify-github-release | d39b53b78438e242209e745b34d6615ae09875dc | [
"MIT"
] | null | null | null | import os
import shutil
from modulefinder import ModuleFinder
def main():
temp_dir = "package_temp"
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
os.makedirs(temp_dir)
for py in ["index.py", "notifier.py"]:
src, dst = py, os.path.join(temp_dir, py)
print("copy '%s' to '%s'" % (src, dst))
shutil.copy(src, dst)
print("analysing modules ...")
finder = ModuleFinder()
finder.run_script("index.py")
module_paths = set()
for name, mod in finder.modules.items():
if mod.__path__ and "site-packages" in mod.__path__[0]:
path = mod.__path__[0]
while os.path.basename(os.path.dirname(path)) != "site-packages":
path = os.path.dirname(path)
if path not in module_paths:
src, dst = path, os.path.join(temp_dir, os.path.basename(path))
print("copy '%s' from '%s' to '%s'" % (name, src, dst))
shutil.copytree(src, dst, ignore=shutil.ignore_patterns("__pycache__", "*.pyc"))
module_paths.add(path)
zip_file = "notify-github-release"
print("zipping %s to %s.zip ..." % (temp_dir, zip_file))
if os.path.exists(zip_file + ".zip"):
os.remove(zip_file + ".zip")
shutil.make_archive(zip_file, 'zip', temp_dir)
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
print("done")
if __name__ == '__main__':
main()
| 30.125 | 96 | 0.591286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 250 | 0.172891 |
529b7c74855c1ebbaf382f7acad7feee7b497ce2 | 894 | py | Python | dataset_loaders/test/data_augmentation/_verify_flip_axis.py | dendisuhubdy/fc-drn | 10656d254b450539b50c7c9e38b5dde7ace7c2c3 | [
"MIT"
] | 9 | 2018-08-02T06:50:47.000Z | 2021-08-09T06:55:42.000Z | dataset_loaders/test/data_augmentation/_verify_flip_axis.py | dendisuhubdy/fc-drn | 10656d254b450539b50c7c9e38b5dde7ace7c2c3 | [
"MIT"
] | null | null | null | dataset_loaders/test/data_augmentation/_verify_flip_axis.py | dendisuhubdy/fc-drn | 10656d254b450539b50c7c9e38b5dde7ace7c2c3 | [
"MIT"
] | 5 | 2019-01-22T13:02:25.000Z | 2020-04-07T12:15:03.000Z | import numpy as np
def flip_axis(x_in, axis):
    """Reverse each sample of the batch ``x_in`` along its per-sample ``axis``.

    The original looped over ``x_in[i]``, swapped ``axis`` to the front, reversed it and
    swapped back — which is exactly flipping the whole batch along ``axis + 1``.
    ``np.flip`` does this in one call; ``.copy()`` keeps the original contract of
    returning a fresh array (``np.flip`` alone returns a view) with the same dtype.
    """
    return np.flip(x_in, axis + 1).copy()
def flip_axis_fra(x, flipping_axis):
    """Reverse ``x`` along ``flipping_axis``.

    Replaces the transpose / reverse / transpose-back dance with ``np.flip``, which
    performs the same axis reversal directly (and, like the original chain of views,
    returns a view of ``x``).
    """
    return np.flip(x, flipping_axis)
if __name__ == '__main__':
    # Sanity check: flipping per-sample axis ``axis - 1`` with flip_axis must match
    # flipping batch axis ``axis`` with flip_axis_fra.
    aa = np.random.random((10, 2, 3, 4))  # b, *, *, *
    for axis in [1, 2, 3]:
        print('Testing channel in axis {}'.format(axis))
        mm = flip_axis(aa.copy(), axis-1)
        ff = flip_axis_fra(aa.copy(), axis)
        assert np.array_equal(mm, ff)
        print('Test passed!')
529d02901aebe50198c7119ce11dba7ba389558e | 6,932 | py | Python | maya/Tests/joint_test.py | ryu-sw/alembic | 395450bad88f9d5ed6d20612e9201aac93a5eb54 | [
"MIT"
] | 921 | 2015-01-03T11:04:38.000Z | 2022-03-29T06:38:34.000Z | maya/Tests/joint_test.py | ryu-sw/alembic | 395450bad88f9d5ed6d20612e9201aac93a5eb54 | [
"MIT"
] | 264 | 2015-01-05T17:15:45.000Z | 2022-03-28T20:14:51.000Z | maya/Tests/joint_test.py | ryu-sw/alembic | 395450bad88f9d5ed6d20612e9201aac93a5eb54 | [
"MIT"
] | 276 | 2015-01-12T01:34:20.000Z | 2022-03-08T09:19:42.000Z | ##-*****************************************************************************
##
## Copyright (c) 2009-2011,
## Sony Pictures Imageworks, Inc. and
## Industrial Light & Magic, a division of Lucasfilm Entertainment Company Ltd.
##
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above
## copyright notice, this list of conditions and the following disclaimer
## in the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Sony Pictures Imageworks, nor
## Industrial Light & Magic nor the names of their contributors may be used
## to endorse or promote products derived from this software without specific
## prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##-*****************************************************************************
from maya import cmds as MayaCmds
import maya.OpenMaya as OpenMaya
import os
import subprocess
import unittest
import util
def createJoints():
    """Build a fixed 4-joint chain in the Maya scene and return the root joint's name.

    The positions/orientations/rotations are arbitrary but deterministic so the
    Alembic round-trip tests can compare exact transform values.
    """
    name = MayaCmds.joint(position=(0, 0, 0))
    MayaCmds.rotate(33.694356, 4.000428, 61.426019, r=True, ws=True)
    MayaCmds.joint(position=(0, 4, 0), orientation=(0.0, 45.0, 90.0))
    MayaCmds.rotate(62.153171, 0.0, 0.0, r=True, os=True)
    MayaCmds.joint(position=(0, 8, -1), orientation=(90.0, 0.0, 0.0))
    MayaCmds.rotate(70.245162, -33.242019, 41.673097, r=True, os=True)
    MayaCmds.joint(position=(0, 12, 3))
    MayaCmds.rotate(0.0, 0.0, -58.973851, r=True, os=True)
    return name
class JointTest(unittest.TestCase):
def setUp(self):
MayaCmds.file(new=True, force=True)
self.__files = []
self.__abcStitcher = [os.environ['AbcStitcher']]
def tearDown(self):
for f in self.__files:
os.remove(f)
def testStaticJointRW(self):
name = createJoints()
# write to file
self.__files.append(util.expandFileName('testStaticJoints.abc'))
MayaCmds.AbcExport(j='-root %s -file %s' % (name, self.__files[-1]))
MayaCmds.select(name)
MayaCmds.group(name='original')
# read from file
MayaCmds.AbcImport(self.__files[-1], mode='import')
# make sure the translate and rotation are the same
nodes1 = ["|original|joint1", "|original|joint1|joint2", "|original|joint1|joint2|joint3", "|original|joint1|joint2|joint3|joint4"]
nodes2 = ["|joint1", "|joint1|joint2", "|joint1|joint2|joint3", "|joint1|joint2|joint3|joint4"]
for i in range(0, 4):
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tx'), MayaCmds.getAttr(nodes2[i]+'.tx'), 4)
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.ty'), MayaCmds.getAttr(nodes2[i]+'.ty'), 4)
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tz'), MayaCmds.getAttr(nodes2[i]+'.tz'), 4)
def testStaticIKRW(self):
name = createJoints()
MayaCmds.ikHandle(sj=name, ee='joint4')
MayaCmds.move(-1.040057, -7.278225, 6.498725, r=True)
# write to file
self.__files.append(util.expandFileName('testStaticIK.abc'))
MayaCmds.AbcExport(j='-root %s -f %s' % (name, self.__files[-1]))
MayaCmds.select(name)
MayaCmds.group(name='original')
# read from file
MayaCmds.AbcImport(self.__files[-1], mode='import')
# make sure the translate and rotation are the same
nodes1 = ["|original|joint1", "|original|joint1|joint2", "|original|joint1|joint2|joint3", "|original|joint1|joint2|joint3|joint4"]
nodes2 = ["|joint1", "|joint1|joint2", "|joint1|joint2|joint3", "|joint1|joint2|joint3|joint4"]
for i in range(0, 4):
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tx'), MayaCmds.getAttr(nodes2[i]+'.tx'), 4)
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.ty'), MayaCmds.getAttr(nodes2[i]+'.ty'), 4)
self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tz'), MayaCmds.getAttr(nodes2[i]+'.tz'), 4)
    def testAnimIKRW(self):
        """Export an IK animation in two frame ranges, stitch the archives with
        AbcStitcher, then re-import and verify per-frame translations match."""
        name = createJoints()
        handleName = MayaCmds.ikHandle(sj=name, ee='joint4')[0]
        # Key the handle at frame 1, move it, and key again at frame 16.
        MayaCmds.currentTime(1, update=True)
        MayaCmds.setKeyframe(handleName, breakdown=0, hierarchy='none', controlPoints=False, shape=False)
        MayaCmds.currentTime(16, update=True)
        MayaCmds.move(-1.040057, -7.278225, 6.498725, r=True)
        MayaCmds.setKeyframe(handleName, breakdown=0, hierarchy='none', controlPoints=False, shape=False)
        # files[-3]: stitched output; files[-2]: frames 1-8; files[-1]: frames 9-16.
        self.__files.append(util.expandFileName('testAnimIKRW.abc'))
        self.__files.append(util.expandFileName('testAnimIKRW01_08.abc'))
        self.__files.append(util.expandFileName('testAnimIKRW09-16.abc'))
        # write to files
        MayaCmds.AbcExport(j='-fr 1 8 -root %s -f %s' % (name, self.__files[-2]))
        MayaCmds.AbcExport(j='-fr 9 16 -root %s -f %s' % (name, self.__files[-1]))
        MayaCmds.select(name)
        MayaCmds.group(name='original')
        # Stitch the two partial archives into files[-3].
        subprocess.call(self.__abcStitcher + self.__files[-3:])
        # read from file
        MayaCmds.AbcImport(self.__files[-3], mode='import')
        # make sure the translate and rotation are the same
        nodes1 = ["|original|joint1", "|original|joint1|joint2", "|original|joint1|joint2|joint3", "|original|joint1|joint2|joint3|joint4"]
        nodes2 = ["|joint1", "|joint1|joint2", "|joint1|joint2|joint3", "|joint1|joint2|joint3|joint4"]
        # NOTE(review): keys exist only for frames 1-16 but the loop samples 1-24 -- confirm intended.
        for t in range(1, 25):
            MayaCmds.currentTime(t, update=True)
            for i in range(0, 4):
                self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tx'), MayaCmds.getAttr(nodes2[i]+'.tx'), 4)
                self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.ty'), MayaCmds.getAttr(nodes2[i]+'.ty'), 4)
                self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tz'), MayaCmds.getAttr(nodes2[i]+'.tz'), 4)
| 46.837838 | 139 | 0.657819 | 4,386 | 0.632718 | 0 | 0 | 0 | 0 | 0 | 0 | 3,056 | 0.440854 |
529d121f9adcfecd640a8c4cf37ab1034c262fe9 | 3,625 | py | Python | airflow/contrib/hooks/gcp_pubsub_hook.py | diggzhang/airflow-dingit | 41482b83130d5815b772840681fb36eb9bfa69b9 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 6 | 2016-04-20T20:40:43.000Z | 2022-02-20T10:32:00.000Z | airflow/contrib/hooks/gcp_pubsub_hook.py | diggzhang/airflow-dingit | 41482b83130d5815b772840681fb36eb9bfa69b9 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 13 | 2018-11-30T18:18:32.000Z | 2021-02-19T17:04:12.000Z | airflow/contrib/hooks/gcp_pubsub_hook.py | diggzhang/airflow-dingit | 41482b83130d5815b772840681fb36eb9bfa69b9 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 8 | 2016-04-13T21:22:46.000Z | 2020-07-31T18:31:59.000Z | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from apiclient.discovery import build
from apiclient import errors
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
def _format_topic(project, topic):
return 'projects/%s/topics/%s' % (project, topic)
class PubSubHook(GoogleCloudBaseHook):
    """Hook for accessing Google Pub/Sub.
    The GCP project against which actions are applied is determined by
    the project embedded in the Connection referenced by gcp_conn_id.
    """
    def __init__(self,
                 gcp_conn_id='google_cloud_default',
                 delegate_to=None):
        # Credential handling is delegated entirely to the base GCP hook.
        super(PubSubHook, self).__init__(gcp_conn_id, delegate_to=delegate_to)
    def get_conn(self):
        """Returns a Pub/Sub service object.
        :rtype: apiclient.discovery.Resource
        """
        http_authorized = self._authorize()
        return build('pubsub', 'v1', http=http_authorized)
    def publish(self, project, topic, messages):
        """Publishes messages to a Pub/Sub topic.
        :param project: the GCP project name or ID in which to publish
        :type project: string
        :param topic: the Pub/Sub topic to which to publish; do not
            include the 'projects/{project}/topics/' prefix.
        :type topic: string
        :param messages: messages to publish; if the data field in a
            message is set, it should already be base64 encoded.
        :type messages: list of PubSub messages; see
            http://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
        :raises Exception: if the publish API call fails.
        """
        body = {'messages': messages}
        full_topic = _format_topic(project, topic)
        request = self.get_conn().projects().topics().publish(
            topic=full_topic, body=body)
        try:
            request.execute()
        except errors.HttpError as e:
            # Re-wrap the HTTP error with the topic for easier triage upstream.
            raise Exception('Error publishing to topic %s' % full_topic, e)
    def create_topic(self, project, topic, fail_if_exists=False):
        """Creates a Pub/Sub topic, if it does not already exist.
        :param project: the GCP project name or ID in which to create
            the topic
        :type project: string
        :param topic: the Pub/Sub topic name to create; do not
            include the 'projects/{project}/topics/' prefix.
        :type topic: string
        :param fail_if_exists: if set, raise an exception if the topic
            already exists
        :type fail_if_exists: bool
        :raises Exception: on API failure, or on an existing topic when
            fail_if_exists is set.
        """
        service = self.get_conn()
        full_topic = _format_topic(project, topic)
        try:
            service.projects().topics().create(
                name=full_topic, body={}).execute()
        except errors.HttpError as e:
            # Status code 409 indicates that the topic already exists.
            # A 409 without fail_if_exists is deliberately swallowed: an
            # existing topic satisfies the "ensure topic exists" contract.
            if str(e.resp['status']) == '409':
                if fail_if_exists:
                    raise Exception(
                        'Error creating topic. Topic already exists: %s'
                        % full_topic)
            else:
                raise Exception('Error creating topic %s' % full_topic, e)
| 38.56383 | 79 | 0.642207 | 2,824 | 0.779034 | 0 | 0 | 0 | 0 | 0 | 0 | 2,131 | 0.587862 |
529df55b21a880c44659499ee8c65dadbb2b9f31 | 1,635 | py | Python | src/repositories/note.py | notalab/api | 7afc0f9896c0d4b5f81605f671f13a9168c78380 | [
"MIT"
] | null | null | null | src/repositories/note.py | notalab/api | 7afc0f9896c0d4b5f81605f671f13a9168c78380 | [
"MIT"
] | null | null | null | src/repositories/note.py | notalab/api | 7afc0f9896c0d4b5f81605f671f13a9168c78380 | [
"MIT"
] | null | null | null | """ Defines the Note repository """
import random
import string
import time
import bcrypt
from sqlalchemy.orm import load_only
from werkzeug.exceptions import Forbidden, UnprocessableEntity
from models import Note
class NoteRepository:
    """Persistence operations for Note entities, scoped to the owning user."""

    @staticmethod
    def create(user, notebook_id, title, content):
        """Create a new note owned by *user* and return its transformed dict.

        :raises UnprocessableEntity: if the title exceeds 32 characters.
        """
        if len(title) > 32:
            # Bug fix: this previously *returned* the exception instance
            # instead of raising it, so callers received an
            # UnprocessableEntity object as if it were a note payload.
            # update() already raises for its error cases; be consistent.
            raise UnprocessableEntity(description="NOTE_TITLE_MAX_LENGTH")
        current_time = int(time.time())
        note = Note(
            notebook_id=notebook_id,
            title=title,
            content=content,
            user_id=user.id,
            created_at=current_time,
            updated_at=current_time
        )
        note.save()
        return note.transform()

    @staticmethod
    def update(user, id, title, content):
        """Update a note by ID and return its transformed dict.

        :raises UnprocessableEntity: if the title exceeds 32 characters or
            the note does not exist for this user.
        """
        if len(title) > 32:
            # Bug fix: previously returned the exception instead of raising.
            raise UnprocessableEntity(description="NOTE_TITLE_MAX_LENGTH")
        current_time = int(time.time())
        note = Note.query.filter_by(id=id, user_id=user.id).first()
        if not note:
            raise UnprocessableEntity(description="NOTE_NOT_FOUND")
        note.title = title
        note.content = content
        note.updated_at = current_time
        note.save()
        return note.transform()

    @staticmethod
    def delete(user, id):
        """Delete a note by ID; returns 200 on success.

        :raises UnprocessableEntity: if the note does not exist for this user.
        """
        note = Note.query.filter_by(id=id, user_id=user.id).first()
        if not note:
            raise UnprocessableEntity(description="NOTE_NOT_FOUND")
        note.delete()
        return 200
| 24.044118 | 75 | 0.607339 | 1,415 | 0.865443 | 0 | 0 | 1,369 | 0.837309 | 0 | 0 | 200 | 0.122324 |
52a0ea8374d3628028b18a4cfa58c3fe60049d6c | 575 | py | Python | src/main/resources/pytz/zoneinfo/Africa/Ndjamena.py | TheEin/swagger-maven-plugin | cf93dce2d5c8d3534f4cf8c612b11e2d2313871b | [
"Apache-2.0"
] | 65 | 2015-11-14T13:46:01.000Z | 2021-08-14T05:54:04.000Z | lib/pytz/zoneinfo/Africa/Ndjamena.py | tjsavage/polymer-dashboard | 19bc467f1206613f8eec646b6f2bc43cc319ef75 | [
"CNRI-Python",
"Linux-OpenIB"
] | 13 | 2016-03-31T20:00:17.000Z | 2021-08-20T14:52:31.000Z | lib/pytz/zoneinfo/Africa/Ndjamena.py | tjsavage/polymer-dashboard | 19bc467f1206613f8eec646b6f2bc43cc319ef75 | [
"CNRI-Python",
"Linux-OpenIB"
] | 20 | 2015-03-18T08:41:37.000Z | 2020-12-18T02:58:30.000Z | '''tzinfo timezone information for Africa/Ndjamena.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Ndjamena(DstTzInfo):
    '''Africa/Ndjamena timezone definition. See datetime.tzinfo for details'''
    # Generated timezone data; values come from the IANA tz database.
    zone = 'Africa/Ndjamena'
    # UTC instants at which the offset changes (first entry is the epoch floor).
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1911,12,31,22,59,48),
        d(1979,10,13,23,0,0),
        d(1980,3,7,22,0,0),
    ]
    # (utcoffset seconds, dst seconds, tzname) matching each transition above.
    _transition_info = [
        i(3600,0,'LMT'),
        i(3600,0,'WAT'),
        i(7200,3600,'WAST'),
        i(3600,0,'WAT'),
    ]
# pytz convention: the module exports a singleton instance, shadowing the class name.
Ndjamena = Ndjamena()
| 21.296296 | 78 | 0.676522 | 366 | 0.636522 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.288696 |
52a1c2d78cfb1a125c997f8dc53947a5b9217444 | 5,988 | py | Python | code_analyzer/core.py | draihal/code_analyzer | 05f56a3f44bbf9e1ccd9bd25b2fbafb631486ad7 | [
"MIT"
] | null | null | null | code_analyzer/core.py | draihal/code_analyzer | 05f56a3f44bbf9e1ccd9bd25b2fbafb631486ad7 | [
"MIT"
] | null | null | null | code_analyzer/core.py | draihal/code_analyzer | 05f56a3f44bbf9e1ccd9bd25b2fbafb631486ad7 | [
"MIT"
] | null | null | null | import logging
import os
import shutil
import tempfile
from git import Repo
from .ast_analysis import _get_all_names, _get_all_func_names, _generate_trees
from .ntlk_analysis import _get_verbs_from_function_name, _get_nouns_from_function_name
from .utils import _get_count_most_common, _get_converted_names, _convert_tpls_to_lst
# Log to a local file so analysis runs can be audited after the fact.
logging.basicConfig(
    filename='code_analyzer.log',
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%d-%b-%y %H:%M:%S',
    level=logging.INFO)
class CodeAnalyzer:
    """Code analyzer main class.

    Walks a local path (or a freshly cloned GitHub repository), parses the
    Python files into ASTs and reports the most common verbs/nouns/names.
    """
    def __init__(
            self,
            path='C:\\',
            lookup='verb',
            projects=('', ),
            top_size=10,
            len_filenames=100,
            github_path=None,
    ):
        logging.info("Program started.")
        self.path = path
        self.github_path = github_path
        self.lookup = lookup
        self.projects = projects
        self.top_size = top_size
        self.len_filenames = len_filenames
        self.words = []

    def _get_filenames(self, path):
        """
        Get up to ``self.len_filenames`` Python filenames from path.
        :param path: path
        :return: list
        """
        filenames = []
        for dirname, dirs, files in os.walk(path, topdown=True):
            for file in files:
                if file.endswith('.py'):
                    filenames.append(os.path.join(dirname, file))
                if len(filenames) >= self.len_filenames:
                    break
            # Bug fix: the cap must also stop the directory walk itself.
            # Previously only the inner loop broke (and only on exact
            # equality), so every subsequent directory kept appending
            # files past the limit.
            if len(filenames) >= self.len_filenames:
                break
        logging.info(f"Path is: {path}.")
        logging.info(f"Total {len(filenames)} files.")
        return filenames

    def _get_trees(self, path, with_filenames=False, with_file_content=False):
        """
        Returns lists of ast objects.
        :param path: path
        :return: generator of ast objects
        """
        filenames = self._get_filenames(path)
        trees = (_generate_trees(filename, with_filenames,
                                 with_file_content)[0]
                 for filename in filenames)
        logging.info("Trees generated.")
        return trees

    def _get_top_verbs_in_path(self, path):
        """
        Returns a list of tuples with words and his counts.
        :param path: path
        :return: list of tuples with words and his counts
        """
        trees = self._get_trees(path)
        fncs = _get_converted_names(trees, _get_all_func_names)
        verbs = (_get_verbs_from_function_name(function_name)
                 for function_name in fncs)
        converted_verbs = _convert_tpls_to_lst(verbs)
        return converted_verbs

    def _get_top_nouns_in_path(self, path):
        """
        Returns a list of tuples with words and his counts.
        :param path: path
        :return: list of tuples with words and his counts
        """
        trees = self._get_trees(path)
        fncs = _get_converted_names(trees, _get_all_func_names)
        nouns = (_get_nouns_from_function_name(function_name)
                 for function_name in fncs)
        converted_nouns = _convert_tpls_to_lst(nouns)
        return converted_nouns

    def _get_all_words_in_path(self, path):
        """
        Returns a list of tuples with words and his counts.
        :param path: path
        :return: list of tuples with words and his counts
        """
        trees = self._get_trees(path)
        function_names = _get_converted_names(trees, _get_all_names)
        # Split snake_case identifiers into their component words.
        all_words_in_path = ((word for word in function_name.split('_')
                              if word) for function_name in function_names)
        converted_all_words_in_path = _convert_tpls_to_lst(all_words_in_path)
        return converted_all_words_in_path

    def _get_top_functions_names_in_path(self, path):
        """
        Returns a list of function names found under path.
        :param path: path
        :return: list of function names
        """
        trees = self._get_trees(path)
        fncs = _get_converted_names(trees, _get_all_func_names)
        return fncs

    def _parse_lookup_args(self, path_):
        """
        Accumulate words for every configured project under ``path_``
        according to the selected lookup mode.
        :param path_: path
        :return: None
        """
        # verb - show statistics of the most common words by verbs
        # noun - show statistics on the most frequent words by nouns
        # funcname - show statistics of the most common words function names
        # localvarname - show statistics of the most common
        # words names of local variables inside functions
        lookups_functions = {
            'verb': self._get_top_verbs_in_path,
            'noun': self._get_top_nouns_in_path,
            'funcname': self._get_top_functions_names_in_path,
            'localvarname': self._get_all_words_in_path,
        }
        for project in self.projects:
            path = os.path.join(path_, project)
            function_for_lookup = lookups_functions.get(self.lookup)
            self.words += function_for_lookup(path)

    def parse(self):
        """
        Returns a list of tuples with words and his counts.
        :return: list of tuples with words and his counts
        """
        if self.github_path:
            # Clone into a throwaway directory and analyze the clone.
            tmpdir = tempfile.mkdtemp()
            logging.info(f'Created temporary directory: {tmpdir}.')
            Repo.clone_from(self.github_path, tmpdir)
            self._parse_lookup_args(tmpdir)
            top_words = _get_count_most_common(self.words, self.top_size)
            try:
                shutil.rmtree(tmpdir)
            except PermissionError:
                # Best effort cleanup (e.g. locked .git files on Windows).
                logging.info(
                    'Can\'t deleting temp directory. Access is denied.')
            logging.info('Done!')
            return [] if len(top_words) == 0 else top_words
        else:
            self._parse_lookup_args(self.path)
            top_words = _get_count_most_common(self.words, self.top_size)
            logging.info("Done!")
            return [] if len(top_words) == 0 else top_words
| 36.512195 | 87 | 0.609552 | 5,485 | 0.915999 | 0 | 0 | 0 | 0 | 0 | 0 | 1,747 | 0.29175 |
52a1fe39882bbc1353ccc3425c9b5c1dfc01c4d7 | 2,482 | py | Python | sort_file.py | ask-santosh/Document-Matching | 2b5a1be3e8e460029121e43b16fc676ed3874094 | [
"Apache-2.0"
] | null | null | null | sort_file.py | ask-santosh/Document-Matching | 2b5a1be3e8e460029121e43b16fc676ed3874094 | [
"Apache-2.0"
] | null | null | null | sort_file.py | ask-santosh/Document-Matching | 2b5a1be3e8e460029121e43b16fc676ed3874094 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from fuzzywuzzy import process
import Levenshtein as lev
import numpy as np
import openpyxl
# -----------------------code for first csv file-------------------------------------------
buyer_df = pd.read_csv("./results/CD_21_05/final2.csv", usecols=['PRODUCTS','UNITS', 'BATCHES', 'EXPIRY'])
# aggregation_functions
return_list = []
for i, row in buyer_df.iterrows():
return_list.append(' - '.join((row['PRODUCTS'], row['UNITS'], row['BATCHES'], row['EXPIRY'])))
# ---------------------------code for second csv file -----------------------------------------
jk_df = pd.read_csv("./results/JK_21_05/final.csv", usecols=['PRODUCTS','UNITS', 'BATCHES', 'EXPIRY'])
# aggregation_functions
purchase_list = []
for i, row in jk_df.iterrows():
purchase_list.append(' - '.join((row['PRODUCTS'], row['UNITS'], row['BATCHES'], row['EXPIRY'])))
def max5_similarities(list_of_tup):
    """Return the (up to five) highest-scoring (text, ratio) pairs, best first.

    :param list_of_tup: list of (candidate, similarity_ratio) tuples
    :return: up to five tuples sorted by descending ratio

    Bug fix: the previous bubble-sort version sliced with ``[lst - 5:]``,
    which for lists of length 3 or 4 produced a negative slice start
    (-2 or -1) and silently dropped valid candidates; ``[-5:]`` clamps
    correctly for every length.
    """
    ranked = sorted(list_of_tup, key=lambda pair: pair[1])
    return ranked[-5:][::-1]
# Rows whose best fuzzy-match ratio falls below 0.7 are flagged for highlighting.
rows_to_color = []
for i in range(len(return_list)):
    # Score every purchase candidate against this buyer row (Levenshtein ratio).
    ratios = [(x, round(lev.ratio(return_list[i], x), 3)) for x in purchase_list]
    ratios = max5_similarities(ratios)
    # NOTE(review): assumes purchase_list is non-empty; ratios[0] would raise otherwise.
    if ratios[0][1] < 0.7:
        rows_to_color.append(i)
def color_cells(x):
    """Styler callback: paint every row listed in the module-level
    ``rows_to_color`` red.

    :param x: the DataFrame being styled (passed by ``Styler.apply(axis=None)``)
    :return: DataFrame of CSS strings with the same shape as ``x``
    """
    global rows_to_color
    color = 'background-color: red'  # NOTE(review): unused; the loop repeats the literal
    df1 = pd.DataFrame('', index=x.index, columns=x.columns)
    for i in rows_to_color:
        df1.iloc[i, :] = 'background-color: red'
    return df1
# Apply the red highlighting and write the annotated buyer sheet to disk.
buyer_df.style.apply(color_cells, axis=None).to_excel('Final_Result.xlsx', engine='openpyxl', index=False)
# for pdt in max5_similarities(Ratios):
# if pdt[1]<0.7:
# c1 = 'background-color: red'
# c2 = ''
# # df1 = pd.DataFrame(c2, index=pdt.index, columns=pdt.columns)
# # print(df1)
# # df1.loc[pdt, :] = c1
# (df1.style.apply(pdt, axis=None).to_excel('styled.xlsx', engine='openpyxl', index=False))
# # if max5_similarities(Ratios) < 0.7:
# # print(max5_similarities(Ratios))
#
# # Ratios = sorted(Ratios, key=lambda x: x[1], reverse=True)
# # print(prod, "--->", max5_similarities(Ratios))
#
# # a = np.array(Ratios)
#
# # print(a[0:5, a])
| 35.457143 | 106 | 0.589444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,206 | 0.485898 |
52a1feba8407dbc2ea31f32b3fc768de16e1bd5c | 2,719 | py | Python | utils.py | mitchelljy/battleships_ai | 079555c487ab120d31aff97247e612cdebf1b275 | [
"MIT"
] | 8 | 2020-01-23T14:39:56.000Z | 2021-12-11T05:06:09.000Z | utils.py | mitchelljy/battleships_ai | 079555c487ab120d31aff97247e612cdebf1b275 | [
"MIT"
] | null | null | null | utils.py | mitchelljy/battleships_ai | 079555c487ab120d31aff97247e612cdebf1b275 | [
"MIT"
] | 5 | 2020-05-16T23:28:31.000Z | 2022-02-05T10:00:37.000Z | import numpy as np
# Place a random ship on the given board of the given length, making sure it does not intersect
# with anything in no_intersect
def place_random_ship(board, length, no_intersect):
    """Keep sampling random endpoints until place_ship accepts a placement,
    then record the ship's cells in ``board.ships``."""
    placed = False
    while not placed:
        vertical = bool(np.random.randint(0, 2))  # Gives us a random boolean
        # We only need 3 random numbers, since either x or y will be the same between start and end points of
        # the ship. To determine distance we ensure that the coordinate that differs, has a difference of ship_size
        # NOTE(review): rejection sampling -- may loop many times for long ships on small boards.
        o1 = o2 = 0
        while abs(o1 - o2) != length - 1:
            s, o1, o2 = np.random.randint(0, board.size), np.random.randint(0, board.size), np.random.randint(0,
                                                                                                              board.size)
        # NOTE(review): when `vertical` is true, y is fixed and x varies -- confirm the naming convention.
        if vertical:
            y1 = y2 = s
            x1 = o1
            x2 = o2
        else:
            x1 = x2 = s
            y1 = o1
            y2 = o2
        try:
            ship_crds = place_ship(board, x1, y1, x2, y2, no_intersect)
            board.ships.append(ship_crds)
            placed = True
        except AssertionError:
            # place_ship signals an invalid placement via AssertionError; retry.
            continue
def place_ship(board, x1, y1, x2, y2, no_intersect):
    """Validate and stamp a ship onto *board* between the two endpoints.

    Raises AssertionError when the endpoints are not a straight, multi-cell,
    on-board segment, or when any covered cell holds a state in *no_intersect*.
    Returns the list of (x, y) cells the ship now occupies.
    """
    # A ship must occupy a straight horizontal or vertical segment of length >= 2.
    assert x1 == x2 or y1 == y2, "Coordinates must be inline"
    assert not (x1 == x2 and y1 == y2), "Cannot be one point"
    for coord in (x1, x2, y1, y2):
        assert 0 <= coord < board.size, f"{coord} is not on board"
    low_x, high_x = min(x1, x2), max(x1, x2)
    low_y, high_y = min(y1, y2), max(y1, y2)
    # First pass: verify every covered cell is free before mutating anything.
    for cx in range(low_x, high_x + 1):
        for cy in range(low_y, high_y + 1):
            assert board.get_board()[cx, cy] not in no_intersect, "Invalid intersection"
    # Second pass: mark the cells and record the ship's footprint.
    occupied = []
    for cx in range(low_x, high_x + 1):
        for cy in range(low_y, high_y + 1):
            board.get_board()[cx, cy] = board.inv_square_states['ship']
            occupied.append((cx, cy))
    return occupied
# Convert a letter and a number into x and y coords
def letter_to_coords(letter, number):
    """Translate a board label like ('B', 7) into zero-based (x, y) coordinates."""
    return ord(letter) - 65, int(number) - 1
| 39.405797 | 119 | 0.603163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 942 | 0.346451 |
52a274d162d9e0ff675d928c3cfccd89b9f1212e | 8,449 | py | Python | assemblyline_ui/security/ldap_auth.py | CybercentreCanada/assemblyline-ui | 4b44b26852ef587d1c627fa63c778a135209e25e | [
"MIT"
] | 11 | 2020-06-29T14:31:38.000Z | 2022-01-14T17:15:06.000Z | assemblyline_ui/security/ldap_auth.py | CybercentreCanada/assemblyline-ui | 4b44b26852ef587d1c627fa63c778a135209e25e | [
"MIT"
] | 20 | 2020-06-22T12:35:30.000Z | 2022-03-10T12:41:46.000Z | assemblyline_ui/security/ldap_auth.py | CybercentreCanada/assemblyline-ui | 4b44b26852ef587d1c627fa63c778a135209e25e | [
"MIT"
] | 13 | 2020-08-15T16:10:58.000Z | 2022-01-14T17:15:09.000Z | import base64
import hashlib
import ldap
import logging
import time
from assemblyline.common.str_utils import safe_str
from assemblyline_ui.config import config, CLASSIFICATION
from assemblyline_ui.helper.user import get_dynamic_classification
from assemblyline_ui.http_exceptions import AuthenticationException
# Module-level logger; handlers/level are configured by the application.
log = logging.getLogger('assemblyline.ldap_authenticator')
#####################################################
# Functions
#####################################################
class BasicLDAPWrapper(object):
    """Thin LDAP client with a short-lived in-memory cache of logins and lookups."""
    CACHE_SEC_LEN = 300  # seconds a cached login / object lookup stays valid

    def __init__(self, ldap_config):
        """
        :param ldap_config: dict containing configuration params for LDAP
        """
        self.ldap_uri = ldap_config.uri
        self.base = ldap_config.base
        # Filter template; the %s placeholder is filled with the uid at lookup time.
        self.uid_lookup = f"{ldap_config.uid_field}=%s"
        self.group_lookup = ldap_config.group_lookup_query
        self.bind_user = ldap_config.bind_user
        self.bind_pass = ldap_config.bind_pass
        # DNs whose membership grants the corresponding user type.
        self.admin_dn = ldap_config.admin_dn
        self.sm_dn = ldap_config.signature_manager_dn
        self.si_dn = ldap_config.signature_importer_dn
        self.classification_mappings = ldap_config.classification_mappings
        # NOTE(review): both caches grow without bound, and the login cache
        # keys off an MD5 digest of the password -- consider bounding them
        # and using a stronger digest.
        self.cache = {}
        self.get_obj_cache = {}

    def create_connection(self):
        """Open (and, when bind credentials are configured, bind) a fresh
        LDAPv3 connection to the configured server."""
        if "ldaps://" in self.ldap_uri:
            # NOTE(review): OPT_X_TLS_NEVER disables server certificate
            # validation entirely -- confirm this is intended.
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
        ldap_server = ldap.initialize(self.ldap_uri)
        ldap_server.protocol_version = ldap.VERSION3
        ldap_server.set_option(ldap.OPT_REFERRALS, 0)
        if self.bind_user and self.bind_pass:
            ldap_server.simple_bind_s(self.bind_user, self.bind_pass)
        return ldap_server

    def get_group_list(self, dn, ldap_server=None):
        """Return the DNs of the groups *dn* belongs to, plus *dn* itself."""
        group_list = [x[0] for x in self.get_object(self.group_lookup % dn, ldap_server)["ldap"]]
        group_list.append(dn)
        return group_list

    def get_user_types(self, group_dn_list):
        """Map group membership to Assemblyline user types (everyone is 'user')."""
        user_type = ['user']
        if self.admin_dn in group_dn_list:
            user_type.append('admin')
        if self.sm_dn in group_dn_list:
            user_type.append('signature_manager')
        if self.si_dn in group_dn_list:
            user_type.append('signature_importer')
        return user_type

    def get_user_classification(self, group_dn_list):
        """
        Extend the users classification information with the configured group information
        NB: This is not fully implemented at this point
        :param group_dn_list: list of DNs the user is member of
        :return: the merged classification string
        """
        ret = CLASSIFICATION.UNRESTRICTED
        for group_dn in group_dn_list:
            if group_dn in self.classification_mappings:
                ret = CLASSIFICATION.build_user_classification(ret, self.classification_mappings[group_dn])
        return ret

    def get_object(self, ldap_object, ldap_server=None):
        """Search the directory for *ldap_object* (an LDAP filter string),
        serving repeated queries from a 5-minute cache.

        :return: dict with 'error' (str or None), 'ldap' (results or None)
            and 'cached' (bool) keys.
        """
        cur_time = int(time.time())
        cache_entry = self.get_obj_cache.get(ldap_object, None)
        if cache_entry and cache_entry['expiry'] > cur_time:
            # load obj from cache
            return {"error": None, "ldap": cache_entry['details'], "cached": True}
        if not ldap_server:
            try:
                ldap_server = self.create_connection()
            except Exception as le:
                return {"error": "Error connecting to ldap server. Reason: %s" % (repr(le)),
                        "ldap": None, "cached": False}
        try:
            res = ldap_server.search_s(self.base, ldap.SCOPE_SUBTREE, ldap_object)
            # Save cache get_obj
            self.get_obj_cache[ldap_object] = {"expiry": cur_time + self.CACHE_SEC_LEN, "details": res}
            return {"error": None, "ldap": res, "cached": False}
        except ldap.UNWILLING_TO_PERFORM:
            return {"error": "ldap server is unwilling to perform the operation.", "ldap": None, "cached": False}
        except ldap.LDAPError as le:
            return {"error": "An error occurred while talking to the ldap server: %s" % repr(le), "ldap": None,
                    "cached": False}

    # noinspection PyBroadException
    def login(self, user, password):
        """Authenticate *user*/*password* against LDAP.

        :return: a cache-entry dict (details, classification, type, dn, ...)
            on success, or None on any failure (errors are logged, not raised).
        """
        cur_time = int(time.time())
        password_digest = hashlib.md5(password.encode('utf-8')).hexdigest()
        # Serve a recent, identical login straight from the cache.
        cache_entry = self.cache.get(user, None)
        if cache_entry:
            if cache_entry['expiry'] > cur_time and cache_entry['password'] == password_digest:
                cache_entry["cached"] = True
                return cache_entry
        try:
            ldap_server = self.create_connection()
            ldap_ret = self.get_details_from_uid(user, ldap_server=ldap_server)
            if ldap_ret and len(ldap_ret) == 2:
                dn, details = ldap_ret
                group_list = self.get_group_list(dn, ldap_server=ldap_server)
                # Re-bind as the user themselves: this is the actual password check.
                ldap_server.simple_bind_s(dn, password)
                cache_entry = {"password": password_digest, "expiry": cur_time + self.CACHE_SEC_LEN,
                               "connection": ldap_server, "details": details, "cached": False,
                               "classification": self.get_user_classification(group_list),
                               "type": self.get_user_types(group_list), 'dn': dn}
                self.cache[user] = cache_entry
                return cache_entry
        except Exception as e:
            # Broad catch is deliberate: any LDAP failure means "login failed".
            # raise AuthenticationException('Unable to login to ldap server. [%s]' % str(e))
            log.exception('Unable to login to ldap server. [%s]' % str(e))
        return None

    # noinspection PyBroadException
    def get_details_from_uid(self, uid, ldap_server=None):
        """Return the (dn, attributes) tuple for *uid*, or None when the
        lookup fails or yields no entry."""
        res = self.get_object(self.uid_lookup % uid, ldap_server)
        if res['error']:
            log.error(res['error'])
            return None
        try:
            return res['ldap'][0]
        except Exception:
            return None
def get_attribute(ldap_login_info, key, safe=True):
    """Return the first value of *key* from the cached LDAP details, or None.

    When *safe* is true the value is passed through safe_str before return.
    """
    details = ldap_login_info.get('details')
    if not details:
        return None
    values = details.get(key, [])
    if not values:
        return None
    first = values[0]
    return safe_str(first) if safe else first
def validate_ldapuser(username, password, storage):
    """Validate a username/password pair against the configured LDAP server,
    auto-creating or syncing the Assemblyline user record when enabled.

    :return: (username, ["R", "W", "E"]) on success, (None, None) when LDAP
        auth is disabled (or falls back to internal auth).
    :raises AuthenticationException: on bad credentials or when the user does
        not exist and auto-creation is disabled.
    """
    if config.auth.ldap.enabled and username and password:
        ldap_obj = BasicLDAPWrapper(config.auth.ldap)
        ldap_info = ldap_obj.login(username, password)
        if ldap_info:
            cur_user = storage.user.get(username, as_obj=False) or {}
            # Make sure the user exists in AL and is in sync
            if (not cur_user and config.auth.ldap.auto_create) or (cur_user and config.auth.ldap.auto_sync):
                u_classification = ldap_info['classification']
                # Normalize email address
                email = get_attribute(ldap_info, config.auth.ldap.email_field)
                if email is not None:
                    email = email.lower()
                u_classification = get_dynamic_classification(u_classification, email)
                # Generate user data from ldap
                data = dict(
                    classification=u_classification,
                    uname=username,
                    name=get_attribute(ldap_info, config.auth.ldap.name_field) or username,
                    email=email,
                    password="__NO_PASSWORD__",
                    type=ldap_info['type'],
                    dn=ldap_info['dn']
                )
                # Save the user avatar avatar from ldap
                img_data = get_attribute(ldap_info, config.auth.ldap.image_field, safe=False)
                if img_data:
                    b64_img = base64.b64encode(img_data).decode()
                    avatar = f'data:image/{config.auth.ldap.image_format};base64,{b64_img}'
                    storage.user_avatar.save(username, avatar)
                # Save the updated user
                cur_user.update(data)
                storage.user.save(username, cur_user)
            if cur_user:
                return username, ["R", "W", "E"]
            else:
                raise AuthenticationException("User auto-creation is disabled")
        elif config.auth.internal.enabled:
            # Fallback to internal auth
            pass
        else:
            raise AuthenticationException("Wrong username or password")
    return None, None
| 39.297674 | 113 | 0.605871 | 5,445 | 0.644455 | 0 | 0 | 0 | 0 | 0 | 0 | 1,595 | 0.18878 |
52a6263a62818e4be2b94134abee8658545974c5 | 537 | py | Python | tangos/scripts/writer.py | TobiBu/tangos | decab8c892c5937fd68474a375089abef198dba2 | [
"BSD-3-Clause"
] | 15 | 2017-12-04T18:05:32.000Z | 2021-12-20T22:11:20.000Z | tangos/scripts/writer.py | TobiBu/tangos | decab8c892c5937fd68474a375089abef198dba2 | [
"BSD-3-Clause"
] | 99 | 2017-11-09T16:47:20.000Z | 2022-03-07T10:15:12.000Z | tangos/scripts/writer.py | TobiBu/tangos | decab8c892c5937fd68474a375089abef198dba2 | [
"BSD-3-Clause"
] | 14 | 2017-11-06T18:46:17.000Z | 2021-12-13T10:49:53.000Z | #!/usr/bin/env python2.7
from __future__ import absolute_import
import sys
def run_dbwriter(argv):
from tangos import parallel_tasks, core
from tangos.tools.property_writer import PropertyWriter
writer = PropertyWriter()
writer.parse_command_line(argv)
parallel_tasks.launch(writer.run_calculation_loop, 2, [])
def main():
    """Deprecated entry point kept for compatibility; warns, then delegates
    the remaining CLI arguments to run_dbwriter."""
    print("""
The 'tangos_writer' command line is deprecated in favour of 'tangos write'.
'tangos_writer' may be removed in future versions.
""")
    run_dbwriter(sys.argv[1:])
| 25.571429 | 79 | 0.729981 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.316574 |
52a71c2541d6c70311412a6faad0de1c41c7af1d | 3,107 | py | Python | flask_app/models/pet.py | MapleLeo/PawFosterFamily | 0f0c14f12858a27ded4ec2565e8a82290518b798 | [
"MIT"
] | null | null | null | flask_app/models/pet.py | MapleLeo/PawFosterFamily | 0f0c14f12858a27ded4ec2565e8a82290518b798 | [
"MIT"
] | null | null | null | flask_app/models/pet.py | MapleLeo/PawFosterFamily | 0f0c14f12858a27ded4ec2565e8a82290518b798 | [
"MIT"
] | null | null | null | from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
from base64 import b64encode
class Pet:
    """Data-access model for pets stored in the `pawfosterfamily` MySQL schema."""
    db = 'pawfosterfamily'

    def __init__(self, data):
        """Hydrate a Pet instance from a database row dict."""
        self.id = data['id']
        self.img = data['img']
        self.name = data['name']
        self.age = data['age']
        self.foster_time_needed = data['foster_time_needed']
        self.foster_grade = data['foster_grade']
        self.description = data['description']
        self.shelter_id = data['shelter_id']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']

    @classmethod
    def save(cls, data):
        """Insert a new pet row; returns the new row id."""
        query = 'INSERT INTO pets (img, name, age, foster_time_needed, foster_grade, description, shelter_id) VALUES (%(img)s, %(name)s, %(age)s, %(foster_time_needed)s, %(foster_grade)s, %(description)s, %(shelter_id)s);'
        return connectToMySQL(cls.db).query_db(query, data)

    @classmethod
    def get_by_shelter(cls, data):
        """Return all pets registered under the shelter in data['shelter_id']."""
        query = 'SELECT * FROM pets WHERE shelter_id = %(shelter_id)s;'
        results = connectToMySQL(cls.db).query_db(query, data)
        all_pets = []
        for row in results:
            all_pets.append(cls(row))
        return all_pets

    @classmethod
    def get_one(cls, data):
        """Return the single pet matching data['id']."""
        query = 'SELECT * FROM pets WHERE id = %(id)s;'
        results = connectToMySQL(cls.db).query_db(query, data)
        return cls(results[0])

    @classmethod
    def get_all_available(cls):
        """Return pets that have no APPROVED foster application yet."""
        query = 'SELECT * FROM pets WHERE (SELECT count(*) from applications where applications.pet_id = pets.id AND applications.status = "APPROVED") = 0;'
        results = connectToMySQL(cls.db).query_db(query)
        all_pets = []
        for row in results:
            all_pets.append(cls(row))
        return all_pets

    @classmethod
    def destroy(cls, data):
        """Delete the pet matching data['id']."""
        query = 'DELETE FROM pets WHERE id = %(id)s;'
        return connectToMySQL(cls.db).query_db(query, data)

    @classmethod
    def pets_for_user(cls, data):
        """Return raw pet rows owned by the user in data['id']."""
        query = 'SELECT * FROM pets WHERE user_id = %(id)s;'
        return connectToMySQL(cls.db).query_db(query, data)

    @classmethod
    def get_by_foster(cls, foster_id):
        """Return pets whose APPROVED application belongs to *foster_id*."""
        query = 'SELECT * FROM pets JOIN applications on applications.pet_id = pets.id where applications.status = "APPROVED" and applications.foster_id = %(foster_id)s;'
        results = connectToMySQL(cls.db).query_db(query, { 'foster_id': foster_id })
        all_pets = []
        for row in results:
            all_pets.append(cls(row))
        return all_pets

    @staticmethod
    def validate_pet(pet):
        """Validate pet form data; flashes messages under the 'pet' category.

        :param pet: dict-like form data with 'name' and 'foster_time_needed'
        :return: True when all fields pass validation
        """
        is_valid = True
        if len(pet['name']) == 0:
            is_valid = False
            flash("Name is required", "pet")
        # Bug fix: the original checked ``len(...) < 0`` which is impossible
        # (len is never negative), so the "required" rule was never enforced.
        if len(pet['foster_time_needed']) == 0:
            is_valid = False
            flash("Foster time needed is required", "pet")
        return is_valid
52a7bb8061d2a7831f792c5ce19bca7393244db9 | 11,046 | py | Python | google/ads/google_ads/v6/proto/resources/shared_set_pb2.py | jphanwebstaurant/google-ads-python | 600812b2afcc4d57f00b47dfe436620ce50bfe9b | [
"Apache-2.0"
] | 1 | 2021-04-09T04:28:47.000Z | 2021-04-09T04:28:47.000Z | google/ads/google_ads/v6/proto/resources/shared_set_pb2.py | jphanwebstaurant/google-ads-python | 600812b2afcc4d57f00b47dfe436620ce50bfe9b | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v6/proto/resources/shared_set_pb2.py | jphanwebstaurant/google-ads-python | 600812b2afcc4d57f00b47dfe436620ce50bfe9b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v6/proto/resources/shared_set.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v6.proto.enums import shared_set_status_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_enums_dot_shared__set__status__pb2
from google.ads.google_ads.v6.proto.enums import shared_set_type_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_enums_dot_shared__set__type__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v6/proto/resources/shared_set.proto',
package='google.ads.googleads.v6.resources',
syntax='proto3',
serialized_options=b'\n%com.google.ads.googleads.v6.resourcesB\016SharedSetProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V6.Resources\312\002!Google\\Ads\\GoogleAds\\V6\\Resources\352\002%Google::Ads::GoogleAds::V6::Resources',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n8google/ads/googleads_v6/proto/resources/shared_set.proto\x12!google.ads.googleads.v6.resources\x1a;google/ads/googleads_v6/proto/enums/shared_set_status.proto\x1a\x39google/ads/googleads_v6/proto/enums/shared_set_type.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\xf8\x03\n\tSharedSet\x12\x41\n\rresource_name\x18\x01 \x01(\tB*\xe0\x41\x05\xfa\x41$\n\"googleads.googleapis.com/SharedSet\x12\x14\n\x02id\x18\x08 \x01(\x03\x42\x03\xe0\x41\x03H\x00\x88\x01\x01\x12Q\n\x04type\x18\x03 \x01(\x0e\x32>.google.ads.googleads.v6.enums.SharedSetTypeEnum.SharedSetTypeB\x03\xe0\x41\x05\x12\x11\n\x04name\x18\t \x01(\tH\x01\x88\x01\x01\x12W\n\x06status\x18\x05 \x01(\x0e\x32\x42.google.ads.googleads.v6.enums.SharedSetStatusEnum.SharedSetStatusB\x03\xe0\x41\x03\x12\x1e\n\x0cmember_count\x18\n \x01(\x03\x42\x03\xe0\x41\x03H\x02\x88\x01\x01\x12!\n\x0freference_count\x18\x0b \x01(\x03\x42\x03\xe0\x41\x03H\x03\x88\x01\x01:[\xea\x41X\n\"googleads.googleapis.com/SharedSet\x12\x32\x63ustomers/{customer_id}/sharedSets/{shared_set_id}B\x05\n\x03_idB\x07\n\x05_nameB\x0f\n\r_member_countB\x12\n\x10_reference_countB\xfb\x01\n%com.google.ads.googleads.v6.resourcesB\x0eSharedSetProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V6.Resources\xca\x02!Google\\Ads\\GoogleAds\\V6\\Resources\xea\x02%Google::Ads::GoogleAds::V6::Resourcesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads__v6_dot_proto_dot_enums_dot_shared__set__status__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v6_dot_proto_dot_enums_dot_shared__set__type__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_SHAREDSET = _descriptor.Descriptor(
name='SharedSet',
full_name='google.ads.googleads.v6.resources.SharedSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.resources.SharedSet.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005\372A$\n\"googleads.googleapis.com/SharedSet', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='id', full_name='google.ads.googleads.v6.resources.SharedSet.id', index=1,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='google.ads.googleads.v6.resources.SharedSet.type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='google.ads.googleads.v6.resources.SharedSet.name', index=3,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='google.ads.googleads.v6.resources.SharedSet.status', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='member_count', full_name='google.ads.googleads.v6.resources.SharedSet.member_count', index=5,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reference_count', full_name='google.ads.googleads.v6.resources.SharedSet.reference_count', index=6,
number=11, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\352AX\n\"googleads.googleapis.com/SharedSet\0222customers/{customer_id}/sharedSets/{shared_set_id}',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_id', full_name='google.ads.googleads.v6.resources.SharedSet._id',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_name', full_name='google.ads.googleads.v6.resources.SharedSet._name',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_member_count', full_name='google.ads.googleads.v6.resources.SharedSet._member_count',
index=2, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_reference_count', full_name='google.ads.googleads.v6.resources.SharedSet._reference_count',
index=3, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=306,
serialized_end=810,
)
_SHAREDSET.fields_by_name['type'].enum_type = google_dot_ads_dot_googleads__v6_dot_proto_dot_enums_dot_shared__set__type__pb2._SHAREDSETTYPEENUM_SHAREDSETTYPE
_SHAREDSET.fields_by_name['status'].enum_type = google_dot_ads_dot_googleads__v6_dot_proto_dot_enums_dot_shared__set__status__pb2._SHAREDSETSTATUSENUM_SHAREDSETSTATUS
_SHAREDSET.oneofs_by_name['_id'].fields.append(
_SHAREDSET.fields_by_name['id'])
_SHAREDSET.fields_by_name['id'].containing_oneof = _SHAREDSET.oneofs_by_name['_id']
_SHAREDSET.oneofs_by_name['_name'].fields.append(
_SHAREDSET.fields_by_name['name'])
_SHAREDSET.fields_by_name['name'].containing_oneof = _SHAREDSET.oneofs_by_name['_name']
_SHAREDSET.oneofs_by_name['_member_count'].fields.append(
_SHAREDSET.fields_by_name['member_count'])
_SHAREDSET.fields_by_name['member_count'].containing_oneof = _SHAREDSET.oneofs_by_name['_member_count']
_SHAREDSET.oneofs_by_name['_reference_count'].fields.append(
_SHAREDSET.fields_by_name['reference_count'])
_SHAREDSET.fields_by_name['reference_count'].containing_oneof = _SHAREDSET.oneofs_by_name['_reference_count']
DESCRIPTOR.message_types_by_name['SharedSet'] = _SHAREDSET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SharedSet = _reflection.GeneratedProtocolMessageType('SharedSet', (_message.Message,), {
'DESCRIPTOR' : _SHAREDSET,
'__module__' : 'google.ads.googleads_v6.proto.resources.shared_set_pb2'
,
'__doc__': """SharedSets are used for sharing criterion exclusions across multiple
campaigns.
Attributes:
resource_name:
Immutable. The resource name of the shared set. Shared set
resource names have the form:
``customers/{customer_id}/sharedSets/{shared_set_id}``
id:
Output only. The ID of this shared set. Read only.
type:
Immutable. The type of this shared set: each shared set holds
only a single kind of resource. Required. Immutable.
name:
The name of this shared set. Required. Shared Sets must have
names that are unique among active shared sets of the same
type. The length of this string should be between 1 and 255
UTF-8 bytes, inclusive.
status:
Output only. The status of this shared set. Read only.
member_count:
Output only. The number of shared criteria within this shared
set. Read only.
reference_count:
Output only. The number of campaigns associated with this
shared set. Read only.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.SharedSet)
})
_sym_db.RegisterMessage(SharedSet)
DESCRIPTOR._options = None
_SHAREDSET.fields_by_name['resource_name']._options = None
_SHAREDSET.fields_by_name['id']._options = None
_SHAREDSET.fields_by_name['type']._options = None
_SHAREDSET.fields_by_name['status']._options = None
_SHAREDSET.fields_by_name['member_count']._options = None
_SHAREDSET.fields_by_name['reference_count']._options = None
_SHAREDSET._options = None
# @@protoc_insertion_point(module_scope)
| 58.444444 | 1,499 | 0.779558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,655 | 0.42142 |
52aac1e125cfaa8a910431ade1ba06863dbcee6c | 2,722 | py | Python | garecovery/mnemonic.py | LeoComandini/garecovery | 66f7fe4c15b3866e751162fd990bd5bc8c58ec7a | [
"MIT"
] | 61 | 2017-08-30T13:16:42.000Z | 2022-03-24T16:28:18.000Z | garecovery/mnemonic.py | baby636/garecovery | 9ced74920b804adc3a7be443f09a950a884c8c0c | [
"MIT"
] | 34 | 2017-08-11T16:58:16.000Z | 2022-02-18T09:00:23.000Z | garecovery/mnemonic.py | baby636/garecovery | 9ced74920b804adc3a7be443f09a950a884c8c0c | [
"MIT"
] | 46 | 2017-08-09T18:11:55.000Z | 2022-03-04T05:30:54.000Z | import wallycore as wally
from . import exceptions
from gaservices.utils import h2b
# Load the English BIP39 wordlist once at import time (BIP39 defines 2048 words).
wordlist_ = wally.bip39_get_wordlist('en')
wordlist = [wally.bip39_get_word(wordlist_, i) for i in range(2048)]
def seed_from_mnemonic(mnemonic_or_hex_seed):
    """Return (seed, mnemonic) derived from an input string.

    The input is either a mnemonic phrase or a hex seed terminated by an
    'X' (which is stripped before decoding). A seed is always returned;
    mnemonic is None when a hex seed was supplied.
    """
    if not mnemonic_or_hex_seed.endswith('X'):
        mnemonic = mnemonic_or_hex_seed
        written, seed = wally.bip39_mnemonic_to_seed512(mnemonic_or_hex_seed, None)
        assert written == wally.BIP39_SEED_LEN_512
    else:
        mnemonic = None
        seed = h2b(mnemonic_or_hex_seed[:-1])
    assert len(seed) == wally.BIP39_SEED_LEN_512
    return seed, mnemonic
def wallet_from_mnemonic(mnemonic_or_hex_seed, ver=wally.BIP32_VER_MAIN_PRIVATE):
    """Generate a BIP32 HD master key from a mnemonic phrase or hex seed."""
    seed, _ = seed_from_mnemonic(mnemonic_or_hex_seed)
    return wally.bip32_key_from_seed(seed, ver, wally.BIP32_FLAG_SKIP_HASH)
def _decrypt_mnemonic(mnemonic, password):
    """Decrypt a 27 word encrypted mnemonic to a 24 word mnemonic.

    Raises InvalidMnemonicOrPasswordError when the embedded checksum does
    not match, i.e. the password is wrong.
    """
    # Normalize whitespace between words.
    mnemonic = ' '.join(mnemonic.split())
    # Decode the 27-word mnemonic into 288 bits: 32 encrypted bytes + 4-byte salt.
    entropy = bytearray(wally.BIP39_ENTROPY_LEN_288)
    assert wally.bip39_mnemonic_to_bytes(None, mnemonic, entropy) == len(entropy)
    salt, encrypted = entropy[32:], entropy[:32]
    # Derive 64 bytes of key material from the password (scrypt, N=16384, r=8, p=8).
    derived = bytearray(64)
    wally.scrypt(password.encode('utf-8'), salt, 16384, 8, 8, derived)
    # AES-decrypt with the second half of the derived material...
    key, decrypted = derived[32:], bytearray(32)
    wally.aes(key, encrypted, wally.AES_FLAG_DECRYPT, decrypted)
    # ...then XOR with the first half to recover the plain entropy.
    for i in range(len(decrypted)):
        decrypted[i] ^= derived[i]
    # The 4-byte salt doubles as a checksum of the decrypted entropy.
    if wally.sha256d(decrypted)[:4] != salt:
        raise exceptions.InvalidMnemonicOrPasswordError('Incorrect password')
    return wally.bip39_mnemonic_from_bytes(None, decrypted)
def check_mnemonic_or_hex_seed(mnemonic):
    """Raise InvalidMnemonicOrPasswordError if the mnemonic/hex seed is invalid."""
    if ' ' not in mnemonic:
        # A single token is only acceptable as a hex seed terminated by 'X'.
        if not mnemonic.endswith('X'):
            msg = 'Mnemonic words must be separated by spaces, hex seed must end with X'
            raise exceptions.InvalidMnemonicOrPasswordError(msg)
        return
    unknown = [word for word in mnemonic.split() if word not in wordlist]
    if unknown:
        msg = 'Invalid word: {}'.format(unknown[0])
        raise exceptions.InvalidMnemonicOrPasswordError(msg)
    try:
        wally.bip39_mnemonic_validate(None, mnemonic)
    except ValueError:
        raise exceptions.InvalidMnemonicOrPasswordError('Invalid mnemonic checksum')
| 37.805556 | 86 | 0.709405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 640 | 0.235121 |
52aafe97f5c27fcaf130ddb706c81f803caa5389 | 4,838 | py | Python | notebooks/Computational Seismology/Summation-by-Parts/1d/rate.py | krischer/seismo_live_build | e4e8e59d9bf1b020e13ac91c0707eb907b05b34f | [
"CC-BY-3.0"
] | 3 | 2020-07-11T10:01:39.000Z | 2020-12-16T14:26:03.000Z | notebooks/Computational Seismology/Summation-by-Parts/1d/rate.py | krischer/seismo_live_build | e4e8e59d9bf1b020e13ac91c0707eb907b05b34f | [
"CC-BY-3.0"
] | null | null | null | notebooks/Computational Seismology/Summation-by-Parts/1d/rate.py | krischer/seismo_live_build | e4e8e59d9bf1b020e13ac91c0707eb907b05b34f | [
"CC-BY-3.0"
def elastic_rate(
    hv,
    hs,
    v,
    s,
    rho,
    mu,
    nx,
    dx,
    order,
    t,
    y,
    r0,
    r1,
    tau0_1,
    tau0_2,
    tauN_1,
    tauN_2,
    type_0,
    forcing,
):
    """Compute elastic velocity/stress rates used for Runge-Kutta stepping.

    hv, hs are filled in place with the rates of v (velocity) and s (stress).
    """
    import first_derivative_sbp_operators
    import numpy as np
    import boundarycondition

    # Manufactured-solution fields and their derivatives, filled by mms().
    V, S = np.zeros((nx, 1)), np.zeros((nx, 1))
    Vt, St = np.zeros((nx, 1)), np.zeros((nx, 1))
    Vx, Sx = np.zeros((nx, 1)), np.zeros((nx, 1))
    mms(V, S, Vt, St, Vx, Sx, y, t, type_0)

    # Spatial derivatives of the current velocity and stress fields.
    vx, sx = np.zeros((nx, 1)), np.zeros((nx, 1))
    first_derivative_sbp_operators.dx(vx, v, nx, dx, order)
    first_derivative_sbp_operators.dx(sx, s, nx, dx, order)

    # Elastic rates, with optional manufactured-solution forcing terms.
    hv[:, :] = (1.0 / rho) * sx + forcing * (Vt - (1.0 / rho) * Sx)
    hs[:, :] = mu * vx + forcing * (St - mu * Vx)

    # Weakly impose boundary conditions via SAT penalty terms.
    impose_bc(hv, hs, v, s, rho, mu, nx, dx, order, forcing * V, forcing * S,
              r0, r1, tau0_1, tau0_2, tauN_1, tauN_2)
def advection_rate(hv, v, nx, dx, order, t, y, tau):
    """Compute advection rates (filled into hv) for Runge-Kutta stepping,
    with a SAT penalty enforcing the forced inflow boundary."""
    import first_derivative_sbp_operators
    import numpy as np

    # Spatial derivative of the advected field.
    vx = np.zeros((nx, 1))
    first_derivative_sbp_operators.dx(vx, v, nx, dx, order)
    hv[:, :] = -vx

    # SAT penalty weight for this grid spacing and operator order.
    h11 = np.zeros((1, 1))
    penaltyweight(h11, dx, order)

    # Time-dependent boundary forcing at the left boundary.
    V0 = np.zeros((1, 1))
    g(V0, t)

    # Penalize the boundary node with the SAT term.
    hv[0, :] = hv[0, :] - tau / h11 * (v[0, :] - V0)
def impose_bc(
    hv,
    hs,
    v,
    s,
    rho,
    mu,
    nx,
    dx,
    order,
    V,
    S,
    r0,
    r1,
    tau0_1,
    tau0_2,
    tauN_1,
    tauN_2,
):
    """Weakly impose boundary conditions on the rates via SAT penalty terms.

    hv, hs: velocity/stress rate arrays, modified in place.
    v, s: current fields; V, S: boundary forcing fields.
    r0, r1: left/right boundary coefficients; tau*_*: penalty strengths.
    """
    import numpy as np
    import boundarycondition

    # SAT penalty weight (boundary entry of the SBP norm).
    h11 = np.zeros((1, 1))
    penaltyweight(h11, dx, order)

    mv = np.zeros((1, 1))
    ms = np.zeros((1, 1))
    pv = np.zeros((1, 1))
    ps = np.zeros((1, 1))

    # Boundary values of the current fields.
    v0 = v[0, :]
    s0 = s[0, :]
    vn = v[nx - 1, :]
    sn = s[nx - 1, :]

    # Boundary forcing values at each end.
    V0 = V[0, :]
    S0 = S[0, :]
    Vn = V[nx - 1, :]
    Sn = S[nx - 1, :]

    # Compute SAT terms for the left (m*) and right (p*) boundaries.
    boundarycondition.bcm(mv, ms, v0, s0, V0, S0, rho, mu, r0)
    boundarycondition.bcp(pv, ps, vn, sn, Vn, Sn, rho, mu, r1)

    # Penalize the boundary nodes with the SAT terms.
    hv[0, :] = hv[0, :] - tau0_1 / h11 * mv
    hs[0, :] = hs[0, :] - tau0_2 / h11 * ms
    hv[nx - 1, :] = hv[nx - 1, :] - tauN_1 / h11 * pv
    # Bug fix: the right-boundary stress SAT term `ps` was computed but never
    # applied (and `tauN_2` was unused); apply it symmetrically to the
    # left-boundary treatment.
    hs[nx - 1, :] = hs[nx - 1, :] - tauN_2 / h11 * ps
def mms(V, S, V_t, S_t, V_x, S_x, y, t, type_0):
    """Fill the manufactured-solution fields in place.

    V, S: velocity/stress fields; V_t, S_t their time derivatives;
    V_x, S_x their space derivatives; y: grid coordinates (nx x 1);
    t: current time; type_0: 'Gaussian' or 'Sinusoidal'. Any other
    type leaves all arrays untouched.
    """
    import numpy as np

    # Bug fix: `type_0 in ("Gaussian")` tested substring membership in the
    # *string* "Gaussian" (the parentheses do not make a tuple), so inputs
    # like "auss" would also match. Use equality instead.
    if type_0 == "Gaussian":
        # Two counter-propagating Gaussian pulses of width delta.
        delta = 0.015 * (y[-1, 0] - y[0, 0])
        cs = 3.464
        rho = 2.6702
        Zs = rho * cs  # scales the stress pulse relative to the velocity pulse
        x0 = 0.5 * (y[-1, 0] - y[0, 0])
        V[:, :] = (
            1
            / np.sqrt(2.0 * np.pi * delta ** 2)
            * 0.5
            * (
                np.exp(-(y + cs * (t) - x0) ** 2 / (2.0 * delta ** 2))
                + np.exp(-(y - cs * (t) - x0) ** 2 / (2.0 * delta ** 2))
            )
        )
        S[:, :] = (
            1
            / np.sqrt(2.0 * np.pi * delta ** 2)
            * 0.5
            * Zs
            * (
                np.exp(-(y + cs * (t) - x0) ** 2 / (2.0 * delta ** 2))
                - np.exp(-(y - cs * (t) - x0) ** 2 / (2.0 * delta ** 2))
            )
        )
        V_t[:, :] = 0
        S_t[:, :] = 0
        V_x[:, :] = 0
        S_x[:, :] = 0
    if type_0 == "Sinusoidal":
        # Separable sinusoidal solution and its exact derivatives.
        delta = y[-1, 0] - y[0, 0]
        ny = 20.5 / delta * np.pi
        nt = 2.5 * np.pi
        fs = 9.33
        V[:, :] = np.cos(nt * t) * np.sin(ny * y + fs)
        S[:, :] = ny * np.sin(nt * t) * np.cos(ny * y - fs)
        V_t[:, :] = -nt * np.sin(nt * t) * np.sin(ny * y + fs)
        S_t[:, :] = nt * ny * np.cos(nt * t) * np.cos(ny * y - fs)
        V_x[:, :] = ny * np.cos(nt * t) * np.cos(ny * y + fs)
        S_x[:, :] = -ny * ny * np.sin(nt * t) * np.sin(ny * y - fs)
def g(V, t):
    """Fill V in place with the boundary forcing: sin^4(pi*t) on [0, 1], else 0."""
    import numpy as np

    if 0.0 <= t <= 1.0:
        V[:, :] = np.sin(np.pi * t) ** 4
    else:
        V[:, :] = 0.0
def penaltyweight(h11, dx, order):
    """Fill h11 in place with the SBP boundary norm weight for the given order.

    Unsupported orders leave h11 unchanged.
    """
    weights = {2: 0.5, 4: 17.0 / 48.0, 6: 13649.0 / 43200.0}
    if order in weights:
        h11[:] = weights[order] * dx
52ab08bb235449b1d6733e6d61123545bf0f1f33 | 16,752 | py | Python | probability_kernels.py | jessebmurray/polygenic | 19c1ca5d7cf5e1b22f4301f7864a7b12ea46ac84 | [
"MIT"
] | 2 | 2020-03-12T16:45:24.000Z | 2020-03-12T23:09:05.000Z | probability_kernels.py | jessebmurray/polygenic | 19c1ca5d7cf5e1b22f4301f7864a7b12ea46ac84 | [
"MIT"
] | null | null | null | probability_kernels.py | jessebmurray/polygenic | 19c1ca5d7cf5e1b22f4301f7864a7b12ea46ac84 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
# Basic model functions
def pdf(x_i, sigma_i=1, mu=0):
    """Marginal (population) density of X_i ~ Normal(mu, sigma_i^2) at x_i."""
    return st.norm.pdf(x_i, loc=mu, scale=sigma_i)
def stable_rs(r):
    """Return r_s satisfying r^2 + r_s^2 = 1 (stable population variance)."""
    return np.sqrt(1 - np.square(r))
# Conditional descendant distribution parameters
def get_mu_tilda(x_i, r, n):
    """Conditional descendant expectation after n generations: r^n * x_i."""
    decay = r ** n
    return decay * x_i
def get_sigma_tilda(sigma_i, r, rs, n):
    """Conditional descendant SD after n generations.

    The variance is ((r^2 + rs^2)^n - r^(2n)) * sigma_i^2.
    """
    total_variance = (np.square(r) + np.square(rs)) ** n
    ancestral_variance = r ** (2 * n)
    return sigma_i * np.sqrt(total_variance - ancestral_variance)
# Ancestor and descendant bounds
def get_percentile_bounds(m):
    """Return an (m x 2) matrix of lower/upper bounds for m equal percentile sets.

    For example with m=5: (0, 0.2), ..., (0.8, 1). Row j corresponds to the
    percentile set Q_{j+1} of the paper.
    """
    width = 1 / m
    lower = np.linspace(start=0, stop=1, num=m, endpoint=False)
    return np.stack((lower, lower + width), axis=1)
def get_real_bounds(m, sigma):
    """Return an (m x 2) matrix of real-line bounds for m equal percentile sets.

    Maps the percentile bounds through the normal inverse CDF (ppf) with
    mean 0 and SD sigma; percentiles 0 and 1 map to -inf and +inf.
    """
    percentile_bounds = get_percentile_bounds(m=m)
    return st.norm.ppf(q=percentile_bounds, loc=0, scale=sigma)
def expand_real_bounds(real_bounds, x_i, sigma_i, r, rs, n):
    """Standardize real bounds against the conditional distributions of x_i.

    Computes (D - mu_tilda) / sigma_tilda from the P_n(D, x_i) equation of
    the paper for every bound D in real_bounds and every ancestor score in
    x_i (scalar or vector). real_bounds is typically the (m x 2) matrix of
    real values for the marginal descendant distribution; sigma_i is the
    ancestor-distribution SD used to form the shared conditional SD.

    Returns an (len(x_i) x m x 2) array: one conditionalized copy of
    real_bounds per ancestor score.
    """
    # Conditional means, broadcast over rows: reshaped to (len(x_i), 1, 1)
    # so subtracting from the (m x 2) bounds yields a 3-D array.
    cond_means = np.reshape(get_mu_tilda(x_i=x_i, r=r, n=n), (-1, 1, 1))
    # The conditional SD is the same scalar for every ancestor score.
    cond_sd = get_sigma_tilda(sigma_i, r, rs, n)
    return (real_bounds - cond_means) / cond_sd
# State to set probability
def get_state_set(m_descendant, x_i, r, rs, n, sigma_i):
    """State-to-set probabilities P_n(D, x_i) for m_descendant percentile sets.

    x_i may be a scalar or a vector of ancestor states (evenly spaced over an
    ancestor set in get_matrix). Returns an (m_descendant x len(x_i)) array of
    conditional probabilities scaled by the marginal density of x_i.
    """
    # SD of the marginal (population) descendant distribution after n steps.
    sigma_n = np.sqrt((np.square(r) + np.square(rs)) ** n) * sigma_i
    # Real-valued bounds of the m_descendant percentile sets.
    descendant_bounds = get_real_bounds(m=m_descendant, sigma=sigma_n)
    # Standardize against the conditional distribution of each x_i, then
    # map to cumulative probabilities.
    z_bounds = expand_real_bounds(descendant_bounds, x_i, sigma_i, r, rs, n)
    cdf_bounds = st.norm.cdf(z_bounds)
    # Mass between each lower/upper pair; squeeze the collapsed last axis.
    probabilities = np.squeeze(np.diff(cdf_bounds))
    # Scale by the ancestor density; transpose to (m_descendant x len(x_i)).
    return probabilities.T * pdf(x_i)
# Percentile transition matrix
def trim_real_bounds(real_bounds, trim_score=5):
    """Replace +/-inf entries of real_bounds (in place) with -/+trim_score.

    This makes numerical integration over the tail bounds possible; the
    density beyond trim_score SDs of the mean is negligible, so truncating
    there adds an immaterial error. Note the trim_score equals a z-score
    only when the bounds' SD is 1. Returns the (mutated) array.
    """
    real_bounds[real_bounds == -np.inf] = -trim_score
    real_bounds[real_bounds == np.inf] = trim_score
    return real_bounds
def get_x_i_matrix(m_ancestor, trim_score, num_iters, sigma_i):
    """Return an (m_ancestor x num_iters) matrix whose rows are evenly
    spaced ancestor scores spanning each of the m_ancestor real sets."""
    bounds = get_real_bounds(m=m_ancestor, sigma=sigma_i)
    bounds = trim_real_bounds(bounds, trim_score=trim_score)
    # One line-space per ancestor set, from its lower to its upper bound.
    return np.linspace(bounds[:, 0], bounds[:, 1], num=num_iters, axis=1)
def get_matrix(r, rs, n=1, num_iters=100_000, trim_score=5, m_descendant=5, m_ancestor=5):
    """Compute an (m_descendant x m_ancestor) percentile transition matrix.

    Each column is obtained by numerically integrating (trapezoid rule over
    num_iters points) the state-to-set probabilities P_n(D, x_i) across the
    ancestor states of that column's percentile set, then normalizing by the
    probability 1/m_ancestor of an ancestor set. In the paper
    m_ancestor == m_descendant, but different values are permitted here.
    """
    # Standardized marginal ancestor distribution.
    sigma_i = 1
    x_i_matrix = get_x_i_matrix(m_ancestor=m_ancestor, trim_score=trim_score,
                                num_iters=num_iters, sigma_i=sigma_i)
    matrix = np.zeros((m_descendant, m_ancestor))
    for col, x_i in enumerate(x_i_matrix):
        # P_n(D, x_i) for every descendant set over this ancestor set.
        state_set = get_state_set(m_descendant=m_descendant, x_i=x_i,
                                  sigma_i=sigma_i, r=r, rs=rs, n=n)
        # Trapezoidal integration over the ancestor states.
        matrix[:, col] = np.trapz(state_set, x_i)
    # Normalize to per-ancestor-state probabilities (each ancestor set has
    # probability 1/m_ancestor).
    ancestor_state_probability = 1 / m_ancestor
    matrix /= ancestor_state_probability
    return matrix
# Plotting functions
def plot_ax(ax, matrix, i=0, j=0, title=None, title_loc='left', x_label=True, child=False):
    """Plots a percentile transition matrix as a stacked bar chart on an axis.

    ax: matplotlib axis to draw on.
    matrix: (m_descendant x m_ancestor) percentile transition matrix.
    i: generation gap minus one (selects the ancestor label and % precision).
    j: subplot position (controls which subplots receive axis labels).
    title, title_loc: optional axis title and its location.
    x_label: force the x-axis label even on non-bottom subplots.
    child: use 'Child' wording in the y-axis label.
    """
    from matplotlib.ticker import PercentFormatter
    ancestors = ['Parent', 'Grandparent', 'Great-Grandparent', 'Great-Great-Grandparent',
                 'Great$^3$-Grandparent', 'Great$^4$-Grandparent']
    if title:
        ax.set_title(title, fontsize=17, loc=title_loc)
    if matrix.shape[1] == 5:
        step_labels = ['Bottom', 'Second', 'Third', 'Fourth', 'Top']
        # Label the x-axis when forced, or on bottom-row subplots (j >= 4).
        if x_label or j >= 4:
            ax.set_xlabel('{}\'s Quintile'.format(ancestors[i]), fontsize=15)
        else:
            ax.set_xlabel(' '.format(ancestors[i]), fontsize=15)
        if j % 2 == 0:
            if child:
                ax.set_ylabel('Cumulative Probability of Child\'s Quintile', fontsize=15)
            else:
                ax.set_ylabel('Cumulative Probability of Descendant\'s Quintile', fontsize=15)
    else:
        step_labels = list(range(1, matrix.shape[1] + 1))
    pal = ['#c6dbef', '#9ecae1', '#6baed6', '#4292c6', '#2171b5'][::-1]
    ax.set_ylim(0, 1)
    values_sum_list = [1] * matrix.shape[1]
    # Draw rows last-to-first so the bars stack downward from 1.
    # (Loop variable renamed from `j`, which shadowed the parameter.)
    for row in range(len(matrix) - 1, -1, -1):
        if len(matrix) <= 5:
            ax.bar(step_labels, [- value for value in matrix[row]],
                   bottom=values_sum_list, color=pal[row])
        else:
            ax.bar(step_labels, [- value for value in matrix[row]],
                   bottom=values_sum_list)
        # Annotate each visible segment with its percentage.
        for a, b, c in zip(step_labels, values_sum_list, [value for value in matrix[row]]):
            if c >= 0.01:
                num = (b - c / 2) - 0.018
                color = 'w'
                if row >= 2:
                    color = 'k'
                round_str = "{:0.0%}"
                if i > 3:
                    round_str = "{:0.1%}"
                ax.text(a, num, ' ' + round_str.format(c),
                        va='bottom', ha='center', color=color, size=13, alpha=0.8)
        for k in range(len(values_sum_list)):
            values_sum_list[k] -= matrix[row][k]
    ax.set_yticks(np.arange(0, 1.1, 0.1))
    # Bug fix: `Fontsize=14` (capital F) is not a valid matplotlib Text
    # property and raises on recent matplotlib; the keyword is `fontsize`.
    ax.set_xticklabels(step_labels, fontsize=14)
    ax.yaxis.set_major_formatter(PercentFormatter(1))
def plot_matrix(matrix, n=1, child=True, legend=True):
    """Plot a single percentile transition matrix as one stacked-bar figure."""
    fig, axes = plt.subplots(1, 1, figsize=(13 * 0.95 * 0.75, 8 / 0.95 * 0.75))
    plot_ax(ax=axes, matrix=matrix, i=n - 1, child=child)
    if matrix.shape[1] == 5:
        term = 'Child' if n == 1 else 'Descendant'
        if legend:
            labels = ['{} in the\nTop Quintile'.format(term), 'Fourth Quintile',
                      'Third Quintile', 'Second Quintile', 'Bottom Quintile']
            fig.legend(labels, bbox_to_anchor=(1, 0.977), loc="upper left", fontsize=15)
    plt.tight_layout()
def get_rv_rsv(mv):
    """Return the (r vector, r_s vector) corresponding to mobility vector mv."""
    rv = 1 / np.sqrt(np.square(mv) + 1)
    return rv, stable_rs(rv)
def report_mobility(mv, rv, rsv, i):
    """Format entry i of the mobility (mv), regression (rv) and residual
    (rsv) vectors for display."""
    # Whole mobility values print as integers; otherwise show one decimal.
    m_fmt = "{:.0f}" if mv[i] % 1 == 0 else "{:.1f}"
    template = "$m$ = " + m_fmt + ", $r$ = {:.3f}, $s$ = {:.3f}"
    return template.format(mv[i], rv[i], rsv[i])
# Functions for handling (Pearson) data
def get_percentiles(vector):
    """Return each entry's rank divided by the vector length (values in (0, 1])."""
    ranks = st.rankdata(vector)
    return ranks / vector.size
def get_matrix_data(x, y, m_ancestor=5, m_descendant=5, return_raw=False):
    """Estimate the observed percentile transition matrix from paired data.

    x holds ancestor values and y descendant values (parallel vectors,
    typically parent-child). With return_raw=True the entries are raw
    counts; otherwise they are proportions, which estimate the transition
    probabilities.
    """
    # Percentile-bin edges for each generation.
    ancestor_edges = np.linspace(0, 1, m_ancestor, endpoint=False)
    descendant_edges = np.linspace(0, 1, m_descendant, endpoint=False)
    # 1-based bin index of every observation, from its percentile rank.
    xb = np.digitize(get_percentiles(x), ancestor_edges)
    yb = np.digitize(get_percentiles(y), descendant_edges)
    matrix = np.zeros((m_ancestor, m_descendant))
    for i in range(m_ancestor):
        # NOTE(review): this selects the x-bins of the pairs whose *y*-bin
        # is i+1, i.e. columns condition on the descendant bin — confirm
        # this orientation matches get_matrix().
        selected = xb[yb == i + 1]
        for jj in range(m_descendant):
            hits = selected == jj + 1
            matrix[jj, i] = np.sum(hits) if return_raw else np.mean(hits)
    return matrix
# Testing functions
def test_percentile_bounds():
    """get_percentile_bounds(m=5) should yield five equal 20% bins."""
    expected = np.array([[i / 5, (i + 1) / 5] for i in range(5)])
    assert np.allclose(get_percentile_bounds(m=5).ravel(), expected.ravel())
def test_expanded_real_bounds():
    """Test that this gives the correct shape. (Further tests can be added.)

    NOTE(review): the return value of expand_real_bounds() is discarded, so
    the assertion below only exercises NumPy broadcasting of ``rb`` against a
    reshaped array and holds regardless of what expand_real_bounds computes.
    Presumably the intent was to assert on the function's result — confirm
    and tighten.
    """
    x_i_trial = np.array([1, 2, 3])
    # Looks like standard-normal quintile bounds with tails clipped at +/-5
    # (i.e. get_real_bounds(m=5, sigma=1) after trimming) — TODO confirm
    rb = np.array([[-5., -0.84162123],
                   [-0.84162123, -0.2533471],
                   [-0.2533471, 0.2533471],
                   [0.2533471, 0.84162123],
                   [0.84162123, 5.]])
    expand_real_bounds(real_bounds=rb, x_i=x_i_trial, sigma_i=1, r=0.5, rs=0.9, n=1)
    assert (rb - np.reshape([1, 2, 3], (-1, 1, 1))).shape == (3, 5, 2)
def test_trim_real_bounds():
    """Bounds already within +/-trim_score must pass through unchanged."""
    trim_score = 4
    rb = get_real_bounds(m=5, sigma=1)
    # Pin the extreme corners exactly at the trim threshold.
    rb[0, 0] = -1 * trim_score
    rb[-1, -1] = trim_score
    trimmed = trim_real_bounds(rb, trim_score)
    assert (rb == trimmed).all()
def test_functions():
    """Run the full self-test suite; raises AssertionError on any failure."""
    for check in (test_percentile_bounds,
                  test_expanded_real_bounds,
                  test_trim_real_bounds):
        check()
# Run the self-tests only when executed as a script, not on import.
if __name__ == '__main__':
    test_functions()
    print('Tests passed')
| 38.422018 | 94 | 0.660757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,089 | 0.542562 |
52ab535bd1515f812b69c19c90b0debaf79326f8 | 6,661 | py | Python | main/part1/utils.py | YerbaPage/KnowledgeGraph | 6e839ee2fa677360b47eefe4947c4f9d0591685c | [
"MIT"
] | 2 | 2021-04-22T01:59:57.000Z | 2021-07-26T08:55:36.000Z | main/part1/utils.py | YerbaPage/KnowledgeGraph | 6e839ee2fa677360b47eefe4947c4f9d0591685c | [
"MIT"
] | null | null | null | main/part1/utils.py | YerbaPage/KnowledgeGraph | 6e839ee2fa677360b47eefe4947c4f9d0591685c | [
"MIT"
] | null | null | null | # coding: UTF-8
import torch
from tqdm import tqdm
import time
from datetime import timedelta
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
PAD, CLS = '[PAD]', '[CLS]' # padding符号, bert中综合信息符号
class FocalLoss(nn.Module):
    """Focal Loss with optional label smoothing, as proposed in
    'Focal Loss for Dense Object Detection' (https://arxiv.org/abs/1708.02002).

    Focal_Loss = -1 * alpha * (1 - pt)^gamma * log(pt)

    :param num_class: number of target classes
    :param alpha: (tensor/list/ndarray/float) per-class weighting factor; a
        list/ndarray is normalized to sum to 1, a float weights
        ``balance_index`` with alpha and every other class with 1 - alpha,
        and None means uniform weights of 1
    :param gamma: (float,double) gamma > 0 reduces the relative loss for
        well-classified examples (p > 0.5) putting more focus on hard
        misclassified examples
    :param smooth: (float,double) smooth value used to clamp the one-hot
        targets when cross entropy is computed
    :param balance_index: (int) class index receiving the scalar ``alpha``
        weight; only used when alpha is a float
    :param size_average: (bool, optional) By default, the losses are averaged
        over each loss element in the batch; if False they are summed.
    """

    def __init__(self, num_class, alpha=None, gamma=2, balance_index=-1, smooth=None, size_average=True):
        super(FocalLoss, self).__init__()
        self.num_class = num_class
        self.alpha = alpha
        self.gamma = gamma
        self.smooth = smooth
        self.size_average = size_average

        if self.alpha is None:
            # Uniform class weights.
            self.alpha = torch.ones(self.num_class, 1)
        elif isinstance(self.alpha, (list, np.ndarray)):
            assert len(self.alpha) == self.num_class
            self.alpha = torch.FloatTensor(alpha).view(self.num_class, 1)
            self.alpha = self.alpha / self.alpha.sum()
        elif isinstance(self.alpha, float):
            # Scalar alpha: weight `balance_index` with alpha, the rest with 1-alpha.
            alpha = torch.ones(self.num_class, 1)
            alpha = alpha * (1 - self.alpha)
            alpha[balance_index] = self.alpha
            self.alpha = alpha
        else:
            raise TypeError('Not support alpha type')

        if self.smooth is not None:
            if self.smooth < 0 or self.smooth > 1.0:
                raise ValueError('smooth value should be in [0,1]')

    def forward(self, input, target):
        logit = F.softmax(input, dim=1)

        if logit.dim() > 2:
            # N,C,d1,d2 -> N*m,C (m=d1*d2*...): flatten spatial dims so every
            # location is treated as an independent sample.
            logit = logit.view(logit.size(0), logit.size(1), -1)
            logit = logit.permute(0, 2, 1).contiguous()
            logit = logit.view(-1, logit.size(-1))
        target = target.view(-1, 1)

        # epsilon keeps log() finite when the predicted probability is ~0.
        epsilon = 1e-10
        alpha = self.alpha
        if alpha.device != input.device:
            alpha = alpha.to(input.device)

        idx = target.cpu().long()
        one_hot_key = torch.FloatTensor(target.size(0), self.num_class).zero_()
        one_hot_key = one_hot_key.scatter_(1, idx, 1)
        if one_hot_key.device != logit.device:
            one_hot_key = one_hot_key.to(logit.device)

        if self.smooth:
            one_hot_key = torch.clamp(
                one_hot_key, self.smooth, 1.0 - self.smooth)
        # pt: predicted probability of the true class, shape (N,)
        pt = (one_hot_key * logit).sum(1) + epsilon
        logpt = pt.log()

        gamma = self.gamma
        # BUG FIX: ``alpha[idx]`` has shape (N, 1, 1) because idx is (N, 1);
        # broadcasting that against the (N,) pt/logpt produced an (N, 1, N)
        # loss tensor and therefore a wrong mean/sum. Flatten to (N,) so each
        # sample is paired with exactly its own class weight.
        alpha = alpha[idx].view(-1)
        loss = -1 * alpha * torch.pow((1 - pt), gamma) * logpt

        if self.size_average:
            loss = loss.mean()
        else:
            loss = loss.sum()
        return loss
def build_dataset(config):
    """Load the train/dev/test splits and turn each line into BERT features.

    Every non-empty line is expected to be ``content\\tlabel``. Each sample
    becomes a tuple ``(token_ids, label, seq_len, mask)``, padded (with 0) or
    truncated to ``config.pad_size`` tokens including the leading [CLS].
    """
    def load_dataset(path, pad_size=32):
        samples = []
        with open(path, 'r', encoding='UTF-8') as f:
            for raw_line in tqdm(f):
                stripped = raw_line.strip()
                if not stripped:
                    continue
                content, label = stripped.split('\t')
                tokens = [CLS] + config.tokenizer.tokenize(content)
                seq_len = len(tokens)
                mask = []
                token_ids = config.tokenizer.convert_tokens_to_ids(tokens)
                if pad_size:
                    if len(tokens) < pad_size:
                        # Real tokens get mask 1, padding positions get 0.
                        pad_len = pad_size - len(tokens)
                        mask = [1] * len(token_ids) + [0] * pad_len
                        token_ids = token_ids + [0] * pad_len
                    else:
                        # Truncate; every kept position is a real token.
                        mask = [1] * pad_size
                        token_ids = token_ids[:pad_size]
                        seq_len = pad_size
                samples.append((token_ids, int(label), seq_len, mask))
        return samples

    train = load_dataset(config.train_path, config.pad_size)
    dev = load_dataset(config.dev_path, config.pad_size)
    test = load_dataset(config.test_path, config.pad_size)
    return train, dev, test
class DatasetIterater(object):
    """Batched iterator over pre-processed samples.

    Each yielded batch is ``((token_ids, seq_len, mask), labels)`` of
    LongTensors moved to ``device``. When the dataset size is not a multiple
    of ``batch_size``, a final smaller batch is emitted.
    """

    def __init__(self, batches, batch_size, device):
        self.batch_size = batch_size
        self.batches = batches
        self.n_batches = len(batches) // batch_size
        # Whether a final, smaller batch remains after the full-size ones.
        # BUG FIX: the original tested ``len(batches) % self.n_batches``,
        # which silently dropped the tail (e.g. 6 samples / batch_size 4) and
        # raised ZeroDivisionError when len(batches) < batch_size; the
        # remainder must be taken modulo batch_size.
        self.residue = len(batches) % batch_size != 0
        self.index = 0
        self.device = device

    def _to_tensor(self, datas):
        """Stack a list of (token_ids, label, seq_len, mask) tuples into tensors."""
        x = torch.LongTensor([_[0] for _ in datas]).to(self.device)
        y = torch.LongTensor([_[1] for _ in datas]).to(self.device)
        # seq_len is the pre-padding length (capped at pad_size).
        seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)
        mask = torch.LongTensor([_[3] for _ in datas]).to(self.device)
        return (x, seq_len, mask), y

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            # Final partial batch.
            batches = self.batches[self.index * self.batch_size: len(self.batches)]
            self.index += 1
            batches = self._to_tensor(batches)
            return batches
        elif self.index >= self.n_batches:
            # Exhausted: reset so the iterator can be traversed again.
            self.index = 0
            raise StopIteration
        else:
            batches = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]
            self.index += 1
            batches = self._to_tensor(batches)
            return batches

    def __iter__(self):
        return self

    def __len__(self):
        if self.residue:
            return self.n_batches + 1
        else:
            return self.n_batches
def build_iterator(dataset, config):
    """Wrap a pre-processed dataset in a DatasetIterater using config settings."""
    return DatasetIterater(dataset, config.batch_size, config.device)
def get_time_dif(start_time):
    """Return the wall-clock time elapsed since ``start_time`` (a
    ``time.time()`` timestamp) as a timedelta rounded to whole seconds."""
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))
| 36.201087 | 118 | 0.583696 | 4,888 | 0.726408 | 0 | 0 | 0 | 0 | 0 | 0 | 1,223 | 0.181751 |
52acccd56e887711b8141885aa5194b4be9e5cd8 | 2,596 | py | Python | tools/improve_8105.py | dophist/pinyin-data | 8010e208f7cbc44290e960e1ed75aeb5ba2b1daf | [
"MIT"
] | 823 | 2016-03-02T04:24:43.000Z | 2022-03-26T13:57:23.000Z | tools/improve_8105.py | dophist/pinyin-data | 8010e208f7cbc44290e960e1ed75aeb5ba2b1daf | [
"MIT"
] | 32 | 2016-07-19T07:44:20.000Z | 2022-03-07T05:43:10.000Z | tools/improve_8105.py | dophist/pinyin-data | 8010e208f7cbc44290e960e1ed75aeb5ba2b1daf | [
"MIT"
] | 185 | 2016-03-12T12:09:09.000Z | 2022-03-28T00:57:10.000Z | # -*- coding: utf-8 -*-
"""补充 8105 中汉字的拼音数据"""
from collections import namedtuple
import re
import sys
from pyquery import PyQuery
import requests
re_pinyin = re.compile(r'拼音:(?P<pinyin>\S+) ')
re_code = re.compile(r'统一码\w?:(?P<code>\S+) ')
re_alternate = re.compile(r'异体字:\s+?(?P<alternate>\S+)')
HanziInfo = namedtuple('HanziInfo', 'pinyin code alternate')
def fetch_html(url, params):
    """GET ``url`` with query ``params`` and return the raw body as bytes."""
    response = requests.get(url, params=params)
    body = response.content
    return body
def fetch_info(hanzi):
    """Scrape guoxuedashi.com for one character's pinyin, Unicode code point
    and alternate (variant) form.

    Returns a HanziInfo(pinyin, code, alternate); pinyin and alternate may be
    empty strings when the page provides no match for the regexes.
    """
    url = 'http://www.guoxuedashi.com/zidian/so.php'
    params = {
        'sokeyzi': hanzi,
        'kz': 1,
        'submit': '',
    }
    html = fetch_html(url, params)
    pq = PyQuery(html)
    # Narrow to the second <td> of the result table, which carries the
    # pinyin / code-point description text matched by re_pinyin / re_code.
    pq = PyQuery(pq('table.zui td')[1])
    text = pq('tr').text()
    # NOTE(review): ``pq`` is now rooted at a single <td>, yet it is called
    # again with the full ``html`` here — presumably intended to re-query the
    # whole document for the variant-character block; confirm against a live
    # response before refactoring.
    text_alternate = pq(html)('.info_txt2')('em').text()
    pinyin = ''
    pinyin_match = re_pinyin.search(text)
    if pinyin_match is not None:
        pinyin = pinyin_match.group('pinyin')
    # The code point is assumed to always be present; .group() raises
    # AttributeError if the page layout changes and the regex fails.
    code = re_code.search(text).group('code')
    alternate = ''
    alternate_match = re_alternate.search(text_alternate)
    if alternate_match is not None:
        alternate = alternate_match.group('alternate')
    return HanziInfo(pinyin, code, alternate)
def parse_hanzi(hanzi):
    """Look up one character; when it has no pinyin but has a variant form,
    also fetch the variant's info.

    Returns a HanziInfo whose ``alternate`` field is either '' or the
    variant's own HanziInfo.
    """
    info = fetch_info(hanzi)
    alternate = ''
    if not info.pinyin and info.alternate:
        alternate = fetch_info(info.alternate)
    return HanziInfo(info.pinyin, info.code, alternate)
def main(lines):
    """Yield completed data lines for entries still marked with '<-'.

    Lines of the form ``# U+XXXX: <- ...`` are looked up online (falling back
    to the variant character when the hanzi itself has no pinyin) and the
    placeholder '<-' is replaced with the pinyin found. Extra readings and
    variant redirects are appended as ' ?-> ...' / ' => U+...' suffixes.
    """
    for raw in lines:
        if not (raw.startswith('# U+') and '<-' in raw):
            continue
        # '# U+xxx: ...' -> 'U+xxx' -> 'xxx'
        code_point = raw.split(':')[0].strip('# ')[2:]
        info = parse_hanzi(code_point)
        pinyin = info.pinyin
        extra = ''
        if not pinyin and info.alternate:
            variant = info.alternate
            pinyin = variant.pinyin
            extra = ' => U+{0}'.format(variant.code)
        if ',' in pinyin:
            # Keep the first reading; append the rest after a '?->' marker.
            pinyin, remaining = pinyin.split(',', 1)
            extra += ' ?-> ' + remaining
        if not pinyin:
            continue
        # Drop the leading '# ' so the entry becomes an active data line.
        completed = raw.strip()[2:].replace('<-', pinyin)
        if extra:
            completed += extra
        yield completed.strip()
if __name__ == '__main__':
    args = sys.argv[1:]
    input_file = args[0]
    # The pinyin data file is UTF-8; be explicit instead of relying on the
    # platform default encoding (which breaks on e.g. GBK/Windows locales).
    with open(input_file, encoding='utf-8') as fp:
        for line in main(fp):
            print(line)
| 29.168539 | 69 | 0.552003 | 0 | 0 | 980 | 0.371494 | 0 | 0 | 0 | 0 | 439 | 0.166414 |