Dataset schema (column name, dtype, and value range; columns marked nullable contain null/⌀ values):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

Each record below lists these fields in this order, pipe-separated, with the `content` field printed in full and followed by the three line-statistic fields.
201f5552a3378781916787a5f8761802bd6a2fc2 | 622 | py | Python | tago/account/paymentHistory.py | tago-io/tago-sdk-python | c55c86a002bb5f48dd08a9c2b270d0dee4b1b6d1 | ["Apache-2.0"] | 4 | 2017-05-01T01:55:29.000Z | 2022-02-14T12:55:42.000Z | tago/account/paymentHistory.py | tago-io/tago-python | 203b489aa5bcdbb116423049c62d90a7cce9b9a0 | ["Apache-2.0"] | 10 | 2017-03-20T14:54:46.000Z | 2021-12-14T21:52:08.000Z | tago/account/paymentHistory.py | tago-io/tago-python | 203b489aa5bcdbb116423049c62d90a7cce9b9a0 | ["Apache-2.0"] | 5 | 2017-04-04T21:59:06.000Z | 2022-02-11T08:35:35.000Z |
import requests # Used to make HTTP requests
import json # Used to parse JSON
import os # Used to read environment variables
API_TAGO = os.environ.get('TAGOIO_API') or 'https://api.tago.io'
REALTIME = os.environ.get('TAGOIO_REALTIME') or 'https://realtime.tago.io'
class PaymentHistory:
def __init__(self, acc_token):
self.token = acc_token
self.default_headers = {
'content-type': 'application/json', 'Account-Token': acc_token}
return
def getHistory(self):
return requests.get('{api_endpoint}/account/payment_history'.format(api_endpoint=API_TAGO), headers=self.default_headers).json()
| 34.555556 | 132 | 0.737942 |
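A minimal usage sketch for the `PaymentHistory` class in the file above; the import path follows the repo path shown for this record, and the account token value is a placeholder.

```python
# Hypothetical usage; 'my-account-token' is a placeholder value.
from tago.account.paymentHistory import PaymentHistory

history = PaymentHistory('my-account-token')
print(history.getHistory())  # GET {API_TAGO}/account/payment_history, decoded from JSON
```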
43b134adbb0cdfac87c7b5ca68f8a63a3f8c2097 | 2,100 | py | Python | presenter/uptake.py | fic2/python-dokuwiki-export | 3584c4cd146e1d8510504064c8c8094e41a5fc9e | ["MIT"] | null | null | null | presenter/uptake.py | fic2/python-dokuwiki-export | 3584c4cd146e1d8510504064c8c8094e41a5fc9e | ["MIT"] | null | null | null | presenter/uptake.py | fic2/python-dokuwiki-export | 3584c4cd146e1d8510504064c8c8094e41a5fc9e | ["MIT"] | null | null | null |
from . import PresenterBase
from visitor import UsedByVisitor
from entities import SpecificEnabler, DeprecatedSpecificEnabler, Application
class UptakePresenter(PresenterBase):
def __init__(self, nice = lambda item: item, hideunused = False):
PresenterBase.__init__(self)
# self.v = GEVisitor()
self.nice = nice
self.hideunused = hideunused
self.collect_entities = [SpecificEnabler, DeprecatedSpecificEnabler, Application]
def present_ge(self, ge, meta):
uv1 = UsedByVisitor(ge, ['USES'], self.collect_entities)
uv1.visit(meta)
uv2 = UsedByVisitor(ge, ['USES', 'WILL USE'], self.collect_entities)
uv2.visit(meta)
uv3 = UsedByVisitor(ge, ['USES', 'WILL USE', 'MAY USE'], self.collect_entities)
uv3.visit(meta)
uv1e = set(uv1.get_result())
uv2e = set(uv2.get_result()) - (uv1e )
uv3e = set(uv3.get_result()) - (uv1e | uv2e)
return (ge, list(uv1e), list(uv2e), list(uv3e))
def present(self, meta):
self.uptake = []
for ge in meta.get_generic_enablers():
row = self.present_ge(ge, meta)
if self.hideunused and len(row[1])+len(row[2])+len(row[3]) == 0:
continue
self.uptake.append(row)
def dump(self, out):
out.write('^ GE ^ Uptake ^ SEs / Applications ^')
for ge, uses, will, may in self.uptake:
ses = [self.nice(e) for e in uses if isinstance(e, SpecificEnabler)]
apps = [self.nice(e) for e in uses if isinstance(e, Application)]
wses = [self.nice(e) for e in will + may if isinstance(e, SpecificEnabler)]
wapps = [self.nice(e) for e in will + may if isinstance(e, Application)]
se_uptake = ' \\\\ '.join(ses + wses)
app_uptake = ' \\\\ '.join(apps + wapps)
if len(se_uptake) == 0:
se_uptake = ':::'
if len(uses):
status = 'D'
elif len(will):
status = 'U'
elif len(may):
status = 'E'
else:
status = ' '
# print('GE %s - %s - %s' % (ge.identifier, status, uptake))
out.write('| %s | %s | %s |' % (ge.get_name(), status, app_uptake))
out.write('| ::: | ::: | %s |' % se_uptake)
# out.write('| ... | ... | ... |')
| 30.882353 | 83 | 0.622857 |
d1d9fe812ebea7afd3bb9041dcff8661153fad46 | 323 | py | Python | pyrival/strings/min_rotation.py | MattJDavidson/aoc2021 | 1c26697da55e58408f36525639d201303f808b1b | ["Apache-2.0"] | 748 | 2018-09-27T01:08:12.000Z | 2022-03-25T17:31:56.000Z | pyrival/strings/min_rotation.py | MattJDavidson/aoc2021 | 1c26697da55e58408f36525639d201303f808b1b | ["Apache-2.0"] | 38 | 2019-02-24T14:50:02.000Z | 2022-03-25T01:27:50.000Z | pyrival/strings/min_rotation.py | MattJDavidson/aoc2021 | 1c26697da55e58408f36525639d201303f808b1b | ["Apache-2.0"] | 288 | 2018-10-29T11:55:57.000Z | 2022-03-20T04:37:27.000Z |
def least_rotation(s):
a, n = 0, len(s)
s = s + s
for b in range(n):
for i in range(n):
if (a + i == b) or (s[a + i] < s[b + i]):
b += max(0, i - 1)
break
if s[a + i] > s[b + i]:
a = b
break
return s[a:a + n]
| 21.533333 | 53 | 0.315789 |
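A small usage sketch for `least_rotation` above; note that it returns the rotated string itself, not the starting index.

```python
# The lexicographically smallest rotation of the input string.
print(least_rotation("baca"))  # 'abac' (rotations: baca, acab, caba, abac)
print(least_rotation("bbaa"))  # 'aabb'
```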
f340a1c7454f28fb957024ae9b379627a4b400ef | 9,390 | py | Python | openmdao/examples/beam_tutorial.py | naylor-b/OpenMDAO1 | 49d82f6601b33db9bdcf7d146d030d55e3b62ef4 | ["Apache-2.0"] | 17 | 2018-01-11T20:13:59.000Z | 2022-03-22T03:46:05.000Z | openmdao/examples/beam_tutorial.py | naylor-b/OpenMDAO1 | 49d82f6601b33db9bdcf7d146d030d55e3b62ef4 | ["Apache-2.0"] | 6 | 2017-10-19T23:14:14.000Z | 2020-11-22T17:30:57.000Z | openmdao/examples/beam_tutorial.py | naylor-b/OpenMDAO1 | 49d82f6601b33db9bdcf7d146d030d55e3b62ef4 | ["Apache-2.0"] | 10 | 2018-04-12T22:13:33.000Z | 2020-05-07T10:02:59.000Z |
""" Beam sizing problem"""
from openmdao.api import Problem, ScipyOptimizer, Component, IndepVarComp, Group
#room_area = room_length * room_width (1)
#room_length >= room_width (2)
#(29000000 * 228 * 384) / {5 * [(0.24305 * room_width) + 4.83] * (room_length)**3} >= 720 (3)
#(0.5*8.75) * [(0.24305 * room_width) + 4.83] * (room_length)**2 / (8 * 50,000 * 228) < 0.5 (4)
#0.5 * [(0.24305 * room_width) + 4.83] * (room_length) / (17.1*50,000) < 1/3 (5)
#constants
E = 29000000 #modulus of elasticity (constant 29000000psi for ASTM A992 Grade 50 steel)
I = 228 #Ix = moment of Inertia (constant 228in4 for the W8x58 beam)
BEAM_WEIGHT_LBS_PER_IN = 58.0 / 12.0 #self weight of beam per unit length (58 lbs/ft or 4.83 lbs/in.)
DEAD_LOAD_PSI = 20.0 / 144 #The dead load is 20psf or 0.1389psi.
LIVE_LOAD_PSI = 50.0 / 144 #The live load is 50psf or 0.3472psi.
TOTAL_LOAD_PSI = DEAD_LOAD_PSI + LIVE_LOAD_PSI #total load
BEAM_HEIGHT_IN = 8.75 #inches
YIELD_STRENGTH_PSI = 50000 #The maximum yield strength Fy for ASTM A992 Grade 50 steel is 50,000 psi
CROSS_SECTIONAL_AREA_SQIN = 17.1 #sq in
#negate the area to turn from a maximization problem to a minimization problem
class NegativeArea(Component):
def __init__(self):
super(NegativeArea, self).__init__()
self.add_param('room_width', val=0.0)
self.add_param('room_length', val=0.0)
self.add_output('neg_room_area', val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
room_width = params['room_width']
room_length = params['room_length']
unknowns['neg_room_area'] = -(room_length * room_width)
def linearize(self, params, unknowns, resids):
J = {}
room_width = params['room_width']
room_length = params['room_length']
J['neg_room_area','room_width'] = -room_length
J['neg_room_area','room_length'] = -room_width
return J
class LengthMinusWidth(Component):
def __init__(self):
super(LengthMinusWidth, self).__init__()
self.add_param('room_width', val=0.0)
self.add_param('room_length', val=0.0)
self.add_output('length_minus_width', val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
room_width = params['room_width']
room_length = params['room_length']
unknowns['length_minus_width'] = room_length - room_width
def linearize(self, params, unknowns, resids):
J = {}
room_width = params['room_width']
room_length = params['room_length']
J['length_minus_width','room_width'] = -1.0
J['length_minus_width','room_length'] = 1.0
return J
class Deflection(Component):
def __init__(self):
super(Deflection, self).__init__()
self.add_param('room_width', val=0.0)
self.add_param('room_length', val=0.0)
self.add_output('deflection', val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
room_width = params['room_width']
room_length = params['room_length']
unknowns['deflection'] = (E * I * 384.0) / (5.0 * ((0.5 * TOTAL_LOAD_PSI * room_width) + BEAM_WEIGHT_LBS_PER_IN) * room_length**3)
def linearize(self, params, unknowns, resids):
J = {}
room_width = params['room_width']
room_length = params['room_length']
J['deflection','room_width'] = (-192.0 * E * I * TOTAL_LOAD_PSI) / ((5.0 * room_length**3) * (TOTAL_LOAD_PSI * room_width/2.0 + BEAM_WEIGHT_LBS_PER_IN)**2)
J['deflection','room_length'] = (-1152.0 * E * I) / (5.0 * ((TOTAL_LOAD_PSI * room_width)/2.0 + BEAM_WEIGHT_LBS_PER_IN) * room_length**4)
return J
class BendingStress(Component):
def __init__(self):
super(BendingStress, self).__init__()
self.add_param('room_width', val=0.0)
self.add_param('room_length', val=0.0)
self.add_output('bending_stress_ratio', val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
room_width = params['room_width']
room_length = params['room_length']
unknowns['bending_stress_ratio'] = (0.5*BEAM_HEIGHT_IN * ((0.5 * TOTAL_LOAD_PSI * room_width) + BEAM_WEIGHT_LBS_PER_IN) * (room_length)**2) / (8.0 * YIELD_STRENGTH_PSI * I)
def linearize(self, params, unknowns, resids):
J = {}
room_width = params['room_width']
room_length = params['room_length']
J['bending_stress_ratio','room_width'] = (room_length**2) * BEAM_HEIGHT_IN * (TOTAL_LOAD_PSI*room_width/2.0 + BEAM_WEIGHT_LBS_PER_IN) / (16.0 * I * YIELD_STRENGTH_PSI)
J['bending_stress_ratio','room_length'] = (BEAM_WEIGHT_LBS_PER_IN + (TOTAL_LOAD_PSI*room_width/2.0)) * BEAM_HEIGHT_IN * room_length / (8.0 * I * YIELD_STRENGTH_PSI)
return J
class ShearStress(Component):
def __init__(self):
super(ShearStress, self).__init__()
self.add_param('room_width', val=0.0)
self.add_param('room_length', val=0.0)
self.add_output('shear_stress_ratio', val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
room_width = params['room_width']
room_length = params['room_length']
unknowns['shear_stress_ratio'] = 0.5 * ((0.5 * TOTAL_LOAD_PSI * room_width) + BEAM_WEIGHT_LBS_PER_IN) * (room_length) / (CROSS_SECTIONAL_AREA_SQIN * YIELD_STRENGTH_PSI)
def linearize(self, params, unknowns, resids):
J = {}
room_width = params['room_width']
room_length = params['room_length']
J['shear_stress_ratio','room_width'] = TOTAL_LOAD_PSI * room_length / (4.0 * YIELD_STRENGTH_PSI * CROSS_SECTIONAL_AREA_SQIN)
J['shear_stress_ratio','room_length'] = (BEAM_WEIGHT_LBS_PER_IN + (TOTAL_LOAD_PSI * room_width / 2.0))/(2.0 * YIELD_STRENGTH_PSI * CROSS_SECTIONAL_AREA_SQIN)
return J
class BeamTutorial(Group):
def __init__(self):
super(BeamTutorial, self).__init__()
#add design variables or IndepVarComp's
self.add('ivc_rlength', IndepVarComp('room_length', 100.0))
self.add('ivc_rwidth', IndepVarComp('room_width', 100.0))
#add our custom components
self.add('d_len_minus_wid', LengthMinusWidth())
self.add('d_deflection', Deflection())
self.add('d_bending', BendingStress())
self.add('d_shear', ShearStress())
self.add('d_neg_area', NegativeArea())
#make connections from design variables to the Components
self.connect('ivc_rlength.room_length','d_len_minus_wid.room_length')
self.connect('ivc_rwidth.room_width','d_len_minus_wid.room_width')
self.connect('ivc_rlength.room_length','d_deflection.room_length')
self.connect('ivc_rwidth.room_width','d_deflection.room_width')
self.connect('ivc_rlength.room_length','d_bending.room_length')
self.connect('ivc_rwidth.room_width','d_bending.room_width')
self.connect('ivc_rlength.room_length','d_shear.room_length')
self.connect('ivc_rwidth.room_width','d_shear.room_width')
self.connect('ivc_rlength.room_length','d_neg_area.room_length')
self.connect('ivc_rwidth.room_width','d_neg_area.room_width')
if __name__ == "__main__":
top = Problem()
top.root = BeamTutorial()
top.driver = ScipyOptimizer()
top.driver.options['optimizer'] = 'SLSQP'
top.driver.options['tol'] = 1.0e-8
top.driver.options['maxiter'] = 10000 #maximum number of solver iterations
#room length and width bounds
    top.driver.add_desvar('ivc_rlength.room_length', lower=5.0*12.0, upper=50.0*12.0) #domain: 5ft <= length <= 50ft
    top.driver.add_desvar('ivc_rwidth.room_width', lower=5.0*12.0, upper=30.0*12.0) #domain: 5ft <= width <= 30ft
top.driver.add_objective('d_neg_area.neg_room_area') #minimize negative area (or maximize area)
top.driver.add_constraint('d_len_minus_wid.length_minus_width', lower=0.0) #room_length >= room_width
top.driver.add_constraint('d_deflection.deflection', lower=720.0) #deflection >= 720
top.driver.add_constraint('d_bending.bending_stress_ratio', upper=0.5) #bending < 0.5
top.driver.add_constraint('d_shear.shear_stress_ratio', upper=1.0/3.0) #shear < 1/3
top.setup()
top.run()
print("\n")
print( "Solution found")
print("room area: %f in^2 (%f ft^2)" % (-top['d_neg_area.neg_room_area'], -top['d_neg_area.neg_room_area']/144.0))
print("room width: %f in (%f ft)" % (top['ivc_rwidth.room_width'], top['ivc_rwidth.room_width']/12.0))
print("room/beam length: %f in (%f ft)" % (top['ivc_rlength.room_length'], top['ivc_rlength.room_length']/12.0))
print( "deflection: L/%f" % (top['d_deflection.deflection']))
print( "bending stress ratio: %f" % (top['d_bending.bending_stress_ratio']))
print( "shear stress ratio: %f" % (top['d_shear.shear_stress_ratio']))
loadingPlusBeam = ((0.5 * TOTAL_LOAD_PSI * top['ivc_rwidth.room_width']) + BEAM_WEIGHT_LBS_PER_IN) #PLI (pounds per linear inch)
loadingNoBeam = ((0.5 * TOTAL_LOAD_PSI * top['ivc_rwidth.room_width'])) #PLI (pounds per linear inch)
print( "loading (including self weight of beam): %fpli %fplf" % (loadingPlusBeam, loadingPlusBeam*12.0))
print( "loading (not including self weight of beam): %fpli %fplf" % (loadingNoBeam, loadingNoBeam*12.0))
print( "Finished!")
| 39.957447 | 181 | 0.666454 |
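A quick check, using only the constants defined in the file above, of where the 0.24305 coefficient in the commented constraints comes from: it is half of the total distributed load in psi.

```python
# TOTAL_LOAD_PSI = dead load + live load = (20 + 50) / 144 psi; the constraint
# comments multiply half of it by room_width to get the load carried by the beam.
TOTAL_LOAD_PSI = 20.0 / 144 + 50.0 / 144
print(TOTAL_LOAD_PSI / 2)  # ~0.243055..., the 0.24305 appearing in constraints (3)-(5)
```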
919a271ddf18c069780a06bcb81fc677f0606a61 | 1,170 | py | Python | tests/test_physics_cosm.py | astrojhgu/ares | 42008c8e4bf79f0b000cc833e02a86510bce7611 | ["MIT"] | 1 | 2019-01-04T15:13:18.000Z | 2019-01-04T15:13:18.000Z | tests/test_physics_cosm.py | astrojhgu/ares | 42008c8e4bf79f0b000cc833e02a86510bce7611 | ["MIT"] | null | null | null | tests/test_physics_cosm.py | astrojhgu/ares | 42008c8e4bf79f0b000cc833e02a86510bce7611 | ["MIT"] | null | null | null |
"""
test_physics_cosm.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Wed Sep 24 15:39:44 MDT 2014
Description:
"""
import numpy as np
from ares.physics import Cosmology
from ares.physics.Constants import s_per_gyr, m_H, m_He
def test(rtol=1e-3):
cosm = Cosmology()
# Check critical density
assert cosm.CriticalDensity(0.) == cosm.CriticalDensityNow
# Make sure energy densities sum to unity
assert np.allclose(cosm.omega_m_0, 1. - cosm.omega_l_0)
# Make sure the age of the Universe is OK
assert 13.5 <= cosm.t_of_z(0.) / s_per_gyr <= 14.
# Check some high-z limits
cosm_hi_z = Cosmology(omega_m_0=0.99999, omega_l_0=1e-5)
# Age of the Universe
assert np.allclose(cosm_hi_z.t_of_z(0.), 2. / 3. / cosm_hi_z.hubble_0)
# Check high-z limit for Hubble parameter. Better than 1%?
H_n = cosm.HubbleParameter(30.)
H_a = cosm.hubble_0 * np.sqrt(cosm.omega_m_0) * (1. + 30.)**1.5
    assert abs(H_n - H_a) / H_a < rtol, \
        "Hubble parameter @ high-z not accurate to < %.3g%%." % rtol
if __name__ == '__main__':
test()
| 26 | 74 | 0.653846 |
7ad929a4be2216bd9769e96de78378246e385d88 | 957 | py | Python | Chapter 6/03 - Using one thread per item/one_thread_per_item.py | moseskim/Expert-Python-Programming-Fourth-Edition | 5160f974deb2365597b7be9cc032f24bfa13471a | ["MIT"] | null | null | null | Chapter 6/03 - Using one thread per item/one_thread_per_item.py | moseskim/Expert-Python-Programming-Fourth-Edition | 5160f974deb2365597b7be9cc032f24bfa13471a | ["MIT"] | null | null | null | Chapter 6/03 - Using one thread per item/one_thread_per_item.py | moseskim/Expert-Python-Programming-Fourth-Edition | 5160f974deb2365597b7be9cc032f24bfa13471a | ["MIT"] | null | null | null |
"""
Example for the "Example of a multithreaded application" section.
Introduces the simplest use of the `threading` module:
a single thread per item.
"""
import time
from threading import Thread
import requests
SYMBOLS = ("USD", "EUR", "PLN", "NOK", "CZK")
BASES = ("USD", "EUR", "PLN", "NOK", "CZK")
def fetch_rates(base):
response = requests.get(f"https://api.vatcomply.com/rates?base={base}")
response.raise_for_status()
rates = response.json()["rates"]
    # note: the same currency is exchanged at 1:1
rates[base] = 1.0
rates_line = ", ".join([f"{rates[symbol]:7.03} {symbol}" for symbol in SYMBOLS])
print(f"1 {base} = {rates_line}")
def main():
threads = []
for base in BASES:
thread = Thread(target=fetch_rates, args=[base])
thread.start()
threads.append(thread)
while threads:
threads.pop().join()
if __name__ == "__main__":
started = time.time()
main()
elapsed = time.time() - started
print()
print("time elapsed: {:.2f}s".format(elapsed))
| 21.266667 | 84 | 0.602926 |
446d4ca5bd87991086ab7617c092ef32585e820a | 5,614 | py | Python | patientMatcher/match/genotype_matcher.py | john1711/patientMatcher | 516a2a73a2cea1e87ed2f9ae6a4f0b1b715281d9 | ["MIT"] | 11 | 2019-07-02T11:14:21.000Z | 2022-03-08T21:43:10.000Z | patientMatcher/match/genotype_matcher.py | john1711/patientMatcher | 516a2a73a2cea1e87ed2f9ae6a4f0b1b715281d9 | ["MIT"] | 182 | 2019-01-23T10:13:30.000Z | 2022-03-25T13:17:08.000Z | patientMatcher/match/genotype_matcher.py | john1711/patientMatcher | 516a2a73a2cea1e87ed2f9ae6a4f0b1b715281d9 | ["MIT"] | 6 | 2019-01-09T21:21:43.000Z | 2022-03-09T20:26:23.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from patientMatcher.parse.patient import (
gtfeatures_to_genes_symbols,
gtfeatures_to_variants,
lift_variant,
)
LOG = logging.getLogger(__name__)
def match(database, gt_features, max_score):
"""Handles genotype matching algorithm
Args:
database(pymongo.database.Database)
gt_features(list): a list of genomic features (objects)
max_score(float): a number between 0 and 1
Returns:
matches(dict): a dictionary of patient matches with GT score
"""
matches = {}
n_gtfeatures = len(gt_features)
LOG.info("\n\n###### Running genome matcher module ######")
if n_gtfeatures > 0:
max_feature_similarity = max_score / n_gtfeatures
LOG.info("Query patient has {0} genotype features.".format(n_gtfeatures))
LOG.info(
"Each GT feature will contribute with a weight of {0} to a total GT score (max GT score is {1})".format(
max_feature_similarity, max_score
)
)
query = {}
query_fields = []
genes, symbols = gtfeatures_to_genes_symbols(gt_features)
if genes:
query_fields.append({"genomicFeatures.gene.id": {"$in": genes}})
if symbols:
query_fields.append({"genomicFeatures.gene._geneName": {"$in": symbols}})
# Obtain variants and the corresponding variants in the other genome build from the genotype features
variants = gtfeatures_to_variants(gt_features)
if variants:
query_fields.append({"genomicFeatures.variant": {"$in": variants}})
if len(query_fields) > 0:
# prepare a query that takes into account genes and variants in general (also outside genes!)
query = {"$or": query_fields}
LOG.info("Querying database for genomic features:{}".format(query))
# query patients collection
matching_patients = list(
database["patients"].find(query)
) # a list of patients with genomic feature/s in one or more of the query genes
LOG.info("Found {0} matching patients".format(len(matching_patients)))
# assign a genetic similarity score to each of these patients
for patient in matching_patients:
gt_similarity = evaluate_GT_similarity(
gt_features, patient["genomicFeatures"], max_feature_similarity
)
LOG.info("GT similarity score is {}".format(gt_similarity))
match = {
"patient_obj": patient,
"geno_score": gt_similarity,
}
matches[patient["_id"]] = match
LOG.info(
"\n\nFOUND {} patients matching patients's genomic tracts\n\n".format(
len(matching_patients)
)
)
return matches
def evaluate_GT_similarity(query_features, db_patient_features, max_feature_similarity):
    """Evaluates the genotype similarity of two patients based on their genomic features
    Args:
        query_features(list of dictionaries): genomic features of the query patient
        db_patient_features(list of dictionaries): genomic features of a patient in the patientMatcher database
        max_feature_similarity(float): the highest score a single feature is allowed to contribute
            ## Explanation: for a query patient with one feature max_feature_similarity will be equal to MAX_GT_SCORE.
            For a patient with 2 features max_feature_similarity will be MAX_GT_SCORE/2 and so on.
    Returns:
        patient_similarity(float): the computed genetic similarity between the two patients
    """
matched_features = []
n_feature = 0
# loop over the query patient's features
for feature in query_features:
matched_features.append(0) # score for matching of every feature is initially 0
q_gene_id = feature["gene"]["id"] # query feature's gene id
q_gene_symbol = feature["gene"].get("_geneName") # query feature's gene symbol
# Do liftover for query variant in order to maximize perfect matching chances
q_variant = feature.get("variant") # query feature's variant. Not mandatory.
lifted_q_variant = lift_variant(q_variant) if q_variant else []
# loop over the database patient's features:
for matching_feature in db_patient_features:
m_gene_id = matching_feature["gene"]["id"] # matching feature's gene id
m_gene_symbol = matching_feature["gene"].get(
"_geneName"
) # matching feature's gene symbol
m_variant = matching_feature.get(
"variant"
) # matching feature's variant. Not mandatory.
# if variants are matching or lifted query variant matches with matched patients variant
# ->assign max matching score
if q_variant == m_variant or m_variant in lifted_q_variant:
matched_features[n_feature] = max_feature_similarity
elif q_gene_id == m_gene_id: # matching genes
matched_features[n_feature] = (
max_feature_similarity / 4
) # (0.25 of the max_feature_similarity)
elif q_gene_symbol and q_gene_symbol == m_gene_symbol:
matched_features[n_feature] = (
max_feature_similarity / 4
) # (0.25 of the max_feature_similarity)
n_feature += 1
features_sum = sum(matched_features)
return features_sum
| 40.388489 | 122 | 0.638226 |
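A toy walk-through of the scoring performed by `evaluate_GT_similarity` above, assuming a maximum GT score of 0.5 and a query patient with two genomic features; the numbers are illustrative only.

```python
# With two query features, each feature can contribute at most max_score / 2.
MAX_GT_SCORE = 0.5                 # assumed max_score passed to match()
n_gtfeatures = 2                   # query patient with two genomic features
max_feature_similarity = MAX_GT_SCORE / n_gtfeatures  # 0.25 per feature

# Feature 1: the stored patient carries the same variant -> full 0.25
# Feature 2: only the gene id matches                     -> 0.25 / 4
expected_gt_similarity = max_feature_similarity + max_feature_similarity / 4
print(expected_gt_similarity)  # 0.3125
```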
ff48ad419296fb16a8e4a0fd9a637b23eac218cc | 9,334 | py | Python | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/19_features/numtrees_8/rule_3.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | ["MIT"] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/19_features/numtrees_8/rule_3.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | ["MIT"] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/19_features/numtrees_8/rule_3.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | ["MIT"] | null | null | null |
def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Temperature, obj[3]: Time, obj[4]: Coupon, obj[5]: Coupon_validity, obj[6]: Gender, obj[7]: Age, obj[8]: Maritalstatus, obj[9]: Children, obj[10]: Education, obj[11]: Occupation, obj[12]: Income, obj[13]: Bar, obj[14]: Coffeehouse, obj[15]: Restaurantlessthan20, obj[16]: Restaurant20to50, obj[17]: Direction_same, obj[18]: Distance
# {"feature": "Coupon", "instances": 127, "metric_value": 0.9719, "depth": 1}
if obj[4]>1:
# {"feature": "Coupon_validity", "instances": 92, "metric_value": 0.8991, "depth": 2}
if obj[5]>0:
# {"feature": "Weather", "instances": 47, "metric_value": 0.9971, "depth": 3}
if obj[1]<=1:
# {"feature": "Restaurantlessthan20", "instances": 42, "metric_value": 0.9737, "depth": 4}
if obj[15]<=3.0:
# {"feature": "Occupation", "instances": 38, "metric_value": 0.9268, "depth": 5}
if obj[11]<=9:
# {"feature": "Time", "instances": 24, "metric_value": 0.7383, "depth": 6}
if obj[3]<=2:
# {"feature": "Restaurant20to50", "instances": 15, "metric_value": 0.9183, "depth": 7}
if obj[16]<=1.0:
# {"feature": "Income", "instances": 10, "metric_value": 1.0, "depth": 8}
if obj[12]>4:
# {"feature": "Age", "instances": 5, "metric_value": 0.7219, "depth": 9}
if obj[7]>1:
return 'False'
elif obj[7]<=1:
# {"feature": "Passanger", "instances": 2, "metric_value": 1.0, "depth": 10}
if obj[0]<=1:
return 'True'
elif obj[0]>1:
return 'False'
else: return 'False'
else: return 'True'
elif obj[12]<=4:
# {"feature": "Gender", "instances": 5, "metric_value": 0.7219, "depth": 9}
if obj[6]<=0:
return 'True'
elif obj[6]>0:
# {"feature": "Passanger", "instances": 2, "metric_value": 1.0, "depth": 10}
if obj[0]<=1:
return 'False'
elif obj[0]>1:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[16]>1.0:
return 'True'
else: return 'True'
elif obj[3]>2:
return 'True'
else: return 'True'
elif obj[11]>9:
# {"feature": "Bar", "instances": 14, "metric_value": 0.9852, "depth": 6}
if obj[13]>1.0:
# {"feature": "Coffeehouse", "instances": 9, "metric_value": 0.7642, "depth": 7}
if obj[14]<=1.0:
return 'False'
elif obj[14]>1.0:
# {"feature": "Income", "instances": 3, "metric_value": 0.9183, "depth": 8}
if obj[12]<=2:
return 'True'
elif obj[12]>2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[13]<=1.0:
# {"feature": "Temperature", "instances": 5, "metric_value": 0.7219, "depth": 7}
if obj[2]>55:
return 'True'
elif obj[2]<=55:
# {"feature": "Passanger", "instances": 2, "metric_value": 1.0, "depth": 8}
if obj[0]>0:
return 'False'
elif obj[0]<=0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'False'
elif obj[15]>3.0:
return 'False'
else: return 'False'
elif obj[1]>1:
return 'False'
else: return 'False'
elif obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 45, "metric_value": 0.6236, "depth": 3}
if obj[16]>-1.0:
# {"feature": "Occupation", "instances": 44, "metric_value": 0.5746, "depth": 4}
if obj[11]<=14:
# {"feature": "Coffeehouse", "instances": 40, "metric_value": 0.469, "depth": 5}
if obj[14]<=1.0:
# {"feature": "Time", "instances": 23, "metric_value": 0.6666, "depth": 6}
if obj[3]<=2:
# {"feature": "Direction_same", "instances": 16, "metric_value": 0.8113, "depth": 7}
if obj[17]<=0:
# {"feature": "Age", "instances": 9, "metric_value": 0.9911, "depth": 8}
if obj[7]>1:
# {"feature": "Education", "instances": 5, "metric_value": 0.7219, "depth": 9}
if obj[10]<=2:
return 'True'
elif obj[10]>2:
return 'False'
else: return 'False'
elif obj[7]<=1:
# {"feature": "Bar", "instances": 4, "metric_value": 0.8113, "depth": 9}
if obj[13]<=2.0:
return 'False'
elif obj[13]>2.0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[17]>0:
return 'True'
else: return 'True'
elif obj[3]>2:
return 'True'
else: return 'True'
elif obj[14]>1.0:
return 'True'
else: return 'True'
elif obj[11]>14:
# {"feature": "Passanger", "instances": 4, "metric_value": 1.0, "depth": 5}
if obj[0]>0:
# {"feature": "Weather", "instances": 3, "metric_value": 0.9183, "depth": 6}
if obj[1]<=1:
return 'True'
elif obj[1]>1:
return 'False'
else: return 'False'
elif obj[0]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[16]<=-1.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[4]<=1:
# {"feature": "Bar", "instances": 35, "metric_value": 0.9518, "depth": 2}
if obj[13]<=2.0:
# {"feature": "Coffeehouse", "instances": 33, "metric_value": 0.9183, "depth": 3}
if obj[14]<=3.0:
# {"feature": "Income", "instances": 29, "metric_value": 0.9576, "depth": 4}
if obj[12]<=7:
# {"feature": "Age", "instances": 26, "metric_value": 0.9829, "depth": 5}
if obj[7]<=6:
# {"feature": "Occupation", "instances": 24, "metric_value": 0.995, "depth": 6}
if obj[11]>1:
# {"feature": "Education", "instances": 22, "metric_value": 0.976, "depth": 7}
if obj[10]>1:
# {"feature": "Restaurantlessthan20", "instances": 16, "metric_value": 0.896, "depth": 8}
if obj[15]<=2.0:
# {"feature": "Passanger", "instances": 12, "metric_value": 0.9799, "depth": 9}
if obj[0]>0:
# {"feature": "Weather", "instances": 10, "metric_value": 1.0, "depth": 10}
if obj[1]<=1:
# {"feature": "Temperature", "instances": 9, "metric_value": 0.9911, "depth": 11}
if obj[2]<=55:
# {"feature": "Restaurant20to50", "instances": 8, "metric_value": 0.9544, "depth": 12}
if obj[16]>0.0:
# {"feature": "Time", "instances": 7, "metric_value": 0.9852, "depth": 13}
if obj[3]>0:
# {"feature": "Coupon_validity", "instances": 4, "metric_value": 0.8113, "depth": 14}
if obj[5]>0:
# {"feature": "Gender", "instances": 2, "metric_value": 1.0, "depth": 15}
if obj[6]<=0:
# {"feature": "Maritalstatus", "instances": 2, "metric_value": 1.0, "depth": 16}
if obj[8]<=0:
# {"feature": "Children", "instances": 2, "metric_value": 1.0, "depth": 17}
if obj[9]<=1:
# {"feature": "Direction_same", "instances": 2, "metric_value": 1.0, "depth": 18}
if obj[17]<=0:
# {"feature": "Distance", "instances": 2, "metric_value": 1.0, "depth": 19}
if obj[18]<=2:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[5]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.9183, "depth": 14}
if obj[6]<=0:
# {"feature": "Maritalstatus", "instances": 2, "metric_value": 1.0, "depth": 15}
if obj[8]>0:
return 'False'
elif obj[8]<=0:
return 'True'
else: return 'True'
elif obj[6]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[16]<=0.0:
return 'False'
else: return 'False'
elif obj[2]>55:
return 'True'
else: return 'True'
elif obj[1]>1:
return 'True'
else: return 'True'
elif obj[0]<=0:
return 'False'
else: return 'False'
elif obj[15]>2.0:
return 'False'
else: return 'False'
elif obj[10]<=1:
# {"feature": "Gender", "instances": 6, "metric_value": 0.9183, "depth": 8}
if obj[6]<=0:
# {"feature": "Maritalstatus", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[8]<=1:
return 'False'
elif obj[8]>1:
return 'True'
else: return 'True'
elif obj[6]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[11]<=1:
return 'True'
else: return 'True'
elif obj[7]>6:
return 'False'
else: return 'False'
elif obj[12]>7:
return 'False'
else: return 'False'
elif obj[14]>3.0:
return 'False'
else: return 'False'
elif obj[13]>2.0:
return 'True'
else: return 'True'
else: return 'False'
| 39.218487 | 400 | 0.5015 |
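A hypothetical call to the generated `findDecision` function above; the 19 encoded feature values are made up purely to show the expected input shape (see the index-to-feature comment at the top of the file).

```python
# Each position follows the obj[i] mapping documented in the file's first comment.
sample = [1, 0, 80, 2, 2, 1, 0, 2, 0, 1, 2, 5, 3, 1.0, 2.0, 2.0, 1.0, 0, 1]
print(findDecision(sample))  # returns the string 'True' or 'False'
```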
1aa065c504cb7ca97dfb85353cbd95af4251d55a | 312 | py | Python | zipline/pipeline/dtypes.py | leonarduschen/zipline | 5e6c9fce7e0f812bd181024ad192ca2976d49667 | ["Apache-2.0"] | 14,525 | 2015-01-01T02:57:52.000Z | 2022-03-31T18:16:35.000Z | zipline/pipeline/dtypes.py | leonarduschen/zipline | 5e6c9fce7e0f812bd181024ad192ca2976d49667 | ["Apache-2.0"] | 2,146 | 2015-01-01T13:03:44.000Z | 2022-02-22T03:25:28.000Z | zipline/pipeline/dtypes.py | leonarduschen/zipline | 5e6c9fce7e0f812bd181024ad192ca2976d49667 | ["Apache-2.0"] | 4,517 | 2015-01-01T14:26:47.000Z | 2022-03-31T14:38:05.000Z |
from zipline.utils.numpy_utils import (
bool_dtype,
datetime64ns_dtype,
float64_dtype,
int64_dtype,
object_dtype,
)
CLASSIFIER_DTYPES = frozenset({object_dtype, int64_dtype})
FACTOR_DTYPES = frozenset({datetime64ns_dtype, float64_dtype, int64_dtype})
FILTER_DTYPES = frozenset({bool_dtype})
| 26 | 75 | 0.775641 |
84efe323a2cff3f6a92d0e2d6bc57195fa556daf | 548 | py | Python | backend/home/migrations/0001_load_initial_data.py | crowdbotics-dev/testappauto20-dev-23504 | 39f2855f3e3331c8b0b24440a97d46beb45e4ebf | ["FTL", "AML", "RSA-MD"] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-dev/testappauto20-dev-23504 | 39f2855f3e3331c8b0b24440a97d46beb45e4ebf | ["FTL", "AML", "RSA-MD"] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-dev/testappauto20-dev-23504 | 39f2855f3e3331c8b0b24440a97d46beb45e4ebf | ["FTL", "AML", "RSA-MD"] | null | null | null |
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "testappauto20-dev-23504.botics.co"
site_params = {
"name": "TestAppAuto20",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| 21.076923 | 61 | 0.662409 |
b0d7d521dd33944efee176b162e7303406fa47bf | 76 | py | Python | tapis_cli/commands/taccapis/v2/apps/models/__init__.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | ["BSD-3-Clause"] | 8 | 2020-10-18T22:48:23.000Z | 2022-01-10T09:16:14.000Z | tapis_cli/commands/taccapis/v2/apps/models/__init__.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | ["BSD-3-Clause"] | 238 | 2019-09-04T14:37:54.000Z | 2020-04-15T16:24:24.000Z | tapis_cli/commands/taccapis/v2/apps/models/__init__.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | ["BSD-3-Clause"] | 5 | 2019-09-20T04:23:49.000Z | 2020-01-16T17:45:14.000Z |
from .app import App
from .app_history import AppHistory
API_NAME = 'apps'
| 15.2 | 35 | 0.776316 |
55bd4adec0cd0de728a5f35098e544f36cf508ba | 789 | py | Python | matplotlib_examples/examples_src/pylab_examples/usetex_fonteffects.py | xzlmark/webspider | 133c620c65aa45abea1718b0dada09618c2115bf | ["Apache-2.0"] | 3 | 2020-04-09T02:35:26.000Z | 2021-02-27T17:00:21.000Z | matplotlib_examples/examples_src/pylab_examples/usetex_fonteffects.py | colorworlds/webspider | 133c620c65aa45abea1718b0dada09618c2115bf | ["Apache-2.0"] | null | null | null | matplotlib_examples/examples_src/pylab_examples/usetex_fonteffects.py | colorworlds/webspider | 133c620c65aa45abea1718b0dada09618c2115bf | ["Apache-2.0"] | 1 | 2020-04-09T02:35:08.000Z | 2020-04-09T02:35:08.000Z |
# This script demonstrates that font effects specified in your pdftex.map
# are now supported in pdf usetex.
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rc('text', usetex=True)
def setfont(font):
return r'\font\a %s at 14pt\a ' % font
for y, font, text in zip(range(5),
['ptmr8r', 'ptmri8r', 'ptmro8r', 'ptmr8rn', 'ptmrr8re'],
['Nimbus Roman No9 L ' + x for x in
['', 'Italics (real italics for comparison)',
'(slanted)', '(condensed)', '(extended)']]):
plt.text(0, y, setfont(font) + text)
plt.ylim(-1, 5)
plt.xlim(-0.2, 0.6)
plt.setp(plt.gca(), frame_on=False, xticks=(), yticks=())
plt.title('Usetex font effects')
plt.savefig('usetex_fonteffects.pdf')
| 32.875 | 81 | 0.591888 |
56bdeda36cc1f9f18b0035b6737d4e9b27fb8b60 | 38 | py | Python | ciphey/basemods/Crackers/__init__.py | gvvynplaine/Ciphey | 982bb777162b823685a3c477c393f1240609c118 | ["MIT"] | 2 | 2020-09-18T00:15:49.000Z | 2020-10-06T19:32:16.000Z | ciphey/basemods/Crackers/__init__.py | meitounao1/Ciphey | 982bb777162b823685a3c477c393f1240609c118 | ["MIT"] | null | null | null | ciphey/basemods/Crackers/__init__.py | meitounao1/Ciphey | 982bb777162b823685a3c477c393f1240609c118 | ["MIT"] | null | null | null |
from . import caesar, vigenere, XandY
| 19 | 37 | 0.763158 |
f0136a6feef00c18a60d863976ad1ef3c8a73152 | 2,725 | py | Python | tests/actor/test_reminder_data.py | amulyavarote/python-sdk | ff570466eeef73b5a71cb378af5c4a06384dca1c | ["MIT"] | null | null | null | tests/actor/test_reminder_data.py | amulyavarote/python-sdk | ff570466eeef73b5a71cb378af5c4a06384dca1c | ["MIT"] | null | null | null | tests/actor/test_reminder_data.py | amulyavarote/python-sdk | ff570466eeef73b5a71cb378af5c4a06384dca1c | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from datetime import timedelta
from dapr.actor.runtime._reminder_data import ActorReminderData
class ActorReminderTests(unittest.TestCase):
def test_invalid_state(self):
with self.assertRaises(ValueError):
ActorReminderData(
'test_reminder',
123, # int type
timedelta(seconds=1),
timedelta(seconds=2),
timedelta(seconds=3))
ActorReminderData(
'test_reminder',
'reminder_state', # string type
timedelta(seconds=2),
timedelta(seconds=1),
timedelta(seconds=3))
def test_valid_state(self):
# bytes type state data
reminder = ActorReminderData(
'test_reminder',
b'reminder_state',
timedelta(seconds=1),
timedelta(seconds=2),
timedelta(seconds=3))
self.assertEqual(b'reminder_state', reminder.state)
def test_as_dict(self):
reminder = ActorReminderData(
'test_reminder',
b'reminder_state',
timedelta(seconds=1),
timedelta(seconds=2),
timedelta(seconds=3))
expected = {
'reminderName': 'test_reminder',
'dueTime': timedelta(seconds=1),
'period': timedelta(seconds=2),
'ttl': timedelta(seconds=3),
'data': 'cmVtaW5kZXJfc3RhdGU=',
}
self.assertDictEqual(expected, reminder.as_dict())
def test_from_dict(self):
reminder = ActorReminderData.from_dict('test_reminder', {
'dueTime': timedelta(seconds=1),
'period': timedelta(seconds=2),
'ttl': timedelta(seconds=3),
'data': 'cmVtaW5kZXJfc3RhdGU=',
})
self.assertEqual('test_reminder', reminder.reminder_name)
self.assertEqual(timedelta(seconds=1), reminder.due_time)
self.assertEqual(timedelta(seconds=2), reminder.period)
self.assertEqual(timedelta(seconds=3), reminder.ttl)
self.assertEqual(b'reminder_state', reminder.state)
| 35.855263 | 72 | 0.625321 |
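A quick check of the `data` value asserted in the tests above: it is simply the base64 encoding of the reminder's byte state.

```python
import base64

# 'cmVtaW5kZXJfc3RhdGU=' is base64 of b'reminder_state'.
assert base64.b64encode(b'reminder_state').decode() == 'cmVtaW5kZXJfc3RhdGU='
```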
8262786d7988147508e839c0c1afb5b0b8f204f1 | 9,459 | py | Python | tf_agents/policies/q_policy_test.py | gregorgebhardt/agents | b6aeae5e0ed68dd4e4ec2ca73ef971254d3208f3 | ["Apache-2.0"] | 1 | 2020-01-11T23:11:13.000Z | 2020-01-11T23:11:13.000Z | tf_agents/policies/q_policy_test.py | gregorgebhardt/agents | b6aeae5e0ed68dd4e4ec2ca73ef971254d3208f3 | ["Apache-2.0"] | null | null | null | tf_agents/policies/q_policy_test.py | gregorgebhardt/agents | b6aeae5e0ed68dd4e4ec2ca73ef971254d3208f3 | ["Apache-2.0"] | 3 | 2019-09-08T22:05:56.000Z | 2020-05-27T08:27:15.000Z |
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for tf_agents.policies.q_policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tf_agents.networks import network
from tf_agents.networks import q_network
from tf_agents.policies import q_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
from tf_agents.utils import test_utils
class DummyNet(network.Network):
def __init__(self, name=None, num_actions=2):
super(DummyNet, self).__init__(name, (), 'DummyNet')
self._layers.append(
tf.keras.layers.Dense(
num_actions,
kernel_initializer=tf.compat.v1.initializers.constant([[1, 1.5],
[1, 1.5]]),
bias_initializer=tf.compat.v1.initializers.constant([[1], [1]])))
def call(self, inputs, unused_step_type=None, network_state=()):
inputs = tf.cast(inputs, tf.float32)
for layer in self.layers:
inputs = layer(inputs)
return inputs, network_state
class DummyNetWithActionSpec(DummyNet):
def __init__(self, action_spec, name=None, num_actions=2):
super(DummyNetWithActionSpec, self).__init__(name, num_actions)
self._action_spec = action_spec
@property
def action_spec(self):
return self._action_spec
class QPolicyTest(test_utils.TestCase):
def setUp(self):
super(QPolicyTest, self).setUp()
self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)
self._time_step_spec = ts.time_step_spec(self._obs_spec)
self._action_spec = tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)
def testBuild(self):
policy = q_policy.QPolicy(
self._time_step_spec, self._action_spec, q_network=DummyNet())
self.assertEqual(policy.time_step_spec, self._time_step_spec)
self.assertEqual(policy.action_spec, self._action_spec)
self.assertEqual(policy.variables(), [])
def testMultipleActionsRaiseError(self):
action_spec = [tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)] * 2
with self.assertRaisesRegexp(
NotImplementedError,
'action_spec can only contain a single BoundedTensorSpec'):
q_policy.QPolicy(
self._time_step_spec, action_spec, q_network=DummyNet())
def testAction(self):
policy = q_policy.QPolicy(
self._time_step_spec, self._action_spec, q_network=DummyNet())
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_step = ts.restart(observations, batch_size=2)
action_step = policy.action(time_step, seed=1)
self.assertEqual(action_step.action.shape.as_list(), [2, 1])
self.assertEqual(action_step.action.dtype, tf.int32)
# Initialize all variables
self.evaluate(tf.compat.v1.global_variables_initializer())
action = self.evaluate(action_step.action)
self.assertTrue(np.all(action >= 0) and np.all(action <= 1))
def testActionWithinBounds(self):
bounded_action_spec = tensor_spec.BoundedTensorSpec([1],
tf.int32,
minimum=-6,
maximum=-5)
policy = q_policy.QPolicy(
self._time_step_spec, bounded_action_spec, q_network=DummyNet())
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_step = ts.restart(observations, batch_size=2)
action_step = policy.action(time_step)
self.assertEqual(action_step.action.shape.as_list(), [2, 1])
self.assertEqual(action_step.action.dtype, tf.int32)
# Initialize all variables
self.evaluate(tf.compat.v1.global_variables_initializer())
action = self.evaluate(action_step.action)
self.assertTrue(np.all(action <= -5) and np.all(action >= -6))
def testActionScalarSpec(self):
action_spec = tensor_spec.BoundedTensorSpec((), tf.int32, 0, 1)
policy = q_policy.QPolicy(
self._time_step_spec, action_spec, q_network=DummyNet())
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_step = ts.restart(observations, batch_size=2)
action_step = policy.action(time_step, seed=1)
self.assertEqual(action_step.action.shape.as_list(), [2])
self.assertEqual(action_step.action.dtype, tf.int32)
# Initialize all variables
self.evaluate(tf.compat.v1.global_variables_initializer())
action = self.evaluate(action_step.action)
self.assertTrue(np.all(action >= 0) and np.all(action <= 1))
def testActionList(self):
action_spec = [tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)]
policy = q_policy.QPolicy(
self._time_step_spec, action_spec, q_network=DummyNet())
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_step = ts.restart(observations, batch_size=2)
action_step = policy.action(time_step, seed=1)
self.assertIsInstance(action_step.action, list)
self.evaluate(tf.compat.v1.global_variables_initializer())
action = self.evaluate(action_step.action)
self.assertLen(action, 1)
# Extract contents from the outer list.
action = action[0]
self.assertTrue(np.all(action >= 0) and np.all(action <= 1))
def testDistribution(self):
policy = q_policy.QPolicy(
self._time_step_spec, self._action_spec, q_network=DummyNet())
observations = tf.constant([[1, 2]], dtype=tf.float32)
time_step = ts.restart(observations, batch_size=1)
distribution_step = policy.distribution(time_step)
mode = distribution_step.action.mode()
self.evaluate(tf.compat.v1.global_variables_initializer())
# The weights of index 0 are all 1 and the weights of index 1 are all 1.5,
# so the Q values of index 1 will be higher.
self.assertAllEqual([[1]], self.evaluate(mode))
def testUpdate(self):
policy = q_policy.QPolicy(
self._time_step_spec, self._action_spec, q_network=DummyNet())
new_policy = q_policy.QPolicy(
self._time_step_spec, self._action_spec, q_network=DummyNet())
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_step = ts.restart(observations, batch_size=2)
self.assertEqual(policy.variables(), [])
self.assertEqual(new_policy.variables(), [])
action_step = policy.action(time_step, seed=1)
new_action_step = new_policy.action(time_step, seed=1)
self.assertEqual(len(policy.variables()), 2)
self.assertEqual(len(new_policy.variables()), 2)
self.assertEqual(action_step.action.shape, new_action_step.action.shape)
self.assertEqual(action_step.action.dtype, new_action_step.action.dtype)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(new_policy.update(policy)), None)
action = self.evaluate(action_step.action)
new_action = self.evaluate(new_action_step.action)
self.assertTrue(np.all(action >= 0) and np.all(action <= 1))
self.assertTrue(np.all(new_action >= 0) and np.all(new_action <= 1))
self.assertAllEqual(action, new_action)
def testActionSpecsCompatible(self):
q_net = DummyNetWithActionSpec(self._action_spec)
q_policy.QPolicy(self._time_step_spec, self._action_spec, q_net)
def testActionSpecsIncompatible(self):
network_action_spec = tensor_spec.BoundedTensorSpec([2], tf.int32, 0, 1)
q_net = DummyNetWithActionSpec(network_action_spec)
with self.assertRaisesRegexp(
ValueError,
'action_spec must be compatible with q_network.action_spec'):
q_policy.QPolicy(self._time_step_spec, self._action_spec, q_net)
def testMasking(self):
batch_size = 1000
num_state_dims = 5
num_actions = 8
observations = tf.random.uniform([batch_size, num_state_dims])
time_step = ts.restart(observations, batch_size=batch_size)
input_tensor_spec = tensor_spec.TensorSpec([num_state_dims], tf.float32)
action_spec = tensor_spec.BoundedTensorSpec(
[1], tf.int32, 0, num_actions - 1)
mask = [0, 1, 0, 1, 0, 0, 1, 0]
np_mask = np.array(mask)
tf_mask = tf.constant([mask for _ in range(batch_size)])
q_net = q_network.QNetwork(
input_tensor_spec, action_spec,
mask_split_fn=lambda observation: (observation, tf_mask))
policy = q_policy.QPolicy(
ts.time_step_spec(input_tensor_spec), action_spec, q_net)
# Force creation of variables before global_variables_initializer.
policy.variables()
self.evaluate(tf.compat.v1.global_variables_initializer())
# Sample from the policy 1000 times and ensure that invalid actions are
# never chosen.
action_step = policy.action(time_step)
action = self.evaluate(action_step.action)
self.assertEqual(action.shape, (batch_size, 1))
self.assertAllEqual(np_mask[action], np.ones([batch_size, 1]))
if __name__ == '__main__':
tf.test.main()
| 40.423077 | 78 | 0.70758 |
45432cccffee201e5db0d472e9aef334673ae676 | 11,751 | py | Python | audit_trail/watcher.py | askabelin/django_audit_trail | 83c8739068782566a2a45adda7eb21e3b18420f7 | ["Apache-2.0"] | null | null | null | audit_trail/watcher.py | askabelin/django_audit_trail | 83c8739068782566a2a45adda7eb21e3b18420f7 | ["Apache-2.0"] | null | null | null | audit_trail/watcher.py | askabelin/django_audit_trail | 83c8739068782566a2a45adda7eb21e3b18420f7 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
from django.conf import settings
from django.db.models import signals, NOT_PROVIDED
from django.dispatch import receiver
from .models import AuditTrail
from .signals import audit_trail_app_ready
from stringifier import ModelFieldStringifier
class AuditTrailWatcher(object):
"""
    Watcher class that listens to model signals (post_init, post_save, pre_delete, post_delete)
    and generates AuditTrail records.
    Attributes:
        tracked_models (set): set of already tracked models. Used to avoid duplicate signal handlers.
"""
tracked_models = set()
def __init__(self, fields=None, track_related=None, notify_related=None,
track_only_with_related=False, excluded_fields=None):
"""
Constructor
        :param fields: list of fields that should be tracked. If None, all fields will be tracked.
        :param track_related: list of tracked relations, e.g. ['comment_set'].
        :param notify_related: list of fields to be notified as parent. Internal use only.
        :param track_only_with_related: boolean; if True, AuditTrail records are only created when a parent (related)
            object exists, e.g. if we track Post's comment_set and do not need to track comments separately.
:return:
"""
self.model_class = None
self.fields = fields
self.notify_related = notify_related
self.track_related = track_related
self.track_only_with_related = track_only_with_related
self.excluded_fields = ['id']
if excluded_fields:
self.excluded_fields += excluded_fields
def contribute_to_class(self, cls, name=None):
if cls in self.__class__.tracked_models:
return False
self.model_class = cls
self.__class__.tracked_models.add(cls)
setattr(cls, 'audit', self)
return True
def init_signals(self):
signals.post_save.connect(self.on_post_save_create, sender=self.model_class, weak=False)
signals.post_init.connect(self.on_post_init, sender=self.model_class, weak=False)
signals.post_save.connect(self.on_post_save_update, sender=self.model_class, weak=False)
signals.pre_delete.connect(self.on_pre_delete, sender=self.model_class, weak=False)
signals.post_delete.connect(self.on_post_delete, sender=self.model_class, weak=False)
self.init_related_signals()
def init_related_signals(self):
if not self.track_related:
return
for attr_name in self.track_related:
attribute = getattr(self.model_class, attr_name)
if hasattr(attribute, 'related'): # related object is queryset
related = attribute.related
related_model = related.related_model
related_field_name = related.field.name
else: # related object is FK
related_model = attribute.field.related_field.model
related_field_name = attribute.field.related_query_name()
# related_query_name() returns related_name if it was set
# but if it's not returns autogenerated related name without '_set' postfix!
# F.e. instead of 'post_set' it'll return 'post' so we have to handle it manually
if not hasattr(related_model, related_field_name):
related_field_name += '_set'
if not hasattr(related_model, 'audit'):
related_watcher = AuditTrailWatcher(track_only_with_related=True)
related_watcher.contribute_to_class(related_model)
related_watcher.init_signals()
related_model.audit.notify_related = related_model.audit.notify_related or []
related_model.audit.notify_related += [related_field_name]
    def serialize_object(self, instance):
        """ Returns a dict of raw values for the tracked fields (stringifying happens later in get_changes). """
data = {}
for field in instance._meta.fields:
# Skip untracked fields
not_tracked_field = (self.fields is not None and field.name not in self.fields)
if not_tracked_field or field.name in self.excluded_fields:
continue
data[field.name] = field.value_from_object(instance)
return data
def get_changes(self, old_values, new_values):
""" Returns list of changed fields. """
diff = {}
old_values = old_values or {}
new_values = new_values or {}
fields = self.fields or [field_name.name for field_name in self.model_class._meta.fields]
for field_name in fields:
field = self.model_class._meta.get_field(field_name)
default = None
if field.default != NOT_PROVIDED and old_values:
default = field.default
old_value = old_values.get(field_name, default)
new_value = new_values.get(field_name, None)
old_value_string = ModelFieldStringifier.stringify(field, old_value)
new_value_string = ModelFieldStringifier.stringify(field, new_value)
if old_value is not None:
old_value = unicode(old_value)
if new_value is not None:
new_value = unicode(new_value)
if old_value != new_value:
diff[field_name] = {
'old_value': old_value,
'old_value_string': old_value_string,
'new_value': new_value,
'new_value_string': new_value_string
}
return diff
def on_post_init(self, instance, sender, **kwargs):
"""Stores original field values."""
instance._original_values = self.serialize_object(instance)
def on_post_save_create(self, instance, sender, created, **kwargs):
"""Saves object's data."""
if getattr(settings, 'DISABLE_AUDIT_TRAIL', False):
return
if not created:
return
if self.track_only_with_related and not self.is_parent_object_exists(instance):
return
audit_trail = AuditTrail.objects.generate_trail_for_instance_created(instance)
audit_trail.changes = self.get_changes({}, self.serialize_object(instance))
audit_trail.save()
instance._original_values = self.serialize_object(instance)
self.create_related_audit_trail(audit_trail)
def on_post_save_update(self, instance, sender, created, **kwargs):
""" Checks for difference and saves, if it's present. """
if getattr(settings, 'DISABLE_AUDIT_TRAIL', False):
return
if created:
return
if self.track_only_with_related and not self.is_parent_object_exists(instance):
return
changes = self.get_changes(instance._original_values, self.serialize_object(instance))
if not changes:
return
audit_trail = AuditTrail.objects.generate_trail_for_instance_updated(instance)
audit_trail.changes = changes
audit_trail.save()
instance._original_values = self.serialize_object(instance)
self.create_related_audit_trail(audit_trail)
    def on_pre_delete(self, instance, sender, **kwargs):
        """ Before deletion, saves the ids of related objects (from tracked querysets) that must be notified. """
if getattr(settings, 'DISABLE_AUDIT_TRAIL', False):
return
if not self.notify_related:
return
instance._audit_ids_to_notify_related_deletion = {}
for field_name in self.notify_related:
parent_object = getattr(instance, field_name, None)
if parent_object is None or hasattr(parent_object, '_meta'):
continue
if parent_object.all().exists():
ids = list(parent_object.all().values_list('id', flat=True))
instance._audit_ids_to_notify_related_deletion[field_name] = ids
def on_post_delete(self, instance, sender, **kwargs):
""" Saves deleted object data. """
if getattr(settings, 'DISABLE_AUDIT_TRAIL', False):
return
if self.track_only_with_related and not self.is_parent_object_exists(instance):
return
audit_trail = AuditTrail.objects.generate_trail_for_instance_deleted(instance)
audit_trail.changes = self.get_changes(self.serialize_object(instance), {})
audit_trail.save()
self.create_deleted_related_audit_trail(audit_trail, instance)
def is_parent_object_exists(self, instance):
for field_name in self.notify_related:
parent_object = getattr(instance, field_name, None)
if parent_object is None:
continue
if hasattr(parent_object, '_meta'):
return True
else:
if parent_object.all().exists():
return True
if field_name in getattr(instance, '_audit_ids_to_notify_related_deletion', {}):
return True
return False
def create_related_audit_trail(self, audit_trail):
if not self.notify_related:
return
for field_name in self.notify_related:
changed_related_object = audit_trail.content_object
attribute = getattr(changed_related_object, field_name)
if attribute is None:
continue
if hasattr(attribute, '_meta'):
# Related object
notified_objects = [attribute]
else:
# RelatedManager doesn't have _meta attribute
notified_objects = attribute.all()
for notified_object in notified_objects:
parent_audit_trail = AuditTrail.objects.generate_trail_for_related_change(notified_object)
parent_audit_trail.related_trail = audit_trail
parent_audit_trail.save()
def create_deleted_related_audit_trail(self, audit_trail, instance):
if not self.notify_related:
return
for field_name in self.notify_related:
attribute = getattr(instance, field_name)
if attribute is None:
continue
if hasattr(attribute, '_meta'):
# Related object
notified_objects = [attribute]
else:
# RelatedManager doesn't have _meta attribute
ids = instance._audit_ids_to_notify_related_deletion.get(field_name)
if not ids:
continue
# now parent object is being filtered by instance id
# f.e.
# class Post(models.Model):
# author = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
# audit = AuditTrailWatcher(track_related=['comment_set', 'author'])
# will be filtered as {'author__exact': instance}
# but since posts's author was set to null after author deletion we need to get posts by ids
# so we stored ids before author deletion on pre_delete
attribute.core_filters = {'id__in': ids}
notified_objects = list(attribute.all())
for notified_object in notified_objects:
parent_audit_trail = AuditTrail.objects.generate_trail_for_related_change(notified_object)
parent_audit_trail.related_trail = audit_trail
parent_audit_trail.save()
@receiver(audit_trail_app_ready)
def init_audit_instances(*args, **kwargs):
tracked_models = AuditTrailWatcher.tracked_models.copy()
for model_class in tracked_models:
model_class.audit.init_signals()
| 40.944251
| 118
| 0.643009
|
a87e58f67d7b9dee797940da35e1c6e1f80fdf58
| 316
|
py
|
Python
|
lib/python2.7/site-packages/tdl/queue/actions/publish_action.py
|
DPNT-Sourcecode/CHK-uimw01
|
87144ae10115d7a8df565f5109666f00bc001ce4
|
[
"Apache-2.0"
] | null | null | null |
lib/python2.7/site-packages/tdl/queue/actions/publish_action.py
|
DPNT-Sourcecode/CHK-uimw01
|
87144ae10115d7a8df565f5109666f00bc001ce4
|
[
"Apache-2.0"
] | null | null | null |
lib/python2.7/site-packages/tdl/queue/actions/publish_action.py
|
DPNT-Sourcecode/CHK-uimw01
|
87144ae10115d7a8df565f5109666f00bc001ce4
|
[
"Apache-2.0"
] | null | null | null |
class PublishAction:
@staticmethod
def get_audit_text():
return ''
@staticmethod
def after_response(remote_broker, headers, response):
remote_broker.respond_to(headers, response)
@staticmethod
def prepare_for_next_request(remote_broker):
# Do nothing.
pass
| 21.066667
| 57
| 0.674051
|
f87da613ce51bebeb3d67efc525d924892bd3112
| 2,224
|
py
|
Python
|
src/pretalx/orga/signals.py
|
hnzlmnn/pretalx
|
fcdf1a03c9428c1207ee4f4228694b2ed8e7495b
|
[
"Apache-2.0"
] | null | null | null |
src/pretalx/orga/signals.py
|
hnzlmnn/pretalx
|
fcdf1a03c9428c1207ee4f4228694b2ed8e7495b
|
[
"Apache-2.0"
] | null | null | null |
src/pretalx/orga/signals.py
|
hnzlmnn/pretalx
|
fcdf1a03c9428c1207ee4f4228694b2ed8e7495b
|
[
"Apache-2.0"
] | null | null | null |
from django.dispatch import Signal
from pretalx.common.signals import EventPluginSignal
nav_event = EventPluginSignal(providing_args=["request"])
"""
This signal allows you to add additional views to the admin panel
navigation. You will get the request as a keyword argument ``request``.
Receivers are expected to return a list of dictionaries. The dictionaries
should contain at least the keys ``label`` and ``url``. You can also return
a ForkAwesome icon name with the key ``icon``; it will be respected depending
on the type of navigation. You should also return an ``active`` key with a boolean
set to ``True``, when this item should be marked as active. The ``request`` object
will have an attribute ``event``.
If you use this, you should read the documentation on :ref:`how to deal with URLs <urlconf>`
in pretalx.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
"""
nav_global = Signal(providing_args=["request"])
"""
This signal allows you to add additional views to the navigation bar when no event is
selected. You will get the request as a keyword argument ``request``.
Receivers are expected to return a list of dictionaries. The dictionaries
should contain at least the keys ``label`` and ``url``. You can also return
a ForkAwesome icon name with the key ``icon``; it will be respected depending
on the type of navigation. You should also return an ``active`` key with a boolean
set to ``True``, when this item should be marked as active.
If you use this, you should read the documentation on :ref:`how to deal with URLs <urlconf>`
in pretalx.
This is no ``EventPluginSignal``, so you do not get the event in the ``sender`` argument
and you may get the signal regardless of whether your plugin is active.
"""
activate_event = EventPluginSignal(providing_args=['request'])
"""
This signal is sent out before an event goes live. It allows any installed
plugin to raise an Exception to prevent the event from going live. The
exception message will be exposed to the user.
You will get the request as a keyword argument ``request``.
Receivers are not expected to return a response.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
"""
| 47.319149
| 92
| 0.764388
|
fb2bc67c6f8a82b95f709a0b02ac855ea50c2e51
| 748
|
py
|
Python
|
tests/tjctf_2020/test_stop.py
|
mariuszskon/autorop
|
5735073008f722fab00f3866ef4a05f04620593b
|
[
"MIT"
] | 15
|
2020-10-03T05:20:31.000Z
|
2022-03-20T06:19:29.000Z
|
tests/tjctf_2020/test_stop.py
|
mariuszskon/autorop
|
5735073008f722fab00f3866ef4a05f04620593b
|
[
"MIT"
] | 8
|
2020-10-02T09:51:39.000Z
|
2021-04-24T03:14:18.000Z
|
tests/tjctf_2020/test_stop.py
|
mariuszskon/autorop
|
5735073008f722fab00f3866ef4a05f04620593b
|
[
"MIT"
] | 2
|
2021-04-16T06:33:49.000Z
|
2021-09-03T09:21:10.000Z
|
from .. import *
BIN = "./tests/tjctf_2020/stop"
def test_stop(exploit):
# this is the example from the README
def send_letter_first(tube, data):
# the binary expects us to choose a letter first, before it takes input unsafely
tube.sendline("A")
# send actual payload
tube.sendline(data)
# create a starting state - modified to use fixture
s = exploit(BIN, lambda: process(BIN))
# set an overwriter function, if the buffer overflow input
# is not available immediately
s.overwriter = send_letter_first
# use base classic pipeline, with printf for leaking
pipeline = turnkey.Classic(leak=leak.Printf())
result = pipeline(s)
assert assertion.have_shell(result.target)
| 29.92
| 88
| 0.688503
|
e2cd84ae5292574ae30d4c172cfb9795cf9df278
| 6,127
|
py
|
Python
|
Common/extractor.py
|
RENCI-AUTOMAT/Data_services
|
eb60f822021b138298eabb3852b20b30739afaa5
|
[
"MIT"
] | 2
|
2022-02-01T04:10:30.000Z
|
2022-03-23T22:01:35.000Z
|
Common/extractor.py
|
RENCI-AUTOMAT/Data_services
|
eb60f822021b138298eabb3852b20b30739afaa5
|
[
"MIT"
] | 83
|
2020-08-18T16:09:43.000Z
|
2022-03-25T19:17:25.000Z
|
Common/extractor.py
|
RENCI-AUTOMAT/Data_services
|
eb60f822021b138298eabb3852b20b30739afaa5
|
[
"MIT"
] | null | null | null |
from Common.kgxmodel import kgxnode, kgxedge
from Common.node_types import ORIGINAL_KNOWLEDGE_SOURCE, PRIMARY_KNOWLEDGE_SOURCE, AGGREGATOR_KNOWLEDGE_SOURCES
class Extractor:
"""
This is a class so that it can be used to accumulate nodes and edges across multiple files or input streams
Also so that it can provide a few different interfaces (csv, sql) and keep the guts of the callback code in one
place.
"""
def __init__(self):
        # You might think it would be good to include all the extractors at this level, but they are really file or query
# level things. You might want to use the same extractor with two differently formatted files or two different
# sql queries.
self.node_ids = set()
self.nodes = []
self.edges = []
self.load_metadata = { 'record_counter': 0, 'skipped_record_counter': 0, 'errors': []}
self.errors = []
def csv_extract(self, infile,
subject_extractor,
object_extractor,
predicate_extractor,
subject_property_extractor,
object_property_extractor,
edge_property_extractor,
comment_character="#", delim='\t', has_header_row=False):
"""Read a csv, perform callbacks to retrieve node and edge info per row.
Assumes that all of the properties extractable for a node occur on the line with the node identifier"""
for i, line in enumerate(infile, start=1):
if comment_character is not None and line.startswith(comment_character):
continue
if has_header_row and i == 1:
continue
self.load_metadata['record_counter'] += 1
try:
x = line[:-1].split(delim)
self.parse_row(x, subject_extractor, object_extractor, predicate_extractor, subject_property_extractor, object_property_extractor, edge_property_extractor)
except Exception as e:
self.load_metadata['errors'].append(e.__str__())
self.load_metadata['skipped_record_counter'] += 1
def sql_extract(self, cursor, sql_query, subject_extractor, object_extractor, predicate_extractor, subject_property_extractor, object_property_extractor, edge_property_extractor):
"""Read a csv, perform callbacks to retrieve node and edge info per row.
Assumes that all of the properties extractable for a node occur on the line with the node identifier"""
cursor.execute(sql_query)
rows = cursor.fetchall()
for row in rows:
self.load_metadata['record_counter'] += 1
try:
self.parse_row(row, subject_extractor, object_extractor, predicate_extractor, subject_property_extractor, object_property_extractor, edge_property_extractor)
except Exception as e:
self.load_metadata['errors'].append(e.__str__())
self.load_metadata['skipped_record_counter'] += 1
def json_extract(self,
json_array,
subject_extractor,
object_extractor,
predicate_extractor,
subject_property_extractor,
object_property_extractor,
edge_property_extractor):
for item in json_array:
self.load_metadata['record_counter'] += 1
try:
self.parse_row(item, subject_extractor, object_extractor, predicate_extractor, subject_property_extractor, object_property_extractor, edge_property_extractor)
except Exception as e:
self.load_metadata['errors'].append(e.__str__())
self.load_metadata['skipped_record_counter'] += 1
return
def parse_row(self, row, subject_extractor, object_extractor, predicate_extractor, subject_property_extractor, object_property_extractor, edge_property_extractor):
# pull the information out of the edge
subject_id = subject_extractor(row)
object_id = object_extractor(row)
predicate = predicate_extractor(row)
subjectprops = subject_property_extractor(row)
objectprops = object_property_extractor(row)
edgeprops = edge_property_extractor(row)
# if we haven't seen the subject before, add it to nodes
if subject_id and subject_id not in self.node_ids:
subject_name = subjectprops.pop('name', None)
subject_categories = subjectprops.pop('categories', None)
subject_node = kgxnode(subject_id, name=subject_name, categories=subject_categories, nodeprops=subjectprops)
self.nodes.append(subject_node)
self.node_ids.add(subject_id)
# if we haven't seen the object before, add it to nodes
if object_id and object_id not in self.node_ids:
object_name = objectprops.pop('name', None)
object_categories = objectprops.pop('categories', None)
object_node = kgxnode(object_id, name=object_name, categories=object_categories, nodeprops=objectprops)
self.nodes.append(object_node)
self.node_ids.add(object_id)
if subject_id and object_id and predicate:
original_knowledge_source = edgeprops.pop(ORIGINAL_KNOWLEDGE_SOURCE, None)
primary_knowledge_source = edgeprops.pop(PRIMARY_KNOWLEDGE_SOURCE, None)
aggregator_knowledge_sources = edgeprops.pop(AGGREGATOR_KNOWLEDGE_SOURCES, None)
relation = edgeprops.pop('relation', predicate)
edge = kgxedge(subject_id,
object_id,
relation=relation,
predicate=predicate,
original_knowledge_source=original_knowledge_source,
primary_knowledge_source=primary_knowledge_source,
aggregator_knowledge_sources=aggregator_knowledge_sources,
edgeprops=edgeprops)
self.edges.append(edge)
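if __name__ == "__main__":
    # Minimal illustrative sketch (not part of the original module; identifiers and the
    # predicate are made up). csv_extract only iterates over lines, so an in-memory list
    # of tab-separated strings can stand in for a file handle.
    example_lines = ["CHEBI:1234\tMONDO:0005\ttreats\n"]
    extractor = Extractor()
    extractor.csv_extract(example_lines,
                          lambda line: line[0],               # subject id (column 0)
                          lambda line: line[1],               # object id (column 1)
                          lambda line: f'biolink:{line[2]}',  # predicate (column 2)
                          lambda line: {},                    # subject properties
                          lambda line: {},                    # object properties
                          lambda line: {})                    # edge properties
    print(f'nodes: {len(extractor.nodes)}, edges: {len(extractor.edges)}')
    print(extractor.load_metadata)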
| 52.367521
| 183
| 0.649747
|
163d36102c08c3d90054cb5e664b034159021e3c
| 9,330
|
py
|
Python
|
tensorflow/python/keras/utils/losses_utils.py
|
MKesenheimer/tensorflow
|
6222d762b86d93ead601cc64e7b3ae16d3311730
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/keras/utils/losses_utils.py
|
MKesenheimer/tensorflow
|
6222d762b86d93ead601cc64e7b3ae16d3311730
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/keras/utils/losses_utils.py
|
MKesenheimer/tensorflow
|
6222d762b86d93ead601cc64e7b3ae16d3311730
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utilities related to loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.losses.Reduction', v1=[])
class ReductionV2(object):
"""Types of loss reduction.
Contains the following values:
* `NONE`: Un-reduced weighted losses with the same shape as input.
* `SUM`: Scalar sum of weighted losses.
* `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses.
Note that when using `tf.distribute.Strategy`, this is the global batch
size across all the replicas that are contributing to a single step.
"""
NONE = 'none'
SUM = 'sum'
SUM_OVER_BATCH_SIZE = 'sum_over_batch_size'
@classmethod
def all(cls):
return (cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE)
@classmethod
def validate(cls, key):
if key not in cls.all():
raise ValueError('Invalid Reduction Key %s.' % key)
def squeeze_or_expand_dimensions(y_pred, y_true, sample_weight):
"""Squeeze or expand last dimension if needed.
1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1
(using `confusion_matrix.remove_squeezable_dimensions`).
2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1
from the new rank of `y_pred`.
If `sample_weight` is scalar, it is kept scalar.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
y_true: Optional label `Tensor` whose dimensions match `y_pred`.
sample_weight: Optional weight scalar or `Tensor` whose dimensions match
`y_pred`.
Returns:
Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
    the last dimension squeezed;
    `sample_weight` could be extended by one dimension.
"""
y_pred_shape = y_pred.get_shape()
y_pred_rank = y_pred_shape.ndims
if y_true is not None:
# If sparse matrix is provided as `y_true`, the last dimension in `y_pred`
# may be > 1. Eg: y_true = [0, 1, 2] (shape=(3,)),
# y_pred = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]] (shape=(3, 3))
# In this case, we should not try to remove squeezable dimension.
y_true_shape = y_true.get_shape()
y_true_rank = y_true_shape.ndims
if (y_true_rank is not None) and (y_pred_rank is not None):
# Use static rank for `y_true` and `y_pred`.
if (y_pred_rank - y_true_rank != 1) or y_pred_shape[-1] == 1:
y_true, y_pred = confusion_matrix.remove_squeezable_dimensions(
y_true, y_pred)
else:
# Use dynamic rank.
rank_diff = array_ops.rank(y_pred) - array_ops.rank(y_true)
squeeze_dims = lambda: confusion_matrix.remove_squeezable_dimensions( # pylint: disable=g-long-lambda
y_true, y_pred)
is_last_dim_1 = math_ops.equal(1, array_ops.shape(y_pred)[-1])
maybe_squeeze_dims = lambda: control_flow_ops.cond( # pylint: disable=g-long-lambda
is_last_dim_1, squeeze_dims, lambda: (y_true, y_pred))
y_true, y_pred = control_flow_ops.cond(
math_ops.equal(1, rank_diff), maybe_squeeze_dims, squeeze_dims)
if sample_weight is None:
return y_pred, y_true, None
sample_weight = ops.convert_to_tensor(sample_weight)
weights_shape = sample_weight.get_shape()
weights_rank = weights_shape.ndims
if weights_rank == 0: # If weights is scalar, do nothing.
return y_pred, y_true, sample_weight
if (y_pred_rank is not None) and (weights_rank is not None):
# Use static rank.
if weights_rank - y_pred_rank == 1:
sample_weight = array_ops.squeeze(sample_weight, [-1])
elif y_pred_rank - weights_rank == 1:
sample_weight = array_ops.expand_dims(sample_weight, [-1])
return y_pred, y_true, sample_weight
# Use dynamic rank.
weights_rank_tensor = array_ops.rank(sample_weight)
rank_diff = weights_rank_tensor - array_ops.rank(y_pred)
maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1])
def _maybe_expand_weights():
return control_flow_ops.cond(
math_ops.equal(rank_diff,
-1), lambda: array_ops.expand_dims(sample_weight, [-1]),
lambda: sample_weight)
def _maybe_adjust_weights():
return control_flow_ops.cond(
math_ops.equal(rank_diff, 1), maybe_squeeze_weights,
_maybe_expand_weights)
# squeeze or expand last dim of `sample_weight` if its rank differs by 1
# from the new rank of `y_pred`.
sample_weight = control_flow_ops.cond(
math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight,
_maybe_adjust_weights)
return y_pred, y_true, sample_weight
def _safe_mean(losses, num_present):
"""Computes a safe mean of the losses.
Args:
losses: `Tensor` whose elements contain individual loss measurements.
num_present: The number of measurable elements in `losses`.
Returns:
A scalar representing the mean of `losses`. If `num_present` is zero,
then zero is returned.
"""
total_loss = math_ops.reduce_sum(losses)
return math_ops.div_no_nan(total_loss, num_present, name='value')
def _num_elements(losses):
"""Computes the number of elements in `losses` tensor."""
with ops.name_scope(None, 'num_elements', values=[losses]) as scope:
return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype)
def _reduce_weighted_loss(weighted_losses,
reduction=ReductionV2.SUM_OVER_BATCH_SIZE):
"""Reduces the individual weighted loss measurements."""
if reduction == ReductionV2.NONE:
loss = weighted_losses
else:
loss = math_ops.reduce_sum(weighted_losses)
if reduction == ReductionV2.SUM_OVER_BATCH_SIZE:
num_replicas = ( # Used to convert from local to global batch size.
distribution_strategy_context.get_strategy().num_replicas_in_sync)
loss = _safe_mean(loss, num_replicas * _num_elements(weighted_losses))
return loss
def compute_weighted_loss(losses,
sample_weight=None,
reduction=ReductionV2.SUM_OVER_BATCH_SIZE,
name=None):
"""Computes the weighted loss.
Args:
losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as
      `losses`, or broadcastable to `losses`.
reduction: Type of `tf.losses.Reduction` to apply to loss. Default value is
`SUM_OVER_BATCH_SIZE`.
name: Optional name for the op.
Raises:
ValueError: If the shape of `sample_weight` is not compatible with `losses`.
Returns:
Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
`NONE`, this has the same shape as `losses`; otherwise, it is scalar.
"""
ReductionV2.validate(reduction)
if sample_weight is None:
sample_weight = 1.0
with ops.name_scope(name, 'weighted_loss', (losses, sample_weight)):
# Update dimensions of `sample_weight` to match with `losses` if possible.
losses, _, sample_weight = squeeze_or_expand_dimensions(
losses, None, sample_weight)
losses = ops.convert_to_tensor(losses)
input_dtype = losses.dtype
losses = math_ops.cast(losses, dtypes.float32)
sample_weight = math_ops.cast(sample_weight, dtypes.float32)
try:
# Broadcast weights if possible.
sample_weight = weights_broadcast_ops.broadcast_weights(
sample_weight, losses)
except ValueError:
# Reduce values to same ndim as weight array.
ndim = K.ndim(losses)
weight_ndim = K.ndim(sample_weight)
losses = K.mean(losses, axis=list(range(weight_ndim, ndim)))
sample_weight.get_shape().assert_is_compatible_with(losses.get_shape())
weighted_losses = math_ops.multiply(losses, sample_weight)
# Apply reduction function to the individual weighted losses.
loss = _reduce_weighted_loss(weighted_losses, reduction)
# Convert the result back to the input type.
loss = math_ops.cast(loss, input_dtype)
return loss
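# Minimal illustrative sketch (values are arbitrary, shown as a comment only): with a
# [2, 2] `losses` tensor and a broadcastable [2, 1] `sample_weight`,
# `compute_weighted_loss` broadcasts the weights, multiplies, and reduces with
# SUM_OVER_BATCH_SIZE to a scalar, e.g.
#   losses = tf.constant([[1., 2.], [3., 4.]])
#   sample_weight = tf.constant([[0.5], [1.0]])
#   loss = compute_weighted_loss(losses, sample_weight=sample_weight)
# which averages the weighted entries over the four elements (and over replicas when a
# tf.distribute.Strategy is active).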
| 39.702128
| 108
| 0.715005
|
313cfe763de219e24c79e95f02b9c274217a145d
| 1,283
|
py
|
Python
|
references/process/textutil.py
|
arXiv/arxiv-references
|
a755aeaa864ff807ff16ae2c3960f9fee54d8dd8
|
[
"MIT"
] | 7
|
2019-04-21T07:22:23.000Z
|
2022-02-23T18:52:26.000Z
|
references/process/textutil.py
|
cul-it/arxiv-references
|
a755aeaa864ff807ff16ae2c3960f9fee54d8dd8
|
[
"MIT"
] | 4
|
2017-11-07T16:38:46.000Z
|
2018-05-04T19:53:55.000Z
|
references/process/textutil.py
|
cul-it/arxiv-references
|
a755aeaa864ff807ff16ae2c3960f9fee54d8dd8
|
[
"MIT"
] | 6
|
2019-01-10T22:02:15.000Z
|
2022-02-22T02:00:16.000Z
|
"""Text cleanup utilities."""
import re
import ftfy
import unidecode
punctuation_pat = re.compile(r"""([!"#$%&\'()*+,-./:;<=>?@[\\\]^_`{|}~])""")
hyphenline_pat = re.compile(r"-\s*\n\s*")
multiwhite_pat = re.compile(r"\s+")
cid_pat = re.compile(r"\(cid:\d+\)")
nonlet = re.compile(r"([^A-Za-z0-9 ])")
purenum = re.compile(r"\b[0-9]+\b")
def clean_text(txt: str, numok: bool = False) -> str:
"""
    Normalize text so that it can be compared across different sources,
    potentially with different encodings, varying whitespace, etc.
"""
txt = txt.lower()
txt = cid_pat.sub(" UNK ", txt)
txt = hyphenline_pat.sub("", txt)
txt = punctuation_pat.sub(r" ", txt)
txt = nonlet.sub(r" ", txt)
if not numok:
txt = purenum.sub(r" ", txt)
txt = multiwhite_pat.sub(" ", txt)
txt = txt.encode('utf-8').decode('utf-8')
return txt.strip()
def clean_blob(blob: str, numok: bool = False) -> str:
"""Apply :func:`.clean_text` to each line in a blob of text."""
output = []
lines = blob.split('\n')
for line in lines:
txt = ftfy.fix_text(line, normalization='NFKC')
txt = unidecode.unidecode(txt)
txt = clean_text(txt, numok=numok)
output.append(txt)
return '\n'.join(output)
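if __name__ == "__main__":
    # Quick illustrative check (arbitrary example string, not from the original module):
    # punctuation, pure numbers and repeated whitespace are dropped and the text is
    # lowercased, so this prints roughly "deep learning an overview".
    print(clean_blob("Deep   Learning: An Overview, 2019!"))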
| 27.297872
| 78
| 0.5947
|
902e5572ed53ffcbd2b912121161ee8073a6c375
| 879
|
py
|
Python
|
stress tester/testgen.py
|
AI-Factor-y/stress-tester
|
b4f56ab07032ada565a5deed562a905dcf6e885f
|
[
"Apache-2.0"
] | null | null | null |
stress tester/testgen.py
|
AI-Factor-y/stress-tester
|
b4f56ab07032ada565a5deed562a905dcf6e885f
|
[
"Apache-2.0"
] | null | null | null |
stress tester/testgen.py
|
AI-Factor-y/stress-tester
|
b4f56ab07032ada565a5deed562a905dcf6e885f
|
[
"Apache-2.0"
] | null | null | null |
# code written by abhinav p
import random
import sys
random.seed()
sys.stdout=open("testcase.txt","w") #output file
def r(lower_limit,upper_limit):
return random.randint(lower_limit,upper_limit)
def ra(lower_limit,upper_limit):
return chr(ord('a')+r((ord(lower_limit)-ord('a')),(ord(upper_limit)-ord('a'))))
def raa():
return chr(ord('a')+r(0,25))
def rf(lower_limit,upper_limit,decimal_places):
return round(random.uniform(lower_limit,upper_limit),decimal_places)
def nl():
print("")
# global testcase count
testcase_count=100
def testcase():
n=r(2,50)
print(n)
for _ in range(n):
x=r(0,500)
print(x,end=" ")
nl()
for _ in range(n-1):
y=r(0,1)
print(y,end="")
nl()
if __name__=="__main__":
if(testcase_count!=1):
print(testcase_count)
for _ in range(testcase_count):
testcase()
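# Example shape of one generated testcase in testcase.txt (values are random; when
# testcase_count != 1 the very first line of the file is the testcase count):
#   5
#   12 300 7 45 0
#   1011
# i.e. n, then n integers in [0, 500], then a string of n-1 binary digits.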
| 15.981818
| 81
| 0.645051
|
44c12fd44f99bf136712726273386f59bc090211
| 1,205
|
py
|
Python
|
display_temp_onewire.py
|
jasonbartz/pi-demos
|
1b551a7ff33dbf8dae0782dd79e1cdce73b2f243
|
[
"MIT"
] | null | null | null |
display_temp_onewire.py
|
jasonbartz/pi-demos
|
1b551a7ff33dbf8dae0782dd79e1cdce73b2f243
|
[
"MIT"
] | null | null | null |
display_temp_onewire.py
|
jasonbartz/pi-demos
|
1b551a7ff33dbf8dae0782dd79e1cdce73b2f243
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python -u
import time
import os
import glob
from Adafruit_LED_Backpack import AlphaNum4
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
# Grabs the first probe out of the directory
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
display = AlphaNum4.AlphaNum4()
display.begin()
display.clear()
display.write_display()
def c_to_f(c):
return c * 9.0 / 5.0 + 32.0
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
return float(temp_string) / 1000.0
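# For reference, a w1_slave file typically looks roughly like this (values illustrative):
#   72 01 4b 46 7f ff 0e 10 57 : crc=57 YES
#   72 01 4b 46 7f ff 0e 10 57 t=23125
# i.e. the first line ends in YES once the CRC check passes and the second line carries
# the temperature in millidegrees Celsius after 't='.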
def write_display(text):
display.print_str(text)
display.write_display()
print 'Press Ctrl-C to quit.'
while True:
temp = read_temp()
display.clear()
temp_in_f = int(c_to_f(temp))
write_display("{} F".format(temp_in_f))
time.sleep(5)
write_display("{} C".format(int(temp)))
time.sleep(5)
| 20.423729
| 46
| 0.653112
|
9a9b990714825531a581b27ed99a9062e96ad3f9
| 2,616
|
py
|
Python
|
app/core/models.py
|
jingr1986/recipie-app-api
|
74e9b7efb52ddda97bd3f3af715f024ebbe403ad
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
jingr1986/recipie-app-api
|
74e9b7efb52ddda97bd3f3af715f024ebbe403ad
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
jingr1986/recipie-app-api
|
74e9b7efb52ddda97bd3f3af715f024ebbe403ad
|
[
"MIT"
] | null | null | null |
import uuid
import os
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
def recipe_image_file_path(instance, filename):
"""generate file path for new recipe image"""
ext = filename.split('.')[-1]
filename = f'{uuid.uuid4()}.{ext}'
return os.path.join('uploads/recipe/', filename)
class UseManager(BaseUserManager):
def create_user(self, email, password=None, **extral_fields):
"""creates and saves a new users"""
if not email:
raise ValueError("users must have email address")
user = self.model(email=self.normalize_email(email), **extral_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""creates and saves a new super user"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UseManager()
USERNAME_FIELD = 'email'
class Tag(models.Model):
"""Tag to be used for a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
class Ingredient(models.Model):
"""ingredient to be used in a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Recipe(models.Model):
"""recipe objects"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField('Ingredient')
tags = models.ManyToManyField('Tag')
image = models.ImageField(null=True, upload_to=recipe_image_file_path)
def __str__(self):
return self.title
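# Usage sketch (illustrative values only, e.g. from a test or the Django shell):
#   user = User.objects.create_user('test@example.com', 'password123')
#   recipe = Recipe.objects.create(user=user, title='Pancakes',
#                                  time_minutes=10, price=5.00)
#   recipe.tags.add(Tag.objects.create(user=user, name='Breakfast'))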
| 28.434783
| 77
| 0.672401
|
bf4e40a887f421514a81b4433dcf1200690e0697
| 882
|
py
|
Python
|
setup.py
|
nghiemvdv/urbamt
|
e136f7c8ffbc9491edc2f960f0ecb2e7efa36b32
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
nghiemvdv/urbamt
|
e136f7c8ffbc9491edc2f960f0ecb2e7efa36b32
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
nghiemvdv/urbamt
|
e136f7c8ffbc9491edc2f960f0ecb2e7efa36b32
|
[
"Apache-2.0"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='urbamt',
version='0.0.1-b1',
author="Patrick Phat Nguyen",
author_email="me@patrickphat.com",
description="Universal Rule-based Machine Translation Toolkit (URBaMT)",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/urbamt/urbamt",
packages=setuptools.find_packages(exclude=['docs', 'tests']),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>3.6',
install_requires =[
'nltk<4',
],
extras_require={
'dev': [
'pytest',
'coverage',
],
}
)
| 27.5625
| 77
| 0.587302
|
d0db3699d4cd2bc881c998d0640b4b6ff764c219
| 14,610
|
py
|
Python
|
py3status/docstrings.py
|
apiraino/py3status
|
b6ebed775e635ffddcbde0f78efceacb21ac47d0
|
[
"BSD-3-Clause"
] | null | null | null |
py3status/docstrings.py
|
apiraino/py3status
|
b6ebed775e635ffddcbde0f78efceacb21ac47d0
|
[
"BSD-3-Clause"
] | null | null | null |
py3status/docstrings.py
|
apiraino/py3status
|
b6ebed775e635ffddcbde0f78efceacb21ac47d0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import ast
import re
import os.path
import difflib
from py3status.helpers import print_stderr
def modules_directory():
"""
Get the core modules directory.
"""
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "modules")
def parse_readme():
"""
Crude parsing of modules/README.md
returns a dict of {<module_name>: <documentation>}
"""
name = None
re_mod = re.compile(r'^\#\#\# <a name="(?P<name>[a-z_0-9]+)"></a>')
readme_file = os.path.join(modules_directory(), "README.md")
modules_dict = {}
with open(readme_file) as f:
for row in f.readlines():
match = re_mod.match(row)
if match:
name = match.group("name")
modules_dict[name] = []
continue
if row.startswith("---"):
name = None
continue
if name:
modules_dict[name].append(row)
return modules_dict
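# For reference, the README sections parsed above look roughly like this (illustrative):
#   ### <a name="module_name"></a>module_name
#   ...documentation lines...
#   ---
# i.e. a section header matched by re_mod, followed by the module docs until a '---' rule.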
def core_module_docstrings(
include_core=True, include_user=False, config=None, format="md"
):
"""
Get docstrings for all core modules and user ones if requested
returns a dict of {<module_name>: <docstring>}
"""
paths = {}
docstrings = {}
if include_core:
for file in os.listdir(modules_directory()):
if file.endswith(".py"):
name = file[:-3]
if name != "__init__":
paths[name] = (os.path.join(modules_directory(), file), "core")
if include_user:
# include user modules
for include_path in sorted(config["include_paths"]):
include_path = os.path.abspath(include_path) + "/"
if not os.path.isdir(include_path):
continue
for file in sorted(os.listdir(include_path)):
if not file.endswith(".py"):
continue
name = file[:-3]
paths[name] = (os.path.join(include_path, file), "user")
for name in paths:
path, module_type = paths[name]
with open(path) as f:
try:
module = ast.parse(f.read())
except SyntaxError:
# there is a syntax error so ignore module
continue
raw_docstring = ast.get_docstring(module)
# prevent issue when no docstring exists
if raw_docstring is None:
continue
# remove any sample outputs
parts = re.split("^SAMPLE OUTPUT$", raw_docstring, flags=re.M)
docstring = parts[0]
if format == "md":
docstring = [
d for d in _from_docstring_md(str(docstring).strip().split("\n"))
]
elif format == "rst":
docstring = [
d for d in _from_docstring_rst(str(docstring).strip().split("\n"))
]
else:
raise Exception("`md` and `rst` format supported only")
docstrings[name] = docstring + ["\n"]
return docstrings
def create_readme(data):
"""
Create README.md text for the given module data.
"""
out = ['<a name="top"></a>Modules\n========\n\n']
# Links
for module in sorted(data.keys()):
desc = "".join(data[module]).strip().split("\n")[0]
format_str = "\n**[{name}](#{name})** — {desc}\n"
out.append(format_str.format(name=module, desc=desc))
# details
for module in sorted(data.keys()):
out.append(
'\n---\n\n### <a name="{name}"></a>{name}\n\n{details}\n'.format(
name=module, details="".join(data[module]).strip()
)
)
return "".join(out)
re_listing = re.compile(r"^\w.*:$")
# match in README.md
re_to_param = re.compile(r"^ - `([a-z]\S+)`($|[ \t])")
re_to_status = re.compile(r"^ - `({\S+})`($|[ \t])")
re_to_item = re.compile(r"^\s+-")
re_to_data = re.compile(r"^\*\*(author|license|source)\*\*($|[ \t])")
re_to_tag = re.compile("&lt;([^.]*)&gt;")
re_to_defaults = re.compile(r"\*(\(default.*\))\*")
# match in module docstring
re_from_param = re.compile(r"^ ([a-z<]\S+):($|[ \t])(.*)$")
re_from_status = re.compile(r"^\s+({\S+})($|[ \t])(.*)$")
re_from_item = re.compile(r"^\s+-(?=\s)")
re_from_data = re.compile("^@(author|license|source)($|[ \t])")
re_from_tag = re.compile("((`[^`]*`)|[<>&])")
re_from_defaults = re.compile(r"(\(default.*\))\s*$")
# for rst
re_lone_backtick = re.compile("(?<!`)`(?!`)")
def _reformat_docstring(doc, format_fn, code_newline=""):
"""
Go through lines of file and reformat using format_fn
"""
out = []
status = {"listing": False, "add_line": False, "eat_line": False}
code = False
for line in doc:
if status["add_line"]:
out.append("\n")
status["add_line"] = False
if status["eat_line"]:
status["eat_line"] = False
if line.strip() == "":
continue
# check for start/end of code block
if line.strip() == "```":
code = not code
out.append(line + code_newline)
continue
if not code:
# if we are in a block listing a blank line ends it
if line.rstrip() == "":
status["listing"] = False
# format the line
line = format_fn(line, status)
# see if block start
if re_listing.match(line):
status["listing"] = True
out.append(line.rstrip() + "\n")
return out
def _to_docstring(doc):
"""
format from Markdown to docstring
"""
def format_fn(line, status):
""" format function """
        # swap &lt; &gt; to < >
line = re_to_tag.sub(r"<\1>", line)
if re_to_data.match(line):
line = re_to_data.sub(r"@\1 ", line)
status["eat_line"] = True
line = re_to_defaults.sub(r"\1", line)
if status["listing"]:
# parameters
if re_to_param.match(line):
line = re_to_param.sub(r" \1: ", line)
# status items
elif re_to_status.match(line):
line = re_to_status.sub(r" \1 ", line)
# bullets
elif re_to_item.match(line):
line = re_to_item.sub(r" -", line)
# is continuation line
else:
line = " " * 8 + line.lstrip()
return line
return _reformat_docstring(doc, format_fn)
def _from_docstring_md(doc):
"""
format from docstring to Markdown
"""
def format_fn(line, status):
""" format function """
def fix_tags(line):
# In markdown we need to escape < > and & for display
            # but we don't want to do this if the value is quoted
# by backticks ``
def fn(match):
# swap matched chars
found = match.group(1)
if found == "<":
return "<"
if found == ">":
return ">"
if found == "&":
return "&"
return match.group(0)
return re_from_tag.sub(fn, line)
if re_from_data.match(line):
line = re_from_data.sub(r"**\1** ", line)
status["add_line"] = True
line = re_from_defaults.sub(r"*\1*", line)
if status["listing"]:
# parameters
if re_from_param.match(line):
m = re_from_param.match(line)
line = " - `{}` {}".format(m.group(1), fix_tags(m.group(3)))
# status items
elif re_from_status.match(line):
m = re_from_status.match(line)
line = " - `{}` {}".format(m.group(1), fix_tags(m.group(3)))
# bullets
elif re_from_item.match(line):
line = re_from_item.sub(r" -", fix_tags(line))
# is continuation line
else:
line = fix_tags(line)
line = " " * 4 + line.lstrip()
else:
line = fix_tags(line)
return line
return _reformat_docstring(doc, format_fn, code_newline="\n")
def _from_docstring_rst(doc):
"""
format from docstring to ReStructured Text
"""
def format_fn(line, status):
""" format function """
if re_from_data.match(line):
line = re_from_data.sub(r"**\1** ", line)
status["add_line"] = True
line = re_from_defaults.sub(r"*\1*", line)
if status["listing"]:
# parameters
if re_from_param.match(line):
m = re_from_param.match(line)
line = " - ``{}`` {}".format(m.group(1), m.group(3))
# status items
elif re_from_status.match(line):
m = re_from_status.match(line)
line = " - ``{}`` {}".format(m.group(1), m.group(3))
# bullets
elif re_from_item.match(line):
line = re_from_item.sub(r" -", line)
# is continuation line
else:
line = " " * 4 + line.lstrip()
# in .rst format code samples use double backticks vs single ones for
# .md This converts them.
line = re_lone_backtick.sub("``", line)
return line
return _reformat_docstring(doc, format_fn, code_newline="\n")
def update_docstrings():
"""
update the docstring of each module using info in the
modules/README.md file
"""
modules_dict = parse_readme()
files = {}
# update modules
for mod in modules_dict:
mod_file = os.path.join(modules_directory(), mod + ".py")
with open(mod_file) as f:
files[mod] = f.readlines()
for mod in files:
replaced = False
done = False
lines = False
out = []
quotes = None
for row in files[mod]:
# deal with single or double quoted docstring
if not quotes:
if row.strip().startswith('"""'):
quotes = '"""'
if row.strip().startswith("'''"):
quotes = "'''"
if quotes and row.strip().startswith(quotes) and not done:
out.append(row)
if not replaced:
out = out + [
"".join(_to_docstring(modules_dict[mod])).strip() + "\n"
]
replaced = True
if lines:
done = True
if not done and not lines:
lines = True
continue
if not lines or done:
out.append(row)
mod_file = os.path.join(modules_directory(), mod + ".py")
with open(mod_file, "w") as f:
f.writelines(out)
print_stderr("Modules updated from README.md")
def check_docstrings(show_diff=False, config=None, mods=None):
"""
Check docstrings in module match the README.md
"""
readme = parse_readme()
modules_readme = core_module_docstrings(config=config)
warned = False
if create_readme(readme) != create_readme(modules_readme):
for module in sorted(readme):
if mods and module not in mods:
continue
err = None
if module not in modules_readme:
err = "\tModule {} in README but not in /modules".format(module)
elif (
"".join(readme[module]).strip()
!= "".join(modules_readme[module]).strip()
):
err = "\tModule {} docstring does not match README".format(module)
if err:
if not warned:
print_stderr("Documentation does not match!\n")
warned = True
print_stderr(err)
for module in modules_readme:
if mods and module not in mods:
continue
if module not in readme:
print_stderr("\tModule {} in /modules but not in README".format(module))
if show_diff:
print_stderr(
"\n".join(
difflib.unified_diff(
create_readme(readme).split("\n"),
create_readme(modules_readme).split("\n"),
)
)
)
else:
if warned:
print_stderr("\nUse `py3status docstring check diff` to view diff.")
def update_readme_for_modules(modules):
"""
Update README.md updating the sections for the module names listed.
"""
readme = parse_readme()
module_docstrings = core_module_docstrings()
if modules == ["__all__"]:
modules = core_module_docstrings().keys()
for module in modules:
if module in module_docstrings:
print_stderr("Updating README.md for module {}".format(module))
readme[module] = module_docstrings[module]
else:
print_stderr("Module {} not in core modules".format(module))
# write the file
readme_file = os.path.join(modules_directory(), "README.md")
with open(readme_file, "w") as f:
f.write(create_readme(readme))
def show_modules(config, params):
"""
List modules available optionally with details.
"""
details = params[0] == "details"
if details:
modules_list = params[1:]
core_mods = True
user_mods = True
else:
user_mods = True
core_mods = True
modules_list = []
if len(params) == 2:
if params[1] == "user":
user_mods = True
core_mods = False
elif params[1] == "core":
user_mods = False
core_mods = True
if details:
print("Module details:")
else:
print("Available modules:")
modules = core_module_docstrings(
include_core=core_mods, include_user=user_mods, config=config
)
for name in sorted(modules.keys()):
if modules_list and name not in modules_list:
continue
module = _to_docstring(modules[name])
desc = module[0][:-1]
if details:
dash_len = len(name)
print("=" * dash_len)
print(name)
print("=" * dash_len)
for description in module:
print(description[:-1])
else:
print(" %-22s %s" % (name, desc))
| 32.611607
| 88
| 0.516153
|
1e545c86d65c440028c25b86f64410e4cbc853e1
| 29,738
|
py
|
Python
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_backend_operations.py
|
JianpingChen/azure-sdk-for-python
|
3072fc8c0366287fbaea1b02493a50259c3248a2
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_backend_operations.py
|
JianpingChen/azure-sdk-for-python
|
3072fc8c0366287fbaea1b02493a50259c3248a2
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_backend_operations.py
|
JianpingChen/azure-sdk-for-python
|
3072fc8c0366287fbaea1b02493a50259c3248a2
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class BackendOperations(object):
"""BackendOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.apimanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_service(
self,
resource_group_name, # type: str
service_name, # type: str
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.BackendCollection"]
"""Lists a collection of backends in the specified service instance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| name |
filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>| title |
filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>| url |
filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BackendCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.apimanagement.models.BackendCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_service.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('BackendCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/backends'} # type: ignore
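    # Usage sketch (names are illustrative, not from this file): with an
    # ApiManagementClient instance, the pager returned above is iterated lazily, e.g.
    #   for backend in client.backend.list_by_service('my-resource-group', 'my-apim-service'):
    #       print(backend.name)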
def get_entity_tag(
self,
resource_group_name, # type: str
service_name, # type: str
backend_id, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Gets the entity state (Etag) version of the backend specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param backend_id: Identifier of the Backend entity. Must be unique in the current API
Management service instance.
:type backend_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_entity_tag.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'backendId': self._serialize.url("backend_id", backend_id, 'str', max_length=80, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_tag.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/backends/{backendId}'} # type: ignore
def get(
self,
resource_group_name, # type: str
service_name, # type: str
backend_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.BackendContract"
"""Gets the details of the backend specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param backend_id: Identifier of the Backend entity. Must be unique in the current API
Management service instance.
:type backend_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.BackendContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'backendId': self._serialize.url("backend_id", backend_id, 'str', max_length=80, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('BackendContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/backends/{backendId}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
service_name, # type: str
backend_id, # type: str
parameters, # type: "_models.BackendContract"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.BackendContract"
"""Creates or Updates a backend.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param backend_id: Identifier of the Backend entity. Must be unique in the current API
Management service instance.
:type backend_id: str
:param parameters: Create parameters.
:type parameters: ~azure.mgmt.apimanagement.models.BackendContract
:param if_match: ETag of the Entity. Not required when creating an entity, but required when
updating an entity.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.BackendContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'backendId': self._serialize.url("backend_id", backend_id, 'str', max_length=80, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'BackendContract')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('BackendContract', pipeline_response)
if response.status_code == 201:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('BackendContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/backends/{backendId}'} # type: ignore
def update(
self,
resource_group_name, # type: str
service_name, # type: str
backend_id, # type: str
if_match, # type: str
parameters, # type: "_models.BackendUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.BackendContract"
"""Updates an existing backend.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param backend_id: Identifier of the Backend entity. Must be unique in the current API
Management service instance.
:type backend_id: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:param parameters: Update parameters.
:type parameters: ~azure.mgmt.apimanagement.models.BackendUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.BackendContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'backendId': self._serialize.url("backend_id", backend_id, 'str', max_length=80, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'BackendUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('BackendContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/backends/{backendId}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
service_name, # type: str
backend_id, # type: str
if_match, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes the specified backend.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param backend_id: Identifier of the Backend entity. Must be unique in the current API
Management service instance.
:type backend_id: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'backendId': self._serialize.url("backend_id", backend_id, 'str', max_length=80, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/backends/{backendId}'} # type: ignore
def reconnect(
self,
resource_group_name, # type: str
service_name, # type: str
backend_id, # type: str
parameters=None, # type: Optional["_models.BackendReconnectContract"]
**kwargs # type: Any
):
# type: (...) -> None
"""Notifies the APIM proxy to create a new connection to the backend after the specified timeout.
        If no timeout was specified, a timeout of 2 minutes is used.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param backend_id: Identifier of the Backend entity. Must be unique in the current API
Management service instance.
:type backend_id: str
:param parameters: Reconnect request parameters.
:type parameters: ~azure.mgmt.apimanagement.models.BackendReconnectContract
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.reconnect.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'backendId': self._serialize.url("backend_id", backend_id, 'str', max_length=80, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'BackendReconnectContract')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
reconnect.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/backends/{backendId}/reconnect'} # type: ignore
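# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated client): one way the backend
# operations above might be called. The resource names, the credential choice,
# the `client.backend` attribute name and the BackendContract fields (url,
# protocol) are illustrative assumptions, not taken from this file.
def _example_create_backend():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.apimanagement import ApiManagementClient
    from azure.mgmt.apimanagement import models as apim_models
    client = ApiManagementClient(DefaultAzureCredential(), "<subscription-id>")
    # Create (or update) a backend, then it could be removed with delete(..., if_match="*").
    return client.backend.create_or_update(
        resource_group_name="my-rg",
        service_name="my-apim",
        backend_id="orders-backend",
        parameters=apim_models.BackendContract(url="https://orders.example.com", protocol="http"),
    )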
| 51.09622
| 205
| 0.6565
|
9a423cfc3e893adf99451e4867b6567ebd4ebb1c
| 3,233
|
py
|
Python
|
virtual/Lib/site-packages/pylint/test/unittest_checker_classes.py
|
JamesKimari/pitch-one
|
aac9007716bf2e3b6446588a06508fac068f3d20
|
[
"MIT"
] | 4
|
2018-08-14T14:08:55.000Z
|
2021-02-19T02:58:07.000Z
|
virtual/lib/python3.6/site-packages/pylint/test/unittest_checker_classes.py
|
evantoh/patient-management-system
|
6637eb1344775633759165260ed99843581c0e72
|
[
"Unlicense"
] | 32
|
2018-05-01T05:24:43.000Z
|
2022-03-11T23:20:39.000Z
|
virtual/lib/python3.6/site-packages/pylint/test/unittest_checker_classes.py
|
evantoh/patient-management-system
|
6637eb1344775633759165260ed99843581c0e72
|
[
"Unlicense"
] | 2
|
2018-05-16T10:39:48.000Z
|
2019-02-22T09:13:34.000Z
|
# Copyright (c) 2014-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016 Derek Gustafson <degustaf@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Unit tests for the variables checker."""
import sys
import pytest
import astroid
from pylint.checkers import classes
from pylint.testutils import CheckerTestCase, Message, set_config
class TestVariablesChecker(CheckerTestCase):
CHECKER_CLASS = classes.ClassChecker
def test_bitbucket_issue_164(self):
"""Issue 164 report a false negative for access-member-before-definition"""
n1, n2 = astroid.extract_node("""
class MyClass1(object):
def __init__(self):
self.first += 5 #@
self.first = 0 #@
""")
message = Message('access-member-before-definition',
node=n1.target, args=('first', n2.lineno))
with self.assertAddsMessages(message):
self.walk(n1.root())
@set_config(exclude_protected=('_meta', '_manager'))
def test_exclude_protected(self):
"""Test that exclude-protected can be used to
exclude names from protected-access warning.
"""
node = astroid.parse("""
class Protected(object):
'''empty'''
def __init__(self):
self._meta = 42
self._manager = 24
self._teta = 29
OBJ = Protected()
OBJ._meta
OBJ._manager
OBJ._teta
""")
with self.assertAddsMessages(
Message('protected-access',
node=node.body[-1].value,
args='_teta')):
self.walk(node.root())
@pytest.mark.skipif(sys.version_info[0] != 3,
reason="The test works on Python 3.")
def test_regression_non_parent_init_called_tracemalloc(self):
# This used to raise a non-parent-init-called on Pylint 1.3
# See issue https://bitbucket.org/logilab/pylint/issue/308/
# for reference.
node = astroid.extract_node("""
from tracemalloc import Sequence
class _Traces(Sequence):
def __init__(self, traces): #@
Sequence.__init__(self)
""")
with self.assertNoMessages():
self.checker.visit_functiondef(node)
def test_super_init_not_called_regression(self):
# This should not emit a super-init-not-called
# warning. It previously did this, because
# ``next(node.infer())`` was used in that checker's
# logic and the first inferred node was an YES object,
# leading to this false positive.
node = astroid.extract_node("""
import ctypes
class Foo(ctypes.BigEndianStructure):
def __init__(self): #@
ctypes.BigEndianStructure.__init__(self)
""")
with self.assertNoMessages():
self.checker.visit_functiondef(node)
| 35.922222
| 83
| 0.612125
|
d87b1a5ff5a4ef1bc77885908e509c8c51a59731
| 2,125
|
py
|
Python
|
AI1/Lab02-DictionaryCreator/Lab02.py
|
rashidlasker/artificial-intelligence
|
da18010145b7dba80e5bc10c2498d5a4b0d60667
|
[
"MIT"
] | null | null | null |
AI1/Lab02-DictionaryCreator/Lab02.py
|
rashidlasker/artificial-intelligence
|
da18010145b7dba80e5bc10c2498d5a4b0d60667
|
[
"MIT"
] | null | null | null |
AI1/Lab02-DictionaryCreator/Lab02.py
|
rashidlasker/artificial-intelligence
|
da18010145b7dba80e5bc10c2498d5a4b0d60667
|
[
"MIT"
] | null | null | null |
""" +=========================================================================================+
|| Lab02: Dictionary Creator ||
|| Name: Rashid Lasker Date: 9/9/14 ||
+=========================================================================================+
This program generates a dictionary of neighbors for all the words in the given text file
and stores it in a binary file.
"""
#######################################<BEGINNING OF PROGRAM>#######################################
def generateListOfWords():
fileName = 'words.txt'
file = open(fileName, 'r')
words1 = file.read()
    file.close()
return words1.split()
def generateDictionaryOfNeighbors(inputList):
neighborDict = dict()
for b in range(0, len(inputList)):
yourWord = inputList[b]
neighbors = []
for n in range(0, len(inputList)):
thisWord = inputList[n]
diffLetters = 0
for x in range(len(thisWord)):
if yourWord[x] != thisWord[x]:
diffLetters += 1
if diffLetters > 1:
break
if diffLetters == 1:
neighbors.append(thisWord)
neighborDict[yourWord] = neighbors
return neighborDict
def saveIntoFile(inputDict):
newFileName = 'oneChangeDict.txt'
import pickle
with open(newFileName, 'wb') as handle:
pickle.dump(inputDict, handle, protocol=pickle.HIGHEST_PROTOCOL)
handle.close()
def main():
listOfWords = generateListOfWords()
print('Number of Values: ' + str(len(listOfWords)))
saveIntoFile(generateDictionaryOfNeighbors(listOfWords))
#===============================<GLOBAL CONSTANTS and GLOBAL IMPORTS>================================
from random import random, randint; from math import sqrt; from copy import deepcopy;
from time import clock; START_TIME = clock(); main(); print('\n +===<RUN TIME>===+');
print(' | %5.2f'%(clock()-START_TIME), 'seconds |'); print(' +================+')
##########################################<END OF PROGRAM>##########################################
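#######################################<USAGE SKETCH>###############################################
# Hedged sketch: reload the pickled neighbour dictionary produced above. The probe word 'cat'
# is an assumption -- substitute any word that actually appears in words.txt.
def loadDictionary(fileName='oneChangeDict.txt'):
    import pickle
    with open(fileName, 'rb') as handle:
        return pickle.load(handle)
# neighborDict = loadDictionary(); print(neighborDict.get('cat'))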
| 40.865385
| 101
| 0.497412
|
ef36b30f5e8f9d916d849b2830315abf17f693f0
| 5,578
|
py
|
Python
|
2019/02/computer.py
|
GeoffRiley/AdventOfCode
|
27fe8670a1923cb3b0675784f5e855ad18c29c93
|
[
"Unlicense"
] | 2
|
2020-12-12T03:18:45.000Z
|
2021-12-17T00:35:33.000Z
|
2019/02/computer.py
|
GeoffRiley/AdventOfCode
|
27fe8670a1923cb3b0675784f5e855ad18c29c93
|
[
"Unlicense"
] | null | null | null |
2019/02/computer.py
|
GeoffRiley/AdventOfCode
|
27fe8670a1923cb3b0675784f5e855ad18c29c93
|
[
"Unlicense"
] | null | null | null |
from itertools import product
def computer(cells):
c = [int(v) for v in cells.split(',')]
c = simulate(c)
return ','.join(str(v) for v in c)
def simulate(c):
pc = 0
while c[pc] != 99:
if c[pc] == 1:
c[c[pc + 3]] = c[c[pc + 1]] + c[c[pc + 2]]
pc += 4
elif c[pc] == 2:
c[c[pc + 3]] = c[c[pc + 1]] * c[c[pc + 2]]
pc += 4
else:
raise TypeError
return c
def run_computer_recover(cells):
c = [int(v) for v in cells.split(',')]
c[1] = 12
c[2] = 2
c = simulate(c)
print(c[0])
def find_key_inputs(cells):
target = 19690720
result = 0
for noun, verb in product(range(100), range(100)):
c = [int(v) for v in cells.split(',')]
c[1] = noun
c[2] = verb
c = simulate(c)
result = c[0]
if result == target:
print(f'Noun: {noun}, Verb: {verb} results in {c[0]}')
OPCODES = {1:'+', 2:'*', 99:'halt'}
def opcode(memory, pc):
op, a, b, c = memory[pc:pc+4]
dump = " ".join(f'{v:02x}' for v in memory[pc:pc+4])
if op < 99:
return f'{dump} : [{c:02x}] ← [{a:02x}] {OPCODES[op]} [{b:02x}]'
else:
return f'{dump} : HALT'
def disassemble(cells):
memory = [int(v) for v in cells.split(',')]
targets = set()
pc = 0
while pc < len(memory)-3:
targets.add(memory[pc+1])
targets.add(memory[pc+2])
pc += 4
pc = 0
targets = sorted(targets)
while pc < len(memory)-3:
print(f'{pc:04x} : {opcode(memory,pc)}')
pc += 4
if __name__ == '__main__':
initial_cells = '1,0,0,3,1,1,2,3,1,3,4,3,1,5,0,3,2,9,1,19,1,19,5,23,1,9,23,27,2,27,6,31,1,5,31,35,2,9,35,39,2,6,39,43,2,43,13,47,2,13,47,51,1,10,51,55,1,9,55,59,1,6,59,63,2,63,9,67,1,67,6,71,1,71,13,75,1,6,75,79,1,9,79,83,2,9,83,87,1,87,6,91,1,91,13,95,2,6,95,99,1,10,99,103,2,103,9,107,1,6,107,111,1,10,111,115,2,6,115,119,1,5,119,123,1,123,13,127,1,127,5,131,1,6,131,135,2,135,13,139,1,139,2,143,1,143,10,0,99,2,0,14,0'
run_computer_recover(initial_cells)
find_key_inputs(initial_cells)
disassemble(initial_cells)
"""
01: noun
02: verb
0000 : 01 00 00 03 : [03] ← [00] + [00] # [03] = 1 + 1 = 2
0004 : 01 01 02 03 : [03] ← [01] + [02] # [03] = noun + verb
0008 : 01 03 04 03 : [03] ← [03] + [04] # [03] = noun + verb + 1
000c : 01 05 00 03 : [03] ← [05] + [00] # [03] = 1 + 1 = 2
0010 : 02 09 01 13 : [13] ← [09] * [01] # [13] = 3 * noun
0014 : 01 13 05 17 : [17] ← [13] + [05] # [17] = (3 * noun) + 1
0018 : 01 09 17 1b : [1b] ← [09] + [17] # [1b] = 3 + (3 * noun) + 1 = (3 * noun) + 4
001c : 02 1b 06 1f : [1f] ← [1b] * [06] # [1f] = ((3 * noun) + 4) * 2 = (6 * noun) + 8
0020 : 01 05 1f 23 : [23] ← [05] + [1f] # [23] = 1 + (6 * noun) + 8 = (6 * noun) + 9
0024 : 02 09 23 27 : [27] ← [09] * [23] # [27] = 3 * ((6 * noun) + 9) = 18 * noun + 27
0028 : 02 06 27 2b : [2b] ← [06] * [27] # [2b] = 2 * (18 * noun + 27) = 36 * noun + 54
002c : 02 2b 0d 2f : [2f] ← [2b] * [0d] # [2f] = (36 * noun + 54) * 5 = 180 * noun + 270
0030 : 02 0d 2f 33 : [33] ← [0d] * [2f] # [33] = 5 * (180 * noun + 270) = 900 * noun + 1350
0034 : 01 0a 33 37 : [37] ← [0a] + [33] # [37] = 4 + (900 * noun + 1350) = 900 * noun + 1354
0038 : 01 09 37 3b : [3b] ← [09] + [37] # [3b] = 3 + (900 * noun + 1354) = 900 * noun + 1357
003c : 01 06 3b 3f : [3f] ← [06] + [3b] # [3f] = 2 + (900 * noun + 1357) = 900 * noun + 1359
0040 : 02 3f 09 43 : [43] ← [3f] * [09] # [43] = (900 * noun + 1359) * 3 = 2700 * noun + 4077
0044 : 01 43 06 47 : [47] ← [43] + [06] # [47] = (2700 * noun + 4077) + 2 = 2700 * noun + 4079
0048 : 01 47 0d 4b : [4b] ← [47] + [0d] # [4b] = (2700 * noun + 4079) + 5 = 2700 * noun + 4084
004c : 01 06 4b 4f : [4f] ← [06] + [4b] # [4f] = 2 + (2700 * noun + 4085) = 2700 * noun + 4086
0050 : 01 09 4f 53 : [53] ← [09] + [4f] # [53] = 3 + (2700 * noun + 4086) = 2700 * noun + 4089
0054 : 02 09 53 57 : [57] ← [09] * [53] # [57] = 3 * (2700 * noun + 4089) = 8100 * noun + 12267
0058 : 01 57 06 5b : [5b] ← [57] + [06] # [5b] = (8100 * noun + 12267) + 2 = 8100 * noun + 12269
005c : 01 5b 0d 5f : [5f] ← [5b] + [0d] # [5f] = (8100 * noun + 12269) + 5 = 8100 * noun + 12274
0060 : 02 06 5f 63 : [63] ← [06] * [5f] # [63] = 2 * (8100 * noun + 12274) = 16200 * noun + 24548
0064 : 01 0a 63 67 : [67] ← [0a] + [63] # [67] = 4 + (16200 * noun + 24548) = 16200 * noun + 24552
0068 : 02 67 09 6b : [6b] ← [67] * [09] # [6b] = (16200 * noun + 24552) * 3 = 48600 * noun + 73656
006c : 01 06 6b 6f : [6f] ← [06] + [6b] # [6f] = 2 + (48600 * noun + 73656) = 48600 * noun + 73658
0070 : 01 0a 6f 73 : [73] ← [0a] + [6f] # [73] = 4 + (48600 * noun + 73658) = 48600 * noun + 73662
0074 : 02 06 73 77 : [77] ← [06] * [73] # [77] = 2 * (48600 * noun + 73662) = 97200 * noun + 147324
0078 : 01 05 77 7b : [7b] ← [05] + [77] # [7b] = 1 + (97200 * noun + 147324) = 97200 * noun + 147325
007c : 01 7b 0d 7f : [7f] ← [7b] + [0d] # [7f] = (97200 * noun + 147325) + 5 = 97200 * noun + 147330
0080 : 01 7f 05 83 : [83] ← [7f] + [05] # [83] = (97200 * noun + 147330) + 1 = 97200 * noun + 147331
0084 : 01 06 83 87 : [87] ← [06] + [83] # [87] = 2 + (97200 * noun + 147331) = 97200 * noun + 147333
0088 : 02 87 0d 8b : [8b] ← [87] * [0d] # [8b] = 5 * (97200 * noun + 147333) = 486000 * noun + 736665
008c : 01 8b 02 8f : [8f] ← [8b] + [02] # [8f] = (486000 * noun + 736665) + verb
0090 : 01 8f 0a 00 : [00] ← [8f] + [0a] # [00] = (486000 * noun + 736665 + verb) + 4 = 486000 * noun + 736669 + verb
0094 : 63 02 00 0e : HALT
Code actually computes: 486000 * noun + 736669 + verb
"""
| 47.271186
| 425
| 0.492829
|
5d99108d1ab4d830bc4c959927f26ac740cc69b3
| 6,265
|
py
|
Python
|
wb/main/console_tool_wrapper/sh_tools/tools.py
|
apaniukov/workbench
|
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
|
[
"Apache-2.0"
] | 23
|
2022-03-17T12:24:09.000Z
|
2022-03-31T09:13:30.000Z
|
wb/main/console_tool_wrapper/sh_tools/tools.py
|
apaniukov/workbench
|
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
|
[
"Apache-2.0"
] | 18
|
2022-03-21T08:17:44.000Z
|
2022-03-30T12:42:30.000Z
|
wb/main/console_tool_wrapper/sh_tools/tools.py
|
apaniukov/workbench
|
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
|
[
"Apache-2.0"
] | 16
|
2022-03-17T12:24:14.000Z
|
2022-03-31T12:15:12.000Z
|
"""
OpenVINO DL Workbench
Class for parameters of standard bash tools: rm, mkdir, etc.
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from typing import Match
from wb.main.console_tool_wrapper.console_parameter_validator import ConsoleParametersTypes
from wb.main.console_tool_wrapper.console_tool import ConsoleTool
class ShTool(ConsoleTool):
def __init__(self, command: str, parameters: list = None, environment: dict = None):
super().__init__(parameters, environment)
self.exe = command
self.parameter_prefix = '-'
def set_openvino_package_root_parameter(self, value: str):
self.set_parameter(name='openvino-package-root', value=value, parameter_type=ConsoleParametersTypes.path)
# pylint: disable=invalid-name
class SetupTargetToolResult:
os: str
has_root_privileges: bool
has_internet_connection: bool
python_version: str
pip_version: str
home_directory: str
cpu_full_name: str
cpu_cores_number: int
cpu_frequency: str
class SetupTargetTool(ShTool):
has_internet_connection = re.compile(r'(The target has internet connection\.)')
has_root_privileges = re.compile(r'(The current user has root privileges\.)')
python_found_message = re.compile(r'(?:Python (?P<python_version>\d\.\d) is supported\.)')
pip_found_message = re.compile(r'(?:Pip (?P<pip_version>(\d.?)+) is supported\.)')
os_message = re.compile(r'(?:(?P<os>.*) is supported\.)')
home_directory_message = re.compile(r'(?:The home directory is (?P<home_directory>\/([\w]+\/?)+)\.)')
full_cpu_name_message = re.compile(r'(?:Full CPU name is (?P<cpu_name>.*))')
cpu_cores_message = re.compile(r'(?:CPU cores number: (?P<cpu_cores>\d))')
cpu_frequency_range_message = re.compile(r'(?:CPU frequency range: (?P<cpu_frequency>\d\.\d(-\d\.\d)?\sGHz))')
@staticmethod
def parse_tool_output(output: str) -> SetupTargetToolResult:
result = SetupTargetToolResult()
result.has_internet_connection = bool(SetupTargetTool.has_internet_connection.findall(output))
result.has_root_privileges = bool(SetupTargetTool.has_root_privileges.findall(output))
python_found_message: Match = SetupTargetTool.python_found_message.search(output)
result.python_version = python_found_message.group('python_version') if python_found_message else None
pip_found_message: Match = SetupTargetTool.pip_found_message.search(output)
result.pip_version = pip_found_message.group('pip_version') if pip_found_message else None
os_message: Match = SetupTargetTool.os_message.search(output)
result.os = os_message.group('os') if os_message else None
home_directory_message: Match = SetupTargetTool.home_directory_message.search(output)
result.home_directory = home_directory_message.group('home_directory') if home_directory_message else None
full_cpu_name_message: Match = SetupTargetTool.full_cpu_name_message.search(output)
result.cpu_full_name = full_cpu_name_message.group('cpu_name')
cpu_cores_message: Match = SetupTargetTool.cpu_cores_message.search(output)
result.cpu_cores_number = int(cpu_cores_message.group('cpu_cores'))
cpu_frequency_range_message: Match = SetupTargetTool.cpu_frequency_range_message.search(output)
result.cpu_frequency = cpu_frequency_range_message.group('cpu_frequency')
return result
class PingTargetTool(ShTool):
def set_output_path_parameter(self, value: str):
self.set_parameter(name='output', value=value, parameter_type=ConsoleParametersTypes.path)
@property
def get_output_parameter_value(self) -> str:
return self.get_parameter_value('output')
class TarGzTool(ConsoleTool):
def __init__(self, archive_path: str, destination_path: str = None):
super().__init__([
dict(name='xfp', value=archive_path, parameter_type=ConsoleParametersTypes.path)
])
self.exe = 'tar'
self.parameter_prefix = ''
if destination_path:
self.set_parameter(name='-C', value=destination_path, parameter_type=ConsoleParametersTypes.path)
class ZIPTool(ConsoleTool):
def __init__(self, archive_path: str, destination_path: str):
super().__init__([
dict(name='o', value=archive_path, parameter_type=ConsoleParametersTypes.path),
dict(name='d', value=destination_path, parameter_type=ConsoleParametersTypes.path)
])
self.exe = 'unzip'
class RMTool(ConsoleTool):
def __init__(self, path: str):
super().__init__([
dict(name='rf', value=path, parameter_type=ConsoleParametersTypes.path)
])
self.exe = 'rm'
class MKDirTool(ConsoleTool):
def __init__(self, path: str):
super().__init__([
dict(name='p', value=path, parameter_type=ConsoleParametersTypes.path)
])
self.exe = 'mkdir'
class EchoTool(ConsoleTool):
def __init__(self, string: str):
super().__init__([
dict(name='', value=string, parameter_type=ConsoleParametersTypes.echo)
])
self.exe = 'echo'
@property
def console_command(self) -> str:
self.validate()
params = ' '.join([param.value for param in self.params])
return '{exe} "{params}"'.format(exe=self.exe, params=params)
class KillTool(ConsoleTool):
def __init__(self, tool: ConsoleTool):
super().__init__()
process_to_kill = tool.console_command
self.exe = ' | '.join(('ps axf',
f'grep "{process_to_kill}"',
'grep -v grep',
'awk \'{print "kill -9 " $1}\' | sh'))
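# ---------------------------------------------------------------------------
# Hedged sketch: parsing a hand-written setup-script transcript with
# SetupTargetTool.parse_tool_output. The sample lines below are assumptions
# shaped to satisfy the regular expressions defined above, not real tool output.
def _parse_sample_output():
    sample = "\n".join([
        "Ubuntu 18.04 is supported.",
        "Python 3.6 is supported.",
        "Pip 19.3 is supported.",
        "The current user has root privileges.",
        "The target has internet connection.",
        "The home directory is /home/workbench.",
        "Full CPU name is Intel Core i7",
        "CPU cores number: 4",
        "CPU frequency range: 1.8-3.6 GHz",
    ])
    result = SetupTargetTool.parse_tool_output(sample)
    # Expected: ('Ubuntu 18.04', '3.6', 4)
    return result.os, result.python_version, result.cpu_cores_number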
| 40.419355
| 114
| 0.698962
|
17aa5f0d1e2f6d19dc4000a2824d43dfdb9ec848
| 1,263
|
py
|
Python
|
line_detection_module/yolov5/TabDetectDewarp/tab_det.py
|
nhatnxn/ID_Passport-OCR
|
78322ec2b9648d0b027326dced7c4aec967bcab3
|
[
"MIT"
] | 1
|
2021-06-30T11:01:25.000Z
|
2021-06-30T11:01:25.000Z
|
line_detection_module/yolov5/TabDetectDewarp/tab_det.py
|
nhatnxn/ID_Passport-OCR
|
78322ec2b9648d0b027326dced7c4aec967bcab3
|
[
"MIT"
] | null | null | null |
line_detection_module/yolov5/TabDetectDewarp/tab_det.py
|
nhatnxn/ID_Passport-OCR
|
78322ec2b9648d0b027326dced7c4aec967bcab3
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
from .utils import increase_border
from .yolov5 import inference, polygon_from_corners
def detect_bbox(im):
"""detect 9 information box
Args:
im (np.array): input image
Returns:
dict:
{
'address_line_1': [x,y,w,h]
'address_line_2': [x,y,w,h]
'birthday': [x,y,w,h]
'hometown_line_1': [x,y,w,h]
'hometown_line_2': [x,y,w,h]
'id': [x,y,w,h]
'name': [x,y,w,h]
'nation': [x,y,w,h]
'sex': [x,y,w,h]
}
"""
PADDING_SIZE = 0
info = {}
list_dict = ['address_line_1','address_line_2', 'birthday', 'hometown_line_1', 'hometown_line_2', 'id', 'name', 'nation', 'sex', 'passport']
# check input
if im is None:
return []
target = inference(im)
pts = polygon_from_corners(target)
# pts = pts.astype(int)
# infs = increase_border(pts, PADDING_SIZE)
for i, inf in enumerate(pts):
info[list_dict[i]] = inf
# in = [(int(p[0]), int(p[1])) for p in corners]
return info
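# ---------------------------------------------------------------------------
# Hedged usage sketch: the image path is a placeholder and running this needs the
# yolov5 weights that `inference` loads; it only illustrates the dict layout
# described in the docstring above.
def _demo(image_path="sample_card.jpg"):
    detected = detect_bbox(cv2.imread(image_path))
    if isinstance(detected, dict):
        for field_name, box in detected.items():
            print(field_name, box)
    return detected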
| 25.77551
| 150
| 0.471892
|
ced1a8d5d6d7d1b8843b901cb076f2549b904e4e
| 1,638
|
py
|
Python
|
VB_Classes/pointPolygonTest_demo.py
|
bobdavies2000/OpenCVB
|
1d339a94643a97e2d34f82dc7776677a8566d71d
|
[
"MIT"
] | 69
|
2019-07-17T21:20:37.000Z
|
2022-03-23T08:38:03.000Z
|
VB_Classes/pointPolygonTest_demo.py
|
bobdavies2000/OpenCVB
|
1d339a94643a97e2d34f82dc7776677a8566d71d
|
[
"MIT"
] | 5
|
2021-02-05T05:48:50.000Z
|
2022-03-12T01:43:15.000Z
|
VB_Classes/pointPolygonTest_demo.py
|
bobdavies2000/OpenCVB
|
1d339a94643a97e2d34f82dc7776677a8566d71d
|
[
"MIT"
] | 6
|
2019-12-24T05:36:52.000Z
|
2021-02-19T15:55:13.000Z
|
import cv2 as cv
import numpy as np
titleWindow = 'PointPolygonTest_demo.py'
# Create an image
r = 100
src = np.zeros((4*r, 4*r), dtype=np.uint8)
# Create a sequence of points to make a contour
vert = [None]*6
vert[0] = (3*r//2, int(1.34*r))
vert[1] = (1*r, 2*r)
vert[2] = (3*r//2, int(2.866*r))
vert[3] = (5*r//2, int(2.866*r))
vert[4] = (3*r, 2*r)
vert[5] = (5*r//2, int(1.34*r))
# Draw it in src
for i in range(6):
cv.line(src, vert[i], vert[(i+1)%6], ( 255 ), 3)
# Get the contours
contours, _ = cv.findContours(src, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# Calculate the distances to the contour
raw_dist = np.empty(src.shape, dtype=np.float32)
for i in range(src.shape[0]):
for j in range(src.shape[1]):
raw_dist[i,j] = cv.pointPolygonTest(contours[0], (j,i), True)
minVal, maxVal, _, maxDistPt = cv.minMaxLoc(raw_dist)
minVal = abs(minVal)
maxVal = abs(maxVal)
# Depicting the distances graphically
drawing = np.zeros((src.shape[0], src.shape[1], 3), dtype=np.uint8)
for i in range(src.shape[0]):
for j in range(src.shape[1]):
if raw_dist[i,j] < 0:
drawing[i,j,0] = 255 - abs(raw_dist[i,j]) * 255 / minVal
elif raw_dist[i,j] > 0:
drawing[i,j,2] = 255 - raw_dist[i,j] * 255 / maxVal
else:
drawing[i,j,0] = 255
drawing[i,j,1] = 255
drawing[i,j,2] = 255
cv.circle(drawing,maxDistPt, int(maxVal),255, 1, cv.LINE_8, 0)
dst2 = np.zeros((src.shape[0], src.shape[1], 3), dtype=np.uint8)
cv.cvtColor(src, cv.COLOR_GRAY2BGR, dst2)
CombinedImages = cv.hconcat([dst2, drawing])
cv.imshow(titleWindow, CombinedImages)
cv.waitKey()
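# Hedged extra check: with measureDist=False, pointPolygonTest only reports inside
# (+1), outside (-1) or on the edge (0); the probe point (2*r, 2*r) is an arbitrary
# interior point of the hexagon drawn above, so +1.0 is expected.
print(cv.pointPolygonTest(contours[0], (2 * r, 2 * r), False))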
| 30.333333
| 72
| 0.626984
|
4196a1bcd558b70f0e0c2c625bdf122ac0a4e41a
| 164
|
py
|
Python
|
blueprints/blue.py
|
Night-Developer/Flask-Simple-Structure
|
84f408c2e56427e45482e4edfab72d1d07214522
|
[
"MIT"
] | 1
|
2021-01-24T03:07:19.000Z
|
2021-01-24T03:07:19.000Z
|
blueprints/blue.py
|
Night-Developer/Flask-Simple-Structure
|
84f408c2e56427e45482e4edfab72d1d07214522
|
[
"MIT"
] | null | null | null |
blueprints/blue.py
|
Night-Developer/Flask-Simple-Structure
|
84f408c2e56427e45482e4edfab72d1d07214522
|
[
"MIT"
] | 1
|
2020-12-04T13:41:01.000Z
|
2020-12-04T13:41:01.000Z
|
from flask import Blueprint
blueprint = Blueprint('blueprint',__name__,template_folder='templates')
@blueprint.route('/')
def show():
return 'Hola blue print'
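# ---------------------------------------------------------------------------
# Hedged sketch of wiring this blueprint into an application. In the real project
# this would normally live in the app factory module; the '/blue' prefix is an
# illustrative assumption.
def create_app():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(blueprint, url_prefix='/blue')
    return app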
| 23.428571
| 71
| 0.75
|
a65a525f8c913ccbb2cf7ade73697a2418e79714
| 4,053
|
py
|
Python
|
code_search/function_parser/parsers/java_parser.py
|
novoselrok/codesnippetsearch
|
11310a8bfc9553df86dd98b120306159fd030b28
|
[
"MIT"
] | 70
|
2020-05-13T23:43:25.000Z
|
2022-03-07T07:41:54.000Z
|
code_search/function_parser/parsers/java_parser.py
|
novoselrok/codesnippetsearch
|
11310a8bfc9553df86dd98b120306159fd030b28
|
[
"MIT"
] | 18
|
2020-05-14T13:59:42.000Z
|
2022-02-27T09:37:01.000Z
|
code_search/function_parser/parsers/java_parser.py
|
novoselrok/codesnippetsearch
|
11310a8bfc9553df86dd98b120306159fd030b28
|
[
"MIT"
] | 5
|
2020-05-14T18:13:45.000Z
|
2022-01-03T07:32:33.000Z
|
from typing import List, Dict, Any
from code_search.function_parser.parsers.language_parser import LanguageParser, match_from_span, tokenize_code
from code_search.function_parser.parsers.comment_utils import strip_c_style_comment_delimiters, get_docstring_summary
class JavaParser(LanguageParser):
BLACKLISTED_FUNCTION_NAMES = {'toString', 'hashCode', 'equals', 'finalize', 'notify', 'notifyAll', 'clone'}
@staticmethod
def get_definitions(tree, blob: str) -> List[Dict[str, Any]]:
classes = (node for node in tree.root_node.children if node.type == 'class_declaration')
definitions = []
for class_ in classes:
class_identifier = match_from_span(
[child for child in class_.children if child.type == 'identifier'][0], blob).strip()
for child in (child for child in class_.children if child.type == 'class_body'):
for idx, node in enumerate(child.children):
if node.type == 'method_declaration':
if JavaParser.is_method_body_empty(node):
continue
docstring = ''
if idx - 1 >= 0 and child.children[idx - 1].type == 'comment':
docstring = match_from_span(child.children[idx - 1], blob)
docstring = strip_c_style_comment_delimiters(docstring)
docstring_summary = get_docstring_summary(docstring)
metadata = JavaParser.get_function_metadata(node, blob)
if metadata['identifier'] in JavaParser.BLACKLISTED_FUNCTION_NAMES:
continue
definitions.append({
'type': node.type,
'identifier': '{}.{}'.format(class_identifier, metadata['identifier']),
'parameters': metadata['parameters'],
'function': match_from_span(node, blob),
'function_tokens': tokenize_code(node, blob),
'docstring': docstring,
'docstring_summary': docstring_summary,
'start_point': node.start_point,
'end_point': node.end_point
})
return definitions
@staticmethod
def get_class_metadata(class_node, blob: str) -> Dict[str, str]:
metadata = {
'identifier': '',
'argument_list': '',
}
is_header = False
for n in class_node.children:
if is_header:
if n.type == 'identifier':
metadata['identifier'] = match_from_span(n, blob).strip('(:')
elif n.type == 'argument_list':
metadata['argument_list'] = match_from_span(n, blob)
if n.type == 'class':
is_header = True
elif n.type == ':':
break
return metadata
@staticmethod
def is_method_body_empty(node):
for c in node.children:
if c.type in {'method_body', 'constructor_body'}:
if c.start_point[0] == c.end_point[0]:
return True
@staticmethod
def get_function_metadata(function_node, blob: str) -> Dict[str, str]:
metadata = {
'identifier': '',
'parameters': '',
}
parameters = []
for n in function_node.children:
if n.type == 'identifier':
metadata['identifier'] = match_from_span(n, blob).strip('(')
elif n.type == 'formal_parameters':
for fp_child in n.children:
if fp_child.type == 'formal_parameter':
parameters.append(match_from_span(fp_child, blob))
metadata['parameters'] = ' '.join(parameters)
return metadata
| 45.033333
| 118
| 0.529484
|
978bdea54598603b38b4c5f214b75968d19f8bbe
| 3,160
|
py
|
Python
|
foxylib/tools/function/function_tool.py
|
lbox-kr/foxylib
|
0c3cf236392a6ee639fe919e06dc68cc47812fcd
|
[
"BSD-3-Clause"
] | null | null | null |
foxylib/tools/function/function_tool.py
|
lbox-kr/foxylib
|
0c3cf236392a6ee639fe919e06dc68cc47812fcd
|
[
"BSD-3-Clause"
] | 1
|
2021-05-18T07:08:09.000Z
|
2021-05-18T07:08:09.000Z
|
foxylib/tools/function/function_tool.py
|
lbox-kr/foxylib
|
0c3cf236392a6ee639fe919e06dc68cc47812fcd
|
[
"BSD-3-Clause"
] | null | null | null |
import inspect
import logging
import time
from functools import wraps, reduce
from future.utils import lfilter
from nose.tools import assert_equal
from foxylib.tools.native.class_tool import ClassTool
class FunctionTool:
@classmethod
def returnvalue2func_simple(cls, rv):
return lambda: rv
@classmethod
def returnvalue2func(cls, rv):
def f(*_,**__): return rv
return f
@classmethod
def func2cls(cls, meth):
if inspect.ismethod(meth):
for clazz in inspect.getmro(meth.__self__.__class__):
if clazz.__dict__.get(meth.__name__) is meth:
return clazz
meth = meth.__func__ # fallback to __qualname__ parsing
if inspect.isfunction(meth):
str_path = meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0]
clazz = reduce(getattr, str_path.split('.'), inspect.getmodule(meth),)
if isinstance(clazz, type):
return clazz
return None
@classmethod
def func2name(cls, f): return f.__name__
@classmethod
def func2class_func_name_list(cls, f):
l = []
clazz = FunctionTool.func2cls(f)
if clazz: l.append(ClassTool.cls2name(clazz))
l.append(FunctionTool.func2name(f))
return l
@classmethod
def func2class_func_name(cls, f):
return ".".join(cls.func2class_func_name_list(f))
@classmethod
def wrap2negate(cls, f):
def wrapped(*a, **k):
return not f(*a, **k)
return wrapped
@classmethod
def func2wrapped(cls, f):
def wrapped(*_, **__): return f(*_, **__)
return wrapped
@classmethod
def wrapper2wraps_applied(cls, wrapper_in):
def wrapper(f):
return wraps(f)(cls.func2wrapped(wrapper_in(f)))
return wrapper
@classmethod
def f_args2f_tuple(cls, f_args):
def f_tuple(args, **kwargs):
return f_args(*args, **kwargs)
return f_tuple
@classmethod
def funcs2piped(cls, funcs):
if not funcs: raise Exception()
def f(*args, **kwargs):
v_init = funcs[0](*args, **kwargs)
v = reduce(lambda x, f: f(x), funcs[1:], v_init)
return v
return f
@classmethod
def funcs2f_all(cls, f_list):
def f_all(*_, **__):
return all(f(*_,**__) for f in f_list)
return f_all
@classmethod
def funcs2any(cls, f_list):
def f_any(*_, **__):
            return any(f(*_, **__) for f in f_list)
return f_any
@classmethod
def idfun(cls, x):
return x
@classmethod
def func2module_qualname(cls, f):
return tuple([getattr(f, k) for k in ["__module__", "__qualname__"]])
@classmethod
def func2fullpath(cls, f):
return ".".join(cls.func2module_qualname(f))
wrap2negate = FunctionTool.wrap2negate
f_a2t = FunctionTool.f_args2f_tuple
funcs2piped = FunctionTool.funcs2piped
idfun = FunctionTool.idfun
funcs2f_all = FunctionTool.funcs2f_all
rv2f0 = FunctionTool.returnvalue2func_simple
rv2f = FunctionTool.returnvalue2func
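# Hedged sketch exercising a few of the helpers above; the lambdas and values are
# purely illustrative.
_double = lambda x: x * 2
_increment = lambda x: x + 1
_pipeline = funcs2piped([_double, _increment])      # _pipeline(3) == 7
_is_even = wrap2negate(lambda x: x % 2 == 1)        # _is_even(4) is True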
| 24.6875
| 83
| 0.616456
|
c08fa755ceae3b80d171e18b562167143c1e735a
| 5,106
|
py
|
Python
|
classifiers/DL_classifier.py
|
LANZhengyang/TSC_gait_analysis
|
1b7b0ac8e2ee202acb06cd999e156941420c294c
|
[
"MIT"
] | null | null | null |
classifiers/DL_classifier.py
|
LANZhengyang/TSC_gait_analysis
|
1b7b0ac8e2ee202acb06cd999e156941420c294c
|
[
"MIT"
] | null | null | null |
classifiers/DL_classifier.py
|
LANZhengyang/TSC_gait_analysis
|
1b7b0ac8e2ee202acb06cd999e156941420c294c
|
[
"MIT"
] | null | null | null |
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import imp
LRP = imp.load_source('LRP', './LRP/__init__.py')
from LRP.lrp import LRP
import copy
class Dataset_torch(Dataset):
def __init__(self, data,with_label=True):
self.with_label = with_label
if self.with_label:
self.data_x, self.data_y = data
else:
self.data_x = data
def __len__(self):
return len(self.data_x)
def __getitem__(self, idx):
if self.with_label:
return self.data_x[idx], self.data_y[idx]
else:
return self.data_x[idx]
class DL_classifier:
def __init__(self,model):
self.model = model
def load(self,model_dir):
self.model = self.model.load_from_checkpoint(model_dir)
return self
def fit(self, x_train, y_train, x_val, y_val, batch_size, earlystopping=False, et_patience=10, max_epochs=50, gpu = [0], default_root_dir=None):
train_set = Dataset_torch([x_train, y_train])
test_set = Dataset_torch([x_val, y_val])
data_loader_train = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size,shuffle=True,num_workers=4)
data_loader_test = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size,shuffle=True,num_workers=4)
if not earlystopping:
self.trainer = pl.Trainer( gpus=gpu,max_epochs= max_epochs, default_root_dir= default_root_dir)
elif earlystopping:
early_stop_callback = EarlyStopping(
monitor='test_loss',
min_delta=0.00,
patience=et_patience,
verbose=True)
self.trainer = pl.Trainer(gpus=gpu,max_epochs= max_epochs, callbacks=[early_stop_callback], default_root_dir=default_root_dir)
self.trainer.fit(self.model, data_loader_train, data_loader_test)
def predict(self, x_pred, batch_size,gpu=[0]):
pred_set = Dataset_torch(x_pred,with_label=False)
data_loader_pred = torch.utils.data.DataLoader(dataset=pred_set, batch_size=batch_size,num_workers=4)
trainer = pl.Trainer(gpus=gpu)
pred = trainer.predict(model=self.model,dataloaders = data_loader_pred)
y_pre = torch.tensor([torch.argmax(i) for i in torch.cat(pred)])
return y_pre
def compute_LRP(self,x_test,y_test, rule='z_rule_no_bias'):
model = self.model
model = model.eval()
lrp = LRP(model.double(), rule=rule)
index_one = np.where(y_test==1)[0]
index_zero = np.where(y_test==0)[0]
pred_set_label_zero = Dataset_torch([x_test[index_zero],y_test[index_zero]])
data_loader_pred_label_zero = torch.utils.data.DataLoader(dataset=pred_set_label_zero, batch_size=1,num_workers=4)
relevance_list_label_zero = []
for num, (image, label) in enumerate(data_loader_pred_label_zero):
relevance_list_label_zero.append(lrp(image).cpu().detach().numpy()[0])
relevance_list_label_zero = np.array(relevance_list_label_zero)
r_mean_label_zero = np.mean(relevance_list_label_zero,axis=0)
d_mean_label_zero = np.mean(x_test[index_zero],axis=0)
max_mean_index_zero = np.max(r_mean_label_zero)
min_mean_index_zero = np.min(r_mean_label_zero)
pred_set_label_one = Dataset_torch((x_test[index_one],y_test[index_one]))
data_loader_pred_label_one = torch.utils.data.DataLoader(dataset=pred_set_label_one, batch_size=1,num_workers=4)
relevance_list_label_one = []
for num, (image, label) in enumerate(data_loader_pred_label_one):
relevance_list_label_one.append(lrp(image).cpu().detach().numpy()[0])
relevance_list_label_one = np.array(relevance_list_label_one)
r_mean_label_one = np.mean(relevance_list_label_one,axis=0)
d_mean_label_one = np.mean(x_test[index_one],axis=0)
max_mean_index_one = np.max(r_mean_label_one)
min_mean_index_one = np.min(r_mean_label_one)
lim_max = np.max([max_mean_index_one,max_mean_index_zero])
lim_min = np.min([min_mean_index_one,min_mean_index_zero])
return relevance_list_label_zero, relevance_list_label_one, r_mean_label_zero, r_mean_label_one, d_mean_label_zero, d_mean_label_one, lim_min, lim_max
| 40.848
| 162
| 0.639444
|
65024524ef5bb3e97b6cfecb4a291cec33e891d3
| 16,924
|
py
|
Python
|
alfworld/agents/agent/vision_dagger_agent.py
|
zhaozj89/alfworld_meta_dqn
|
4ad3ee6e57a6b808d4d90d48f00f14e4e8ec593d
|
[
"MIT"
] | 42
|
2020-10-19T12:18:58.000Z
|
2022-03-11T05:48:03.000Z
|
alfworld/agents/agent/vision_dagger_agent.py
|
zhaozj89/alfworld_meta_dqn
|
4ad3ee6e57a6b808d4d90d48f00f14e4e8ec593d
|
[
"MIT"
] | 23
|
2020-10-26T01:25:36.000Z
|
2022-03-20T21:29:03.000Z
|
alfworld/agents/agent/vision_dagger_agent.py
|
zhaozj89/alfworld_meta_dqn
|
4ad3ee6e57a6b808d4d90d48f00f14e4e8ec593d
|
[
"MIT"
] | 14
|
2020-10-19T12:20:27.000Z
|
2022-02-09T22:58:40.000Z
|
import os
import sys
import copy
import numpy as np
import torch
import torch.nn.functional as F
import alfworld.agents
import alfworld.agents.modules.memory as memory
from alfworld.agents.agent import TextDAggerAgent
from alfworld.agents.modules.generic import to_np, to_pt, _words_to_ids, pad_sequences, preproc, max_len, ez_gather_dim_1, LinearSchedule
from alfworld.agents.modules.layers import NegativeLogLoss, masked_mean, compute_mask
from alfworld.agents.detector.mrcnn import load_pretrained_model
import torchvision.transforms as T
from torchvision import models
from torchvision.ops import boxes as box_ops
class VisionDAggerAgent(TextDAggerAgent):
'''
Vision Agent trained with DAgger
'''
def __init__(self, config):
super().__init__(config)
assert self.action_space == "generation"
self.use_gpu = config['general']['use_cuda']
self.transform = T.Compose([T.ToTensor()])
# choose vision model
self.vision_model_type = config['vision_dagger']['model_type']
self.use_exploration_frame_feats = config['vision_dagger']['use_exploration_frame_feats']
self.sequence_aggregation_method = config['vision_dagger']['sequence_aggregation_method']
# initialize model
if self.vision_model_type in {'resnet'}:
self.detector = models.resnet18(pretrained=True)
self.detector.eval()
if self.use_gpu:
self.detector.cuda()
elif self.vision_model_type in {'maskrcnn', 'maskrcnn_whole'}:
pretrained_model_path = config['mask_rcnn']['pretrained_model_path']
self.mask_rcnn_top_k_boxes = self.config['vision_dagger']['maskrcnn_top_k_boxes']
self.avg2dpool = torch.nn.AvgPool2d((13, 13))
self.detector = load_pretrained_model(pretrained_model_path)
self.detector.roi_heads.register_forward_hook(self.box_features_hook)
self.detection_box_features = []
self.fpn_pooled_features = []
self.detector.eval()
if self.use_gpu:
self.detector.cuda()
elif self.vision_model_type in {"no_vision"}:
print("No Vision Agent")
else:
raise NotImplementedError()
def box_features_hook(self, module, input, output):
'''
hook for extracting features from MaskRCNN
'''
features, proposals, image_shapes, targets = input
box_features = module.box_roi_pool(features, proposals, image_shapes)
box_features = module.box_head(box_features)
class_logits, box_regression = module.box_predictor(box_features)
device = class_logits.device
num_classes = class_logits.shape[-1]
boxes_per_image = [len(boxes_in_image) for boxes_in_image in proposals]
pred_boxes = module.box_coder.decode(box_regression, proposals)
pred_scores = F.softmax(class_logits, -1)
# split boxes and scores per image
pred_boxes = pred_boxes.split(boxes_per_image, 0)
pred_scores = pred_scores.split(boxes_per_image, 0)
all_boxes = []
all_scores = []
all_labels = []
all_keeps = []
for boxes, scores, image_shape in zip(pred_boxes, pred_scores, image_shapes):
boxes = box_ops.clip_boxes_to_image(boxes, image_shape)
# create labels for each prediction
labels = torch.arange(num_classes, device=device)
labels = labels.view(1, -1).expand_as(scores)
# remove predictions with the background label
boxes = boxes[:, 1:]
scores = scores[:, 1:]
labels = labels[:, 1:]
# batch everything, by making every class prediction be a separate instance
boxes = boxes.reshape(-1, 4)
scores = scores.flatten()
labels = labels.flatten()
# remove low scoring boxes
inds = torch.nonzero(scores > module.score_thresh).squeeze(1)
boxes, scores, labels = boxes[inds], scores[inds], labels[inds]
# remove empty boxes
keep = box_ops.remove_small_boxes(boxes, min_size=1e-2)
boxes, scores, labels = boxes[keep], scores[keep], labels[keep]
# non-maximum suppression, independently done per class
keep = box_ops.batched_nms(boxes, scores, labels, module.nms_thresh)
# keep only topk scoring predictions
keep = keep[:self.mask_rcnn_top_k_boxes]
boxes, scores, labels = boxes[keep], scores[keep], labels[keep]
all_boxes.append(boxes)
all_scores.append(scores)
all_labels.append(labels)
all_keeps.append(keep)
box_features_per_image = []
for keep in all_keeps:
box_features_per_image.append(box_features[keep])
self.detection_box_features = box_features_per_image
self.fpn_pooled_features = self.avg2dpool(features['pool']).squeeze(-1).squeeze(-1)
# visual features for state representation
def extract_visual_features(self, images):
with torch.no_grad():
if "resnet" in self.vision_model_type:
                image_tensors = [self.transform(i).cuda() if self.use_gpu else self.transform(i) for i in images]
image_tensors = torch.stack(image_tensors, dim=0)
res_out = self.detector(image_tensors)
res_out_list = [res_out[i].unsqueeze(0) for i in range(res_out.shape[0])]
return res_out_list
elif "maskrcnn" in self.vision_model_type:
image_tensors = [self.transform(i).cuda() if self.use_gpu else self.transform(i) for i in images]
self.detector(image_tensors) # hook writes to self.detection_box_features
if "maskrcnn_whole" in self.vision_model_type:
return [i.unsqueeze(0) for i in self.fpn_pooled_features]
else:
return self.detection_box_features
elif "no_vision" in self.vision_model_type:
batch_size = len(images)
zeros = [torch.zeros((1, 1000)) for _ in range(batch_size)]
if self.use_gpu:
zeros = [z.cuda() for z in zeros]
return zeros
else:
raise NotImplementedError()
# without recurrency
def train_dagger(self):
if len(self.dagger_memory) < self.dagger_replay_batch_size:
return None
transitions = self.dagger_memory.sample(self.dagger_replay_batch_size)
if transitions is None:
return None
batch = memory.dagger_transition(*zip(*transitions))
if self.action_space == "generation":
return self.command_generation_teacher_force(batch.observation_list, batch.task_list, batch.target_list)
else:
raise NotImplementedError()
# with recurrency
def train_dagger_recurrent(self):
if len(self.dagger_memory) < self.dagger_replay_batch_size:
return None
sequence_of_transitions, contains_first_step = self.dagger_memory.sample_sequence(self.dagger_replay_batch_size, self.dagger_replay_sample_history_length)
if sequence_of_transitions is None:
return None
batches = []
for transitions in sequence_of_transitions:
batch = memory.dagger_transition(*zip(*transitions))
batches.append(batch)
if self.action_space == "generation":
return self.command_generation_recurrent_teacher_force([batch.observation_list for batch in batches], [batch.task_list for batch in batches], [batch.target_list for batch in batches], contains_first_step)
else:
raise NotImplementedError()
def command_generation_teacher_force(self, observation_feats, task_desc_strings, target_strings):
input_target_strings = [" ".join(["[CLS]"] + item.split()) for item in target_strings]
output_target_strings = [" ".join(item.split() + ["[SEP]"]) for item in target_strings]
batch_size = len(observation_feats)
aggregated_obs_feat = self.aggregate_feats_seq(observation_feats)
h_obs = self.online_net.vision_fc(aggregated_obs_feat)
h_td, td_mask = self.encode(task_desc_strings, use_model="online")
h_td_mean = self.online_net.masked_mean(h_td, td_mask).unsqueeze(1)
h_obs = h_obs.to(h_td_mean.device)
        vision_td = torch.cat((h_obs, h_td_mean), dim=1) # batch x k boxes x hid
vision_td_mask = torch.ones((batch_size, h_obs.shape[1]+h_td_mean.shape[1])).to(h_td_mean.device)
input_target = self.get_word_input(input_target_strings)
ground_truth = self.get_word_input(output_target_strings) # batch x target_length
target_mask = compute_mask(input_target) # mask of ground truth should be the same
pred = self.online_net.vision_decode(input_target, target_mask, vision_td, vision_td_mask, None) # batch x target_length x vocab
batch_loss = NegativeLogLoss(pred * target_mask.unsqueeze(-1), ground_truth, target_mask, smoothing_eps=self.smoothing_eps)
loss = torch.mean(batch_loss)
if loss is None:
return None, None
# Backpropagate
self.online_net.zero_grad()
self.optimizer.zero_grad()
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(self.online_net.parameters(), self.clip_grad_norm)
self.optimizer.step() # apply gradients
return to_np(pred), to_np(loss)
def command_generation_recurrent_teacher_force(self, seq_observation_feats, seq_task_desc_strings, seq_target_strings, contains_first_step=False):
loss_list = []
previous_dynamics = None
batch_size = len(seq_observation_feats[0])
h_td, td_mask = self.encode(seq_task_desc_strings[0], use_model="online")
h_td_mean = self.online_net.masked_mean(h_td, td_mask).unsqueeze(1)
for step_no in range(self.dagger_replay_sample_history_length):
input_target_strings = [" ".join(["[CLS]"] + item.split()) for item in seq_target_strings[step_no]]
output_target_strings = [" ".join(item.split() + ["[SEP]"]) for item in seq_target_strings[step_no]]
obs = [o.to(h_td.device) for o in seq_observation_feats[step_no]]
aggregated_obs_feat = self.aggregate_feats_seq(obs)
h_obs = self.online_net.vision_fc(aggregated_obs_feat)
vision_td = torch.cat((h_obs, h_td_mean), dim=1) # batch x k boxes x hid
vision_td_mask = torch.ones((batch_size, h_obs.shape[1]+h_td_mean.shape[1])).to(h_td_mean.device)
averaged_vision_td_representation = self.online_net.masked_mean(vision_td, vision_td_mask)
current_dynamics = self.online_net.rnncell(averaged_vision_td_representation, previous_dynamics) if previous_dynamics is not None else self.online_net.rnncell(averaged_vision_td_representation)
input_target = self.get_word_input(input_target_strings)
ground_truth = self.get_word_input(output_target_strings) # batch x target_length
target_mask = compute_mask(input_target) # mask of ground truth should be the same
pred = self.online_net.vision_decode(input_target, target_mask, vision_td, vision_td_mask, current_dynamics) # batch x target_length x vocab
previous_dynamics = current_dynamics
if (not contains_first_step) and step_no < self.dagger_replay_sample_update_from:
previous_dynamics = previous_dynamics.detach()
continue
batch_loss = NegativeLogLoss(pred * target_mask.unsqueeze(-1), ground_truth, target_mask, smoothing_eps=self.smoothing_eps)
loss = torch.mean(batch_loss)
loss_list.append(loss)
loss = torch.stack(loss_list).mean()
if loss is None:
return None
# Backpropagate
self.online_net.zero_grad()
self.optimizer.zero_grad()
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(self.online_net.parameters(), self.clip_grad_norm)
self.optimizer.step() # apply gradients
return to_np(loss)
def command_generation_greedy_generation(self, observation_feats, task_desc_strings, previous_dynamics):
with torch.no_grad():
batch_size = len(observation_feats)
aggregated_obs_feat = self.aggregate_feats_seq(observation_feats)
h_obs = self.online_net.vision_fc(aggregated_obs_feat)
h_td, td_mask = self.encode(task_desc_strings, use_model="online")
h_td_mean = self.online_net.masked_mean(h_td, td_mask).unsqueeze(1)
h_obs = h_obs.to(h_td_mean.device)
vision_td = torch.cat((h_obs, h_td_mean), dim=1) # batch x k boxes x hid
vision_td_mask = torch.ones((batch_size, h_obs.shape[1]+h_td_mean.shape[1])).to(h_td_mean.device)
if self.recurrent:
averaged_vision_td_representation = self.online_net.masked_mean(vision_td, vision_td_mask)
current_dynamics = self.online_net.rnncell(averaged_vision_td_representation, previous_dynamics) if previous_dynamics is not None else self.online_net.rnncell(averaged_vision_td_representation)
else:
current_dynamics = None
# greedy generation
input_target_list = [[self.word2id["[CLS]"]] for i in range(batch_size)]
eos = np.zeros(batch_size)
for _ in range(self.max_target_length):
input_target = copy.deepcopy(input_target_list)
input_target = pad_sequences(input_target, maxlen=max_len(input_target)).astype('int32')
input_target = to_pt(input_target, self.use_cuda)
target_mask = compute_mask(input_target) # mask of ground truth should be the same
pred = self.online_net.vision_decode(input_target, target_mask, vision_td, vision_td_mask, current_dynamics) # batch x target_length x vocab
# pointer softmax
pred = to_np(pred[:, -1]) # batch x vocab
pred = np.argmax(pred, -1) # batch
for b in range(batch_size):
new_stuff = [pred[b]] if eos[b] == 0 else []
input_target_list[b] = input_target_list[b] + new_stuff
if pred[b] == self.word2id["[SEP]"]:
eos[b] = 1
if np.sum(eos) == batch_size:
break
res = [self.tokenizer.decode(item) for item in input_target_list]
res = [item.replace("[CLS]", "").replace("[SEP]", "").strip() for item in res]
res = [item.replace(" in / on ", " in/on " ) for item in res]
return res, current_dynamics
def get_vision_feat_mask(self, observation_feats):
batch_size = len(observation_feats)
num_vision_feats = [of.shape[0] for of in observation_feats]
max_feat_len = max(num_vision_feats)
mask = torch.zeros((batch_size, max_feat_len))
for b, num_vision_feat in enumerate(num_vision_feats):
mask[b,:num_vision_feat] = 1
return mask
def extract_exploration_frame_feats(self, exploration_frames):
exploration_frame_feats = []
for batch in exploration_frames:
ef_feats = []
for image in batch:
ef_feats.append(self.extract_visual_features([image])[0])
# cat_feats = torch.cat(ef_feats, dim=0)
max_feat_len = max([f.shape[0] for f in ef_feats])
stacked_feats = self.online_net.vision_fc.pad_and_stack(ef_feats, max_feat_len=max_feat_len)
stacked_feats = stacked_feats.view(-1, self.online_net.vision_fc.in_features)
exploration_frame_feats.append(stacked_feats)
return exploration_frame_feats
def aggregate_feats_seq(self, feats):
if self.sequence_aggregation_method == "sum":
return [f.sum(0).unsqueeze(0) for f in feats]
elif self.sequence_aggregation_method == "average":
return [f.mean(0).unsqueeze(0) for f in feats]
elif self.sequence_aggregation_method == "rnn":
max_feat_len = max([f.shape[0] for f in feats])
feats_stack = self.online_net.vision_fc.pad_and_stack(feats, max_feat_len=max_feat_len)
feats_h, feats_c = self.online_net.vision_feat_seq_rnn(feats_stack)
aggregated_feats = feats_h[:,0,:].unsqueeze(1)
return [b for b in aggregated_feats]
else:
raise ValueError("sequence_aggregation_method must be sum, average or rnn")
| 49.197674
| 216
| 0.662018
|
ed2f270089d6bdbe40581e4887d6aa66c7dec163
| 2,461
|
py
|
Python
|
nobel_physics_prizes/src/data/country_utils.py
|
covuworie/nobel-physics-prizes
|
f89a32cd6eb9bbc9119a231bffee89b177ae847a
|
[
"MIT"
] | 3
|
2019-08-21T05:35:42.000Z
|
2020-10-08T21:28:51.000Z
|
nobel_physics_prizes/src/data/country_utils.py
|
covuworie/nobel-physics-prizes
|
f89a32cd6eb9bbc9119a231bffee89b177ae847a
|
[
"MIT"
] | 139
|
2018-09-01T23:15:59.000Z
|
2021-02-02T22:01:39.000Z
|
nobel_physics_prizes/src/data/country_utils.py
|
covuworie/nobel-physics-prizes
|
f89a32cd6eb9bbc9119a231bffee89b177ae847a
|
[
"MIT"
] | null | null | null |
import numpy as np
def nationality_to_alpha2_code(text, nationalities):
"""Create ISO 3166-1 alpha-2 country codes from nationalities.
Use the nationality to find ISO 3166-1 alpha-2
country codes. This function should only be called
for a subset of the places dataframe where country is
not defined and latitude or longitude is not (or
equivalently ISO 3166-1 alpha-2 country code is not
defined).
Args:
text (str): Text containing nationalities.
nationalities (pandas.Dataframe): Dataframe of
nationalities data.
Returns:
`str` or `numpy.nan`: Pipe separated list of ISO 3166-1
alpha-2 country codes if found, otherwise numpy.nan.
"""
if isinstance(text, float):
return np.nan
# try as is
texts_to_check = {text}
# flatten all demonyms in nationalities dataframe and
# try any of those that are found in text
try:
nationality_to_alpha2_code.demonyms
except AttributeError:
nationality_to_alpha2_code.demonyms = np.ravel(
nationalities.drop('ISO 3166 Code', axis=1))
nationality_to_alpha2_code.demonyms = [
str(demonym) for demonym in nationality_to_alpha2_code.demonyms
if str(demonym) != 'nan']
for demonym in nationality_to_alpha2_code.demonyms:
if demonym in text:
texts_to_check.add(demonym)
# remove Ireland or Irish for special case of Northern Ireland or
# Northern Irish
if 'Northern Ireland' in texts_to_check and 'Ireland' in texts_to_check:
texts_to_check.remove('Ireland')
if 'Northern Irish' in texts_to_check and 'Irish' in texts_to_check:
texts_to_check.remove('Irish')
# also try with an 's' on the end
if text.endswith('s'):
texts_to_check.add(text[:-1])
alpha2_codes = set()
for text_to_check in texts_to_check:
# check all columns except ISO 3166 Code
for column in nationalities.columns[1:]:
nationality_present = nationalities[
text_to_check == nationalities[column]]
if not nationality_present.empty:
alpha2_codes.add(
nationality_present[
'ISO 3166 Code'].values[0])
if not alpha2_codes:
return np.nan
alpha2_codes = list(alpha2_codes)
alpha2_codes.sort()
alpha2_codes = '|'.join(alpha2_codes)
return alpha2_codes
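# Illustrative usage sketch, not part of the original module. The tiny
# dataframe below is an assumption standing in for the real nationalities
# data, which has an 'ISO 3166 Code' column followed by demonym columns.
if __name__ == '__main__':
    import pandas as pd
    nationalities = pd.DataFrame({
        'ISO 3166 Code': ['DE', 'FR'],
        'Demonym': ['German', 'French'],
    })
    print(nationality_to_alpha2_code('German physicist', nationalities))  # DE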
| 33.256757
| 76
| 0.659488
|
3069ae4bc391c5f26697df57ae593e34ff755dc2
| 3,356
|
py
|
Python
|
find_and_edit.py
|
shanenoi/Finder
|
017fa83a5d4d468fd513c207b8f21153daba516a
|
[
"Unlicense"
] | 1
|
2020-12-05T06:45:18.000Z
|
2020-12-05T06:45:18.000Z
|
find_and_edit.py
|
shanenoi/Finder
|
017fa83a5d4d468fd513c207b8f21153daba516a
|
[
"Unlicense"
] | 1
|
2020-12-05T05:26:21.000Z
|
2020-12-05T05:26:21.000Z
|
find_and_edit.py
|
shanenoi/Finder
|
017fa83a5d4d468fd513c207b8f21153daba516a
|
[
"Unlicense"
] | null | null | null |
from os import popen, system, path, listdir
from sys import argv, exit
from re import search, findall, sub
DEFAULT_EDITOR = "/usr/bin/vi"
GREEN = u"\u001b[32;1m"
BLUE = u"\u001b[34m"
RESET_ALL = u"\u001b[0m"
def disable_color():
global GREEN
global BLUE
global RESET_ALL
GREEN = ""
BLUE = ""
RESET_ALL = ""
def default_editor(path):
system(f"{DEFAULT_EDITOR} '{path}'")
def default_index_input():
return input("index: ")
class Finder(object):
COMMAND = ""
RESULT = []
index = 0
BREAK = ""
def __init__(self, excludes:list=[]):
self.COMMAND = " ".join([f"| grep -v \"{i}\"" for i in excludes])
def find(self, index_input=default_index_input, editor=default_editor):
command_reader = popen(self.COMMAND)
try:
while (_file:=command_reader.readline()[:-1]):
self.process_result(_file)
except KeyboardInterrupt:
print(self.BREAK)
finally:
return self.result_behaviour(index_input=index_input,
editor=editor)
def result_behaviour(self, index_input, editor):
try:
index = index_input()
if index.isdigit():
index = int(index)
if index in range(len(self.RESULT)):
editor(self.RESULT[index])
except (KeyboardInterrupt, EOFError):
print(self.BREAK)
finally:
return 0
def process_result(self, value):
print(value)
class FindByContent(Finder):
def __init__(self, regex:str, excludes:list=[]):
super().__init__(excludes=excludes)
self.COMMAND = "find . -type f " + self.COMMAND
self.__regex = regex
def process_result(self, value):
lines = open(value, encoding="latin1").read().split("\n")
for num_line, line in enumerate(lines):
result = search(f".{{0,9}}{self.__regex}.{{0,9}}", line)
if result:
print(f"[{self.index:>3}] linenum: {num_line+1} " +
f"of {GREEN+value+RESET_ALL}: " +
f"{BLUE}... {result.group()} ...{RESET_ALL}")
self.RESULT.append(value)
self.index += 1
class FindByName(Finder):
    def __init__(self, regex, excludes:list=[]):
        super().__init__(excludes=excludes)
        self.COMMAND = f"find . -type f | grep -P \"{regex}\"" + self.COMMAND
        self.__regex = regex
def process_result(self, value):
result = sub(f"({self.__regex})",BLUE+r"\1"+GREEN,value)
print(f'[{self.index:>3}] {GREEN+result+RESET_ALL}')
self.RESULT.append(value)
self.index += 1
def main(args):
# define it for using this code for vim ext
somethings = None
if args[0] == "n":
somethings = FindByName(args[1], args[2]).find()
elif args[0] == "c":
somethings = FindByContent(args[1], args[2]).find()
return somethings
if __name__ == "__main__":
args = []
if argv.__len__() == 1:
print("Finder was written by Shanenoi!")
exit(0)
args.append(argv[1])
args.append([])
for ele in argv[2:]:
if ele == "-v":
args.append([])
continue
if ele:
args[-1].append(ele)
args[1] = " ".join(args[1])
args.append([])
main(args)
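# Illustrative invocations (an assumption about intended usage, not part of
# the original script): "n" searches file names, "c" searches file contents,
# and arguments after "-v" are passed as grep exclusions, e.g.
#   python find_and_edit.py n "\.py$" -v .git
#   python find_and_edit.py c "TODO" -v node_modules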
| 26.425197
| 77
| 0.554231
|
5e854bba3f4a3418ad628da8361ce824a62185d9
| 836
|
py
|
Python
|
ask-sdk-model-runtime/ask_sdk_model_runtime/lwa/__init__.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | 90
|
2018-09-19T21:56:42.000Z
|
2022-03-30T11:25:21.000Z
|
ask-sdk-model-runtime/ask_sdk_model_runtime/lwa/__init__.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | 11
|
2018-09-23T12:16:48.000Z
|
2021-06-10T19:49:45.000Z
|
ask-sdk-model-runtime/ask_sdk_model_runtime/lwa/__init__.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | 28
|
2018-09-19T22:30:38.000Z
|
2022-02-22T22:57:07.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
from __future__ import absolute_import
from .access_token_request import AccessTokenRequest
from .access_token_response import AccessTokenResponse
from .access_token import AccessToken
from .error import Error
from .lwa_client import LwaClient
| 36.347826
| 67
| 0.777512
|
d395273cd58442bc1168532d98b1f7b617b45eea
| 2,604
|
py
|
Python
|
t2/test3.py
|
thagd/matias.exe
|
87b5d425bc9a7334179b5fbce9cd5aa41caab302
|
[
"MIT"
] | 2
|
2019-10-31T03:51:49.000Z
|
2019-12-03T00:53:50.000Z
|
t2/test3.py
|
thagd/matias.exe
|
87b5d425bc9a7334179b5fbce9cd5aa41caab302
|
[
"MIT"
] | null | null | null |
t2/test3.py
|
thagd/matias.exe
|
87b5d425bc9a7334179b5fbce9cd5aa41caab302
|
[
"MIT"
] | 3
|
2019-09-03T00:48:16.000Z
|
2019-10-22T17:47:06.000Z
|
#!/usr/bin/env python3
import os
import random
from mytcputils import *
from mytcp import Servidor
class CamadaRede:
def __init__(self):
self.callback = None
self.fila = []
def registrar_recebedor(self, callback):
self.callback = callback
def enviar(self, segmento, dest_addr):
self.fila.append((segmento, dest_addr))
recebido = b''
def dados_recebidos(c, dados):
global recebido
recebido += dados
conexao = None
def conexao_aceita(c):
global conexao
conexao = c
c.registrar_recebedor(dados_recebidos)
rede = CamadaRede()
dst_port = random.randint(10, 1023)
servidor = Servidor(rede, dst_port)
servidor.registrar_monitor_de_conexoes_aceitas(conexao_aceita)
src_port = random.randint(1024, 0xffff)
seq_no = random.randint(0, 0xffff)
src_addr, dst_addr = '10.%d.1.%d'%(random.randint(1, 10), random.randint(0,255)), '10.%d.1.%d'%(random.randint(11, 20), random.randint(0, 255))
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, 0, FLAGS_SYN), src_addr, dst_addr))
segmento, _ = rede.fila[0]
_, _, ack_no, ack, flags, _, _, _ = read_header(segmento)
assert 4*(flags>>12) == len(segmento), 'The SYN+ACK should not carry a payload'
assert (flags & FLAGS_ACK) == FLAGS_ACK
rede.fila.clear()
seq_no += 1
ack_no += 1
assert ack == seq_no
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, ack_no, FLAGS_ACK), src_addr, dst_addr))
rede.fila.clear()
payload = os.urandom(MSS)
conexao.enviar(payload)
assert len(rede.fila) == 1
segmento, _ = rede.fila[0]
_, _, seq, ack, flags, _, _, _ = read_header(segmento)
assert seq == ack_no
assert (flags & FLAGS_ACK) == FLAGS_ACK and ack == seq_no
assert segmento[4*(flags>>12):] == payload
ack_no += MSS
rede.fila.clear()
payload = b'hello world'
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, ack_no, FLAGS_ACK) + payload, src_addr, dst_addr))
seq_no += len(payload)
assert recebido == payload
recebido = b''
rede.fila.clear()
for i in range(5):
nseg = random.randint(2,10)
payload = os.urandom(nseg*MSS)
conexao.enviar(payload)
for j in range(nseg):
segmento, _ = rede.fila.pop(0)
_, _, seq, ack, flags, _, _, _ = read_header(segmento)
assert seq == ack_no
assert (flags & FLAGS_ACK) == FLAGS_ACK and ack == seq_no
assert segmento[4*(flags>>12):] == payload[j*MSS:(j+1)*MSS]
ack_no += MSS
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, ack_no, FLAGS_ACK), src_addr, dst_addr))
| 32.962025
| 143
| 0.700461
|
8837f13b086e995bacb06f0469d46794af55ee66
| 7,797
|
py
|
Python
|
readthedocs/projects/constants.py
|
durwasa-chakraborty/readthedocs.org
|
8a2a95ef3f238e6320061a211f9d59c6c7e5b8bd
|
[
"MIT"
] | 1
|
2021-08-30T08:18:44.000Z
|
2021-08-30T08:18:44.000Z
|
readthedocs/projects/constants.py
|
durwasa-chakraborty/readthedocs.org
|
8a2a95ef3f238e6320061a211f9d59c6c7e5b8bd
|
[
"MIT"
] | null | null | null |
readthedocs/projects/constants.py
|
durwasa-chakraborty/readthedocs.org
|
8a2a95ef3f238e6320061a211f9d59c6c7e5b8bd
|
[
"MIT"
] | 1
|
2021-01-28T19:18:28.000Z
|
2021-01-28T19:18:28.000Z
|
# -*- coding: utf-8 -*-
"""
Project constants.
Default values and other various configuration for projects, including available
theme names and repository types.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import re
from django.utils.translation import ugettext_lazy as _
THEME_DEFAULT = 'default'
THEME_SPHINX = 'sphinxdoc'
THEME_SCROLLS = 'scrolls'
THEME_AGOGO = 'agogo'
THEME_TRADITIONAL = 'traditional'
THEME_NATURE = 'nature'
THEME_HAIKU = 'haiku'
DOCUMENTATION_CHOICES = (
('auto', _('Automatically Choose')),
('sphinx', _('Sphinx Html')),
('mkdocs', _('Mkdocs (Markdown)')),
('sphinx_htmldir', _('Sphinx HtmlDir')),
('sphinx_singlehtml', _('Sphinx Single Page HTML')),
)
DEFAULT_THEME_CHOICES = (
# Translators: This is a name of a Sphinx theme.
(THEME_DEFAULT, _('Default')),
# Translators: This is a name of a Sphinx theme.
(THEME_SPHINX, _('Sphinx Docs')),
# (THEME_SCROLLS, 'Scrolls'),
# (THEME_AGOGO, 'Agogo'),
# Translators: This is a name of a Sphinx theme.
(THEME_TRADITIONAL, _('Traditional')),
# Translators: This is a name of a Sphinx theme.
(THEME_NATURE, _('Nature')),
# Translators: This is a name of a Sphinx theme.
(THEME_HAIKU, _('Haiku')),
)
SAMPLE_FILES = (
('Installation', 'projects/samples/installation.rst.html'),
('Getting started', 'projects/samples/getting_started.rst.html'),
)
SCRAPE_CONF_SETTINGS = [
'copyright',
'project',
'version',
'release',
'source_suffix',
'html_theme',
'extensions',
]
HEADING_MARKUP = (
(1, '='),
(2, '-'),
(3, '^'),
(4, '"'),
)
LIVE_STATUS = 1
DELETED_STATUS = 99
STATUS_CHOICES = (
(LIVE_STATUS, _('Live')),
(DELETED_STATUS, _('Deleted')),
)
REPO_TYPE_GIT = 'git'
REPO_TYPE_SVN = 'svn'
REPO_TYPE_HG = 'hg'
REPO_TYPE_BZR = 'bzr'
REPO_CHOICES = (
(REPO_TYPE_GIT, _('Git')),
(REPO_TYPE_SVN, _('Subversion')),
(REPO_TYPE_HG, _('Mercurial')),
(REPO_TYPE_BZR, _('Bazaar')),
)
PUBLIC = 'public'
PROTECTED = 'protected'
PRIVATE = 'private'
PRIVACY_CHOICES = (
(PUBLIC, _('Public')),
(PROTECTED, _('Protected')),
(PRIVATE, _('Private')),
)
IMPORTANT_VERSION_FILTERS = {
'slug': 'important',
}
# In the future this constant can be replaced with an implementation that
# detects all available Python interpreters on the fly (maybe using the
# update-alternatives family of Linux tools?).
PYTHON_CHOICES = (
('python', _('CPython 2.x')),
('python3', _('CPython 3.x')),
)
# Via http://sphinx-doc.org/latest/config.html#confval-language
# Languages supported for the lang_slug in the URL
# Translations for builtin Sphinx messages only available for a subset of these
LANGUAGES = (
('aa', 'Afar'),
('ab', 'Abkhaz'),
('af', 'Afrikaans'),
('am', 'Amharic'),
('ar', 'Arabic'),
('as', 'Assamese'),
('ay', 'Aymara'),
('az', 'Azerbaijani'),
('ba', 'Bashkir'),
('be', 'Belarusian'),
('bg', 'Bulgarian'),
('bh', 'Bihari'),
('bi', 'Bislama'),
('bn', 'Bengali'),
('bo', 'Tibetan'),
('br', 'Breton'),
('ca', 'Catalan'),
('co', 'Corsican'),
('cs', 'Czech'),
('cy', 'Welsh'),
('da', 'Danish'),
('de', 'German'),
('dz', 'Dzongkha'),
('el', 'Greek'),
('en', 'English'),
('eo', 'Esperanto'),
('es', 'Spanish'),
('et', 'Estonian'),
('eu', 'Basque'),
('fa', 'Iranian'),
('fi', 'Finnish'),
('fj', 'Fijian'),
('fo', 'Faroese'),
('fr', 'French'),
('fy', 'Western Frisian'),
('ga', 'Irish'),
('gd', 'Scottish Gaelic'),
('gl', 'Galician'),
('gn', 'Guarani'),
('gu', 'Gujarati'),
('ha', 'Hausa'),
('hi', 'Hindi'),
('he', 'Hebrew'),
('hr', 'Croatian'),
('hu', 'Hungarian'),
('hy', 'Armenian'),
('ia', 'Interlingua'),
('id', 'Indonesian'),
('ie', 'Interlingue'),
('ik', 'Inupiaq'),
('is', 'Icelandic'),
('it', 'Italian'),
('iu', 'Inuktitut'),
('ja', 'Japanese'),
('jv', 'Javanese'),
('ka', 'Georgian'),
('kk', 'Kazakh'),
('kl', 'Kalaallisut'),
('km', 'Khmer'),
('kn', 'Kannada'),
('ko', 'Korean'),
('ks', 'Kashmiri'),
('ku', 'Kurdish'),
('ky', 'Kyrgyz'),
('la', 'Latin'),
('ln', 'Lingala'),
('lo', 'Lao'),
('lt', 'Lithuanian'),
('lv', 'Latvian'),
('mg', 'Malagasy'),
('mi', 'Maori'),
('mk', 'Macedonian'),
('ml', 'Malayalam'),
('mn', 'Mongolian'),
('mr', 'Marathi'),
('ms', 'Malay'),
('mt', 'Maltese'),
('my', 'Burmese'),
('na', 'Nauru'),
('ne', 'Nepali'),
('nl', 'Dutch'),
('no', 'Norwegian'),
('oc', 'Occitan'),
('om', 'Oromo'),
('or', 'Oriya'),
('pa', 'Panjabi'),
('pl', 'Polish'),
('ps', 'Pashto'),
('pt', 'Portuguese'),
('qu', 'Quechua'),
('rm', 'Romansh'),
('rn', 'Kirundi'),
('ro', 'Romanian'),
('ru', 'Russian'),
('rw', 'Kinyarwanda'),
('sa', 'Sanskrit'),
('sd', 'Sindhi'),
('sg', 'Sango'),
('si', 'Sinhala'),
('sk', 'Slovak'),
('sl', 'Slovenian'),
('sm', 'Samoan'),
('sn', 'Shona'),
('so', 'Somali'),
('sq', 'Albanian'),
('sr', 'Serbian'),
('ss', 'Swati'),
('st', 'Southern Sotho'),
('su', 'Sudanese'),
('sv', 'Swedish'),
('sw', 'Swahili'),
('ta', 'Tamil'),
('te', 'Telugu'),
('tg', 'Tajik'),
('th', 'Thai'),
('ti', 'Tigrinya'),
('tk', 'Turkmen'),
('tl', 'Tagalog'),
('tn', 'Tswana'),
('to', 'Tonga'),
('tr', 'Turkish'),
('ts', 'Tsonga'),
('tt', 'Tatar'),
('tw', 'Twi'),
('ug', 'Uyghur'),
('uk', 'Ukrainian'),
('ur', 'Urdu'),
('uz', 'Uzbek'),
('vi', 'Vietnamese'),
('vo', 'Volapuk'),
('wo', 'Wolof'),
('xh', 'Xhosa'),
('yi', 'Yiddish'),
('yo', 'Yoruba'),
('za', 'Zhuang'),
('zh', 'Chinese'),
('zu', 'Zulu'),
# Try these to test our non-2 letter language support
('nb_NO', 'Norwegian Bokmal'),
('pt_BR', 'Brazilian Portuguese'),
('uk_UA', 'Ukrainian'),
('zh_CN', 'Simplified Chinese'),
('zh_TW', 'Traditional Chinese'),
)
LANGUAGES_REGEX = '|'.join([re.escape(code[0]) for code in LANGUAGES])
PROGRAMMING_LANGUAGES = (
('words', 'Only Words'),
('py', 'Python'),
('js', 'JavaScript'),
('php', 'PHP'),
('ruby', 'Ruby'),
('perl', 'Perl'),
('java', 'Java'),
('go', 'Go'),
('julia', 'Julia'),
('c', 'C'),
('csharp', 'C#'),
('cpp', 'C++'),
('objc', 'Objective-C'),
('css', 'CSS'),
('ts', 'TypeScript'),
('swift', 'Swift'),
('vb', 'Visual Basic'),
('r', 'R'),
('scala', 'Scala'),
('groovy', 'Groovy'),
('coffee', 'CoffeeScript'),
('lua', 'Lua'),
('haskell', 'Haskell'),
('other', 'Other'),
)
LOG_TEMPLATE = '(Build) [{project}:{version}] {msg}'
PROJECT_PK_REGEX = '(?:[-\w]+)'
PROJECT_SLUG_REGEX = '(?:[-\w]+)'
GITHUB_REGEXS = [
re.compile('github.com/(.+)/(.+)(?:\.git){1}$'),
re.compile('github.com/(.+)/(.+)'),
re.compile('github.com:(.+)/(.+)\.git$'),
]
BITBUCKET_REGEXS = [
re.compile('@bitbucket.org/(.+)/(.+)\.git$'),
re.compile('bitbucket.org/(.+)/(.+)/'),
re.compile('bitbucket.org/(.+)/(.+)'),
re.compile('bitbucket.org:(.+)/(.+)\.git$'),
]
GITLAB_REGEXS = [
re.compile('gitlab.com/(.+)/(.+)(?:\.git){1}$'),
re.compile('gitlab.com/(.+)/(.+)'),
re.compile('gitlab.com:(.+)/(.+)\.git$'),
]
GITHUB_URL = (
'https://github.com/{user}/{repo}/'
'{action}/{version}{docroot}{path}{source_suffix}')
BITBUCKET_URL = (
'https://bitbucket.org/{user}/{repo}/'
'src/{version}{docroot}{path}{source_suffix}')
GITLAB_URL = (
'https://gitlab.com/{user}/{repo}/'
'{action}/{version}{docroot}{path}{source_suffix}')
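# Illustrative sketch, not part of the original module: how the URL templates
# above are meant to be filled in. The concrete user/repo/version values are
# made up for the example.
if __name__ == '__main__':
    example = GITHUB_URL.format(
        user='readthedocs', repo='readthedocs.org', action='blob',
        version='master', docroot='/docs/', path='index', source_suffix='.rst')
    print(example)  # https://github.com/readthedocs/readthedocs.org/blob/master/docs/index.rst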
| 24.442006
| 80
| 0.518276
|
5604b08806f3ac5333bd16ffa3dd09298725c336
| 236
|
py
|
Python
|
Leetcode/0344. Reverse String.py
|
luckyrabbit85/Python
|
ed134fd70b4a7b84b183b87b85ad5190f54c9526
|
[
"MIT"
] | 1
|
2021-07-15T18:40:26.000Z
|
2021-07-15T18:40:26.000Z
|
Leetcode/0344. Reverse String.py
|
luckyrabbit85/Python
|
ed134fd70b4a7b84b183b87b85ad5190f54c9526
|
[
"MIT"
] | null | null | null |
Leetcode/0344. Reverse String.py
|
luckyrabbit85/Python
|
ed134fd70b4a7b84b183b87b85ad5190f54c9526
|
[
"MIT"
] | null | null | null |
class Solution:
def reverseString(self, s: list[str]) -> None:
left = 0
right = len(s) - 1
while left < right:
s[left], s[right] = s[right], s[left]
            left, right = left + 1, right - 1
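# Quick illustrative check of the two-pointer swap above (not part of the
# original solution file).
if __name__ == "__main__":
    chars = ["h", "e", "l", "l", "o"]
    Solution().reverseString(chars)
    assert chars == ["o", "l", "l", "e", "h"]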
| 26.222222
| 50
| 0.491525
|
abfbb3de1dc0b20f835c187f60dc0768a353041f
| 16,772
|
py
|
Python
|
gym/monitoring/monitor.py
|
leopauly/Observation-Learning-Simulations
|
462c04a87c45aae51537b8ea5b44646afa31d3a5
|
[
"MIT"
] | 49
|
2017-12-11T11:00:02.000Z
|
2022-03-30T05:19:31.000Z
|
gym/monitoring/monitor.py
|
leopauly/Observation-Learning-Simulations
|
462c04a87c45aae51537b8ea5b44646afa31d3a5
|
[
"MIT"
] | 2
|
2018-01-01T17:39:56.000Z
|
2019-07-24T04:49:08.000Z
|
gym/monitoring/monitor.py
|
leopauly/Observation-Learning-Simulations
|
462c04a87c45aae51537b8ea5b44646afa31d3a5
|
[
"MIT"
] | 12
|
2017-12-13T11:52:17.000Z
|
2020-12-03T00:53:29.000Z
|
import atexit
import logging
import json
import numpy as np
import os
import six
import sys
import threading
import weakref
from gym import error, version
from gym.monitoring import stats_recorder, video_recorder
from gym.utils import atomic_write, closer, seeding
logger = logging.getLogger(__name__)
FILE_PREFIX = 'openaigym'
MANIFEST_PREFIX = FILE_PREFIX + '.manifest'
def detect_training_manifests(training_dir, files=None):
if files is None:
files = os.listdir(training_dir)
return [os.path.join(training_dir, f) for f in files if f.startswith(MANIFEST_PREFIX + '.')]
def detect_monitor_files(training_dir):
return [os.path.join(training_dir, f) for f in os.listdir(training_dir) if f.startswith(FILE_PREFIX + '.')]
def clear_monitor_files(training_dir):
files = detect_monitor_files(training_dir)
if len(files) == 0:
return
logger.info('Clearing %d monitor files from previous run (because force=True was provided)', len(files))
for file in files:
os.unlink(file)
def capped_cubic_video_schedule(episode_id):
if episode_id < 1000:
return int(round(episode_id ** (1. / 3))) ** 3 == episode_id
else:
return episode_id % 1000 == 0
def disable_videos(episode_id):
return False
monitor_closer = closer.Closer()
# This method gets used for a sanity check in scoreboard/api.py. It's
# not intended for use outside of the gym codebase.
def _open_monitors():
return list(monitor_closer.closeables.values())
class Monitor(object):
"""A configurable monitor for your training runs.
Every env has an attached monitor, which you can access as
'env.monitor'. Simple usage is just to call 'monitor.start(dir)'
to begin monitoring and 'monitor.close()' when training is
complete. This will record stats and will periodically record a video.
For finer-grained control over how often videos are collected, use the
video_callable argument, e.g.
'monitor.start(video_callable=lambda count: count % 100 == 0)'
to record every 100 episodes. ('count' is how many episodes have completed)
Depending on the environment, video can slow down execution. You
can also use 'monitor.configure(video_callable=lambda count: False)' to disable
video.
Monitor supports multiple threads and multiple processes writing
to the same directory of training data. The data will later be
joined by scoreboard.upload_training_data and on the server.
Args:
env (gym.Env): The environment instance to monitor.
Attributes:
id (Optional[str]): The ID of the monitored environment
"""
def __init__(self, env):
# Python's GC allows refcycles *or* for objects to have a
# __del__ method. So we need to maintain a weakref to env.
#
# https://docs.python.org/2/library/gc.html#gc.garbage
self._env_ref = weakref.ref(env)
self.videos = []
self.stats_recorder = None
self.video_recorder = None
self.enabled = False
self.episode_id = 0
self._monitor_id = None
@property
def env(self):
env = self._env_ref()
if env is None:
raise error.Error("env has been garbage collected. To keep using a monitor, you must keep around a reference to the env object. (HINT: try assigning the env to a variable in your code.)")
return env
def start(self, directory, video_callable=None, force=False, resume=True,
write_upon_reset=False, uid=None, mode=None):
"""Start monitoring.
Args:
directory (str): A per-training run directory where to record stats.
video_callable (Optional[function, False]): function that takes in the index of the episode and outputs a boolean, indicating whether we should record a video on this episode. The default (for video_callable is None) is to take perfect cubes, capped at 1000. False disables video recording.
force (bool): Clear out existing training data from this directory (by deleting every file prefixed with "openaigym.").
resume (bool): Retain the training data already in this directory, which will be merged with our new data
write_upon_reset (bool): Write the manifest file on each reset. (This is currently a JSON file, so writing it is somewhat expensive.)
uid (Optional[str]): A unique id used as part of the suffix for the file. By default, uses os.getpid().
mode (['evaluation', 'training']): Whether this is an evaluation or training episode.
"""
if self.env.spec is None:
logger.warn("Trying to monitor an environment which has no 'spec' set. This usually means you did not create it via 'gym.make', and is recommended only for advanced users.")
env_id = '(unknown)'
else:
env_id = self.env.spec.id
if not os.path.exists(directory):
logger.info('Creating monitor directory %s', directory)
if six.PY3:
os.makedirs(directory, exist_ok=True)
else:
os.makedirs(directory)
if video_callable is None:
video_callable = capped_cubic_video_schedule
elif video_callable == False:
video_callable = disable_videos
elif not callable(video_callable):
raise error.Error('You must provide a function, None, or False for video_callable, not {}: {}'.format(type(video_callable), video_callable))
# Check on whether we need to clear anything
if force:
clear_monitor_files(directory)
elif not resume:
training_manifests = detect_training_manifests(directory)
if len(training_manifests) > 0:
raise error.Error('''Trying to write to monitor directory {} with existing monitor files: {}.
You should use a unique directory for each training run, or use 'force=True' to automatically clear previous monitor files.'''.format(directory, ', '.join(training_manifests[:5])))
self._monitor_id = monitor_closer.register(self)
self.enabled = True
self.directory = os.path.abspath(directory)
# We use the 'openai-gym' prefix to determine if a file is
# ours
self.file_prefix = FILE_PREFIX
self.file_infix = '{}.{}'.format(self._monitor_id, uid if uid else os.getpid())
self.stats_recorder = stats_recorder.StatsRecorder(directory, '{}.episode_batch.{}'.format(self.file_prefix, self.file_infix), autoreset=self.env.metadata.get('semantics.autoreset'), env_id=env_id)
self.configure(video_callable=video_callable)
if not os.path.exists(directory):
os.mkdir(directory)
self.write_upon_reset = write_upon_reset
if mode is not None:
self._set_mode(mode)
def flush(self, force=False):
"""Flush all relevant monitor information to disk."""
if not self.write_upon_reset and not force:
return
self.stats_recorder.flush()
        # Give it a very distinguished name, since we need to pick it
# up from the filesystem later.
path = os.path.join(self.directory, '{}.manifest.{}.manifest.json'.format(self.file_prefix, self.file_infix))
logger.debug('Writing training manifest file to %s', path)
with atomic_write.atomic_write(path) as f:
# We need to write relative paths here since people may
# move the training_dir around. It would be cleaner to
# already have the basenames rather than basename'ing
# manually, but this works for now.
json.dump({
'stats': os.path.basename(self.stats_recorder.path),
'videos': [(os.path.basename(v), os.path.basename(m))
for v, m in self.videos],
'env_info': self._env_info(),
}, f)
def close(self):
"""Flush all monitor data to disk and close any open rending windows."""
if not self.enabled:
return
self.stats_recorder.close()
if self.video_recorder is not None:
self._close_video_recorder()
self.flush(force=True)
env = self._env_ref()
# Only take action if the env hasn't been GC'd
if env is not None:
# Note we'll close the env's rendering window even if we did
            # not open it. There isn't a particularly great way to know if
# we did, since some environments will have a window pop up
# during video recording.
try:
env.render(close=True)
except Exception as e:
if env.spec:
key = env.spec.id
else:
key = env
# We don't want to avoid writing the manifest simply
# because we couldn't close the renderer.
logger.error('Could not close renderer for %s: %s', key, e)
# Remove the env's pointer to this monitor
if hasattr(env, '_monitor'):
del env._monitor
# Stop tracking this for autoclose
monitor_closer.unregister(self._monitor_id)
self.enabled = False
logger.info('''Finished writing results. You can upload them to the scoreboard via gym.upload(%r)''', self.directory)
def configure(self, video_callable=None, mode=None):
"""Reconfigure the monitor.
video_callable (function): Whether to record video to upload to the scoreboard.
mode (['evaluation', 'training']): Whether this is an evaluation or training episode.
"""
if not self.enabled:
raise error.Error('Can only configure an enabled monitor. (HINT: did you already close this monitor?)')
if video_callable is not None:
self.video_callable = video_callable
if mode is not None:
self._set_mode(mode)
def _set_mode(self, mode):
if mode == 'evaluation':
type = 'e'
elif mode == 'training':
type = 't'
else:
raise error.Error('Invalid mode {}: must be "training" or "evaluation"', mode)
self.stats_recorder.type = type
def _before_step(self, action):
if not self.enabled: return
self.stats_recorder.before_step(action)
def _after_step(self, observation, reward, done, info):
if not self.enabled: return done
# Add 1 since about to take another step
if self.env.spec and self.stats_recorder.steps+1 >= self.env.spec.timestep_limit:
logger.debug('Ending episode %i because it reached the timestep limit of %i.', self.episode_id, self.env.spec.timestep_limit)
done = True
# Record stats
self.stats_recorder.after_step(observation, reward, done, info)
# Record video
self.video_recorder.capture_frame()
return done
def _before_reset(self):
if not self.enabled: return
self.stats_recorder.before_reset()
def _after_reset(self, observation):
if not self.enabled: return
# Reset the stat count
self.stats_recorder.after_reset(observation)
# Close any existing video recorder
if self.video_recorder:
self._close_video_recorder()
# Start recording the next video.
#
# TODO: calculate a more correct 'episode_id' upon merge
self.video_recorder = video_recorder.VideoRecorder(
env=self.env,
base_path=os.path.join(self.directory, '{}.video.{}.video{:06}'.format(self.file_prefix, self.file_infix, self.episode_id)),
metadata={'episode_id': self.episode_id},
enabled=self._video_enabled(),
)
self.video_recorder.capture_frame()
# Bump *after* all reset activity has finished
self.episode_id += 1
self.flush()
def _close_video_recorder(self):
self.video_recorder.close()
if self.video_recorder.functional:
self.videos.append((self.video_recorder.path, self.video_recorder.metadata_path))
def _video_enabled(self):
return self.video_callable(self.episode_id)
def _env_info(self):
env_info = {
'gym_version': version.VERSION,
}
if self.env.spec:
env_info['env_id'] = self.env.spec.id
return env_info
def __del__(self):
# Make sure we've closed up shop when garbage collecting
self.close()
def load_env_info_from_manifests(manifests, training_dir):
env_infos = []
for manifest in manifests:
with open(manifest) as f:
contents = json.load(f)
env_infos.append(contents['env_info'])
env_info = collapse_env_infos(env_infos, training_dir)
return env_info
def load_results(training_dir):
if not os.path.exists(training_dir):
logger.error('Training directory %s not found', training_dir)
return
manifests = detect_training_manifests(training_dir)
if not manifests:
logger.error('No manifests found in training directory %s', training_dir)
return
logger.debug('Uploading data from manifest %s', ', '.join(manifests))
# Load up stats + video files
stats_files = []
videos = []
env_infos = []
for manifest in manifests:
with open(manifest) as f:
contents = json.load(f)
# Make these paths absolute again
stats_files.append(os.path.join(training_dir, contents['stats']))
videos += [(os.path.join(training_dir, v), os.path.join(training_dir, m))
for v, m in contents['videos']]
env_infos.append(contents['env_info'])
env_info = collapse_env_infos(env_infos, training_dir)
data_sources, initial_reset_timestamps, timestamps, episode_lengths, episode_rewards, episode_types, initial_reset_timestamp = merge_stats_files(stats_files)
return {
'manifests': manifests,
'env_info': env_info,
'data_sources': data_sources,
'timestamps': timestamps,
'episode_lengths': episode_lengths,
'episode_rewards': episode_rewards,
'episode_types': episode_types,
'initial_reset_timestamps': initial_reset_timestamps,
'initial_reset_timestamp': initial_reset_timestamp,
'videos': videos,
}
def merge_stats_files(stats_files):
timestamps = []
episode_lengths = []
episode_rewards = []
episode_types = []
initial_reset_timestamps = []
data_sources = []
for i, path in enumerate(stats_files):
with open(path) as f:
content = json.load(f)
if len(content['timestamps'])==0: continue # so empty file doesn't mess up results, due to null initial_reset_timestamp
data_sources += [i] * len(content['timestamps'])
timestamps += content['timestamps']
episode_lengths += content['episode_lengths']
episode_rewards += content['episode_rewards']
# Recent addition
episode_types += content.get('episode_types', [])
# Keep track of where each episode came from.
initial_reset_timestamps.append(content['initial_reset_timestamp'])
idxs = np.argsort(timestamps)
timestamps = np.array(timestamps)[idxs].tolist()
episode_lengths = np.array(episode_lengths)[idxs].tolist()
episode_rewards = np.array(episode_rewards)[idxs].tolist()
data_sources = np.array(data_sources)[idxs].tolist()
if episode_types:
episode_types = np.array(episode_types)[idxs].tolist()
else:
episode_types = None
if len(initial_reset_timestamps) > 0:
initial_reset_timestamp = min(initial_reset_timestamps)
else:
initial_reset_timestamp = 0
return data_sources, initial_reset_timestamps, timestamps, episode_lengths, episode_rewards, episode_types, initial_reset_timestamp
# TODO training_dir isn't used except for error messages, clean up the layering
def collapse_env_infos(env_infos, training_dir):
assert len(env_infos) > 0
first = env_infos[0]
for other in env_infos[1:]:
if first != other:
raise error.Error('Found two unequal env_infos: {} and {}. This usually indicates that your training directory {} has commingled results from multiple runs.'.format(first, other, training_dir))
for key in ['env_id', 'gym_version']:
if key not in first:
raise error.Error("env_info {} from training directory {} is missing expected key {}. This is unexpected and likely indicates a bug in gym.".format(first, training_dir, key))
return first
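if __name__ == '__main__':
    # Illustrative usage sketch based on the Monitor docstring above, not part
    # of the original module. Assumes an environment created via gym.make,
    # which attaches a Monitor instance as env.monitor.
    import gym
    env = gym.make('CartPole-v0')
    env.monitor.start('/tmp/cartpole-monitor', force=True)
    env.monitor.configure(video_callable=lambda count: count % 100 == 0)
    env.reset()
    for _ in range(100):
        _, _, done, _ = env.step(env.action_space.sample())
        if done:
            env.reset()
    env.monitor.close()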
| 39.650118
| 302
| 0.65347
|
98a6f97dc7e5d51b44400e6e081f60e61b196b84
| 257
|
py
|
Python
|
python/main.py
|
katelyn98/MachineLearning
|
bb03dd8a9370c509446ceda17b2b623c6efe7106
|
[
"MIT"
] | null | null | null |
python/main.py
|
katelyn98/MachineLearning
|
bb03dd8a9370c509446ceda17b2b623c6efe7106
|
[
"MIT"
] | 5
|
2020-08-14T02:58:46.000Z
|
2020-08-14T19:31:39.000Z
|
python/main.py
|
katelyn98/MachineLearning
|
bb03dd8a9370c509446ceda17b2b623c6efe7106
|
[
"MIT"
] | 1
|
2021-06-29T19:17:43.000Z
|
2021-06-29T19:17:43.000Z
|
'''
Name: main.py
Description:
author: @katelyn98
'''
import cv2 as cv
import func.processVideo as ppv
font = cv.FONT_HERSHEY_SIMPLEX
if __name__ == '__main__':
cap = cv.VideoCapture(0)
font = cv.FONT_HERSHEY_SIMPLEX
ppv.process(cap, font)
| 15.117647
| 34
| 0.70428
|
e3b58dff4bf5a42e21a8525fe35fa7903cb71f0a
| 1,344
|
py
|
Python
|
Undergraduate/COMP312/Assignment6/Programs/to-pattern.py
|
danielbraithwt/University
|
50c6a904e1c53c03bce9928975607c35fd741e33
|
[
"MIT"
] | null | null | null |
Undergraduate/COMP312/Assignment6/Programs/to-pattern.py
|
danielbraithwt/University
|
50c6a904e1c53c03bce9928975607c35fd741e33
|
[
"MIT"
] | 1
|
2016-12-09T00:17:19.000Z
|
2016-12-09T00:28:42.000Z
|
Undergraduate/COMP312/Assignment6/Programs/to-pattern.py
|
danielbraithwt/University
|
50c6a904e1c53c03bce9928975607c35fd741e33
|
[
"MIT"
] | 1
|
2020-04-23T23:02:31.000Z
|
2020-04-23T23:02:31.000Z
|
data = {'Iris-setosa': [], 'Iris-versicolor': [], 'Iris-virginica': []}
f = open("part1/iris-training.txt")
lines = f.read().split('\n')
f.close()
maxvals = [float('-inf'), float('-inf'), float('-inf')]
minvals = [float('inf'), float('inf'), float('inf')]
for line in lines:
    if not line.strip():
        continue
    point = line.split(' ')
v = (float(point[0]), float(point[1]), float(point[2]))
for i in range(3):
        if v[i] > maxvals[i]:
            maxvals[i] = v[i]
        if v[i] < minvals[i]:
            minvals[i] = v[i]
c = point[3]
data[c].append([c, v])
# Compute the ranges of the values
r = [0, 0, 0]
for i in range(3):
r[i] = maxvals[i] - minvals[i]
# Normalise the data
for d in data.values():
for pattern in d:
ov = pattern[1]
nv = (ov[0]/r[0], ov[1]/r[1], ov[2]/r[2])
pattern[1] = nv
total_patterns = 0
for d in data.values():
total_patterns += len(d)
#output = file('part1/iris-training.pat')
#output.write('SNNS pattern definition file V3.2\n')
#output.write('generated at Mon Apr 25 17:47:12 1994\n')
#output.write('\n\n')
#output.write('No. of patterns : ' + total_patterns)
#output.write('No. of input units : ' + 3)
#output.write('No. of output units : ' + 3)
#output.write('\n')
#i = 0
#for d in data:
# for pattern in d:
# i += 1
# output.write('# Input pattern ' + i + ':\n')
#output.write('# Input pattern ')
#output.close()
| 19.764706
| 71
| 0.620536
|
2d75e99dbc31eafa2535f4145b4c6d1ee877ebae
| 18,200
|
py
|
Python
|
tests/test_models.py
|
odinuge/django-push-notifications
|
03ef4318aafc5135490c12b38979307859ba97ef
|
[
"MIT"
] | null | null | null |
tests/test_models.py
|
odinuge/django-push-notifications
|
03ef4318aafc5135490c12b38979307859ba97ef
|
[
"MIT"
] | 1
|
2021-06-25T15:16:55.000Z
|
2021-06-25T15:16:55.000Z
|
tests/test_models.py
|
gagantrivedi/django-push-notifications
|
e51692b79be370d66a8e7b67c5c95ec27c5662ab
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import json
from django.test import TestCase
from django.utils import timezone
from push_notifications.gcm import GCMError, send_bulk_message
from push_notifications.models import APNSDevice, GCMDevice
from . import responses
from ._mock import mock
class GCMModelTestCase(TestCase):
def _create_devices(self, devices):
for device in devices:
GCMDevice.objects.create(registration_id=device, cloud_message_type="GCM")
def _create_fcm_devices(self, devices):
for device in devices:
GCMDevice.objects.create(registration_id=device, cloud_message_type="FCM")
def test_can_save_gcm_device(self):
device = GCMDevice.objects.create(
registration_id="a valid registration id", cloud_message_type="GCM"
)
assert device.id is not None
assert device.date_created is not None
assert device.date_created.date() == timezone.now().date()
def test_can_create_save_device(self):
device = APNSDevice.objects.create(registration_id="a valid registration id")
assert device.id is not None
assert device.date_created is not None
assert device.date_created.date() == timezone.now().date()
def test_gcm_send_message(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="GCM")
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=responses.GCM_JSON
) as p:
device.send_message("Hello world")
p.assert_called_once_with(
json.dumps({
"data": {"message": "Hello world"},
"registration_ids": ["abc"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"),
"application/json", application_id=None
)
def test_gcm_send_message_extra(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="GCM")
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=responses.GCM_JSON
) as p:
device.send_message("Hello world", extra={"foo": "bar"}, collapse_key="test_key")
p.assert_called_once_with(
json.dumps({
"collapse_key": "test_key",
"data": {"message": "Hello world", "foo": "bar"},
"registration_ids": ["abc"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"),
"application/json", application_id=None
)
def test_gcm_send_message_collapse_key(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="GCM")
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=responses.GCM_JSON
) as p:
device.send_message("Hello world", collapse_key="test_key")
p.assert_called_once_with(
json.dumps({
"data": {"message": "Hello world"},
"registration_ids": ["abc"],
"collapse_key": "test_key"
}, separators=(",", ":"), sort_keys=True).encode("utf-8"),
"application/json", application_id=None
)
def test_gcm_send_message_to_multiple_devices(self):
self._create_devices(["abc", "abc1"])
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=responses.GCM_JSON_MULTIPLE
) as p:
GCMDevice.objects.all().send_message("Hello world")
p.assert_called_once_with(
json.dumps({
"data": {"message": "Hello world"},
"registration_ids": ["abc", "abc1"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"),
"application/json", application_id=None
)
def test_gcm_send_message_active_devices(self):
GCMDevice.objects.create(registration_id="abc", active=True, cloud_message_type="GCM")
GCMDevice.objects.create(registration_id="xyz", active=False, cloud_message_type="GCM")
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=responses.GCM_JSON_MULTIPLE
) as p:
GCMDevice.objects.all().send_message("Hello world")
p.assert_called_once_with(
json.dumps({
"data": {"message": "Hello world"},
"registration_ids": ["abc"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"),
"application/json", application_id=None
)
def test_gcm_send_message_collapse_to_multiple_devices(self):
self._create_devices(["abc", "abc1"])
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=responses.GCM_JSON_MULTIPLE
) as p:
GCMDevice.objects.all().send_message("Hello world", collapse_key="test_key")
p.assert_called_once_with(
json.dumps({
"collapse_key": "test_key",
"data": {"message": "Hello world"},
"registration_ids": ["abc", "abc1"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"),
"application/json", application_id=None
)
def test_gcm_send_message_to_single_device_with_error(self):
# these errors are device specific, device.active will be set false
devices = ["abc", "abc1"]
self._create_devices(devices)
errors = [
responses.GCM_JSON_ERROR_NOTREGISTERED,
responses.GCM_JSON_ERROR_INVALIDREGISTRATION
]
for index, error in enumerate(errors):
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=error):
device = GCMDevice.objects.get(registration_id=devices[index])
device.send_message("Hello World!")
assert GCMDevice.objects.get(registration_id=devices[index]).active is False
def test_gcm_send_message_to_single_device_with_error_mismatch(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="GCM")
with mock.patch(
"push_notifications.gcm._gcm_send",
return_value=responses.GCM_JSON_ERROR_MISMATCHSENDERID
):
# these errors are not device specific, GCMError should be thrown
with self.assertRaises(GCMError):
device.send_message("Hello World!")
assert GCMDevice.objects.get(registration_id="abc").active is True
def test_gcm_send_message_to_multiple_devices_with_error(self):
self._create_devices(["abc", "abc1", "abc2"])
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=responses.GCM_JSON_MULTIPLE_ERROR
):
devices = GCMDevice.objects.all()
devices.send_message("Hello World")
assert not GCMDevice.objects.get(registration_id="abc").active
assert GCMDevice.objects.get(registration_id="abc1").active
assert not GCMDevice.objects.get(registration_id="abc2").active
def test_gcm_send_message_to_multiple_devices_with_error_b(self):
self._create_devices(["abc", "abc1", "abc2"])
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=responses.GCM_JSON_MULTIPLE_ERROR_B
):
devices = GCMDevice.objects.all()
with self.assertRaises(GCMError):
devices.send_message("Hello World")
assert GCMDevice.objects.get(registration_id="abc").active is True
assert GCMDevice.objects.get(registration_id="abc1").active is True
assert GCMDevice.objects.get(registration_id="abc2").active is False
def test_gcm_send_message_to_multiple_devices_with_canonical_id(self):
self._create_devices(["foo", "bar"])
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=responses.GCM_JSON_MULTIPLE_CANONICAL_ID
):
GCMDevice.objects.all().send_message("Hello World")
assert not GCMDevice.objects.filter(registration_id="foo").exists()
assert GCMDevice.objects.filter(registration_id="bar").exists()
assert GCMDevice.objects.filter(registration_id="NEW_REGISTRATION_ID").exists() is True
def test_gcm_send_message_to_single_user_with_canonical_id(self):
old_registration_id = "foo"
self._create_devices([old_registration_id])
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=responses.GCM_JSON_CANONICAL_ID
):
GCMDevice.objects.get(registration_id=old_registration_id).send_message("Hello World")
assert not GCMDevice.objects.filter(registration_id=old_registration_id).exists()
assert GCMDevice.objects.filter(registration_id="NEW_REGISTRATION_ID").exists()
def test_gcm_send_message_to_same_devices_with_canonical_id(self):
first_device = GCMDevice.objects.create(
registration_id="foo", active=True, cloud_message_type="GCM"
)
second_device = GCMDevice.objects.create(
registration_id="bar", active=False, cloud_message_type="GCM"
)
with mock.patch(
"push_notifications.gcm._gcm_send",
return_value=responses.GCM_JSON_CANONICAL_ID_SAME_DEVICE
):
GCMDevice.objects.all().send_message("Hello World")
assert first_device.active is True
assert second_device.active is False
def test_gcm_send_message_with_no_reg_ids(self):
self._create_devices(["abc", "abc1"])
with mock.patch("push_notifications.gcm._cm_send_request", return_value="") as p:
GCMDevice.objects.filter(registration_id="xyz").send_message("Hello World")
p.assert_not_called()
with mock.patch("push_notifications.gcm._cm_send_request", return_value="") as p:
reg_ids = [obj.registration_id for obj in GCMDevice.objects.all()]
send_bulk_message(reg_ids, {"message": "Hello World"}, "GCM")
p.assert_called_once_with(
[u"abc", u"abc1"], {"message": "Hello World"}, cloud_type="GCM", application_id=None
)
def test_fcm_send_message(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="FCM")
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=responses.GCM_JSON
) as p:
device.send_message("Hello world")
p.assert_called_once_with(
json.dumps({
"notification": {"body": "Hello world"},
"registration_ids": ["abc"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"),
"application/json", application_id=None
)
def test_fcm_send_message_extra_data(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="FCM")
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=responses.GCM_JSON
) as p:
device.send_message("Hello world", extra={"foo": "bar"})
p.assert_called_once_with(
json.dumps({
"data": {"foo": "bar"},
"notification": {"body": "Hello world"},
"registration_ids": ["abc"],
}, separators=(",", ":"), sort_keys=True).encode("utf-8"), "application/json",
application_id=None
)
def test_fcm_send_message_extra_options(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="FCM")
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=responses.GCM_JSON
) as p:
device.send_message("Hello world", collapse_key="test_key", foo="bar")
p.assert_called_once_with(
json.dumps({
"collapse_key": "test_key",
"notification": {"body": "Hello world"},
"registration_ids": ["abc"],
}, separators=(",", ":"), sort_keys=True).encode("utf-8"), "application/json",
application_id=None
)
def test_fcm_send_message_extra_notification(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="FCM")
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=responses.GCM_JSON
) as p:
device.send_message("Hello world", extra={"icon": "test_icon"}, title="test")
p.assert_called_once_with(
json.dumps({
"notification": {"body": "Hello world", "title": "test", "icon": "test_icon"},
"registration_ids": ["abc"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"),
"application/json", application_id=None
)
def test_fcm_send_message_extra_options_and_notification_and_data(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="FCM")
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=responses.GCM_JSON
) as p:
device.send_message(
"Hello world",
extra={"foo": "bar", "icon": "test_icon"},
title="test",
collapse_key="test_key"
)
p.assert_called_once_with(
json.dumps({
"notification": {"body": "Hello world", "title": "test", "icon": "test_icon"},
"data": {"foo": "bar"},
"registration_ids": ["abc"],
"collapse_key": "test_key"
}, separators=(",", ":"), sort_keys=True).encode("utf-8"),
"application/json", application_id=None
)
def test_fcm_send_message_to_multiple_devices(self):
self._create_fcm_devices(["abc", "abc1"])
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=responses.GCM_JSON_MULTIPLE
) as p:
GCMDevice.objects.all().send_message("Hello world")
p.assert_called_once_with(
json.dumps({
"notification": {"body": "Hello world"},
"registration_ids": ["abc", "abc1"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"),
"application/json", application_id=None
)
def test_fcm_send_message_active_devices(self):
GCMDevice.objects.create(registration_id="abc", active=True, cloud_message_type="FCM")
GCMDevice.objects.create(registration_id="xyz", active=False, cloud_message_type="FCM")
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=responses.GCM_JSON_MULTIPLE
) as p:
GCMDevice.objects.all().send_message("Hello world")
p.assert_called_once_with(
json.dumps({
"notification": {"body": "Hello world"},
"registration_ids": ["abc"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"),
"application/json", application_id=None
)
def test_fcm_send_message_collapse_to_multiple_devices(self):
self._create_fcm_devices(["abc", "abc1"])
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=responses.GCM_JSON_MULTIPLE
) as p:
GCMDevice.objects.all().send_message("Hello world", collapse_key="test_key")
p.assert_called_once_with(
json.dumps({
"collapse_key": "test_key",
"notification": {"body": "Hello world"},
"registration_ids": ["abc", "abc1"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"),
"application/json", application_id=None
)
def test_fcm_send_message_to_single_device_with_error(self):
# these errors are device specific, device.active will be set false
devices = ["abc", "abc1"]
self._create_fcm_devices(devices)
errors = [
responses.GCM_JSON_ERROR_NOTREGISTERED,
responses.GCM_JSON_ERROR_INVALIDREGISTRATION
]
for index, error in enumerate(errors):
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=error):
device = GCMDevice.objects.get(registration_id=devices[index])
device.send_message("Hello World!")
assert GCMDevice.objects.get(registration_id=devices[index]).active is False
def test_fcm_send_message_to_single_device_with_error_mismatch(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="FCM")
with mock.patch(
"push_notifications.gcm._fcm_send",
return_value=responses.GCM_JSON_ERROR_MISMATCHSENDERID
):
# these errors are not device specific, GCMError should be thrown
with self.assertRaises(GCMError):
device.send_message("Hello World!")
assert GCMDevice.objects.get(registration_id="abc").active is True
def test_fcm_send_message_to_multiple_devices_with_error(self):
self._create_fcm_devices(["abc", "abc1", "abc2"])
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=responses.GCM_JSON_MULTIPLE_ERROR
):
devices = GCMDevice.objects.all()
devices.send_message("Hello World")
assert not GCMDevice.objects.get(registration_id="abc").active
assert GCMDevice.objects.get(registration_id="abc1").active
assert not GCMDevice.objects.get(registration_id="abc2").active
def test_fcm_send_message_to_multiple_devices_with_error_b(self):
self._create_fcm_devices(["abc", "abc1", "abc2"])
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=responses.GCM_JSON_MULTIPLE_ERROR_B
):
devices = GCMDevice.objects.all()
with self.assertRaises(GCMError):
devices.send_message("Hello World")
assert GCMDevice.objects.get(registration_id="abc").active is True
assert GCMDevice.objects.get(registration_id="abc1").active is True
assert GCMDevice.objects.get(registration_id="abc2").active is False
def test_fcm_send_message_to_multiple_devices_with_canonical_id(self):
self._create_fcm_devices(["foo", "bar"])
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=responses.GCM_JSON_MULTIPLE_CANONICAL_ID
):
GCMDevice.objects.all().send_message("Hello World")
assert not GCMDevice.objects.filter(registration_id="foo").exists()
assert GCMDevice.objects.filter(registration_id="bar").exists()
assert GCMDevice.objects.filter(registration_id="NEW_REGISTRATION_ID").exists() is True
def test_fcm_send_message_to_single_user_with_canonical_id(self):
old_registration_id = "foo"
self._create_fcm_devices([old_registration_id])
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=responses.GCM_JSON_CANONICAL_ID
):
GCMDevice.objects.get(registration_id=old_registration_id).send_message("Hello World")
assert not GCMDevice.objects.filter(registration_id=old_registration_id).exists()
assert GCMDevice.objects.filter(registration_id="NEW_REGISTRATION_ID").exists()
def test_fcm_send_message_to_same_devices_with_canonical_id(self):
first_device = GCMDevice.objects.create(
registration_id="foo", active=True, cloud_message_type="FCM"
)
second_device = GCMDevice.objects.create(
registration_id="bar", active=False, cloud_message_type="FCM"
)
with mock.patch(
"push_notifications.gcm._fcm_send",
return_value=responses.GCM_JSON_CANONICAL_ID_SAME_DEVICE
):
GCMDevice.objects.all().send_message("Hello World")
assert first_device.active is True
assert second_device.active is False
def test_fcm_send_message_with_no_reg_ids(self):
self._create_fcm_devices(["abc", "abc1"])
with mock.patch("push_notifications.gcm._cm_send_request", return_value="") as p:
GCMDevice.objects.filter(registration_id="xyz").send_message("Hello World")
p.assert_not_called()
with mock.patch("push_notifications.gcm._cm_send_request", return_value="") as p:
reg_ids = [obj.registration_id for obj in GCMDevice.objects.all()]
send_bulk_message(reg_ids, {"message": "Hello World"}, "FCM")
p.assert_called_once_with(
[u"abc", u"abc1"], {"message": "Hello World"}, cloud_type="FCM",
application_id=None
)
def test_can_save_wsn_device(self):
device = GCMDevice.objects.create(registration_id="a valid registration id")
self.assertIsNotNone(device.pk)
self.assertIsNotNone(device.date_created)
self.assertEqual(device.date_created.date(), timezone.now().date())
| 39.055794
| 92
| 0.73989
|
74baf400afd4e79e8d9f3b7cc866fe41d3a105a0
| 8,770
|
py
|
Python
|
imix/utils/distributed_info.py
|
linxi1158/iMIX
|
af87a17275f02c94932bb2e29f132a84db812002
|
[
"Apache-2.0"
] | 23
|
2021-06-26T08:45:19.000Z
|
2022-03-02T02:13:33.000Z
|
imix/utils/distributed_info.py
|
XChuanLee/iMIX
|
99898de97ef8b45462ca1d6bf2542e423a73d769
|
[
"Apache-2.0"
] | null | null | null |
imix/utils/distributed_info.py
|
XChuanLee/iMIX
|
99898de97ef8b45462ca1d6bf2542e423a73d769
|
[
"Apache-2.0"
] | 9
|
2021-06-10T02:36:20.000Z
|
2021-11-09T02:18:16.000Z
|
import enum
import functools
import pickle
import numpy as np
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
G_NCCL = 'nccl'
G_GLOO = 'gloo'
G_CUDA = 'cuda'
class DistributedStatus(enum.Enum):
AVAILABLE = 1
INITIALIZED = 2
AVAILABLE_AND_INITIALIZED = 3
NO_AVAILABLE_INITIALIZED = 4
def get_dist_status():
dist_package_status = dist.is_available()
pg_init_status = dist.is_initialized()
if dist_package_status and pg_init_status:
return DistributedStatus.AVAILABLE_AND_INITIALIZED
else:
if dist_package_status:
return DistributedStatus.AVAILABLE
elif pg_init_status:
return DistributedStatus.INITIALIZED
else:
return DistributedStatus.NO_AVAILABLE_INITIALIZED
def get_world_size() -> int:
status = get_dist_status()
if status is DistributedStatus.AVAILABLE_AND_INITIALIZED:
return dist.get_world_size()
    # not running under torch.distributed: treat it as a single-process run
    return 1
def get_rank() -> int:
status = get_dist_status()
if status is DistributedStatus.AVAILABLE_AND_INITIALIZED:
return dist.get_rank()
    # not running under torch.distributed: treat the current process as rank 0
    return 0
def get_local_rank() -> int:
"""
Returns:
The rank of the current process within the local (per-machine) process group.
"""
status = get_dist_status()
if status in (DistributedStatus.AVAILABLE, DistributedStatus.INITIALIZED):
return 0
assert _LOCAL_PROCESS_GROUP is not None
if status is DistributedStatus.AVAILABLE_AND_INITIALIZED:
return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
"""
Returns:
The size of the per-machine process group,
i.e. the number of processes per machine.
"""
status = get_dist_status()
if status in (DistributedStatus.AVAILABLE, DistributedStatus.INITIALIZED):
return 0
assert _LOCAL_PROCESS_GROUP is not None
if status is DistributedStatus.AVAILABLE_AND_INITIALIZED:
return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
return get_rank() == 0
def master_only_run(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if is_main_process():
return func(*args, **kwargs)
return wrapper
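# Illustrative example, not part of the original module: master_only_run is
# meant to be used as a decorator, so the wrapped function only executes on
# the main process (rank 0); every other rank gets None back.
@master_only_run
def _print_on_master(msg):
    print(msg)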
def synchronize() -> None:
"""Helper function to synchronize (barrier) among all processes when using
distributed training."""
status = get_dist_status()
if status in (DistributedStatus.AVAILABLE, DistributedStatus.INITIALIZED):
return
num_processes = dist.get_world_size()
if num_processes == 1:
return
else:
dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
"""Return a process group based on gloo backend, containing all the ranks
The result is cached."""
if dist.get_backend() == G_NCCL:
return dist.new_group(backend=G_GLOO)
else:
return dist.group.WORLD
def _serialize_to_tensor(data, group): # serialize2tensor -> is_serialize
backend = dist.get_backend(group=group)
assert backend in [G_GLOO, G_NCCL]
    device = torch.device('cpu' if backend == G_GLOO else 'cuda')
bytes_data = pickle.dumps(data)
b2s = torch.ByteStorage.from_buffer(bytes_data)
s2t = torch.ByteTensor(b2s).to(device=device)
return s2t
def _pad_to_largest_tensor(tensor: torch.Tensor, group) -> tuple:
"""
Returns:
list[int]: size of the tensor, on each rank
Tensor: padded tensor that has the max size
"""
world_size = dist.get_world_size(group=group)
if world_size < 1:
        raise Exception('dist.gather/all_gather must be called from ranks within the given group', world_size)
dtype = torch.int64
device = tensor.device
local_tensor_size = torch.tensor([tensor.numel()], dtype=dtype, device=device)
tensor_sizes = [torch.zeros([1], dtype=dtype, device=device) for _ in range(world_size)]
dist.all_gather(tensor_sizes, local_tensor_size, group=group)
tensor_sizes = [int(size.item()) for size in tensor_sizes]
max_tensor_size = max(tensor_sizes)
if local_tensor_size != max_tensor_size:
pad_size = max_tensor_size - local_tensor_size
pad = torch.zeros((pad_size, ), dtype=torch.uint8, device=device)
tensor = torch.cat((tensor, pad), dim=0)
return tensor, tensor_sizes
@functools.lru_cache()
def is_single_processes(group=None) -> bool:
if get_world_size() == 1:
return True
else:
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group=group) == 1:
return True
else:
return False
def all_gather(data, group=None) -> list:
"""Run all_gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: list of data gathered from each rank
"""
if is_single_processes(group):
return [data]
if group is None:
group = _get_global_gloo_group()
tensor = _serialize_to_tensor(data, group)
tensor, tensor_sizes = _pad_to_largest_tensor(tensor, group)
max_tensor_size = max(tensor_sizes)
tensor_list = [torch.empty((max_tensor_size, ), dtype=torch.uint8, device=tensor.device) for _ in tensor_sizes]
dist.all_gather(tensor_list, tensor, group=group)
datum = []
for length, tensor in zip(tensor_sizes, tensor_list):
single_data = tensor.cpu().numpy().tobytes()
single_data = single_data[:length]
datum.append(pickle.loads(single_data))
return datum
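# --- Added usage sketch (illustrative; not part of the upstream module). It
# assumes a process group has already been initialised via
# torch.distributed.init_process_group(); the metric names are made up. The
# helper is only defined here, never called at import time.
def _example_gather_sample_counts(samples_seen_on_this_rank: int) -> int:
    per_rank = all_gather({'rank': get_rank(), 'seen': samples_seen_on_this_rank})
    # `per_rank` is a list with one dict per process, identical on every rank.
    return sum(entry['seen'] for entry in per_rank)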
def gather(data, *, dst_rank=0, group=None) -> list:
"""Run gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
dst (int): destination rank
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: on dst, a list of data gathered from each rank. Otherwise,
an empty list.
"""
if is_single_processes(group=group):
return [data]
if group is None:
group = _get_global_gloo_group()
tensor = _serialize_to_tensor(data, group)
tensor, tensor_sizes = _pad_to_largest_tensor(tensor, group)
if dist.get_rank(group=group) == dst_rank:
max_tensor_size = max(tensor_sizes)
tensor_list = [torch.empty((max_tensor_size, ), dtype=torch.uint8, device=tensor.device) for _ in tensor_sizes]
dist.gather(tensor, tensor_list, dst=dst_rank, group=group)
datum = []
for length, tensor in zip(tensor_sizes, tensor_list):
single_data = tensor.cpu().numpy().tobytes()
single_data = single_data[:length]
datum.append(pickle.loads(single_data))
return datum
else:
dist.gather(tensor, [], dst=dst_rank, group=group)
return []
def shared_random_seed(low=2**31, select_idx=0) -> int:
"""
Returns:
int: a random number that is the same across all workers.
If workers need a shared RNG, they can use this shared seed to
create one.
All workers must call this function, otherwise it will deadlock.
"""
random_ints = np.random.randint(low)
all_random_ints = all_gather(random_ints)
if len(all_random_ints) < select_idx:
return all_random_ints[0]
else:
return all_random_ints[select_idx]
def reduce_dict(input_dict: dict, is_average: bool = True) -> dict:
"""Reduce the values in the dictionary from all processes so that process
with rank 0 has the reduced results.
Args:
input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
is_average (bool): whether to do average or sum
Returns:
a dict with the same keys as input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
values = torch.stack(list(input_dict.values()), dim=0)
dist.reduce(values, dst=0)
if is_main_process() and is_average:
values /= world_size
output_dict = {k: v for k, v in zip(input_dict.keys(), values)}
return output_dict
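# --- Added usage sketch (illustrative; not part of the upstream module). The
# loss names are invented for the example; as the docstring above requires,
# the values must be scalar CUDA tensors. Only rank 0 receives the averaged
# values, so only the main process should log the result.
def _example_log_reduced_losses(loss_cls: torch.Tensor, loss_reg: torch.Tensor) -> None:
    reduced = reduce_dict({'loss_cls': loss_cls, 'loss_reg': loss_reg}, is_average=True)
    if is_main_process():
        print({k: v.item() for k, v in reduced.items()})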
| 31.209964
| 119
| 0.677651
|
3948f97cb3dda43a6ebb9a53685ab3955b4254e8
| 19,067
|
py
|
Python
|
edb/pgsql/ast.py
|
ambv/edgedb
|
83a2a4fac2d9dce9b609ddb786331ff431339062
|
[
"Apache-2.0"
] | 4
|
2020-04-25T13:52:13.000Z
|
2020-09-23T19:14:07.000Z
|
edb/pgsql/ast.py
|
ambv/edgedb
|
83a2a4fac2d9dce9b609ddb786331ff431339062
|
[
"Apache-2.0"
] | null | null | null |
edb/pgsql/ast.py
|
ambv/edgedb
|
83a2a4fac2d9dce9b609ddb786331ff431339062
|
[
"Apache-2.0"
] | null | null | null |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import enum
import dataclasses
import typing
import uuid
from edb.common import ast
from edb.common import typeutils
from edb.edgeql import ast as qlast
from edb.ir import ast as irast
# The structure of the nodes mostly follows that of Postgres'
# parsenodes.h and primnodes.h, but only with fields that are
# relevant to parsing and code generation.
#
# Certain nodes have EdgeDB-specific fields used by the
# compiler.
class Base(ast.AST):
def __repr__(self):
return f'<pg.{self.__class__.__name__} at 0x{id(self):x}>'
class ImmutableBase(ast.ImmutableASTMixin, Base):
pass
class Alias(ImmutableBase):
"""Alias for a range variable."""
aliasname: str # aliased relation name
colnames: typing.List[str] # optional list of column aliases
class Keyword(ImmutableBase):
"""An SQL keyword that must be output without quoting."""
name: str # Keyword name
class Star(Base):
"""'*' representing all columns of a table or compound field."""
class BaseExpr(Base):
"""Any non-statement expression node that returns a value."""
__ast_meta__ = {'nullable'}
nullable: bool # Whether the result can be NULL.
ser_safe: bool = False # Whether the expr is serialization-safe.
def __init__(self, *, nullable: typing.Optional[bool]=None,
**kwargs) -> None:
nullable = self._is_nullable(kwargs, nullable)
super().__init__(nullable=nullable, **kwargs)
def _is_nullable(self, kwargs: typing.Dict[str, object],
nullable: typing.Optional[bool]) -> bool:
if nullable is None:
default = type(self).get_field('nullable').default
if default is not None:
nullable = default
else:
nullable = self._infer_nullability(kwargs)
return nullable
def _infer_nullability(self, kwargs: typing.Dict[str, object]) -> bool:
nullable = False
for v in kwargs.values():
if typeutils.is_container(v):
items = typing.cast(typing.Iterable, v)
nullable = all(getattr(vv, 'nullable', False) for vv in items)
elif getattr(v, 'nullable', None):
nullable = True
if nullable:
break
return nullable
class ImmutableBaseExpr(BaseExpr, ImmutableBase):
pass
class OutputVar(ImmutableBaseExpr):
"""A base class representing expression output address."""
pass
class EdgeQLPathInfo(Base):
"""A general mixin providing EdgeQL-specific metadata on certain nodes."""
# Ignore the below fields in AST visitor/transformer.
__ast_meta__ = {
'path_scope', 'path_outputs', 'path_id', 'is_distinct',
'path_id_mask', 'path_namespace'
}
# The path id represented by the node.
path_id: irast.PathId
# Whether the node represents a distinct set.
is_distinct: bool = True
# A subset of paths necessary to perform joining.
path_scope: typing.Set[irast.PathId]
# Map of res target names corresponding to paths.
path_outputs: typing.Dict[typing.Tuple[irast.PathId, str], OutputVar]
path_id_mask: typing.Set[irast.PathId]
# Map of col refs corresponding to paths.
path_namespace: typing.Dict[typing.Tuple[irast.PathId, str], BaseExpr]
class BaseRangeVar(ImmutableBaseExpr):
"""Range variable, used in FROM clauses."""
__ast_meta__ = {'schema_object_id'}
alias: Alias
#: The id of the schema object this rvar represents
schema_object_id: typing.Optional[uuid.UUID] = None
class BaseRelation(EdgeQLPathInfo, BaseExpr):
name: str
nullable: bool # Whether the result can be NULL.
class Relation(BaseRelation):
"""Regular relation."""
catalogname: str
schemaname: str
class CommonTableExpr(Base):
# Query name (unqualified)
name: str
# Whether the result can be NULL.
nullable: bool
# Optional list of column names
aliascolnames: list
# The CTE query
query: Query
# True if this CTE is recursive
recursive: bool
def __repr__(self):
return (
f'<pg.{self.__class__.__name__} '
f'name={self.name!r} at 0x{id(self):x}>'
)
class PathRangeVar(BaseRangeVar):
#: The IR TypeRef this rvar represents (if any).
typeref: typing.Optional[irast.TypeRef]
@property
def query(self) -> BaseRelation:
raise NotImplementedError
class RelRangeVar(PathRangeVar):
"""Relation range variable, used in FROM clauses."""
relation: typing.Union[BaseRelation, CommonTableExpr]
include_inherited: bool = True
@property
def query(self) -> BaseRelation:
if isinstance(self.relation, CommonTableExpr):
return self.relation.query
else:
return self.relation
class IntersectionRangeVar(PathRangeVar):
component_rvars: typing.List[PathRangeVar]
class TypeName(ImmutableBase):
"""Type in definitions and casts."""
name: typing.Tuple[str, ...] # Type name
setof: bool # SET OF?
typmods: list # Type modifiers
array_bounds: list # Array bounds
class ColumnRef(OutputVar):
"""Specifies a reference to a column."""
# Column name list.
name: typing.List[typing.Union[str, Star]]
# Whether the col is an optional path bond (i.e accepted when NULL)
optional: bool
def __repr__(self):
if hasattr(self, 'name'):
return (
f'<pg.{self.__class__.__name__} '
f'name={".".join(self.name)!r} at 0x{id(self):x}>'
)
else:
return super().__repr__()
class TupleElementBase(ImmutableBase):
path_id: irast.PathId
name: typing.Optional[typing.Union[OutputVar, str]]
def __init__(self, path_id: irast.PathId,
name: typing.Optional[typing.Union[OutputVar, str]]=None):
self.path_id = path_id
self.name = name
def __repr__(self):
return f'<{self.__class__.__name__} ' \
f'name={self.name} path_id={self.path_id}>'
class TupleElement(TupleElementBase):
val: BaseExpr
def __init__(self, path_id: irast.PathId, val: BaseExpr, *,
name: typing.Optional[typing.Union[OutputVar, str]]=None):
super().__init__(path_id, name)
self.val = val
def __repr__(self):
return f'<{self.__class__.__name__} ' \
f'name={self.name} val={self.val} path_id={self.path_id}>'
class TupleVarBase(OutputVar):
elements: typing.Sequence[TupleElementBase]
named: bool
nullable: bool
typeref: typing.Optional[irast.TypeRef]
def __init__(self, elements: typing.List[TupleElementBase], *,
named: bool=False, nullable: bool=False,
typeref: typing.Optional[irast.TypeRef]=None):
self.elements = elements
self.named = named
self.nullable = nullable
self.typeref = typeref
def __repr__(self):
return f'<{self.__class__.__name__} [{self.elements!r}]'
class TupleVar(TupleVarBase):
elements: typing.Sequence[TupleElement]
def __init__(self, elements: typing.List[TupleElement], *,
named: bool=False, nullable: bool=False,
typeref: typing.Optional[irast.TypeRef]=None):
self.elements = elements
self.named = named
self.nullable = nullable
self.typeref = typeref
class BaseParamRef(ImmutableBaseExpr):
pass
class ParamRef(BaseParamRef):
"""Query parameter ($0..$n)."""
# Number of the parameter.
number: int
class NamedParamRef(BaseParamRef):
"""Named query parameter."""
name: str
class ResTarget(ImmutableBaseExpr):
"""Query result target."""
# Column name (optional)
name: str
# subscripts, field names and '*'
indirection: list
# value expression to compute
val: BaseExpr
class UpdateTarget(ImmutableBaseExpr):
"""Query update target."""
# column name (optional)
name: str
# value expression to assign
val: BaseExpr
class InferClause(ImmutableBaseExpr):
# IndexElems to infer unique index
index_elems: list
# Partial-index predicate
where_clause: BaseExpr
# Constraint name
conname: str
class OnConflictClause(ImmutableBaseExpr):
action: str
infer: InferClause
target_list: list
where: BaseExpr
class ReturningQuery(BaseRelation):
target_list: typing.List[ResTarget]
class NullRelation(ReturningQuery):
"""Special relation that produces nulls for all its attributes."""
where_clause: BaseExpr
@dataclasses.dataclass(frozen=True)
class Param:
#: postgres' variable index
index: int
#: whether parameter is required
required: bool
class Query(ReturningQuery):
"""Generic superclass representing a query."""
# Ignore the below fields in AST visitor/transformer.
__ast_meta__ = {'ptr_join_map', 'path_rvar_map',
'view_path_id_map', 'argnames', 'nullable'}
view_path_id_map: typing.Dict[irast.PathId, irast.PathId]
# Map of RangeVars corresponding to pointer relations.
ptr_join_map: dict
# Map of RangeVars corresponding to paths.
path_rvar_map: typing.Dict[typing.Tuple[irast.PathId, str], PathRangeVar]
argnames: typing.Dict[str, Param]
ctes: typing.List[CommonTableExpr]
@property
def ser_safe(self):
return all(t.ser_safe for t in self.target_list)
class DMLQuery(Query):
"""Generic superclass for INSERT/UPDATE/DELETE statements."""
# Target relation to perform the operation on.
relation: PathRangeVar
# List of expressions returned
returning_list: typing.List[ResTarget]
@property
def target_list(self):
return self.returning_list
class InsertStmt(DMLQuery):
# (optional) list of target column names
cols: typing.List[ColumnRef]
# source SELECT/VALUES or None
select_stmt: Query
# ON CONFLICT clause
on_conflict: OnConflictClause
class UpdateStmt(DMLQuery):
# The UPDATE target list
targets: typing.List[UpdateTarget]
# WHERE clause
where_clause: BaseExpr
# optional FROM clause
from_clause: typing.List[BaseRangeVar]
class DeleteStmt(DMLQuery):
# WHERE clause
where_clause: BaseExpr
# optional USING clause
using_clause: typing.List[BaseRangeVar]
class SelectStmt(Query):
# List of DISTINCT ON expressions, empty list for DISTINCT ALL
distinct_clause: list
# The target list
target_list: typing.List[ResTarget]
# The FROM clause
from_clause: typing.List[BaseRangeVar]
# The WHERE clause
where_clause: BaseExpr
# GROUP BY clauses
group_clause: typing.List[Base]
# HAVING expression
having: BaseExpr
# WINDOW window_name AS(...),
window_clause: typing.List[Base]
# List of ImplicitRow's in a VALUES query
values: typing.List[Base]
# ORDER BY clause
sort_clause: typing.List[SortBy]
# OFFSET expression
limit_offset: typing.Optional[BaseExpr]
# LIMIT expression
limit_count: typing.Optional[BaseExpr]
# FOR UPDATE clause
locking_clause: list
# Set operation type
op: str
# ALL modifier
all: bool
# Left operand of set op
larg: Query
# Right operand of set op,
rarg: Query
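# --- Added illustrative sketch (not part of the upstream module). Assuming the
# usual keyword-argument construction of these AST nodes, a query shaped like
# ``SELECT e.id FROM events AS e`` would correspond roughly to:
#
#   SelectStmt(
#       target_list=[ResTarget(val=ColumnRef(name=['e', 'id']))],
#       from_clause=[RelRangeVar(relation=Relation(name='events'),
#                                alias=Alias(aliasname='e'))],
#   )
#
# The exact set of required fields (path ids, scopes, etc.) depends on the
# compiler context, so this is a shape sketch rather than verified constructor
# calls.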
class ExprKind(enum.IntEnum):
OP = enum.auto()
class Expr(ImmutableBaseExpr):
"""Infix, prefix, and postfix expressions."""
# Operator kind
kind: ExprKind
# Possibly-qualified name of operator
name: str
# Left argument, if any
lexpr: BaseExpr
# Right argument, if any
rexpr: BaseExpr
class BaseConstant(ImmutableBaseExpr):
def __init__(self, **kwargs):
super().__init__(**kwargs)
if not isinstance(self, NullConstant) and self.val is None:
raise ValueError('cannot create a pgast.Constant without a value')
class StringConstant(BaseConstant):
"""A literal string constant."""
# Constant value
val: str
class NullConstant(BaseConstant):
"""A NULL constant."""
nullable: bool = True
class ByteaConstant(BaseConstant):
"""An bytea string."""
val: bytes
class NumericConstant(BaseConstant):
val: str
class BooleanConstant(BaseConstant):
val: str
class LiteralExpr(ImmutableBaseExpr):
"""A literal expression."""
# Expression text
expr: str
class TypeCast(ImmutableBaseExpr):
"""A CAST expression."""
# Expression being casted.
arg: BaseExpr
# Target type.
type_name: TypeName
class CollateClause(ImmutableBaseExpr):
"""A COLLATE expression."""
# Input expression
arg: BaseExpr
# Possibly-qualified collation name
collname: str
class VariadicArgument(ImmutableBaseExpr):
expr: BaseExpr
nullable: bool = False
class ColumnDef(ImmutableBase):
# name of column
name: str
# type of column
typename: TypeName
# default value, if any
default_expr: BaseExpr
# COLLATE clause, if any
coll_clause: BaseExpr
class FuncCall(ImmutableBaseExpr):
# Function name
name: typing.Tuple[str, ...]
# List of arguments
args: typing.List[BaseExpr]
# ORDER BY
agg_order: typing.List[BaseExpr]
# FILTER clause
agg_filter: BaseExpr
# Argument list is '*'
agg_star: bool
# Arguments were labeled DISTINCT
agg_distinct: bool
# OVER clause, if any
over: typing.Optional[WindowDef]
# WITH ORDINALITY
with_ordinality: bool = False
# list of ColumnDef nodes to describe result of
# the function returning RECORD.
coldeflist: typing.List[ColumnDef]
def __init__(self, *, nullable: typing.Optional[bool]=None,
null_safe: bool=False, **kwargs) -> None:
"""Function call node.
@param null_safe:
Specifies whether this function is guaranteed
to never return NULL on non-NULL input.
"""
if nullable is None and not null_safe:
nullable = True
super().__init__(nullable=nullable, **kwargs)
class NamedFuncArg(ImmutableBaseExpr):
name: str
val: BaseExpr
class Indices(ImmutableBase):
"""Array subscript or slice bounds."""
# True, if slice
is_slice: bool
# Lower bound, if any
lidx: BaseExpr
# Upper bound if any
ridx: BaseExpr
class Indirection(ImmutableBaseExpr):
"""Field and/or array element indirection."""
# Indirection subject
arg: BaseExpr
# Subscripts and/or field names and/or '*'
indirection: list
class ArrayExpr(ImmutableBaseExpr):
"""ARRAY[] construct."""
# array element expressions
elements: typing.List[BaseExpr]
class MultiAssignRef(ImmutableBase):
"""UPDATE (a, b, c) = row-valued-expr."""
# row-valued expression
source: BaseExpr
# list of columns to assign to
columns: typing.List[ColumnRef]
class SortBy(ImmutableBase):
"""ORDER BY clause element."""
# expression to sort on
node: BaseExpr
# ASC/DESC/USING/default
dir: qlast.SortOrder
# NULLS FIRST/LAST
nulls: qlast.NonesOrder
class WindowDef(ImmutableBase):
"""WINDOW and OVER clauses."""
# window name
name: str
# referenced window name, if any
refname: str
# PARTITION BY expr list
partition_clause: typing.List[BaseExpr]
# ORDER BY
order_clause: typing.List[SortBy]
# Window frame options
frame_options: list
# expression for starting bound, if any
start_offset: BaseExpr
    # expression for ending bound, if any
end_offset: BaseExpr
class RangeSubselect(PathRangeVar):
"""Subquery appearing in FROM clauses."""
lateral: bool
subquery: Query
@property
def query(self):
return self.subquery
class RangeFunction(BaseRangeVar):
lateral: bool
# WITH ORDINALITY
with_ordinality: bool = False
# ROWS FROM form
is_rowsfrom: bool
functions: typing.List[FuncCall]
class JoinExpr(BaseRangeVar):
# Type of join
type: str
# Left subtree
larg: BaseExpr
# Right subtree
rarg: BaseExpr
# USING clause, if any
using_clause: typing.List[BaseExpr]
# Qualifiers on join, if any
quals: BaseExpr
def copy(self):
result = self.__class__()
result.copyfrom(self)
return result
def copyfrom(self, other):
self.larg = other.larg
self.rarg = other.rarg
self.quals = other.quals
self.type = other.type
class SubLinkType(enum.IntEnum):
EXISTS = enum.auto()
ALL = enum.auto()
ANY = enum.auto()
class SubLink(ImmutableBaseExpr):
"""Subselect appearing in an expression."""
# Type of sublink
type: SubLinkType
# Sublink expression
expr: BaseExpr
# Sublink is never NULL
nullable: bool = False
class RowExpr(ImmutableBaseExpr):
"""A ROW() expression."""
# The fields.
args: typing.List[BaseExpr]
    # Row expressions, while they may contain NULLs, are not NULL themselves.
nullable: bool = False
class ImplicitRowExpr(ImmutableBaseExpr):
"""A (a, b, c) expression."""
# The fields.
args: typing.List[BaseExpr]
    # Row expressions, while they may contain NULLs, are not NULL themselves.
nullable: bool = False
class CoalesceExpr(ImmutableBaseExpr):
"""A COALESCE() expression."""
# The arguments.
args: typing.List[Base]
class NullTest(ImmutableBaseExpr):
"""IS [NOT] NULL."""
# Input expression,
arg: BaseExpr
# NOT NULL?
negated: bool
# NullTest is never NULL
nullable: bool = False
class CaseWhen(ImmutableBase):
# Condition expression
expr: BaseExpr
    # substitution result
result: BaseExpr
class CaseExpr(ImmutableBaseExpr):
# Equality comparison argument
arg: BaseExpr
# List of WHEN clauses
args: typing.List[CaseWhen]
# ELSE clause
defresult: BaseExpr
SortAsc = qlast.SortAsc
SortDesc = qlast.SortDesc
SortDefault = qlast.SortDefault
NullsFirst = qlast.NonesFirst
NullsLast = qlast.NonesLast
class AlterSystem(ImmutableBaseExpr):
name: str
value: BaseExpr
class Set(ImmutableBaseExpr):
name: str
value: BaseExpr
| 23.395092
| 78
| 0.662401
|
ef1999a6b83954b35066a0e78f3a05a961ac62df
| 2,123
|
py
|
Python
|
config.py
|
tryonelove/tvv-vk-bot
|
2fda3f32f88303ced64388cd43f08bdccfe82ee4
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
tryonelove/tvv-vk-bot
|
2fda3f32f88303ced64388cd43f08bdccfe82ee4
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
tryonelove/tvv-vk-bot
|
2fda3f32f88303ced64388cd43f08bdccfe82ee4
|
[
"Apache-2.0"
] | null | null | null |
import os
CREATOR_ID = 236965366
GROUP_ID_TEST = os.getenv("GROUP_ID_TEST")
GROUP_ID = os.getenv("GROUP_ID")
API_KEY_TEST = os.getenv("API_KEY_TEST")
API_KEY = os.getenv("API_KEY")
OSU_API_KEY = os.getenv("OSU_API_KEY")
OSU_MATCHMAKING_KEY = os.getenv("OSU_MATCHMAKING_KEY")
RESTRICTED_HIGHLIGHTS = ["@all", "@online", "@тут", "@все"]
DATABASE_INIT = """
CREATE TABLE IF NOT EXISTS "users" (
"id" INTEGER,
"name" TEXT,
"server" TEXT,
"username" TEXT,
"role" INTEGER DEFAULT 1,
PRIMARY KEY("id")
);
CREATE TABLE IF NOT EXISTS "users_experience" (
"chat_id" INTEGER,
"user_id" INTEGER,
"experience" FLOAT DEFAULT 0,
"level" INTEGER DEFAULT 1,
FOREIGN KEY(user_id) REFERENCES "users"("id")
);
CREATE TABLE IF NOT EXISTS "osu" (
"id" INTEGER NOT NULL UNIQUE,
"main_server" TEXT,
"bancho_username" TEXT,
"gatari_username" TEXT,
FOREIGN KEY("id") REFERENCES "users"("id") ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS "weather" (
"id" INTEGER NOT NULL UNIQUE,
"city" TEXT,
FOREIGN KEY("id") REFERENCES "users"("id") ON UPDATE CASCADE
);
CREATE TABLE IF NOT EXISTS "donators" (
"id" int,
"expires" INTEGER,
"role" text,
FOREIGN KEY("id") REFERENCES "users"("id") ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS "commands" (
"key" INTEGER NOT NULL UNIQUE,
"message" TEXT,
"attachment" TEXT,
"author" INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS "beatmapsets" (
"beatmapset_id" INTEGER NOT NULL UNIQUE,
"artist" TEXT,
"title" TEXT,
"background_url" TEXT,
PRIMARY KEY("beatmapset_id")
);
CREATE TABLE IF NOT EXISTS "beatmaps" (
"beatmapset_id" INTEGER,
"beatmap_id" INTEGER,
"version" TEXT,
"max_combo" INTEGER,
PRIMARY KEY("beatmap_id"),
FOREIGN KEY("beatmapset_id") REFERENCES "beatmapsets"("beatmapset_id")
);
"""
| 29.082192
| 86
| 0.601507
|
605239e2228af3029e32e4164141a85ebc90bbbc
| 3,444
|
py
|
Python
|
waterfall/api/v2/views/workflows.py
|
xiaoquqi/waterfall
|
c023df8fc4376d93136458dfc4ca956a7249e509
|
[
"Apache-2.0"
] | null | null | null |
waterfall/api/v2/views/workflows.py
|
xiaoquqi/waterfall
|
c023df8fc4376d93136458dfc4ca956a7249e509
|
[
"Apache-2.0"
] | null | null | null |
waterfall/api/v2/views/workflows.py
|
xiaoquqi/waterfall
|
c023df8fc4376d93136458dfc4ca956a7249e509
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
from waterfall.api import common
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
"""Model a server API response as a python dictionary."""
_collection_name = "workflows"
def __init__(self):
"""Initialize view builder."""
super(ViewBuilder, self).__init__()
def summary_list(self, request, workflows, workflow_count=None):
"""Show a list of workflows without many details."""
return self._list_view(self.summary, request, workflows,
workflow_count)
def detail_list(self, request, workflows, workflow_count=None):
"""Detailed view of a list of workflows."""
return self._list_view(self.detail, request, workflows,
workflow_count,
self._collection_name + '/detail')
def summary(self, request, workflow):
"""Generic, non-detailed view of a workflow."""
return {
'workflow': {
'id': workflow['id'],
'resource_type': workflow['resource_type'],
},
}
def detail(self, request, workflow):
"""Detailed view of a single workflow."""
workflow_ref = {
'workflow': {
'id': workflow.get('id'),
'resource_type': workflow.get('resource_type'),
'payload': workflow.get('payload'),
'created_at': workflow.get('created_at'),
'updated_at': workflow.get('updated_at'),
'user_id': workflow.get('user_id'),
}
}
return workflow_ref
def _list_view(self, func, request, workflows, workflow_count,
coll_name=_collection_name):
"""Provide a view for a list of workflows.
:param func: Function used to format the workflow data
:param request: API request
:param workflows: List of workflows in dictionary format
:param workflow_count: Length of the original list of workflows
:param coll_name: Name of collection, used to generate the next link
for a pagination query
:returns: Workflow data in dictionary format
"""
workflows_list = [func(request, workflow)['workflow'] for workflow in workflows]
workflows_links = self._get_collection_links(request,
workflows,
coll_name,
workflow_count)
workflows_dict = dict(workflows=workflows_list)
if workflows_links:
workflows_dict['workflows_links'] = workflows_links
return workflows_dict
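    # --- Added usage sketch (illustrative; not part of the upstream module).
    # summary() only reads two keys from the workflow mapping and ignores the
    # request, so a plain dict is enough to show the response shape; the values
    # are made up.
    #
    #   builder = ViewBuilder()
    #   builder.summary(None, {'id': 'wf-1', 'resource_type': 'volume'})
    #   # -> {'workflow': {'id': 'wf-1', 'resource_type': 'volume'}}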
| 37.846154
| 88
| 0.601626
|
f251f894edfec3b7a73c89ae4666e2c0b3a1949f
| 64
|
py
|
Python
|
py/start/helloworld.py
|
zhongwei/ztodo
|
fef4f24e65fb8d571c6c13e6f82d842023e7a8e1
|
[
"CC0-1.0"
] | 1
|
2015-09-22T08:28:27.000Z
|
2015-09-22T08:28:27.000Z
|
py/start/helloworld.py
|
zhongwei/ztodo
|
fef4f24e65fb8d571c6c13e6f82d842023e7a8e1
|
[
"CC0-1.0"
] | null | null | null |
py/start/helloworld.py
|
zhongwei/ztodo
|
fef4f24e65fb8d571c6c13e6f82d842023e7a8e1
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
print "Hello, World!";
| 12.8
| 22
| 0.65625
|
e023b51f25a491e63141cad8c4e4b15c945de5bf
| 743
|
py
|
Python
|
tasbot/__init__.py
|
TurBoss/tasbot3
|
c29a22f98aa8044a415f79d83f620ed6ef7ce000
|
[
"WTFPL"
] | null | null | null |
tasbot/__init__.py
|
TurBoss/tasbot3
|
c29a22f98aa8044a415f79d83f620ed6ef7ce000
|
[
"WTFPL"
] | null | null | null |
tasbot/__init__.py
|
TurBoss/tasbot3
|
c29a22f98aa8044a415f79d83f620ed6ef7ce000
|
[
"WTFPL"
] | null | null | null |
"""tasbot module docstring"""
__version__ = (1, 0, 0)
from main import MainApp as DefaultApp
import sys
#pretty sure there's builtins for this but I couldn't find them
def _greater(a, b):
return cmp(a, b) > 0
def _less(a, b):
return cmp(a, b) < 0
def _compare(vtuple, op):
for i in range(len(vtuple)):
if op(__version__[i], vtuple[i]):
return False
return True
def check_min_version(vtuple):
if not _compare(vtuple, _less):
print('tasbot version %s does not match minimum requirement %s' %
(str(__version__), str(vtuple)))
sys.exit(1)
def check_max_version(vtuple):
if not _compare(vtuple, _greater):
print('tasbot version %s exceeds maximum requirement %s' %
(str(__version__), str(vtuple)))
sys.exit(1)
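# --- Added usage sketch (illustrative): a bot script that needs at least
# tasbot 1.0.0 but is untested against 2.x could guard its startup like this.
# The version tuples are example values.
#
#   import tasbot
#   tasbot.check_min_version((1, 0, 0))
#   tasbot.check_max_version((2, 0, 0))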
| 20.081081
| 67
| 0.694482
|
ba9538726e16f8c986fc5ef048aafcc4ed7129d8
| 14,900
|
py
|
Python
|
src/ezdxf/path/path.py
|
jpsantos-mf/ezdxf
|
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
|
[
"MIT"
] | null | null | null |
src/ezdxf/path/path.py
|
jpsantos-mf/ezdxf
|
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
|
[
"MIT"
] | null | null | null |
src/ezdxf/path/path.py
|
jpsantos-mf/ezdxf
|
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020-2021, Manfred Moitzi
# License: MIT License
from typing import TYPE_CHECKING, List, Iterable
from collections import abc
import warnings
from ezdxf.math import (
Vec3, NULLVEC, OCS, Bezier3P, Bezier4P, Matrix44,
ConstructionEllipse, BSpline, has_clockwise_orientation,
)
from ezdxf.entities import LWPolyline, Polyline, Spline
from .commands import Command, LineTo, Curve3To, Curve4To, AnyCurve, PathElement
if TYPE_CHECKING:
from ezdxf.eztypes import Vertex, Ellipse, Arc, Circle
__all__ = ['Path']
MAX_DISTANCE = 0.01
MIN_SEGMENTS = 4
G1_TOL = 1e-4
class Path(abc.Sequence):
__slots__ = ('_start', '_commands')
def __init__(self, start: 'Vertex' = NULLVEC):
self._start = Vec3(start)
self._commands: List[PathElement] = []
def __len__(self) -> int:
return len(self._commands)
def __getitem__(self, item) -> PathElement:
return self._commands[item]
def __iter__(self) -> Iterable[PathElement]:
return iter(self._commands)
def __copy__(self) -> 'Path':
""" Returns a new copy of :class:`Path` with shared immutable data. """
copy = Path(self._start)
# immutable data
copy._commands = list(self._commands)
return copy
clone = __copy__
@property
def start(self) -> Vec3:
""" :class:`Path` start point, resetting the start point of an empty
path is possible.
"""
return self._start
@start.setter
def start(self, location: 'Vertex') -> None:
if len(self._commands):
raise ValueError('Requires an empty path.')
else:
self._start = Vec3(location)
@property
def end(self) -> Vec3:
""" :class:`Path` end point. """
if self._commands:
return self._commands[-1].end
else:
return self._start
@property
def is_closed(self) -> bool:
""" Returns ``True`` if the start point is close to the end point. """
return self._start.isclose(self.end)
@property
def has_lines(self) -> bool:
""" Returns ``True`` if the path has any line segments. """
return any(cmd.type == Command.LINE_TO for cmd in self._commands)
@property
def has_curves(self) -> bool:
""" Returns ``True`` if the path has any curve segments. """
return any(cmd.type in AnyCurve for cmd in self._commands)
def has_clockwise_orientation(self) -> bool:
""" Returns ``True`` if 2D path has clockwise orientation, ignores
z-axis of all control vertices.
"""
return has_clockwise_orientation(self.control_vertices())
def line_to(self, location: 'Vertex') -> None:
""" Add a line from actual path end point to `location`.
"""
self._commands.append(LineTo(end=Vec3(location)))
def curve3_to(self, location: 'Vertex', ctrl: 'Vertex') -> None:
""" Add a quadratic Bèzier-curve from actual path end point to
`location`, `ctrl` is the control point for the quadratic Bèzier-curve.
"""
self._commands.append(Curve3To(end=Vec3(location), ctrl=Vec3(ctrl)))
def curve4_to(self, location: 'Vertex', ctrl1: 'Vertex',
ctrl2: 'Vertex') -> None:
""" Add a cubic Bèzier-curve from actual path end point to `location`,
`ctrl1` and `ctrl2` are the control points for the cubic Bèzier-curve.
"""
self._commands.append(Curve4To(
end=Vec3(location), ctrl1=Vec3(ctrl1), ctrl2=Vec3(ctrl2))
)
curve_to = curve4_to # TODO: 2021-01-30, remove compatibility alias
def close(self) -> None:
""" Close path by adding a line segment from the end point to the start
point.
"""
if not self.is_closed:
self.line_to(self.start)
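    # --- Added usage sketch (illustrative; not part of ezdxf). Building a
    # small closed path from the commands above; the coordinates are arbitrary:
    #
    #   p = Path(start=(0, 0))
    #   p.line_to((2, 0))
    #   p.curve4_to((0, 2), ctrl1=(2, 1), ctrl2=(1, 2))
    #   p.close()          # adds the line segment back to (0, 0)
    #   p.is_closed        # -> True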
def reversed(self) -> 'Path':
""" Returns a new :class:`Path` with reversed segments and control
vertices.
"""
if len(self) == 0:
return Path(self.start)
path = Path(start=self.end)
for index in range(len(self) - 1, -1, -1):
cmd = self[index]
if index > 0:
prev_end = self[index - 1].end
else:
prev_end = self.start
if cmd.type == Command.LINE_TO:
path.line_to(prev_end)
elif cmd.type == Command.CURVE3_TO:
path.curve3_to(prev_end, cmd.ctrl)
elif cmd.type == Command.CURVE4_TO:
path.curve4_to(prev_end, cmd.ctrl2, cmd.ctrl1)
return path
def clockwise(self) -> 'Path':
""" Returns new :class:`Path` in clockwise orientation. """
if self.has_clockwise_orientation():
return self.clone()
else:
return self.reversed()
def counter_clockwise(self) -> 'Path':
""" Returns new :class:`Path` in counter-clockwise orientation. """
if self.has_clockwise_orientation():
return self.reversed()
else:
return self.clone()
def approximate(self, segments: int = 20) -> Iterable[Vec3]:
""" Approximate path by vertices, `segments` is the count of
approximation segments for each Bézier curve.
Does not yield any vertices for empty paths, where only a start point
is present!
"""
def approx_curve3(s, c, e) -> Iterable[Vec3]:
return Bezier3P((s, c, e)).approximate(segments)
def approx_curve4(s, c1, c2, e) -> Iterable[Vec3]:
return Bezier4P((s, c1, c2, e)).approximate(segments)
yield from self._approximate(approx_curve3, approx_curve4)
def flattening(self, distance: float,
segments: int = 16) -> Iterable[Vec3]:
""" Approximate path by vertices and use adaptive recursive flattening
        to approximate Bézier curves. The argument `segments` is the
minimum count of approximation segments for each curve, if the distance
from the center of the approximation segment to the curve is bigger than
`distance` the segment will be subdivided.
Does not yield any vertices for empty paths, where only a start point
is present!
Args:
distance: maximum distance from the center of the curve to the
center of the line segment between two approximation points to
determine if a segment should be subdivided.
segments: minimum segment count per Bézier curve
"""
def approx_curve3(s, c, e) -> Iterable[Vec3]:
return Bezier3P((s, c, e)).flattening(distance, segments)
def approx_curve4(s, c1, c2, e) -> Iterable[Vec3]:
return Bezier4P((s, c1, c2, e)).flattening(distance, segments)
yield from self._approximate(approx_curve3, approx_curve4)
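    # --- Added usage sketch (illustrative; not part of ezdxf): flatten a path
    # so that no approximation segment deviates more than 0.01 drawing units
    # from the true curve, with at least 16 segments per curve (the default):
    #
    #   vertices = list(p.flattening(distance=0.01))
    #
    # Straight line segments are passed through unchanged; only the Bézier
    # parts are subdivided adaptively.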
def _approximate(self, approx_curve3, approx_curve4) -> Iterable[Vec3]:
if not self._commands:
return
start = self._start
yield start
for cmd in self._commands:
end_location = cmd.end
if cmd.type == Command.LINE_TO:
yield end_location
elif cmd.type == Command.CURVE3_TO:
pts = iter(
approx_curve3(start, cmd.ctrl, end_location)
)
next(pts) # skip first vertex
yield from pts
elif cmd.type == Command.CURVE4_TO:
pts = iter(
approx_curve4(start, cmd.ctrl1, cmd.ctrl2, end_location)
)
next(pts) # skip first vertex
yield from pts
else:
raise ValueError(f'Invalid command: {cmd.type}')
start = end_location
def transform(self, m: 'Matrix44') -> 'Path':
""" Returns a new transformed path.
Args:
m: transformation matrix of type :class:`~ezdxf.math.Matrix44`
"""
new_path = self.__class__(m.transform(self.start))
for cmd in self._commands:
if cmd.type == Command.LINE_TO:
new_path.line_to(m.transform(cmd.end))
elif cmd.type == Command.CURVE3_TO:
loc, ctrl = m.transform_vertices(
(cmd.end, cmd.ctrl)
)
new_path.curve3_to(loc, ctrl)
elif cmd.type == Command.CURVE4_TO:
loc, ctrl1, ctrl2 = m.transform_vertices(
(cmd.end, cmd.ctrl1, cmd.ctrl2)
)
new_path.curve4_to(loc, ctrl1, ctrl2)
else:
raise ValueError(f'Invalid command: {cmd.type}')
return new_path
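    # --- Added usage sketch (illustrative; not part of ezdxf): paths are
    # transformed by a Matrix44, e.g. shifting a path 5 units along the x-axis
    # returns a new, independent Path object:
    #
    #   from ezdxf.math import Matrix44
    #   moved = p.transform(Matrix44.translate(5, 0, 0))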
def to_wcs(self, ocs: OCS, elevation: float):
""" Transform path from given `ocs` to WCS coordinates inplace. """
self._start = ocs.to_wcs(self._start.replace(z=elevation))
for i, cmd in enumerate(self._commands):
self._commands[i] = cmd.to_wcs(ocs, elevation)
def add_curves(self, curves: Iterable[Bezier4P]) -> None:
""" Add multiple cubic Bèzier-curves to the path.
.. deprecated:: 0.15.3
replaced by factory function :func:`add_bezier4p`
"""
warnings.warn(
'use tool function add_bezier4p(),'
'will be removed in v0.17.', DeprecationWarning)
from .tools import add_bezier4p
add_bezier4p(self, curves)
def add_bezier3p(self, curves: Iterable[Bezier3P]) -> None:
""" Add multiple quadratic Bèzier-curves to the path.
"""
warnings.warn(
'use tool function add_bezier3p(),'
'will be removed in v0.17.', DeprecationWarning)
from .tools import add_bezier3p
add_bezier3p(self, curves)
def add_ellipse(self, ellipse: ConstructionEllipse, segments=1,
reset=True) -> None:
""" Add an elliptical arc as multiple cubic Bèzier-curves
.. deprecated:: 0.15.3
replaced by factory function :func:`add_ellipse`
"""
warnings.warn(
'use tool function add_ellipse(),'
'will be removed in v0.17.', DeprecationWarning)
from .tools import add_ellipse
add_ellipse(self, ellipse, segments, reset)
def add_spline(self, spline: BSpline, level=4, reset=True) -> None:
""" Add a B-spline as multiple cubic Bèzier-curves.
.. deprecated:: 0.15.3
replaced by factory function :func:`add_spline`
"""
warnings.warn(
'use tool function add_spline(),'
'will be removed in v0.17.', DeprecationWarning)
from .tools import add_spline
add_spline(self, spline, level, reset)
@classmethod
def from_vertices(cls, vertices: Iterable['Vertex'], close=False) -> 'Path':
""" Returns a :class:`Path` from given `vertices`.
.. deprecated:: 0.15.3
replaced by factory function :func:`from_vertices()`
"""
warnings.warn(
'use factory function from_vertices(),'
'will be removed in v0.17.', DeprecationWarning)
from .converter import from_vertices
return from_vertices(vertices, close)
@classmethod
def from_lwpolyline(cls, lwpolyline: 'LWPolyline') -> 'Path':
""" Returns a :class:`Path` from a :class:`~ezdxf.entities.LWPolyline`
entity, all vertices transformed to WCS.
.. deprecated:: 0.15.2
replaced by factory function :func:`make_path()`
"""
warnings.warn(
'use factory function make_path(lwpolyline),'
'will be removed in v0.17.', DeprecationWarning)
from .converter import make_path
return make_path(lwpolyline)
@classmethod
def from_polyline(cls, polyline: 'Polyline') -> 'Path':
""" Returns a :class:`Path` from a :class:`~ezdxf.entities.Polyline`
entity, all vertices transformed to WCS.
.. deprecated:: 0.15.2
replaced by factory function :func:`make_path()`
"""
warnings.warn(
'use factory function make_path(polyline),'
'will be removed in v0.17.', DeprecationWarning)
from .converter import make_path
return make_path(polyline)
@classmethod
def from_spline(cls, spline: 'Spline', level: int = 4) -> 'Path':
""" Returns a :class:`Path` from a :class:`~ezdxf.entities.Spline`.
.. deprecated:: 0.15.2
replaced by factory function :func:`make_path()`
"""
warnings.warn(
'use factory function make_path(polyline),'
'will be removed in v0.17.', DeprecationWarning)
from .converter import make_path
return make_path(spline, level=level)
@classmethod
def from_ellipse(cls, ellipse: 'Ellipse', segments: int = 1) -> 'Path':
""" Returns a :class:`Path` from a :class:`~ezdxf.entities.Ellipse`.
.. deprecated:: 0.15.2
replaced by factory function :func:`make_path()`
"""
warnings.warn(
'use factory function make_path(ellipse),'
'will be removed in v0.17.', DeprecationWarning)
from .converter import make_path
return make_path(ellipse, segments=segments)
@classmethod
def from_arc(cls, arc: 'Arc', segments: int = 1) -> 'Path':
""" Returns a :class:`Path` from an :class:`~ezdxf.entities.Arc`.
.. deprecated:: 0.15.2
replaced by factory function :func:`make_path()`
"""
warnings.warn(
'use factory function make_path(arc),'
'will be removed in v0.17.', DeprecationWarning)
from .converter import make_path
return make_path(arc, segments=segments)
@classmethod
def from_circle(cls, circle: 'Circle', segments: int = 1) -> 'Path':
""" Returns a :class:`Path` from a :class:`~ezdxf.entities.Circle`.
.. deprecated:: 0.15.2
replaced by factory function :func:`make_path()`
"""
warnings.warn(
'use factory function make_path(circle),'
'will be removed in v0.17.', DeprecationWarning)
from .converter import make_path
return make_path(circle, segments=segments)
def control_vertices(self):
""" Yields all path control vertices in consecutive order. """
if len(self):
yield self.start
for cmd in self._commands:
if cmd.type == Command.LINE_TO:
yield cmd.end
elif cmd.type == Command.CURVE3_TO:
yield cmd.ctrl
yield cmd.end
elif cmd.type == Command.CURVE4_TO:
yield cmd.ctrl1
yield cmd.ctrl2
yield cmd.end
| 34.976526
| 80
| 0.590134
|
fab8d9005b1a45b36964fd1193c7a65bc84dd41e
| 2,600
|
py
|
Python
|
app/modules/common/FEAT/F14_individual_heat_supply_costs_per_building/F_14.py
|
HotMaps/building_h-c
|
db5a103cb9d41b88e6cdc3c9194fc1ec9fc5c31f
|
[
"Apache-2.0"
] | 1
|
2017-05-12T11:31:09.000Z
|
2017-05-12T11:31:09.000Z
|
app/modules/common/FEAT/F14_individual_heat_supply_costs_per_building/F_14.py
|
HotMaps/HotMaps-building_h-c
|
db5a103cb9d41b88e6cdc3c9194fc1ec9fc5c31f
|
[
"Apache-2.0"
] | 2
|
2017-08-22T13:53:22.000Z
|
2017-09-25T07:27:28.000Z
|
app/modules/common/FEAT/F14_individual_heat_supply_costs_per_building/F_14.py
|
HotMaps/Hotmaps-building_h-c
|
db5a103cb9d41b88e6cdc3c9194fc1ec9fc5c31f
|
[
"Apache-2.0"
] | null | null | null |
'''
This script has been created in the context of the Hotmaps EU project.
@author: Sara Fritz
@Institute: TUW, Austria
@Contact: fritz@eeg.tuwien.ac.at
'''
import os
import sys
path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if path not in sys.path:
sys.path.append(path)
import AD.F14_input.main as data
import CM.CM_TUW2.run_cm as CM2
import CM.CM_TUW3.run_cm as CM3
import pandas
import numpy
import json
def execute(interest_rate, lifetime, user_input_nuts0, user_input_nuts3, population, energy_demand, heat_load):
(co2_costs, energy_price, taxes, investment_costs, operation_and_maintenance_costs, efficiency_heatingsystem, spec_co2_emissions, heating_system_names)\
=data.ad_f14(user_input_nuts3, user_input_nuts0)
(var_costs, fix_costs,energy_costs, total_costs, share_of_taxes, co2_costs, lcoh, lcohcapita, fed) = \
CM2.main(energy_demand, heat_load,energy_price,co2_costs, taxes, investment_costs, operation_and_maintenance_costs, efficiency_heatingsystem,interest_rate,lifetime,population)
(specific_emissions, total_emissions) = CM3.main(energy_demand, efficiency_heatingsystem, spec_co2_emissions)
name_heating_systems = (pandas.Series(heating_system_names)).rename('Heating System')
export_dataframe=pandas.concat([name_heating_systems,var_costs,fix_costs, energy_costs, total_costs, share_of_taxes, co2_costs, lcoh, lcohcapita, fed, specific_emissions, total_emissions],axis=1)
#return [var_costs, fix_costs,energy_costs, total_costs, share_of_taxes, co2_costs, lcoh, lcohcapita, fed,specific_emissions, total_emissions]
#return export_dataframe
test = export_dataframe
export_dataframe = export_dataframe.set_index('Heating System')
    dictionary = json.loads(export_dataframe.to_json())
return dictionary
if __name__ == "__main__":
print('calculation started')
output_dir = path + os.sep + 'Outputs'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
interest_rate = 1.06
lifetime = 20
##Information from selected area
user_input_nuts0 = 'AT'
user_input_nuts3 = 'AT130'
population = 1800000 #not final - just value
#user input
sel_building_energy_demand= 120 # kWh/a
sel_building_heat_load = 20 #kW/a
result = execute(interest_rate, lifetime,user_input_nuts0, user_input_nuts3, population, sel_building_energy_demand, sel_building_heat_load)
print(result)
print('calculation done')
| 35.135135
| 199
| 0.739231
|
56aa124a5b3848b66114d9a6079b9319a00bb870
| 2,838
|
py
|
Python
|
ImageBot/to_yolo/YoloConverter.py
|
FraunhoferIAO/Image-Bot
|
951258a78a297f3fb27478f5671f6bc804cd5715
|
[
"MIT"
] | 2
|
2021-12-28T08:33:14.000Z
|
2022-01-06T15:28:19.000Z
|
ImageBot/to_yolo/YoloConverter.py
|
FraunhoferIAO/Image-Bot
|
951258a78a297f3fb27478f5671f6bc804cd5715
|
[
"MIT"
] | 1
|
2022-02-17T17:43:11.000Z
|
2022-02-17T17:43:11.000Z
|
ImageBot/to_yolo/YoloConverter.py
|
IAORaisierer/Image-Bot
|
951258a78a297f3fb27478f5671f6bc804cd5715
|
[
"MIT"
] | 1
|
2022-02-09T18:24:09.000Z
|
2022-02-09T18:24:09.000Z
|
import numpy as np
import cv2
import uuid
import os
import random
import imgaug.augmenters as iaa
import imgaug.parameters as iap
from pathlib import Path
from queue import Queue
import os
from ..Config import CLASS_ID
from ..infrastructure.ImageMessage import ImageMessage
from ..infrastructure.filter import load_image
from ..image_processing.masks import mask_bounding_box
def to_yolo_dataset(source_folder, target_folder, test_training_split=0.3):
# Get all filenames
filenames = [f.replace('_mask.png', "") for f in os.listdir(source_folder) if f.endswith('_mask.png')]
# First shuffle the list
random.shuffle(filenames)
    # Now split off the test set according to test_training_split
max_index = int(test_training_split*len(filenames))
test_messages = filenames[:max_index]
training_messages = filenames[max_index:]
# Now save them and their labels in different folders
train_folder = os.path.join(target_folder, 'train')
test_folder = os.path.join(target_folder, 'test')
    # Make the folders if they do not already exist
# TODO: Remove hard coded paths
Path(target_folder).mkdir(parents=True, exist_ok=True)
Path(train_folder).mkdir(parents=True, exist_ok=True)
Path(test_folder).mkdir(parents=True, exist_ok=True)
Path(os.path.join(train_folder, "images")).mkdir(parents=True, exist_ok=True)
Path(os.path.join(test_folder, "images")).mkdir(parents=True, exist_ok=True)
Path(os.path.join(train_folder, "labels")).mkdir(parents=True, exist_ok=True)
Path(os.path.join(test_folder, "labels")).mkdir(parents=True, exist_ok=True)
# Helper function to save the file
def save_message(filename, parent_path):
image = cv2.imread(os.path.join(source_folder, filename + ".png"), cv2.IMREAD_GRAYSCALE)
mask = cv2.imread(os.path.join(source_folder, filename + "_mask.png"), cv2.IMREAD_GRAYSCALE)
# Save the image
cv2.imwrite(os.path.join(parent_path, "images", filename + ".png"), image)
# Save the label
bb = bounding_box_darknet_format(mask)
bb_str = ""
if bb is not None:
bb_str = str(CLASS_ID) + " " + str(bb[0][0]) + " " + str(bb[0][1]) + " " + str(bb[1][0]) + " " + str(bb[1][1])
with open(os.path.join(os.path.join(parent_path, "labels"), filename + ".txt"), "w") as label_file:
label_file.write(bb_str)
# Now save them
for m in test_messages:
save_message(m, test_folder)
for m in training_messages:
save_message(m, train_folder)
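# --- Added usage sketch (illustrative; the folder names are assumptions, not
# Image-Bot defaults). With a 0.3 split, roughly 30% of the masked images end
# up under <target>/test and the rest under <target>/train, each with a
# matching darknet label file:
#
#   to_yolo_dataset("data/composed", "data/yolo", test_training_split=0.3)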
def bounding_box_darknet_format(mask):
bb = mask_bounding_box(mask)
if bb is None:
return None
s = mask.shape
center = ((bb[0][0]+bb[1][0])/2.0/s[1], (bb[0][1]+bb[1][1])/2.0/s[0])
size = ((bb[1][0]-bb[0][0])/s[1], (bb[1][1]-bb[0][1])/s[0])
return (center, size)
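# --- Added worked example (illustrative; values are made up). For a mask of
# shape (200, 400) (h=200, w=400) whose bounding box corners are
# ((40, 20), (120, 100)) in (x, y) pixels, the normalised darknet output is:
#
#   center = ((40 + 120) / 2 / 400, (20 + 100) / 2 / 200) = (0.2, 0.3)
#   size   = ((120 - 40) / 400,     (100 - 20) / 200)     = (0.2, 0.4)
#
# i.e. all values are fractions of the image width/height, as YOLO expects.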
| 38.351351
| 122
| 0.677942
|
7c0901f83acb1a941985bf451068cfc65e13f65c
| 3,337
|
py
|
Python
|
infrastructure-provisioning/src/general/scripts/aws/common_create_role_policy.py
|
GennadiyShpak/incubator-datalab
|
00f23c6bea160136c4ab106484c7053554915873
|
[
"Apache-2.0"
] | 53
|
2019-01-24T10:18:26.000Z
|
2020-09-27T10:44:33.000Z
|
infrastructure-provisioning/src/general/scripts/aws/common_create_role_policy.py
|
GennadiyShpak/incubator-datalab
|
00f23c6bea160136c4ab106484c7053554915873
|
[
"Apache-2.0"
] | 48
|
2019-02-28T12:11:33.000Z
|
2020-09-15T08:27:08.000Z
|
infrastructure-provisioning/src/general/scripts/aws/common_create_role_policy.py
|
GennadiyShpak/incubator-datalab
|
00f23c6bea160136c4ab106484c7053554915873
|
[
"Apache-2.0"
] | 44
|
2019-01-14T10:31:55.000Z
|
2020-09-22T17:53:33.000Z
|
#!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import argparse
import sys
from datalab.actions_lib import *
from datalab.meta_lib import *
from datalab.logger import logging
parser = argparse.ArgumentParser()
parser.add_argument('--role_name', type=str, default='')
parser.add_argument('--role_profile_name', type=str, default='')
parser.add_argument('--permissions_boundary_arn', type=str, default='')
parser.add_argument('--policy_name', type=str, default='')
parser.add_argument('--policy_arn', type=str, default='')
parser.add_argument('--policy_file_name', type=str, default='')
parser.add_argument('--region', type=str, default='')
parser.add_argument('--infra_tag_name', type=str, default='')
parser.add_argument('--infra_tag_value', type=str, default='')
parser.add_argument('--user_tag_value', type=str, default='')
args = parser.parse_args()
if __name__ == "__main__":
if args.role_name != '':
try:
role_name = get_role_by_name(args.role_name)
if role_name == '':
tag = {"Key": args.infra_tag_name, "Value": args.infra_tag_value}
user_tag = {"Key": "user:tag", "Value": args.user_tag_value}
logging.info("Creating role {0}, profile name {1}".format(args.role_name, args.role_profile_name))
create_iam_role(args.role_name, args.role_profile_name, args.region, args.permissions_boundary_arn, tag=tag, user_tag=user_tag)
else:
logging.info("ROLE AND ROLE PROFILE ARE ALREADY CREATED")
logging.info("ROLE {} created. IAM group {} created".format(args.role_name, args.role_profile_name))
logging.info("ATTACHING POLICIES TO ROLE")
if args.policy_file_name != '':
create_attach_policy(args.policy_name, args.role_name, args.policy_file_name)
else:
if args.policy_arn == '':
logging.info("POLICY ARN is empty, there is nothing to attach.")
success = True
else:
policy_arn_bits = eval(args.policy_arn)
for bit in policy_arn_bits:
attach_policy(args.role_name, bit)
logging.info("POLICY {} created".format(args.policy_name))
except Exception as err:
logging.error('Error: {0}'.format(err))
else:
parser.print_help()
sys.exit(2)
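# --- Added invocation sketch (illustrative; the role, policy and tag values
# are placeholders, not DataLab defaults):
#
#   python3 common_create_role_policy.py \
#       --role_name example-role --role_profile_name example-profile \
#       --policy_name example-policy --policy_file_name /tmp/policy.json \
#       --region us-east-1 --infra_tag_name user:tag --infra_tag_value example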
| 45.094595
| 143
| 0.637998
|
16ee9425d4107ab860c52b53f97c802b576ce2ea
| 14,043
|
py
|
Python
|
lbrynet/lbryfile/client/LBRYFileDownloader.py
|
sonatagreen/lbry
|
f6473e3383956b7823a76516622bba6c172619e1
|
[
"BSD-2-Clause"
] | null | null | null |
lbrynet/lbryfile/client/LBRYFileDownloader.py
|
sonatagreen/lbry
|
f6473e3383956b7823a76516622bba6c172619e1
|
[
"BSD-2-Clause"
] | null | null | null |
lbrynet/lbryfile/client/LBRYFileDownloader.py
|
sonatagreen/lbry
|
f6473e3383956b7823a76516622bba6c172619e1
|
[
"BSD-2-Clause"
] | null | null | null |
import subprocess
import binascii
from zope.interface import implements
from lbrynet.lbryfile.StreamDescriptor import save_sd_info
from lbrynet.cryptstream.client.CryptStreamDownloader import CryptStreamDownloader
from lbrynet.core.client.StreamProgressManager import FullStreamProgressManager
from lbrynet.core.StreamDescriptor import StreamMetadata
from lbrynet.interfaces import IStreamDownloaderFactory
from lbrynet.lbryfile.client.LBRYFileMetadataHandler import LBRYFileMetadataHandler
import os
from twisted.internet import defer, threads, reactor
from twisted.python.procutils import which
import logging
import traceback
log = logging.getLogger(__name__)
class LBRYFileDownloader(CryptStreamDownloader):
"""Classes which inherit from this class download LBRY files"""
def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager,
stream_info_manager, payment_rate_manager, wallet, upload_allowed):
CryptStreamDownloader.__init__(self, peer_finder, rate_limiter, blob_manager,
payment_rate_manager, wallet, upload_allowed)
self.stream_hash = stream_hash
self.stream_info_manager = stream_info_manager
self.suggested_file_name = None
self._calculated_total_bytes = None
def set_stream_info(self):
if self.key is None:
d = self.stream_info_manager.get_stream_info(self.stream_hash)
def set_stream_info(stream_info):
key, stream_name, suggested_file_name = stream_info
self.key = binascii.unhexlify(key)
self.stream_name = binascii.unhexlify(stream_name)
self.suggested_file_name = binascii.unhexlify(suggested_file_name)
d.addCallback(set_stream_info)
return d
else:
return defer.succeed(True)
def delete_data(self):
d1 = self.stream_info_manager.get_blobs_for_stream(self.stream_hash)
def get_blob_hashes(blob_infos):
return [b[0] for b in blob_infos if b[0] is not None]
d1.addCallback(get_blob_hashes)
d2 = self.stream_info_manager.get_sd_blob_hashes_for_stream(self.stream_hash)
def combine_blob_hashes(results):
blob_hashes = []
for success, result in results:
if success is True:
blob_hashes.extend(result)
return blob_hashes
def delete_blobs(blob_hashes):
self.blob_manager.delete_blobs(blob_hashes)
return True
dl = defer.DeferredList([d1, d2], fireOnOneErrback=True)
dl.addCallback(combine_blob_hashes)
dl.addCallback(delete_blobs)
return dl
def stop(self, err=None):
d = self._close_output()
d.addCallback(lambda _: CryptStreamDownloader.stop(self, err=err))
return d
def _get_progress_manager(self, download_manager):
return FullStreamProgressManager(self._finished_downloading, self.blob_manager, download_manager)
def _start(self):
d = self._setup_output()
d.addCallback(lambda _: CryptStreamDownloader._start(self))
return d
def _setup_output(self):
pass
def _close_output(self):
pass
def get_total_bytes(self):
d = self.stream_info_manager.get_blobs_for_stream(self.stream_hash)
def calculate_size(blobs):
return sum([b[3] for b in blobs])
d.addCallback(calculate_size)
return d
def get_total_bytes_cached(self):
if self._calculated_total_bytes is None or self._calculated_total_bytes == 0:
if self.download_manager is None:
return 0
else:
self._calculated_total_bytes = self.download_manager.calculate_total_bytes()
return self._calculated_total_bytes
def get_bytes_left_to_output(self):
if self.download_manager is not None:
return self.download_manager.calculate_bytes_left_to_output()
else:
return 0
def get_bytes_left_to_download(self):
if self.download_manager is not None:
return self.download_manager.calculate_bytes_left_to_download()
else:
return 0
def _get_metadata_handler(self, download_manager):
return LBRYFileMetadataHandler(self.stream_hash, self.stream_info_manager, download_manager)
class LBRYFileDownloaderFactory(object):
implements(IStreamDownloaderFactory)
def __init__(self, peer_finder, rate_limiter, blob_manager, stream_info_manager,
wallet):
self.peer_finder = peer_finder
self.rate_limiter = rate_limiter
self.blob_manager = blob_manager
self.stream_info_manager = stream_info_manager
self.wallet = wallet
def can_download(self, sd_validator):
return True
def make_downloader(self, metadata, options, payment_rate_manager, **kwargs):
payment_rate_manager.min_blob_data_payment_rate = options[0]
upload_allowed = options[1]
def save_source_if_blob(stream_hash):
if metadata.metadata_source == StreamMetadata.FROM_BLOB:
d = self.stream_info_manager.save_sd_blob_hash_to_stream(stream_hash, metadata.source_blob_hash)
else:
d = defer.succeed(True)
d.addCallback(lambda _: stream_hash)
return d
def create_downloader(stream_hash):
downloader = self._make_downloader(stream_hash, payment_rate_manager,
metadata.validator.raw_info, upload_allowed)
d = downloader.set_stream_info()
d.addCallback(lambda _: downloader)
return d
d = save_sd_info(self.stream_info_manager, metadata.validator.raw_info)
d.addCallback(save_source_if_blob)
d.addCallback(create_downloader)
return d
def _make_downloader(self, stream_hash, payment_rate_manager, stream_info, upload_allowed):
pass
class LBRYFileSaver(LBRYFileDownloader):
def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, stream_info_manager,
payment_rate_manager, wallet, download_directory, upload_allowed, file_name=None):
LBRYFileDownloader.__init__(self, stream_hash, peer_finder, rate_limiter, blob_manager,
stream_info_manager, payment_rate_manager, wallet, upload_allowed)
self.download_directory = download_directory
self.file_name = file_name
self.file_written_to = None
self.file_handle = None
def __str__(self):
if self.file_written_to is not None:
return str(self.file_written_to)
else:
return str(self.file_name)
def set_stream_info(self):
d = LBRYFileDownloader.set_stream_info(self)
def set_file_name():
if self.file_name is None:
if self.suggested_file_name:
self.file_name = os.path.basename(self.suggested_file_name)
else:
self.file_name = os.path.basename(self.stream_name)
d.addCallback(lambda _: set_file_name())
return d
def stop(self, err=None):
d = LBRYFileDownloader.stop(self, err=err)
d.addCallback(lambda _: self._delete_from_info_manager())
return d
def _get_progress_manager(self, download_manager):
return FullStreamProgressManager(self._finished_downloading, self.blob_manager, download_manager,
delete_blob_after_finished=not self.upload_allowed)
def _setup_output(self):
def open_file():
if self.file_handle is None:
file_name = self.file_name
if not file_name:
file_name = "_"
if os.path.exists(os.path.join(self.download_directory, file_name)):
ext_num = 1
def _get_file_name(ext):
if len(file_name.split(".")):
fn = ''.join(file_name.split(".")[:-1])
file_ext = ''.join(file_name.split(".")[-1])
return fn + "-" + str(ext) + "." + file_ext
else:
return file_name + "_" + str(ext)
while os.path.exists(os.path.join(self.download_directory,
_get_file_name(ext_num))):
ext_num += 1
file_name = _get_file_name(ext_num)
try:
self.file_handle = open(os.path.join(self.download_directory, file_name), 'wb')
self.file_written_to = os.path.join(self.download_directory, file_name)
except IOError:
log.error(traceback.format_exc())
raise ValueError("Failed to open %s. Make sure you have permission to save files to that"
" location." % str(os.path.join(self.download_directory,
file_name)))
return threads.deferToThread(open_file)
def _close_output(self):
self.file_handle, file_handle = None, self.file_handle
def close_file():
if file_handle is not None:
name = file_handle.name
file_handle.close()
if self.completed is False:
os.remove(name)
return threads.deferToThread(close_file)
def _get_write_func(self):
def write_func(data):
if self.stopped is False and self.file_handle is not None:
self.file_handle.write(data)
return write_func
def _delete_from_info_manager(self):
return self.stream_info_manager.delete_stream(self.stream_hash)
class LBRYFileSaverFactory(LBRYFileDownloaderFactory):
def __init__(self, peer_finder, rate_limiter, blob_manager, stream_info_manager,
wallet, download_directory):
LBRYFileDownloaderFactory.__init__(self, peer_finder, rate_limiter, blob_manager,
stream_info_manager, wallet)
self.download_directory = download_directory
def _make_downloader(self, stream_hash, payment_rate_manager, stream_info, upload_allowed):
return LBRYFileSaver(stream_hash, self.peer_finder, self.rate_limiter, self.blob_manager,
self.stream_info_manager, payment_rate_manager, self.wallet,
self.download_directory, upload_allowed)
@staticmethod
def get_description():
return "Save"
class LBRYFileOpener(LBRYFileDownloader):
def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, stream_info_manager,
payment_rate_manager, wallet, upload_allowed):
LBRYFileDownloader.__init__(self, stream_hash, peer_finder, rate_limiter, blob_manager,
stream_info_manager, payment_rate_manager, wallet, upload_allowed)
self.process = None
self.process_log = None
def stop(self, err=None):
d = LBRYFileDownloader.stop(self, err=err)
d.addCallback(lambda _: self._delete_from_info_manager())
return d
def _get_progress_manager(self, download_manager):
return FullStreamProgressManager(self._finished_downloading, self.blob_manager, download_manager,
delete_blob_after_finished=not self.upload_allowed)
def _setup_output(self):
def start_process():
if os.name == "nt":
paths = [r'C:\Program Files\VideoLAN\VLC\vlc.exe',
r'C:\Program Files (x86)\VideoLAN\VLC\vlc.exe']
for p in paths:
if os.path.exists(p):
vlc_path = p
break
else:
raise ValueError("You must install VLC media player to stream files")
else:
vlc_path = 'vlc'
self.process_log = open("vlc.out", 'a')
try:
self.process = subprocess.Popen([vlc_path, '-'], stdin=subprocess.PIPE,
stdout=self.process_log, stderr=self.process_log)
except OSError:
raise ValueError("VLC media player could not be opened")
d = threads.deferToThread(start_process)
return d
def _close_output(self):
if self.process is not None:
self.process.stdin.close()
self.process = None
return defer.succeed(True)
def _get_write_func(self):
def write_func(data):
if self.stopped is False and self.process is not None:
try:
self.process.stdin.write(data)
except IOError:
reactor.callLater(0, self.stop)
return write_func
def _delete_from_info_manager(self):
return self.stream_info_manager.delete_stream(self.stream_hash)
class LBRYFileOpenerFactory(LBRYFileDownloaderFactory):
def can_download(self, sd_validator):
if which('vlc'):
return True
elif os.name == "nt":
paths = [r'C:\Program Files\VideoLAN\VLC\vlc.exe',
r'C:\Program Files (x86)\VideoLAN\VLC\vlc.exe']
for p in paths:
if os.path.exists(p):
return True
return False
def _make_downloader(self, stream_hash, payment_rate_manager, stream_info, upload_allowed):
return LBRYFileOpener(stream_hash, self.peer_finder, self.rate_limiter, self.blob_manager,
self.stream_info_manager, payment_rate_manager, self.wallet, upload_allowed)
@staticmethod
def get_description():
return "Stream"
| 39.669492
| 112
| 0.631347
|
499b25c7002886a68ccc0a96a57ee24a95d757b7
| 1,210
|
py
|
Python
|
camel_tools/disambig/__init__.py
|
balhafni/camel_tools
|
84a8149fe6e80b4ecbd4bf8cfb9f0440b4815389
|
[
"MIT"
] | null | null | null |
camel_tools/disambig/__init__.py
|
balhafni/camel_tools
|
84a8149fe6e80b4ecbd4bf8cfb9f0440b4815389
|
[
"MIT"
] | null | null | null |
camel_tools/disambig/__init__.py
|
balhafni/camel_tools
|
84a8149fe6e80b4ecbd4bf8cfb9f0440b4815389
|
[
"MIT"
] | 2
|
2020-12-08T18:10:12.000Z
|
2021-08-30T15:33:50.000Z
|
# MIT License
#
# Copyright 2018-2019 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains classes and functions for morphological disambiguation.
"""
| 48.4
| 79
| 0.780992
|
aad8692cfd5f7302722068f7c5debb2d9d3fa008
| 5,397
|
py
|
Python
|
wl.py
|
camgnostic/Wunderlist-3-Alfred
|
65fc2fad2c2f7786140ade78140ff0c80aca6e68
|
[
"AAL"
] | 9
|
2015-03-04T12:44:22.000Z
|
2018-08-12T05:56:13.000Z
|
wl.py
|
camgnostic/Wunderlist-3-Alfred
|
65fc2fad2c2f7786140ade78140ff0c80aca6e68
|
[
"AAL"
] | 1
|
2015-02-22T20:19:48.000Z
|
2016-08-21T07:05:36.000Z
|
wl.py
|
camgnostic/Wunderlist-3-Alfred
|
65fc2fad2c2f7786140ade78140ff0c80aca6e68
|
[
"AAL"
] | null | null | null |
from wl_utils import getLists, getListId, refreshTasks, refreshLists, getTasks, getTask
from wl_utils import ICON_ADDTASK, ICON_DELETE, ICON_DONE, ICON_EMPTY, ICON_ITEM, ICON_LIST, ICON_NEWLIST, ICON_STAR, ICON_UNSTAR, ICON_UPDATE
from workflow import Workflow, PasswordNotFound
from workflow.background import is_running
import sys
import threading
def main(wf):
#if no args, no query:
if len(wf.args):
query = u' '.join(wf.args[0:])
wf.logger.debug(query)
else:
query = ""
try:
        # do we have a login? If not, drop to the PasswordNotFound catch at the bottom:
token = wf.get_password('token')
add = False
new = False
#if add task, strip add and collect listname:
listId = False
if query[0:4].lower() == "add:":
query = query[3:].lstrip(' ').lstrip(":").lstrip(' ')
if len(query.split(":")) > 1:
listname = query.split(":")[0]
listId = getListId(wf, listname)
query = ":".join(query.split(":")[1:]).lstrip(' ').lstrip(":")
if listId == False:
if not is_running("updateLists"):
refreshLists(wf)
wf.add_item("List not found, updating lists...", valid=False, icon=ICON_UPDATE)
else:
valid = True if query != "" else False
wf.add_item(title = u'' + (query if query != "" else 'New Task'),
subtitle = u'add to list: ' + str(listname),
valid=valid, arg='add:' + str(listId) + ':' + str(query),
icon = ICON_ADDTASK)
wf.send_feedback()
add = True
wf.add_item(title = u'Add an item to the highlighted list:',
valid=False, icon=ICON_ADDTASK)
getLists(wf, query, True)
#if new list, strip new and collect new listname:
elif query[0:4].lower() == "new:":
query = query[3:].lstrip(' ').lstrip(":").lstrip(' ')
new = True
valid = True if query != "" else False
wf.add_item(title = u'' + query,
subtitle='add new list',
valid=valid, arg='new:' + query,
icon=ICON_NEWLIST)
wf.send_feedback()
if len(query.split(":")) > 1:
listname = query.split(":")[0]
listId = getListId(wf, listname)
query = ":".join(query.split(":")[1:]).lstrip(' ').lstrip(':')
#listname not found:
if listId == False:
if not is_running("updateLists"):
refreshLists(wf)
wf.add_item("List not found, updating lists...", valid=False, icon=ICON_UPDATE)
wf.send_feedback()
#listname found, is taskname found?
elif getTask(wf, listId, query):
task = getTask(wf, listId, query)
taskId = task['id']
query = ":".join(query.split(":")[1:]).lstrip(' ').lstrip(":")
#taskname not found:
if taskId == False:
if not is_running("updateTasks:" + str(listId)):
refreshTasks(wf, listId)
wf.add_item("Task not found, updating tasks....", valid=False, icon=ICON_UPDATE)
wf.send_feedback()
#taskname found. tasks menu
else:
wf.add_item(title = u'' + listname + ':' + task['title'] + ':', valid=False,
icon=(ICON_STAR if task['starred'] else ICON_ITEM))
wf.add_item(title = u'Mark as Complete', subtitle = u'Task will not show in list when marked complete',
valid=True, arg="done:" + str(listId) + ":" + str(taskId),
icon=ICON_DONE)
wf.add_item(title = u'' + ("Uns" if task['starred'] == True else "S") + "tar task",
valid=True, arg="star:" + str(listId) + ":" + str(taskId),
icon=(ICON_UNSTAR if task['starred'] else ICON_STAR))
wf.add_item(title = u'Delete task', subtitle='This will permanently delete this task',
valid=True, arg="rem:" + str(listId) + ":" + str(taskId),
icon = ICON_DELETE)
wf.send_feedback()
elif add == False:
getTasks(wf, listId, listname, query)
#not new or add, still searching lists:
if new == False and add == False and not listId:
if query == "":
wf.add_item(title = u'Add', subtitle = u'Add a new task to a list',
valid=False, autocomplete="Add:", icon=ICON_ADDTASK)
wf.add_item(title = u'New', subtitle = u'New list',
valid=False, autocomplete="New:", icon=ICON_NEWLIST)
getLists(wf, query)
except PasswordNotFound:
wf.add_item(title = u'Login', subtitle = u'Authorize with Wunderlist',
arg = "login", valid=True, icon=ICON_EMPTY)
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
| 49.513761
| 142
| 0.495831
|
762646b44b3f3be70513c9299f47da6de6d76ad6
| 25,596
|
py
|
Python
|
pywps/app/WPSRequest.py
|
janpisl/pywps
|
73a1835359f0503e08fb007d75de699bf3cf29ed
|
[
"MIT"
] | null | null | null |
pywps/app/WPSRequest.py
|
janpisl/pywps
|
73a1835359f0503e08fb007d75de699bf3cf29ed
|
[
"MIT"
] | null | null | null |
pywps/app/WPSRequest.py
|
janpisl/pywps
|
73a1835359f0503e08fb007d75de699bf3cf29ed
|
[
"MIT"
] | null | null | null |
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import logging
import lxml
import lxml.etree
from werkzeug.exceptions import MethodNotAllowed
import base64
import datetime
from pywps import WPS
from pywps._compat import text_type, PY2
from pywps.app.basic import xpath_ns
from pywps.inout.basic import LiteralInput, ComplexInput, BBoxInput
from pywps.exceptions import NoApplicableCode, OperationNotSupported, MissingParameterValue, VersionNegotiationFailed, \
InvalidParameterValue, FileSizeExceeded
from pywps import configuration
from pywps.validator.mode import MODE
from pywps.inout.literaltypes import AnyValue, NoValue, ValuesReference, AllowedValue
from pywps.inout.formats import Format
import json
LOGGER = logging.getLogger("PYWPS")
class WPSRequest(object):
def __init__(self, http_request=None):
self.http_request = http_request
self.operation = None
self.version = None
self.language = None
self.identifier = None
self.identifiers = None
self.store_execute = None
self.status = None
self.lineage = None
self.inputs = {}
self.outputs = {}
self.raw = None
if self.http_request:
request_parser = self._get_request_parser_method(http_request.method)
request_parser()
def _get_request_parser_method(self, method):
if method == 'GET':
return self._get_request
elif method == 'POST':
return self._post_request
else:
raise MethodNotAllowed()
def _get_request(self):
"""HTTP GET request parser
"""
# service shall be WPS
service = _get_get_param(self.http_request, 'service')
if service:
if str(service).lower() != 'wps':
raise InvalidParameterValue(
'parameter SERVICE [%s] not supported' % service, 'service')
else:
raise MissingParameterValue('service', 'service')
operation = _get_get_param(self.http_request, 'request')
request_parser = self._get_request_parser(operation)
request_parser(self.http_request)
def _post_request(self):
"""HTTP GET request parser
"""
# check if input file size was not exceeded
maxsize = configuration.get_config_value('server', 'maxrequestsize')
maxsize = configuration.get_size_mb(maxsize) * 1024 * 1024
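        # maxrequestsize is configured in megabytes; convert it to bytes before
        # comparing it with the Content-Length of the incoming request.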
if self.http_request.content_length > maxsize:
            raise FileSizeExceeded('File size for input exceeded.'
                                   ' Maximum request size allowed: %i megabytes'
                                   % (maxsize / 1024 / 1024))
try:
doc = lxml.etree.fromstring(self.http_request.get_data())
except Exception as e:
if PY2:
raise NoApplicableCode(e.message)
else:
raise NoApplicableCode(e.msg)
operation = doc.tag
request_parser = self._post_request_parser(operation)
request_parser(doc)
def _get_request_parser(self, operation):
"""Factory function returing propper parsing function
"""
wpsrequest = self
def parse_get_getcapabilities(http_request):
"""Parse GET GetCapabilities request
"""
acceptedversions = _get_get_param(http_request, 'acceptversions')
wpsrequest.check_accepted_versions(acceptedversions)
def parse_get_describeprocess(http_request):
"""Parse GET DescribeProcess request
"""
version = _get_get_param(http_request, 'version')
wpsrequest.check_and_set_version(version)
language = _get_get_param(http_request, 'language')
wpsrequest.check_and_set_language(language)
wpsrequest.identifiers = _get_get_param(
http_request, 'identifier', aslist=True)
def parse_get_execute(http_request):
"""Parse GET Execute request
"""
version = _get_get_param(http_request, 'version')
wpsrequest.check_and_set_version(version)
language = _get_get_param(http_request, 'language')
wpsrequest.check_and_set_language(language)
wpsrequest.identifier = _get_get_param(http_request, 'identifier')
wpsrequest.store_execute = _get_get_param(
http_request, 'storeExecuteResponse', 'false')
wpsrequest.status = _get_get_param(http_request, 'status', 'false')
wpsrequest.lineage = _get_get_param(
http_request, 'lineage', 'false')
wpsrequest.inputs = get_data_from_kvp(
_get_get_param(http_request, 'DataInputs'), 'DataInputs')
wpsrequest.outputs = {}
# take responseDocument preferably
resp_outputs = get_data_from_kvp(
_get_get_param(http_request, 'ResponseDocument'))
raw_outputs = get_data_from_kvp(
_get_get_param(http_request, 'RawDataOutput'))
wpsrequest.raw = False
if resp_outputs:
wpsrequest.outputs = resp_outputs
elif raw_outputs:
wpsrequest.outputs = raw_outputs
wpsrequest.raw = True
# executeResponse XML will not be stored and no updating of
# status
wpsrequest.store_execute = 'false'
wpsrequest.status = 'false'
if not operation:
raise MissingParameterValue('Missing request value', 'request')
else:
self.operation = operation.lower()
if self.operation == 'getcapabilities':
return parse_get_getcapabilities
elif self.operation == 'describeprocess':
return parse_get_describeprocess
elif self.operation == 'execute':
return parse_get_execute
else:
raise OperationNotSupported(
'Unknown request %r' % self.operation, operation)
def _post_request_parser(self, tagname):
"""Factory function returing propper parsing function
"""
wpsrequest = self
def parse_post_getcapabilities(doc):
"""Parse POST GetCapabilities request
"""
acceptedversions = xpath_ns(
doc, '/wps:GetCapabilities/ows:AcceptVersions/ows:Version')
acceptedversions = ','.join(
map(lambda v: v.text, acceptedversions))
wpsrequest.check_accepted_versions(acceptedversions)
def parse_post_describeprocess(doc):
"""Parse POST DescribeProcess request
"""
version = doc.attrib.get('version')
wpsrequest.check_and_set_version(version)
language = doc.attrib.get('language')
wpsrequest.check_and_set_language(language)
wpsrequest.operation = 'describeprocess'
wpsrequest.identifiers = [identifier_el.text for identifier_el in
xpath_ns(doc, './ows:Identifier')]
def parse_post_execute(doc):
"""Parse POST Execute request
"""
version = doc.attrib.get('version')
wpsrequest.check_and_set_version(version)
language = doc.attrib.get('language')
wpsrequest.check_and_set_language(language)
wpsrequest.operation = 'execute'
identifier = xpath_ns(doc, './ows:Identifier')
if not identifier:
raise MissingParameterValue(
'Process identifier not set', 'Identifier')
wpsrequest.identifier = identifier[0].text
wpsrequest.lineage = 'false'
wpsrequest.store_execute = 'false'
wpsrequest.status = 'false'
wpsrequest.inputs = get_inputs_from_xml(doc)
wpsrequest.outputs = get_output_from_xml(doc)
wpsrequest.raw = False
if xpath_ns(doc, '/wps:Execute/wps:ResponseForm/wps:RawDataOutput'):
wpsrequest.raw = True
# executeResponse XML will not be stored
wpsrequest.store_execute = 'false'
# check if response document tag has been set then retrieve
response_document = xpath_ns(
doc, './wps:ResponseForm/wps:ResponseDocument')
if len(response_document) > 0:
wpsrequest.lineage = response_document[
0].attrib.get('lineage', 'false')
wpsrequest.store_execute = response_document[
0].attrib.get('storeExecuteResponse', 'false')
wpsrequest.status = response_document[
0].attrib.get('status', 'false')
if tagname == WPS.GetCapabilities().tag:
self.operation = 'getcapabilities'
return parse_post_getcapabilities
elif tagname == WPS.DescribeProcess().tag:
self.operation = 'describeprocess'
return parse_post_describeprocess
elif tagname == WPS.Execute().tag:
self.operation = 'execute'
return parse_post_execute
else:
raise InvalidParameterValue(
'Unknown request %r' % tagname, 'request')
def check_accepted_versions(self, acceptedversions):
"""
:param acceptedversions: string
"""
version = None
if acceptedversions:
acceptedversions_array = acceptedversions.split(',')
for aversion in acceptedversions_array:
if _check_version(aversion):
version = aversion
else:
version = '1.0.0'
if version:
self.check_and_set_version(version)
else:
raise VersionNegotiationFailed(
'The requested version "%s" is not supported by this server' % acceptedversions, 'version')
def check_and_set_version(self, version):
"""set this.version
"""
if not version:
raise MissingParameterValue('Missing version', 'version')
elif not _check_version(version):
raise VersionNegotiationFailed(
'The requested version "%s" is not supported by this server' % version, 'version')
else:
self.version = version
def check_and_set_language(self, language):
"""set this.language
"""
if not language:
language = 'None'
elif language != 'en-US':
raise InvalidParameterValue(
'The requested language "%s" is not supported by this server' % language, 'language')
else:
self.language = language
@property
def json(self):
"""Return JSON encoded representation of the request
"""
class ExtendedJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.date) or isinstance(obj, datetime.time):
encoded_object = obj.isoformat()
else:
encoded_object = json.JSONEncoder.default(self, obj)
return encoded_object
obj = {
'operation': self.operation,
'version': self.version,
'language': self.language,
'identifier': self.identifier,
'identifiers': self.identifiers,
'store_execute': self.store_execute,
'status': self.status,
'lineage': self.lineage,
'inputs': dict((i, [inpt.json for inpt in self.inputs[i]]) for i in self.inputs),
'outputs': self.outputs,
'raw': self.raw
}
return json.dumps(obj, allow_nan=False, cls=ExtendedJSONEncoder)
@json.setter
def json(self, value):
"""init this request from json back again
:param value: the json (not string) representation
"""
self.operation = value['operation']
self.version = value['version']
self.language = value['language']
self.identifier = value['identifier']
self.identifiers = value['identifiers']
self.store_execute = value['store_execute']
self.status = value['status']
self.lineage = value['lineage']
self.outputs = value['outputs']
self.raw = value['raw']
self.inputs = {}
for identifier in value['inputs']:
inpt = None
inpt_defs = value['inputs'][identifier]
for inpt_def in inpt_defs:
if inpt_def['type'] == 'complex':
inpt = ComplexInput(
identifier=inpt_def['identifier'],
title=inpt_def.get('title'),
abstract=inpt_def.get('abstract'),
workdir=inpt_def.get('workdir'),
data_format=Format(
schema=inpt_def['data_format'].get('schema'),
extension=inpt_def['data_format'].get('extension'),
mime_type=inpt_def['data_format']['mime_type'],
encoding=inpt_def['data_format'].get('encoding')
),
supported_formats=[
Format(
schema=infrmt.get('schema'),
extension=infrmt.get('extension'),
mime_type=infrmt['mime_type'],
encoding=infrmt.get('encoding')
) for infrmt in inpt_def['supported_formats']
],
mode=MODE.NONE
)
inpt.file = inpt_def['file']
elif inpt_def['type'] == 'literal':
allowed_values = []
for allowed_value in inpt_def['allowed_values']:
if allowed_value['type'] == 'anyvalue':
allowed_values.append(AnyValue())
elif allowed_value['type'] == 'novalue':
allowed_values.append(NoValue())
elif allowed_value['type'] == 'valuesreference':
allowed_values.append(ValuesReference())
elif allowed_value['type'] == 'allowedvalue':
allowed_values.append(AllowedValue(
allowed_type=allowed_value['allowed_type'],
value=allowed_value['value'],
minval=allowed_value['minval'],
maxval=allowed_value['maxval'],
spacing=allowed_value['spacing'],
range_closure=allowed_value['range_closure']
))
inpt = LiteralInput(
identifier=inpt_def['identifier'],
title=inpt_def.get('title'),
abstract=inpt_def.get('abstract'),
data_type=inpt_def.get('data_type'),
workdir=inpt_def.get('workdir'),
                        allowed_values=allowed_values,
uoms=inpt_def.get('uoms'),
mode=inpt_def.get('mode')
)
inpt.uom = inpt_def.get('uom')
inpt.data = inpt_def.get('data')
elif inpt_def['type'] == 'bbox':
inpt = BBoxInput(
identifier=inpt_def['identifier'],
title=inpt_def['title'],
abstract=inpt_def['abstract'],
crss=inpt_def['crs'],
dimensions=inpt_def['dimensions'],
workdir=inpt_def['workdir'],
mode=inpt_def['mode']
)
inpt.ll = inpt_def['bbox'][0]
inpt.ur = inpt_def['bbox'][1]
if identifier in self.inputs:
self.inputs[identifier].append(inpt)
else:
self.inputs[identifier] = [inpt]
def get_inputs_from_xml(doc):
the_inputs = {}
for input_el in xpath_ns(doc, '/wps:Execute/wps:DataInputs/wps:Input'):
[identifier_el] = xpath_ns(input_el, './ows:Identifier')
identifier = identifier_el.text
if identifier not in the_inputs:
the_inputs[identifier] = []
literal_data = xpath_ns(input_el, './wps:Data/wps:LiteralData')
if literal_data:
value_el = literal_data[0]
inpt = {}
inpt['identifier'] = identifier_el.text
inpt['data'] = text_type(value_el.text)
inpt['uom'] = value_el.attrib.get('uom', '')
inpt['datatype'] = value_el.attrib.get('datatype', '')
the_inputs[identifier].append(inpt)
continue
complex_data = xpath_ns(input_el, './wps:Data/wps:ComplexData')
if complex_data:
complex_data_el = complex_data[0]
inpt = {}
inpt['identifier'] = identifier_el.text
inpt['mimeType'] = complex_data_el.attrib.get('mimeType', '')
inpt['encoding'] = complex_data_el.attrib.get(
'encoding', '').lower()
inpt['schema'] = complex_data_el.attrib.get('schema', '')
inpt['method'] = complex_data_el.attrib.get('method', 'GET')
if len(complex_data_el.getchildren()) > 0:
value_el = complex_data_el[0]
inpt['data'] = _get_dataelement_value(value_el)
else:
inpt['data'] = _get_rawvalue_value(
complex_data_el.text, inpt['encoding'])
the_inputs[identifier].append(inpt)
continue
reference_data = xpath_ns(input_el, './wps:Reference')
if reference_data:
reference_data_el = reference_data[0]
inpt = {}
inpt['identifier'] = identifier_el.text
inpt[identifier_el.text] = reference_data_el.text
inpt['href'] = reference_data_el.attrib.get(
'{http://www.w3.org/1999/xlink}href', '')
inpt['mimeType'] = reference_data_el.attrib.get('mimeType', '')
inpt['method'] = reference_data_el.attrib.get('method', 'GET')
header_element = xpath_ns(reference_data_el, './wps:Header')
if header_element:
inpt['header'] = _get_reference_header(header_element)
body_element = xpath_ns(reference_data_el, './wps:Body')
if body_element:
inpt['body'] = _get_reference_body(body_element[0])
bodyreference_element = xpath_ns(reference_data_el,
'./wps:BodyReference')
if bodyreference_element:
inpt['bodyreference'] = _get_reference_bodyreference(
bodyreference_element[0])
the_inputs[identifier].append(inpt)
continue
# OWSlib is not python 3 compatible yet
if PY2:
from owslib.ows import BoundingBox
bbox_datas = xpath_ns(input_el, './wps:Data/wps:BoundingBoxData')
if bbox_datas:
for bbox_data in bbox_datas:
bbox_data_el = bbox_data
bbox = BoundingBox(bbox_data_el)
the_inputs[identifier].append(bbox)
return the_inputs
def get_output_from_xml(doc):
the_output = {}
if xpath_ns(doc, '/wps:Execute/wps:ResponseForm/wps:ResponseDocument'):
for output_el in xpath_ns(doc, '/wps:Execute/wps:ResponseForm/wps:ResponseDocument/wps:Output'):
[identifier_el] = xpath_ns(output_el, './ows:Identifier')
outpt = {}
outpt[identifier_el.text] = ''
outpt['mimetype'] = output_el.attrib.get('mimeType', '')
outpt['encoding'] = output_el.attrib.get('encoding', '')
outpt['schema'] = output_el.attrib.get('schema', '')
outpt['uom'] = output_el.attrib.get('uom', '')
outpt['asReference'] = output_el.attrib.get('asReference', 'false')
the_output[identifier_el.text] = outpt
elif xpath_ns(doc, '/wps:Execute/wps:ResponseForm/wps:RawDataOutput'):
for output_el in xpath_ns(doc, '/wps:Execute/wps:ResponseForm/wps:RawDataOutput'):
[identifier_el] = xpath_ns(output_el, './ows:Identifier')
outpt = {}
outpt[identifier_el.text] = ''
outpt['mimetype'] = output_el.attrib.get('mimeType', '')
outpt['encoding'] = output_el.attrib.get('encoding', '')
outpt['schema'] = output_el.attrib.get('schema', '')
outpt['uom'] = output_el.attrib.get('uom', '')
the_output[identifier_el.text] = outpt
return the_output
def get_data_from_kvp(data, part=None):
"""Get execute DataInputs and ResponseDocument from URL (key-value-pairs) encoding
:param data: key:value pair list of the datainputs and responseDocument parameter
:param part: DataInputs or similar part of input url
"""
the_data = {}
if data is None:
return None
for d in data.split(";"):
try:
io = {}
fields = d.split('@')
# First field is identifier and its value
(identifier, val) = fields[0].split("=")
io['identifier'] = identifier
io['data'] = val
# Get the attributes of the data
for attr in fields[1:]:
(attribute, attr_val) = attr.split('=', 1)
if attribute == 'xlink:href':
io['href'] = attr_val
else:
io[attribute] = attr_val
# Add the input/output with all its attributes and values to the
# dictionary
if part == 'DataInputs':
if identifier not in the_data:
the_data[identifier] = []
the_data[identifier].append(io)
else:
the_data[identifier] = io
except Exception as e:
LOGGER.warning(e)
the_data[d] = {'identifier': d, 'data': ''}
return the_data
def _check_version(version):
""" check given version
"""
if version != '1.0.0':
return False
else:
return True
def _get_get_param(http_request, key, default=None, aslist=False):
"""Returns value from the key:value pair, of the HTTP GET request, for
example 'service' or 'request'
:param http_request: http_request object
:param key: key value you need to dig out of the HTTP GET request
"""
key = key.lower()
value = default
# http_request.args.keys will make + sign disappear in GET url if not
# urlencoded
for k in http_request.args.keys():
if k.lower() == key:
value = http_request.args.get(k)
if aslist:
value = value.split(",")
return value
def _get_dataelement_value(value_el):
"""Return real value of XML Element (e.g. convert Element.FeatureCollection
to String
"""
if isinstance(value_el, lxml.etree._Element):
if PY2:
return lxml.etree.tostring(value_el, encoding=unicode) # noqa
else:
return lxml.etree.tostring(value_el, encoding=str)
else:
return value_el
def _get_rawvalue_value(data, encoding=None):
"""Return real value of CDATA section"""
try:
if encoding is None or encoding == "":
return data
elif encoding == 'base64':
return base64.b64decode(data)
return base64.b64decode(data)
except Exception:
return data
def _get_reference_header(header_element):
"""Parses ReferenceInput Header element
"""
header = {}
    header['key'] = header_element[0].attrib.get('key')
    header['value'] = header_element[0].attrib.get('value')
return header
def _get_reference_body(body_element):
"""Parses ReferenceInput Body element
"""
body = None
if len(body_element.getchildren()) > 0:
value_el = body_element[0]
body = _get_dataelement_value(value_el)
else:
body = _get_rawvalue_value(body_element.text)
return body
def _get_reference_bodyreference(referencebody_element):
"""Parse ReferenceInput BodyReference element
"""
return referencebody_element.attrib.get(
'{http://www.w3.org/1999/xlink}href', '')
| 38.548193
| 121
| 0.552469
|
d7a58dd8095d0bd336daf65ad359a73a7c1cccd0
| 1,405
|
py
|
Python
|
pico8/pico8.py
|
fmaida/pico8-mkdocs-plugin
|
8fac0e651ef41a10d338566f659ad26ba09e425b
|
[
"MIT"
] | null | null | null |
pico8/pico8.py
|
fmaida/pico8-mkdocs-plugin
|
8fac0e651ef41a10d338566f659ad26ba09e425b
|
[
"MIT"
] | null | null | null |
pico8/pico8.py
|
fmaida/pico8-mkdocs-plugin
|
8fac0e651ef41a10d338566f659ad26ba09e425b
|
[
"MIT"
] | null | null | null |
import re
from mkdocs.plugins import BasePlugin
class Pico8(BasePlugin):
    # This snippet of code allows a PICO-8 web player
    # to be shown in an HTML page
snippet = "<div style='width:100%'>"
snippet += "<iframe src='https://www.lexaloffle.com/bbs/widget.php?pid={}'"
snippet += " allowfullscreen width='621' height='513'"
snippet += " style='border:none; overflow:hidden; display:block; margin-left:auto; margin-right:auto'>"
snippet += "</iframe></div>"
def on_page_markdown(self, markdown, **kwargs):
"""
Takes an article written in markdown and looks for the
presence of a tag written with this style:
{{ pico-8: 12345 }}
When the tag is found, it will be replaced with a
snippet code that will show a pico-8 web player onscreen.
:param markdown: Original article in markdown format
:param kwargs: Other parameters (won't be used here)
:return: Modified markdown
"""
plugin_pattern = r"\{\{[\s]*pico[-]{0,1}8:[\s]*([0-9]*)[\s]*\}\}"
plugin_code = re.findall(plugin_pattern, markdown)
if plugin_code:
game_code = plugin_code[0]
markdown = re.sub(plugin_pattern,
self.snippet.format(game_code),
markdown,
flags=re.IGNORECASE)
return markdown
| 34.268293
| 107
| 0.593594
|
8229664c8b5b0ef7a728a86aca6818e111454f69
| 8,851
|
py
|
Python
|
misc/acrn-config/launch_config/launch_cfg_gen.py
|
yfliuu/acrn-hypervisor
|
6289124e7c894323e2a5342bf201856d76512a60
|
[
"BSD-3-Clause"
] | null | null | null |
misc/acrn-config/launch_config/launch_cfg_gen.py
|
yfliuu/acrn-hypervisor
|
6289124e7c894323e2a5342bf201856d76512a60
|
[
"BSD-3-Clause"
] | null | null | null |
misc/acrn-config/launch_config/launch_cfg_gen.py
|
yfliuu/acrn-hypervisor
|
6289124e7c894323e2a5342bf201856d76512a60
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2019 Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
from launch_item import AvailablePthru, PthruSelected, VirtioDeviceSelect, AcrnDmArgs
import launch_cfg_lib
import com
ACRN_PATH = launch_cfg_lib.SOURCE_ROOT_DIR
XML_PATH = ACRN_PATH + '/misc/acrn-config/xmls/config-xmls/'
def get_launch_item_values(board_info):
"""
    Get items which are capable of multi-select for the user
    :param board_info: a file that contains board information for the script to read from
"""
launch_item_values = {}
# passthrough devices
pthru = AvailablePthru(board_info)
pthru.get_pci_dev()
pthru.insert_nun()
# pre passthrough device for ui
launch_item_values["uos,passthrough_devices,usb_xdci"] = pthru.avl["usb_xdci"]
launch_item_values["uos,passthrough_devices,ipu"] = pthru.avl["ipu"]
launch_item_values["uos,passthrough_devices,ipu_i2c"] = pthru.avl["ipu_i2c"]
launch_item_values["uos,passthrough_devices,cse"] = pthru.avl["cse"]
launch_item_values["uos,passthrough_devices,audio"] = pthru.avl["audio"]
launch_item_values["uos,passthrough_devices,audio_codec"] = pthru.avl["audio_codec"]
launch_item_values["uos,passthrough_devices,sd_card"] = pthru.avl["sd_card"]
launch_item_values["uos,passthrough_devices,wifi"] = pthru.avl["wifi"]
launch_item_values["uos,passthrough_devices,ethernet"] = pthru.avl["ethernet"]
launch_item_values["uos,passthrough_devices,sata"] = pthru.avl["sata"]
launch_item_values["uos,passthrough_devices,nvme"] = pthru.avl["nvme"]
launch_item_values["uos,passthrough_devices,bluetooth"] = pthru.avl["bluetooth"]
# acrn dm available optargs
launch_item_values['uos,uos_type'] = launch_cfg_lib.UOS_TYPES
launch_item_values["uos,rtos_type"] = launch_cfg_lib.RTOS_TYPE
launch_item_values["uos,vbootloader"] = launch_cfg_lib.BOOT_TYPE
launch_item_values['uos,vuart0'] = launch_cfg_lib.DM_VUART0
launch_item_values['uos,poweroff_channel'] = launch_cfg_lib.PM_CHANNEL
return launch_item_values
def validate_launch_setting(board_info, scenario_info, launch_info):
"""
    Validate the data settings from the scenario xml
    :param board_info: a file that contains board information for the script to read from
    :param scenario_info: a file that contains the settings the user has already made
    :return: a dictionary containing errors
"""
launch_cfg_lib.ERR_LIST = {}
launch_cfg_lib.BOARD_INFO_FILE = board_info
launch_cfg_lib.SCENARIO_INFO_FILE = scenario_info
launch_cfg_lib.LAUNCH_INFO_FILE = launch_info
# init available pt devices and get selected pt devices
pt_avl = AvailablePthru(board_info)
pt_sel = PthruSelected(launch_info, pt_avl.bdf_desc_map, pt_avl.bdf_vpid_map)
pt_sel.get_bdf()
pt_sel.get_vpid()
pt_sel.get_slot()
pt_sel.check_item()
# virt-io devices
virtio = VirtioDeviceSelect(launch_info)
virtio.get_virtio()
# acrn dm arguments
dm = AcrnDmArgs(board_info, scenario_info, launch_info)
dm.get_args()
dm.check_item()
return (launch_cfg_lib.ERR_LIST, pt_sel, virtio, dm)
def ui_entry_api(board_info, scenario_info, launch_info, enable_commit=False):
err_dic = {}
git_env_check = False
arg_list = ['board_cfg_gen.py', '--board', board_info, '--scenario', scenario_info, '--launch', launch_info, '--uosid', '0']
if enable_commit:
arg_list.append('--enable_commit')
git_env_check = True
err_dic = launch_cfg_lib.prepare(git_env_check)
if err_dic:
return err_dic
err_dic = main(arg_list)
return err_dic
def get_names():
names = {}
# get uos name
uos_types = launch_cfg_lib.get_uos_type()
names['uos_types'] = uos_types
# get board name
(err_dic, board_name) = launch_cfg_lib.get_board_name()
if err_dic:
return (err_dic, names)
names['board_name'] = board_name
# get scenario name
(err_dic, scenario_name) = launch_cfg_lib.get_scenario_name()
if err_dic:
return (err_dic, names)
names['scenario_name'] = scenario_name
return (err_dic, names)
def generate_script_file(names, pt_sel, virt_io, dm, vmid, config):
uos_type = names['uos_types'][vmid]
board_name = names['board_name']
scenario_name = names['scenario_name']
header_info = "#!/bin/bash\n" +\
"# board: {}, scenario: {}, uos: {}".format(
board_name.upper(), scenario_name.upper(), uos_type.upper())
print("{}".format(header_info), file=config)
com.gen(names, pt_sel, virt_io, dm, vmid, config)
if launch_cfg_lib.ERR_LIST:
return launch_cfg_lib.ERR_LIST
def main(args):
"""
    This is the main function to start generating launch scripts
    :param args: the command line args for the script
"""
config_srcs = []
# get parameters
(err_dic, board_info_file, scenario_info_file, launch_info_file, vm_th, enable_commit) = launch_cfg_lib.get_param(args)
if err_dic:
return err_dic
# check env
err_dic = launch_cfg_lib.prepare(enable_commit)
if err_dic:
return err_dic
# vm_th =[0..post_vm_max]
# 0: generate all launch script for all post vm launch script
# 1: generate launch script for 1st post vm launch script
# 2: generate launch script for 2nd post vm launch script
launch_cfg_lib.BOARD_INFO_FILE = board_info_file
launch_cfg_lib.SCENARIO_INFO_FILE = scenario_info_file
launch_cfg_lib.LAUNCH_INFO_FILE = launch_info_file
# get post vm dic
post_num_list = launch_cfg_lib.get_post_num_list()
    # get total post vm number and total vm count in launch config file
(launch_vm_count, post_vm_count) = launch_cfg_lib.get_post_vm_cnt()
if vm_th < 0 or vm_th > post_vm_count:
err_dic['uosid err:'] = "--uosid shoudl be positive and less than total post vm count in scenario"
if vm_th and vm_th not in post_num_list:
err_dic['uosid err:'] = "--uosid generate the {} post vm, but this vm's config not in launch xml".format(vm_th)
if launch_vm_count > post_vm_count:
err_dic['xm config err:'] = "too many vms config than scenario"
for post_num in post_num_list:
if post_num > post_vm_count:
err_dic['xm config err:'] = "launch xml uos id config is bigger than scenario post vm count"
if err_dic:
return err_dic
# validate launch config file
(err_dic, pt_sel, virt_io, dm) = validate_launch_setting(board_info_file, scenario_info_file, launch_info_file)
if err_dic:
return err_dic
# check if this is the scenario config which matched board info
(err_dic, status) = launch_cfg_lib.is_config_file_match()
if not status:
return err_dic
(err_dic, names) = get_names()
if err_dic:
return err_dic
# create output directory
board_name = names['board_name']
output = XML_PATH + '/' + board_name + '/output/'
if not os.path.exists(output):
os.makedirs(output)
# generate launch script
if vm_th:
script_name = "launch_uos_id{}.sh".format(vm_th)
commit_msg = script_name
launch_script_file = output + script_name
config_srcs.append(launch_script_file)
with open(launch_script_file, mode = 'w', newline=None, encoding='utf-8') as config:
err_dic = generate_script_file(names, pt_sel, virt_io.dev, dm.args, vm_th, config)
if err_dic:
return err_dic
else:
for post_vm_i in post_num_list:
script_name = "launch_uos_id{}.sh".format(post_vm_i)
launch_script_file = output + script_name
config_srcs.append(launch_script_file)
with open(launch_script_file, mode = 'w', newline='\n', encoding='utf-8') as config:
err_dic = generate_script_file(names, pt_sel, virt_io.dev, dm.args, post_vm_i, config)
if err_dic:
return err_dic
commit_msg = "launch_uos_id{}.sh".format(launch_vm_count)
config_str = 'Config files'
gen_str = 'generated'
# move changes to patch, and apply to the source code
if enable_commit:
err_dic = launch_cfg_lib.gen_patch(config_srcs, commit_msg)
config_str = 'Config patch'
gen_str = 'committed'
if not err_dic:
print("{} for {} is {} successfully!".format(config_str, commit_msg, gen_str))
else:
print("{} for {} is failed".format(config_str, commit_msg))
return err_dic
if __name__ == '__main__':
ARGS = sys.argv
err_dic = main(ARGS)
if err_dic:
for err_k, err_v in err_dic.items():
launch_cfg_lib.print_red("{}: {}".format(err_k, err_v), err=True)
| 34.98419
| 128
| 0.694837
|
d3f5133cd5d51e549b2a1fefc2bb55ca00c84b2e
| 7,597
|
py
|
Python
|
sdk/avalon_sdk/ethereum/ethereum_work_order.py
|
manojsalunke85/avalon
|
99fc49ac215ac3dfcfb0547f8abebc0b131dfad1
|
[
"Apache-2.0"
] | null | null | null |
sdk/avalon_sdk/ethereum/ethereum_work_order.py
|
manojsalunke85/avalon
|
99fc49ac215ac3dfcfb0547f8abebc0b131dfad1
|
[
"Apache-2.0"
] | null | null | null |
sdk/avalon_sdk/ethereum/ethereum_work_order.py
|
manojsalunke85/avalon
|
99fc49ac215ac3dfcfb0547f8abebc0b131dfad1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import logging
import json
from os import environ
from utility.hex_utils import is_valid_hex_str
from avalon_sdk.worker.worker_details import WorkerStatus, WorkerType
from avalon_sdk.ethereum.ethereum_wrapper import EthereumWrapper
from avalon_sdk.interfaces.work_order_proxy \
import WorkOrderProxy
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO)
# Return codes
SUCCESS = 0
ERROR = 1
class EthereumWorkOrderProxyImpl(WorkOrderProxy):
"""
This class is meant to write work-order related data to Ethereum
blockchain. Detailed method description is available in the interface
"""
def __init__(self, config):
if self.__validate(config) is True:
self.__initialize(config)
else:
raise Exception("Invalid configuration parameter")
def __validate(self, config):
"""
Validates config parameters for existence.
Returns false if validation fails and true if it succeeds
"""
if config["ethereum"]["proxy_work_order_contract_file"] is None:
logging.error("Missing work order contract file path!!")
return False
if config["ethereum"]["proxy_work_order_contract_address"] is None:
logging.error("Missing work order contract address!!")
return False
return True
def __initialize(self, config):
"""
Initialize the parameters from config to instance variables.
"""
self.__eth_client = EthereumWrapper(config)
tcf_home = environ.get("TCF_HOME", "../../../")
contract_file_name = tcf_home + "/" + \
config["ethereum"]["proxy_work_order_contract_file"]
contract_address = \
config["ethereum"]["proxy_work_order_contract_address"]
self.__contract_instance = self.__eth_client.get_contract_instance(
contract_file_name, contract_address
)
def _is_valid_work_order_json(self, work_order_id, worker_id, requester_id,
work_order_request):
"""
Validate following fields in JSON request against the ones
provided outside the JSON - workOrderId, workerId, requesterId
"""
        json_request = json.loads(work_order_request)
if (work_order_id == json_request.get("workOrderId")
and worker_id == json_request.get("workerId")
and requester_id == json_request.get("requesterId")):
return True
else:
return False
def work_order_submit(self, work_order_id, worker_id, requester_id,
work_order_request, id=None):
"""
Submit work order request
work_order_id is a unique id to identify the work order request
worker_id is the identifier for the worker
requester_id is a unique id to identify the requester
work_order_request is a json string(Complete definition at
work_order.py interface file)
Returns
An error code, 0 - success, otherwise an error.
"""
if (self.__contract_instance is not None):
if not is_valid_hex_str(
binascii.hexlify(work_order_id).decode("utf8")):
logging.error("Invalid work order id {}".format(work_order_id))
return ERROR
if not is_valid_hex_str(
binascii.hexlify(worker_id).decode("utf8")):
logging.error("Invalid worker id {}".format(worker_id))
return ERROR
if not is_valid_hex_str(
binascii.hexlify(requester_id).decode("utf8")):
logging.error("Invalid requester id {}".format(requester_id))
return ERROR
            if not self._is_valid_work_order_json(work_order_id, worker_id,
                                                  requester_id, work_order_request):
logging.error("Invalid request string {}"
.format(work_order_request))
return ERROR
txn_dict = self.__contract_instance.functions.workOrderSubmit(
work_order_id, worker_id, requester_id, work_order_request
).buildTransaction(
self.__eth_client.get_transaction_params()
)
try:
txn_receipt = self.__eth_client.execute_transaction(txn_dict)
return SUCCESS
            except Exception as e:
                logging.error(
                    "exception occurred when trying to execute workOrderSubmit"
                    " transaction on chain: " + str(e))
return ERROR
else:
logging.error(
"work order contract instance is not initialized")
return ERROR
def work_order_complete(self, work_order_id, work_order_response):
"""
This function is called by the Worker Service to
complete a Work Order successfully or in error.
This API is for proxy model.
params
work_order_id is unique id to identify the work order request
work_order_response is the Work Order response data in string
Returns
An error code, 0 - success, otherwise an error.
"""
if (self.__contract_instance is not None):
if not is_valid_hex_str(
binascii.hexlify(work_order_id).decode("utf8")):
logging.error("Invalid work order id {}".format(work_order_id))
return ERROR
txn_dict = self.__contract_instance.functions.workOrderComplete(
work_order_id, work_order_response).buildTransaction(
self.__eth_client.get_transaction_params()
)
try:
txn_receipt = self.__eth_client.execute_transaction(txn_dict)
return SUCCESS
            except Exception as e:
                logging.error(
                    "exception occurred when trying to execute workOrderComplete"
                    " transaction on chain: " + str(e))
return ERROR
else:
logging.error(
"work order contract instance is not initialized")
return ERROR
def encryption_key_retrieve(self, worker_id, last_used_key_nonce, tag,
requester_id, signature_nonce=None,
signature=None, id=None):
"""
Get Encryption Key Request Payload
"""
pass
def encryption_key_start(self, tag, id=None):
"""
Function to inform the Worker that it should start
encryption key generation for this requester.
"""
pass
def encryption_key_set(self, worker_id, encryption_key,
encryption_nonce, tag, signature, id=None):
"""
Set Encryption Key Request Payload
"""
pass
| 38.958974
| 79
| 0.618139
|
faf6ae3f13cb5e77b5e7460191445a9023fdc5a7
| 8,613
|
py
|
Python
|
chaser/chaser.py
|
ccr-tools/chaser
|
75657f765c62ce98f80882d56a65417e427fd73c
|
[
"BSD-3-Clause"
] | 2
|
2016-02-19T21:58:24.000Z
|
2016-07-10T12:06:44.000Z
|
chaser/chaser.py
|
ccr-tools/chaser
|
75657f765c62ce98f80882d56a65417e427fd73c
|
[
"BSD-3-Clause"
] | 10
|
2016-02-19T21:57:44.000Z
|
2019-03-05T23:37:29.000Z
|
chaser/chaser.py
|
ccr-tools/chaser
|
75657f765c62ce98f80882d56a65417e427fd73c
|
[
"BSD-3-Clause"
] | 1
|
2019-05-11T12:02:21.000Z
|
2019-05-11T12:02:21.000Z
|
import io
import tarfile
import subprocess
import json
import re
import os
import termcolor
import progressbar
from pkg_resources import parse_version
import requests
from toposort import toposort_flatten
import ccr
from chaser import pacman, prompt
BUILD_DIR = "/tmp/chaser"
def get_source_files(args, workingdir=None):
"""Download the source tarball and extract it, workingdir defaults to BUILD_DIR"""
try:
pkgnames = args.package
workingdir = args.build_dir or BUILD_DIR
except AttributeError:
pkgnames = args
workingdir = workingdir or BUILD_DIR
if os.getuid() == 0:
print('Downloading sources as root is not allowed. Exiting.')
return
if not os.path.exists(workingdir):
os.mkdir(workingdir)
for pkgname in pkgnames:
print('Downloading %s' % pkgname)
r = requests.get(ccr.pkg_url(pkgname))
r.raise_for_status()
try:
tar = tarfile.open(mode='r', fileobj=io.BytesIO(r.content))
tar.extractall(workingdir)
except tarfile.ReadError as re:
print('Unable to open the tar file. Either the package does not '
'exist in CCR or it is malformed: %s' % str(re))
def recurse_depends(pkgname, workingdir=None, graph=None):
"""Build a dependency graph"""
if workingdir is None:
workingdir = BUILD_DIR
if graph is None:
graph = {}
if graph.get(pkgname) is not None:
# End case: already traversed
return graph
elif pacman.exists(pkgname):
# End case: exists in pacman
graph[pkgname] = set()
return graph
# Otherwise get dependencies
graph[pkgname] = set()
try:
get_source_files([pkgname], workingdir)
except requests.exceptions.HTTPError:
# Package not found, or other error
return graph
output = subprocess.check_output(["pkgvars.sh",
"{d}/{pkgname}/PKGBUILD".format(d=workingdir, pkgname=pkgname)])
data = json.loads(output.decode())['variables']
# NOTE: We don't differentiate make/depends here, this is an area for
# improvement in the future if someone cares.
depends = data.get('makedepends', []) + data.get('depends', [])
# Only depends that are not already installed
for dep in depends:
depname = re.split('[>=<]', dep)[0]
if not pacman.exists(depname) and not pacman.is_installed(depname):
graph[pkgname].add(depname)
for dep in graph[pkgname]:
recurse_depends(dep, workingdir, graph)
return graph
def dependency_chain(pkgname, workingdir=None):
"""Return an ordered list of dependencies for a package"""
depends = recurse_depends(pkgname, workingdir)
return toposort_flatten(depends)
def print_targets(packages):
"""Formatted print"""
print()
print(termcolor.colored(_("Targets"), attrs=['bold']) + \
termcolor.colored(" ({num}) ".format(num=len(packages)), attrs=['bold']) + \
"{packages}".format(num=len(packages), packages=' '.join(['-'.join(p) for p in packages if type(p) == tuple] or packages)))
print()
def install(args):
"""Install a given package"""
try:
pkgnames = args.package
workingdir = args.build_dir or BUILD_DIR
except AttributeError:
pkgnames = args
workingdir = BUILD_DIR
if os.getuid() == 0:
print('Installations as root are not allowed. Exiting.')
return
print(_("resolving dependencies..."))
editor = os.getenv('EDITOR') or 'vim'
# Check packages, to fail early
packages = []
for pkgname in pkgnames:
try:
# Make sure the package exists
ccr.info(pkgname)
except ccr.PackageNotFound:
print(_("Package not found: {pkg}").format(pkg=pkgname))
return 1
packages += dependency_chain(pkgname, workingdir)
# Check dependencies
for pkgname in packages:
try:
# Make sure the package exists
ccr.info(pkgname)
except ccr.PackageNotFound:
print(_("Package not found: {pkg}").format(pkg=pkgname))
return 1
print_targets(packages)
response = prompt.prompt(_("Proceed with installation?"), major=True)
if response == prompt.NO:
return 0
for package in packages:
try:
get_source_files([package], workingdir)
except (requests.exceptions.HTTPError, tarfile.ReadError):
print(_("Package not found: {pkg}").format(pkg=package))
return 1
# Ask to edit the PKGBUILD
response = prompt.prompt(_("Edit {pkg} PKGBUILD with $EDITOR?").format(pkg=package), color='yellow')
if response == prompt.YES:
subprocess.call([editor, "{d}/{pkg}/PKGBUILD".format(d=workingdir, pkg=package)])
# Ask to edit the .install, if it exists
if os.path.isfile("{d}/{pkg}/{pkg}.install".format(d=workingdir, pkg=package)):
response = prompt.prompt(_("Edit {pkg}.install with $EDITOR?").format(pkg=package), color='yellow')
if response == prompt.YES:
subprocess.call([editor, "{d}/{pkg}/{pkg}.install".format(d=workingdir, pkg=package)])
# makepkg
curdir = os.getcwd()
os.chdir(os.path.join(workingdir, package))
try:
subprocess.check_call(["makepkg", "-rsi"])
except subprocess.CalledProcessError:
return 1
os.chdir(curdir)
def check_updates(args=None):
"""Return list of (name, ver) tuples for packages with updates available"""
installed = pacman.list_unofficial()
updates = []
with progressbar.ProgressBar(max_value=len(installed)) as bar:
for i, pkg in enumerate(installed):
pkgname, curver = pkg
try:
data = ccr.info(pkgname)
except ccr.PackageNotFound:
continue
newver = data.get('Version', '0-0')
if parse_version(newver) > parse_version(curver):
updates.append((pkgname, newver))
bar.update(i)
return updates
def list_updates(args=None):
"""List currently installed unofficial packages in `name ver` format"""
for name, ver in check_updates():
print(name, ver)
def update(args):
"""Install updates"""
if os.getuid() == 0:
print('Updates as root are not allowed. Exiting.')
return
print(termcolor.colored(":: ", 'blue', attrs=['bold']) + \
termcolor.colored(_("Checking for updates..."), attrs=['bold']))
updates = check_updates()
if updates:
return install(p for p, v in updates)
else:
print(_("there is nothing to do"))
return 0
def search(args):
"""Print search results"""
try:
query = args.query
except AttributeError:
query = args
repo_results = pacman.search(query)
if repo_results:
for line in repo_results:
print(line)
results = ccr.search(query)
if results == "No results found":
return
results.sort(key=lambda x: x.Name)
for pkg in results:
print(''.join([
termcolor.colored("ccr/", color='magenta', attrs=['bold']),
termcolor.colored(pkg.Name, attrs=['bold']), ' ',
termcolor.colored(pkg.Version, color='green', attrs=['bold'])]))
print(" {desc}".format(desc=pkg.Description))
def info(args):
"""Print package info"""
try:
package = args.package
except AttributeError:
package = args
try:
results = ccr.info(package)
except ccr.PackageNotFound:
print("Package not found")
return 1
print(''.join([
termcolor.colored(_("Name : "), attrs=['bold']), results.Name, '\n',
termcolor.colored(_("Version : "), attrs=['bold']), results.Version, '\n',
termcolor.colored(_("URL : "), attrs=['bold']), results.URL, '\n',
termcolor.colored(_("Licenses : "), attrs=['bold']), results.License, '\n',
termcolor.colored(_("Category : "), attrs=['bold']), results.Category, '\n',
termcolor.colored(_("Votes : "), attrs=['bold']), str(results.NumVotes), '\n',
termcolor.colored(_("Maintainer : "), attrs=['bold']), results.Maintainer, '\n',
termcolor.colored(_("OutOfDate : "), attrs=['bold']), "{val}".format(val=True if results.OutOfDate == '1' else False), '\n',
termcolor.colored(_("Description : "), attrs=['bold']), results.Description,
]))
| 34.178571
| 137
| 0.607918
|
507fff9e330aee73d66dd446bb7170d7aacd5066
| 1,031
|
py
|
Python
|
bandstour_geojson_export/testcreate_geojson_polygon_from_viz_json.py
|
ungentilgarcon/cesiumviz
|
dd4c2bccde05e4f1272248cc0cedc075cdbeed96
|
[
"Apache-2.0"
] | null | null | null |
bandstour_geojson_export/testcreate_geojson_polygon_from_viz_json.py
|
ungentilgarcon/cesiumviz
|
dd4c2bccde05e4f1272248cc0cedc075cdbeed96
|
[
"Apache-2.0"
] | null | null | null |
bandstour_geojson_export/testcreate_geojson_polygon_from_viz_json.py
|
ungentilgarcon/cesiumviz
|
dd4c2bccde05e4f1272248cc0cedc075cdbeed96
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import os
from geojson import Feature, Point, FeatureCollection, Polygon
'''// open the viz points file and use geojson to turn it into a GeoJSON
import json'''
with open('exportbands_all_venues-1000.geojson','a+') as dataout_file:
f = open('exportbands_all_venues-1000.json');
data = json.loads(f.read())
f.close()
for row in data:
my_poly = Polygon([[(row['longitude']-float(row['count'])/1000,
row['latitude']+float(row['count'])/1000),(row['longitude']-float(row['count'])/1000,
row['latitude']-float(row['count'])/1000),(row['longitude']+float(row['count'])/1000,
row['latitude']-float(row['count'])/1000),(row['longitude']+float(row['count'])/1000,
row['latitude']+float(row['count'])/1000),(row['longitude']-float(row['count'])/1000,
row['latitude']+float(row['count'])/1000)]])
geojs = Feature(geometry=my_poly,properties={"type": "Salle"})
        print(my_poly)
        print(geojs)
geocoll = FeatureCollection([geojs])
json.dump (geocoll,dataout_file)
| 36.821429
| 91
| 0.678952
|
71be640577dcd40d1ab64c4a3428a084dedb2c32
| 1,511
|
py
|
Python
|
model/yolo4.py
|
inacmor/mobiledets-yolov4-pytorch
|
db285fa061e997032a77fa1ee8c954f2eba2a84d
|
[
"MIT"
] | 1
|
2021-11-04T07:33:56.000Z
|
2021-11-04T07:33:56.000Z
|
model/yolo4.py
|
inacmor/mobiledets-yolov4-pytorch
|
db285fa061e997032a77fa1ee8c954f2eba2a84d
|
[
"MIT"
] | null | null | null |
model/yolo4.py
|
inacmor/mobiledets-yolov4-pytorch
|
db285fa061e997032a77fa1ee8c954f2eba2a84d
|
[
"MIT"
] | null | null | null |
#encoding=utf-8
'''
@Time : 2020/11/30 08:30
@Author : Inacmor
@File : yolo4.py
@Notice :
@Modificattion :
@Author :
@Time :
@Detail :
'''
import torch.nn as nn
import torch
from model.net.head import YOLO_head
from model.net.body import YOLOBODY
import time
class YOLO4(nn.Module):
def __init__(self,
batch_size,
num_classes,
num_bbparas,
anchors,
stride,
freeze=False,
inference=False):
super(YOLO4, self).__init__()
self.inference = inference
self.yolobody = YOLOBODY(in_channels=256, anchors=len(anchors) * 3, num_bbparas=num_bbparas, num_classes=num_classes, freeze=freeze)
self.yolo_1 = YOLO_head(batch_size, num_classes, anchors[2], stride[2], num_bbparas, inference)
self.yolo_2 = YOLO_head(batch_size, num_classes, anchors[1], stride[1], num_bbparas, inference)
self.yolo_3 = YOLO_head(batch_size, num_classes, anchors[0], stride[0], num_bbparas, inference)
def forward(self, input):
output = self.yolobody(input)
feat1, yolo_1 = self.yolo_1(output[0])
feat2, yolo_2 = self.yolo_2(output[1])
feat3, yolo_3 = self.yolo_3(output[2])
if self.inference:
return [feat1, feat2, feat3], torch.cat([yolo_1, yolo_2, yolo_3], dim=0)
else:
return [feat1, feat2, feat3], [yolo_1, yolo_2, yolo_3]
| 26.508772
| 140
| 0.595632
|
23f3c59ded1ced6435bd7b444b3e44a9d2205e9b
| 17,847
|
py
|
Python
|
tests/postgres_tests/test_aggregates.py
|
robgolding/django
|
1d0bab0bfd77edcf1228d45bf654457a8ff1890d
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 13
|
2015-04-17T02:12:15.000Z
|
2021-12-08T19:45:36.000Z
|
tests/postgres_tests/test_aggregates.py
|
robgolding/django
|
1d0bab0bfd77edcf1228d45bf654457a8ff1890d
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 6
|
2019-08-06T06:45:19.000Z
|
2022-01-27T14:18:08.000Z
|
tests/postgres_tests/test_aggregates.py
|
robgolding/django
|
1d0bab0bfd77edcf1228d45bf654457a8ff1890d
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 3
|
2019-09-26T17:58:34.000Z
|
2022-01-26T15:32:14.000Z
|
import json
from django.db.models.expressions import F, Value
from django.db.models.functions import Concat, Substr
from django.test.utils import Approximate
from . import PostgreSQLTestCase
from .models import AggregateTestModel, StatTestModel
try:
from django.contrib.postgres.aggregates import (
ArrayAgg, BitAnd, BitOr, BoolAnd, BoolOr, Corr, CovarPop, JSONBAgg,
RegrAvgX, RegrAvgY, RegrCount, RegrIntercept, RegrR2, RegrSlope,
RegrSXX, RegrSXY, RegrSYY, StatAggregate, StringAgg,
)
except ImportError:
pass # psycopg2 is not installed
class TestGeneralAggregate(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
AggregateTestModel.objects.create(boolean_field=True, char_field='Foo1', integer_field=0)
AggregateTestModel.objects.create(boolean_field=False, char_field='Foo2', integer_field=1)
AggregateTestModel.objects.create(boolean_field=False, char_field='Foo4', integer_field=2)
AggregateTestModel.objects.create(boolean_field=True, char_field='Foo3', integer_field=0)
def test_array_agg_charfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
self.assertEqual(values, {'arrayagg': ['Foo1', 'Foo2', 'Foo4', 'Foo3']})
def test_array_agg_charfield_ordering(self):
ordering_test_cases = (
(F('char_field').desc(), ['Foo4', 'Foo3', 'Foo2', 'Foo1']),
(F('char_field').asc(), ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
(F('char_field'), ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
([F('boolean_field'), F('char_field').desc()], ['Foo4', 'Foo2', 'Foo3', 'Foo1']),
((F('boolean_field'), F('char_field').desc()), ['Foo4', 'Foo2', 'Foo3', 'Foo1']),
('char_field', ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
('-char_field', ['Foo4', 'Foo3', 'Foo2', 'Foo1']),
(Concat('char_field', Value('@')), ['Foo1', 'Foo2', 'Foo3', 'Foo4']),
(Concat('char_field', Value('@')).desc(), ['Foo4', 'Foo3', 'Foo2', 'Foo1']),
(
(Substr('char_field', 1, 1), F('integer_field'), Substr('char_field', 4, 1).desc()),
['Foo3', 'Foo1', 'Foo2', 'Foo4'],
),
)
for ordering, expected_output in ordering_test_cases:
with self.subTest(ordering=ordering, expected_output=expected_output):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg('char_field', ordering=ordering)
)
self.assertEqual(values, {'arrayagg': expected_output})
def test_array_agg_integerfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
self.assertEqual(values, {'arrayagg': [0, 1, 2, 0]})
def test_array_agg_integerfield_ordering(self):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg('integer_field', ordering=F('integer_field').desc())
)
self.assertEqual(values, {'arrayagg': [2, 1, 0, 0]})
def test_array_agg_booleanfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
self.assertEqual(values, {'arrayagg': [True, False, False, True]})
def test_array_agg_booleanfield_ordering(self):
ordering_test_cases = (
(F('boolean_field').asc(), [False, False, True, True]),
(F('boolean_field').desc(), [True, True, False, False]),
(F('boolean_field'), [False, False, True, True]),
)
for ordering, expected_output in ordering_test_cases:
with self.subTest(ordering=ordering, expected_output=expected_output):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg('boolean_field', ordering=ordering)
)
self.assertEqual(values, {'arrayagg': expected_output})
def test_array_agg_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
self.assertEqual(values, {'arrayagg': []})
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
self.assertEqual(values, {'arrayagg': []})
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
self.assertEqual(values, {'arrayagg': []})
def test_array_agg_lookups(self):
aggr1 = AggregateTestModel.objects.create()
aggr2 = AggregateTestModel.objects.create()
StatTestModel.objects.create(related_field=aggr1, int1=1, int2=0)
StatTestModel.objects.create(related_field=aggr1, int1=2, int2=0)
StatTestModel.objects.create(related_field=aggr2, int1=3, int2=0)
StatTestModel.objects.create(related_field=aggr2, int1=4, int2=0)
qs = StatTestModel.objects.values('related_field').annotate(
array=ArrayAgg('int1')
).filter(array__overlap=[2]).values_list('array', flat=True)
self.assertCountEqual(qs.get(), [1, 2])
def test_bit_and_general(self):
values = AggregateTestModel.objects.filter(
integer_field__in=[0, 1]).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 0})
def test_bit_and_on_only_true_values(self):
values = AggregateTestModel.objects.filter(
integer_field=1).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 1})
def test_bit_and_on_only_false_values(self):
values = AggregateTestModel.objects.filter(
integer_field=0).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 0})
def test_bit_and_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': None})
def test_bit_or_general(self):
values = AggregateTestModel.objects.filter(
integer_field__in=[0, 1]).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 1})
def test_bit_or_on_only_true_values(self):
values = AggregateTestModel.objects.filter(
integer_field=1).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 1})
def test_bit_or_on_only_false_values(self):
values = AggregateTestModel.objects.filter(
integer_field=0).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 0})
def test_bit_or_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': None})
def test_bool_and_general(self):
values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
self.assertEqual(values, {'booland': False})
def test_bool_and_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
self.assertEqual(values, {'booland': None})
def test_bool_or_general(self):
values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
self.assertEqual(values, {'boolor': True})
def test_bool_or_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
self.assertEqual(values, {'boolor': None})
def test_string_agg_requires_delimiter(self):
with self.assertRaises(TypeError):
AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field'))
def test_string_agg_charfield(self):
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
self.assertEqual(values, {'stringagg': 'Foo1;Foo2;Foo4;Foo3'})
def test_string_agg_charfield_ordering(self):
ordering_test_cases = (
(F('char_field').desc(), 'Foo4;Foo3;Foo2;Foo1'),
(F('char_field').asc(), 'Foo1;Foo2;Foo3;Foo4'),
(F('char_field'), 'Foo1;Foo2;Foo3;Foo4'),
('char_field', 'Foo1;Foo2;Foo3;Foo4'),
('-char_field', 'Foo4;Foo3;Foo2;Foo1'),
(Concat('char_field', Value('@')), 'Foo1;Foo2;Foo3;Foo4'),
(Concat('char_field', Value('@')).desc(), 'Foo4;Foo3;Foo2;Foo1'),
)
for ordering, expected_output in ordering_test_cases:
with self.subTest(ordering=ordering, expected_output=expected_output):
values = AggregateTestModel.objects.aggregate(
stringagg=StringAgg('char_field', delimiter=';', ordering=ordering)
)
self.assertEqual(values, {'stringagg': expected_output})
def test_string_agg_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
self.assertEqual(values, {'stringagg': ''})
def test_orderable_agg_alternative_fields(self):
values = AggregateTestModel.objects.aggregate(
arrayagg=ArrayAgg('integer_field', ordering=F('char_field').asc())
)
self.assertEqual(values, {'arrayagg': [0, 1, 0, 2]})
def test_json_agg(self):
values = AggregateTestModel.objects.aggregate(jsonagg=JSONBAgg('char_field'))
self.assertEqual(values, {'jsonagg': ['Foo1', 'Foo2', 'Foo4', 'Foo3']})
def test_json_agg_empty(self):
values = AggregateTestModel.objects.none().aggregate(jsonagg=JSONBAgg('integer_field'))
self.assertEqual(values, json.loads('{"jsonagg": []}'))
class TestAggregateDistinct(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
AggregateTestModel.objects.create(char_field='Foo')
AggregateTestModel.objects.create(char_field='Foo')
AggregateTestModel.objects.create(char_field='Bar')
def test_string_agg_distinct_false(self):
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=' ', distinct=False))
self.assertEqual(values['stringagg'].count('Foo'), 2)
self.assertEqual(values['stringagg'].count('Bar'), 1)
def test_string_agg_distinct_true(self):
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=' ', distinct=True))
self.assertEqual(values['stringagg'].count('Foo'), 1)
self.assertEqual(values['stringagg'].count('Bar'), 1)
def test_array_agg_distinct_false(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field', distinct=False))
self.assertEqual(sorted(values['arrayagg']), ['Bar', 'Foo', 'Foo'])
def test_array_agg_distinct_true(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field', distinct=True))
self.assertEqual(sorted(values['arrayagg']), ['Bar', 'Foo'])
class TestStatisticsAggregate(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
StatTestModel.objects.create(
int1=1,
int2=3,
related_field=AggregateTestModel.objects.create(integer_field=0),
)
StatTestModel.objects.create(
int1=2,
int2=2,
related_field=AggregateTestModel.objects.create(integer_field=1),
)
StatTestModel.objects.create(
int1=3,
int2=1,
related_field=AggregateTestModel.objects.create(integer_field=2),
)
# Tests for base class (StatAggregate)
def test_missing_arguments_raises_exception(self):
with self.assertRaisesMessage(ValueError, 'Both y and x must be provided.'):
StatAggregate(x=None, y=None)
def test_correct_source_expressions(self):
func = StatAggregate(x='test', y=13)
self.assertIsInstance(func.source_expressions[0], Value)
self.assertIsInstance(func.source_expressions[1], F)
def test_alias_is_required(self):
class SomeFunc(StatAggregate):
function = 'TEST'
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
StatTestModel.objects.aggregate(SomeFunc(y='int2', x='int1'))
# Test aggregates
def test_corr_general(self):
values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
self.assertEqual(values, {'corr': -1.0})
def test_corr_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
self.assertEqual(values, {'corr': None})
def test_covar_pop_general(self):
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
self.assertEqual(values, {'covarpop': Approximate(-0.66, places=1)})
def test_covar_pop_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
self.assertEqual(values, {'covarpop': None})
def test_covar_pop_sample(self):
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
self.assertEqual(values, {'covarpop': -1.0})
def test_covar_pop_sample_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
self.assertEqual(values, {'covarpop': None})
def test_regr_avgx_general(self):
values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
self.assertEqual(values, {'regravgx': 2.0})
def test_regr_avgx_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
self.assertEqual(values, {'regravgx': None})
def test_regr_avgy_general(self):
values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
self.assertEqual(values, {'regravgy': 2.0})
def test_regr_avgy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
self.assertEqual(values, {'regravgy': None})
def test_regr_count_general(self):
values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
self.assertEqual(values, {'regrcount': 3})
def test_regr_count_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
self.assertEqual(values, {'regrcount': 0})
def test_regr_intercept_general(self):
values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
self.assertEqual(values, {'regrintercept': 4})
def test_regr_intercept_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
self.assertEqual(values, {'regrintercept': None})
def test_regr_r2_general(self):
values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
self.assertEqual(values, {'regrr2': 1})
def test_regr_r2_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
self.assertEqual(values, {'regrr2': None})
def test_regr_slope_general(self):
values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
self.assertEqual(values, {'regrslope': -1})
def test_regr_slope_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
self.assertEqual(values, {'regrslope': None})
def test_regr_sxx_general(self):
values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
self.assertEqual(values, {'regrsxx': 2.0})
def test_regr_sxx_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
self.assertEqual(values, {'regrsxx': None})
def test_regr_sxy_general(self):
values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
self.assertEqual(values, {'regrsxy': -2.0})
def test_regr_sxy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
self.assertEqual(values, {'regrsxy': None})
def test_regr_syy_general(self):
values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
self.assertEqual(values, {'regrsyy': 2.0})
def test_regr_syy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
self.assertEqual(values, {'regrsyy': None})
def test_regr_avgx_with_related_obj_and_number_as_argument(self):
"""
This is more complex test to check if JOIN on field and
number as argument works as expected.
"""
values = StatTestModel.objects.aggregate(complex_regravgx=RegrAvgX(y=5, x='related_field__integer_field'))
self.assertEqual(values, {'complex_regravgx': 1.0})
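# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Django's test suite): the same aggregates
# can also be applied per group via values()/annotate() instead of a global
# aggregate(). The query below reuses the test models defined above but is an
# assumption added for illustration, not an existing test:
#
#   StatTestModel.objects.values('related_field').annotate(
#       ints=ArrayAgg('int1', ordering='int1'),
#   )
# ---------------------------------------------------------------------------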
| 46.355844
| 119
| 0.668348
|
a13c5824c0efd94e84eb285124015363fc02f7af
| 9,083
|
py
|
Python
|
aix360/algorithms/rbm/boolean_rule_cg.py
|
gdequeiroz/AIX360
|
e0da163f1e8ced971b92b071856e581d666d2e75
|
[
"Apache-2.0"
] | null | null | null |
aix360/algorithms/rbm/boolean_rule_cg.py
|
gdequeiroz/AIX360
|
e0da163f1e8ced971b92b071856e581d666d2e75
|
[
"Apache-2.0"
] | null | null | null |
aix360/algorithms/rbm/boolean_rule_cg.py
|
gdequeiroz/AIX360
|
e0da163f1e8ced971b92b071856e581d666d2e75
|
[
"Apache-2.0"
] | null | null | null |
import os
import numpy as np
import pandas as pd
import cvxpy as cvx
from sklearn.metrics import confusion_matrix, accuracy_score
from .beam_search import beam_search, beam_search_K1
class BooleanRuleCG(object):
"""BooleanRuleCG is a directly interpretable supervised learning method
for binary classification that learns a Boolean rule in disjunctive
normal form (DNF) or conjunctive normal form (CNF) using column generation (CG).
"""
def __init__(self,
lambda0=0.001,
lambda1=0.001,
CNF=False,
iterMax=100,
K=10,
D=10,
B=5,
eps=1e-6,
solver='ECOS',
verbose=False,
silent=False):
"""
Args:
lambda0 (float, optional): Complexity - fixed cost of each clause
lambda1 (float, optional): Complexity - additional cost for each literal
CNF (bool, optional): CNF instead of DNF
iterMax (int, optional): Column generation - maximum number of iterations
K (int, optional): Column generation - maximum number of columns generated per iteration
D (int, optional): Column generation - maximum degree
B (int, optional): Column generation - beam search width
eps (float, optional): Numerical tolerance on comparisons
solver (str, optional): Linear programming - solver
verbose (bool, optional): Linear programming - verboseness
silent (bool, optional): Silence overall algorithm messages
"""
# Complexity parameters
self.lambda0 = lambda0 # fixed cost of each clause
self.lambda1 = lambda1 # additional cost per literal
# CNF instead of DNF
self.CNF = CNF
# Column generation parameters
self.iterMax = iterMax # maximum number of iterations
self.K = K # maximum number of columns generated per iteration
self.D = D # maximum degree
self.B = B # beam search width
# Numerical tolerance on comparisons
self.eps = eps
# Linear programming parameters
self.solver = solver # solver
self.verbose = verbose # verboseness
# Silence output
self.silent = silent
def fit(self, X, y):
"""Fit model to training data.
Args:
X (DataFrame): Binarized features with MultiIndex column labels
y (array): Binary-valued target variable
Returns:
BooleanRuleCG: Self
"""
if not self.silent:
print('Learning {} rule with complexity parameters lambda0={}, lambda1={}'\
.format('CNF' if self.CNF else 'DNF', self.lambda0, self.lambda1))
if self.CNF:
# Flip labels for CNF
y = 1 - y
# Positive (y = 1) and negative (y = 0) samples
P = np.where(y > 0.5)[0]
Z = np.where(y < 0.5)[0]
nP = len(P)
n = len(y)
# Initialize with empty and singleton conjunctions, i.e. X plus all-ones feature
# Feature indicator and conjunction matrices
z = pd.DataFrame(np.eye(X.shape[1], X.shape[1]+1, 1, dtype=int), index=X.columns)
A = np.hstack((np.ones((X.shape[0],1), dtype=int), X))
# Iteration counter
self.it = 0
# Formulate master LP
# Variables
w = cvx.Variable(A.shape[1], nonneg=True)
xi = cvx.Variable(nP, nonneg=True)
# Objective function (no penalty on empty conjunction)
cs = self.lambda0 + self.lambda1 * z.sum().values
cs[0] = 0
obj = cvx.Minimize(cvx.sum(xi) / n + cvx.sum(A[Z,:] * w) / n + cs * w)
# Constraints
constraints = [xi + A[P,:] * w >= 1]
# Solve problem
prob = cvx.Problem(obj, constraints)
prob.solve(solver=self.solver, verbose=self.verbose)
if not self.silent:
print('Initial LP solved')
# Extract dual variables
r = np.ones_like(y, dtype=float) / n
r[P] = -constraints[0].dual_value
# Beam search for conjunctions with negative reduced cost
# Most negative reduced cost among current variables
UB = np.dot(r, A) + cs
UB = min(UB.min(), 0)
v, zNew, Anew = beam_search(r, X, self.lambda0, self.lambda1,
K=self.K, UB=UB, D=self.D, B=self.B, eps=self.eps)
while (v < -self.eps).any() and (self.it < self.iterMax):
# Negative reduced costs found
self.it += 1
if not self.silent:
print('Iteration: {}, Objective: {:.4f}'.format(self.it, prob.value))
# Add to existing conjunctions
z = pd.concat([z, zNew], axis=1, ignore_index=True)
A = np.concatenate((A, Anew), axis=1)
# Reformulate master LP
# Variables
w = cvx.Variable(A.shape[1], nonneg=True)
# Objective function
cs = np.concatenate((cs, self.lambda0 + self.lambda1 * zNew.sum().values))
obj = cvx.Minimize(cvx.sum(xi) / n + cvx.sum(A[Z,:] * w) / n + cs * w)
# Constraints
constraints = [xi + A[P,:] * w >= 1]
# Solve problem
prob = cvx.Problem(obj, constraints)
prob.solve(solver=self.solver, verbose=self.verbose)
# Extract dual variables
r[P] = -constraints[0].dual_value
# Beam search for conjunctions with negative reduced cost
# Most negative reduced cost among current variables
UB = np.dot(r, A) + cs
UB = min(UB.min(), 0)
v, zNew, Anew = beam_search(r, X, self.lambda0, self.lambda1,
K=self.K, UB=UB, D=self.D, B=self.B, eps=self.eps)
# Save generated conjunctions and LP solution
self.z = z
self.wLP = w.value
r = np.full(nP, 1./n)
self.w = beam_search_K1(r, pd.DataFrame(1-A[P,:]), 0, A[Z,:].sum(axis=0) / n + cs,
UB=r.sum(), D=100, B=2*self.B, eps=self.eps, stopEarly=False)[1].values.ravel()
if len(self.w) == 0:
self.w = np.zeros_like(self.wLP, dtype=int)
def compute_conjunctions(self, X):
"""Compute conjunctions of features as specified in self.z.
Args:
X (DataFrame): Binarized features with MultiIndex column labels
Returns:
array: A -- Conjunction values
"""
        try:
            A = 1 - (np.dot(1 - X, self.z) > 0)  # np.dot instead of matmul, which failed on some machines
        except AttributeError:
            print("Attribute 'z' does not exist, please fit model first.")
            # re-raise so the caller sees the real problem instead of an
            # UnboundLocalError on 'A' at the return below
            raise
        return A
def predict(self, X):
"""Predict class labels.
Args:
X (DataFrame): Binarized features with MultiIndex column labels
Returns:
array: y -- Predicted labels
"""
# Compute conjunctions of features
A = self.compute_conjunctions(X)
# Predict labels
if self.CNF:
# Flip labels since model is actually a DNF for Y=0
return 1 - (np.dot(A, self.w) > 0)
else:
return (np.dot(A, self.w) > 0).astype(int)
def explain(self, maxConj=None, prec=2):
"""Return rules comprising the model.
Args:
maxConj (int, optional): Maximum number of conjunctions to show
prec (int, optional): Number of decimal places to show for floating-value thresholds
Returns:
Dictionary containing
* isCNF (bool): flag signaling whether model is CNF or DNF
* rules (list): selected conjunctions formatted as strings
"""
# Selected conjunctions
z = self.z.loc[:, self.w > 0.5]
truncate = (maxConj is not None) and (z.shape[1] > maxConj)
nConj = maxConj if truncate else z.shape[1]
"""
if self.CNF:
print('Predict Y=0 if ANY of the following rules are satisfied, otherwise Y=1:')
else:
print('Predict Y=1 if ANY of the following rules are satisfied, otherwise Y=0:')
"""
# Sort conjunctions by increasing order
idxSort = z.sum().sort_values().index[:nConj]
# Iterate over sorted conjunctions
conj = []
for i in idxSort:
# MultiIndex of features participating in rule i
idxFeat = z.index[z[i] > 0]
# String representations of features
strFeat = idxFeat.get_level_values(0) + ' ' + idxFeat.get_level_values(1)\
+ ' ' + idxFeat.get_level_values(2).to_series()\
.apply(lambda x: ('{:.' + str(prec) + 'f}').format(x) if type(x) is float else str(x))
# String representation of rule
strFeat = strFeat.str.cat(sep=' AND ')
conj.append(strFeat)
return {
'isCNF': self.CNF,
'rules': conj
}
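# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original AIX360 file). X_bin and
# y are assumptions: X_bin must be a binarized DataFrame whose columns carry
# the 3-level MultiIndex expected by explain() (as produced, for example, by
# the package's feature binarizer), and y a binary label array:
#
#   model = BooleanRuleCG(lambda0=0.001, lambda1=0.001, CNF=False)
#   model.fit(X_bin, y)
#   y_hat = model.predict(X_bin)
#   print(model.explain()['rules'])
# ---------------------------------------------------------------------------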
| 39.150862
| 111
| 0.560828
|
2985e862a3191ddcf6431185046c11c79eb7fd1d
| 261
|
py
|
Python
|
Modulo 3/090.py
|
thiago19maciel/Exercicios-em-Python
|
0d46816caf655c6e870510bb1136964854fc875f
|
[
"MIT"
] | 1
|
2022-03-22T22:36:48.000Z
|
2022-03-22T22:36:48.000Z
|
Modulo 3/090.py
|
thiago19maciel/Exercicios-em-Python
|
0d46816caf655c6e870510bb1136964854fc875f
|
[
"MIT"
] | null | null | null |
Modulo 3/090.py
|
thiago19maciel/Exercicios-em-Python
|
0d46816caf655c6e870510bb1136964854fc875f
|
[
"MIT"
] | null | null | null |
dados_aluno = {'Nome': str(input('Nome do aluno: ')), 'Media': float(input('Média: '))}
if dados_aluno['Media'] > 7:
dados_aluno['situação'] = 'Aprovado'
else:
dados_aluno['situação'] = 'Reprovado'
print(f'Situação é igual a {dados_aluno["situação"]} ')
| 43.5
| 87
| 0.659004
|
ae2ee8603fc913332fb504c64f4cddda69f12f65
| 1,101
|
py
|
Python
|
Chapter08/Exercise8.01/bookr/bookr/urls.py
|
lmoshood/The-Django-Workshop
|
52e86a8f93cb38bf70d50e9b8d2c6d7dac416f62
|
[
"MIT"
] | null | null | null |
Chapter08/Exercise8.01/bookr/bookr/urls.py
|
lmoshood/The-Django-Workshop
|
52e86a8f93cb38bf70d50e9b8d2c6d7dac416f62
|
[
"MIT"
] | null | null | null |
Chapter08/Exercise8.01/bookr/bookr/urls.py
|
lmoshood/The-Django-Workshop
|
52e86a8f93cb38bf70d50e9b8d2c6d7dac416f62
|
[
"MIT"
] | 1
|
2020-05-27T13:41:58.000Z
|
2020-05-27T13:41:58.000Z
|
"""bookr URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/dev/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
import reviews.views
urlpatterns = [
path('admin/', admin.site.urls),
path('', reviews.views.index),
path('book-search/', reviews.views.book_search, name='book_search'),
path('', include('reviews.urls'))
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
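# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this file): include('reviews.urls') above
# expects a reviews/urls.py module that defines its own urlpatterns. The
# module below is a hypothetical example of that shape; the view name is an
# assumption, not the actual contents of the reviews app:
#
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('books/', views.book_list, name='book_list'),
#   ]
# ---------------------------------------------------------------------------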
| 34.40625
| 80
| 0.721163
|
03d29a173eab7d1b43e3c4e3fad45ef04a133cbd
| 2,105
|
py
|
Python
|
labler/db/rdmbs/dbman.py
|
thenetcircle/coop-labler
|
43798facfc5488e3c081b3401406a3fb30cb82e3
|
[
"MIT"
] | 1
|
2019-04-15T08:20:19.000Z
|
2019-04-15T08:20:19.000Z
|
labler/db/rdmbs/dbman.py
|
thenetcircle/coop-labler
|
43798facfc5488e3c081b3401406a3fb30cb82e3
|
[
"MIT"
] | 3
|
2021-09-08T00:57:18.000Z
|
2022-03-11T23:45:24.000Z
|
labler/db/rdmbs/dbman.py
|
thenetcircle/coop-labler
|
43798facfc5488e3c081b3401406a3fb30cb82e3
|
[
"MIT"
] | null | null | null |
from gnenv.environ import GNEnvironment
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from labler.config import ConfigKeys
# need to keep these here even if "unused", otherwise create_all(engine) won't find the models
from labler.db.rdmbs.models import *
class Database(object):
def __init__(self, env: GNEnvironment):
self.env = env
self.driver = self.env.config.get(ConfigKeys.DRIVER, domain=ConfigKeys.DATABASE, default='postgres+psycopg2')
self.engine = self.db_connect()
self.create_tables(self.engine)
session_factory = sessionmaker(bind=self.engine)
self.Session = scoped_session(session_factory)
def db_connect(self):
domain = ConfigKeys.DATABASE
params = {
'drivername': self.driver,
}
host = self.env.config.get(ConfigKeys.HOST, default=None, domain=domain)
port = self.env.config.get(ConfigKeys.PORT, default=None, domain=domain)
username = self.env.config.get(ConfigKeys.USER, default=None, domain=domain)
password = self.env.config.get(ConfigKeys.PASSWORD, default=None, domain=domain)
database = self.env.config.get(ConfigKeys.DB, default=None, domain=domain)
pool_size = self.env.config.get(ConfigKeys.POOL_SIZE, default=10, domain=domain)
if host is not None and host != '':
params['host'] = host
if port is not None and port != '':
params['port'] = port
if username is not None and username != '':
params['username'] = username
if password is not None and password != '':
params['password'] = password
if database is not None and database != '':
params['database'] = database
return create_engine(URL(**params), pool_recycle=280, pool_size=pool_size)
def truncate(self):
DeclarativeBase.metadata.drop_all(self.engine)
def create_tables(self, engine):
DeclarativeBase.metadata.create_all(engine)
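# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): Database expects
# a gnenv GNEnvironment whose config provides the ConfigKeys read above. The
# calls below are assumptions about how the surrounding project uses it, not
# code taken from this repository:
#
#   db = Database(env)          # env: a configured GNEnvironment
#   session = db.Session()      # thread-local SQLAlchemy session
#   ...                         # query / add / commit via `session`
#   db.Session.remove()         # release the scoped session when done
# ---------------------------------------------------------------------------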
| 39.716981
| 117
| 0.678385
|
22b84df8530466e1873e7446e1034d2e0f020d10
| 21,553
|
py
|
Python
|
code_python/1.3/corner-extract.py
|
pinxau1000/Computer-Vision
|
c1d8da241dfe39467aaa65777c725fdebffe35cf
|
[
"MIT"
] | null | null | null |
code_python/1.3/corner-extract.py
|
pinxau1000/Computer-Vision
|
c1d8da241dfe39467aaa65777c725fdebffe35cf
|
[
"MIT"
] | null | null | null |
code_python/1.3/corner-extract.py
|
pinxau1000/Computer-Vision
|
c1d8da241dfe39467aaa65777c725fdebffe35cf
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# IMPORTS
# ------------------------------------------------------------------------------
from sys import version_info
from sys import path as syspath
from os import path
import json
_CURRENT_DIRECTORY = syspath[0]
try:
import util
# if you have problems visit:
# https://gist.github.com/pinxau1000/8817d4ef0ed766c78bac8e6feafc8b47
# https://github.com/pinxau1000/
except ModuleNotFoundError:
from urllib import request
print("'util.py' not found on the same folder as this script!")
_url_utilpy = "https://gist.githubusercontent.com/pinxau1000/8817d4ef0ed766c78bac8e6feafc8b47/raw/util.py"
print("Downloading util.py from:\n" + _url_utilpy)
# https://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3
request.urlretrieve(_url_utilpy, "util.py")
print("Downloading finished!")
import util
try:
import cv2 as cv
except ModuleNotFoundError:
util.install("opencv-python")
import cv2 as cv
try:
from matplotlib import pyplot as plt
except ModuleNotFoundError:
util.install("matplotlib")
from matplotlib import pyplot as plt
try:
import numpy as np
except ModuleNotFoundError:
util.install("numpy>=1.19,<1.19.4")
import numpy as np
try:
from packaging import version
except ModuleNotFoundError:
util.install("packaging")
from packaging import version
try:
import click
except ModuleNotFoundError:
util.install("click")
import click
# ------------------------------------------------------------------------------
# REQUIREMENTS CHECK
# ------------------------------------------------------------------------------
assert version_info.major >= 3 and \
version_info.minor >= 5, \
"This script requires Python 3.5.0 or above!"
assert version.parse(cv.__version__).major >= 4 and \
version.parse(cv.__version__).minor >= 4, \
"This script requires OpenCV 4.4.0 or above!"
assert version.parse(plt.matplotlib.__version__).major >= 3 and \
version.parse(plt.matplotlib.__version__).minor >= 3, \
"This script requires MatPlotLib 3.3.0 or above!"
assert version.parse(np.__version__).major >= 1 and \
version.parse(np.__version__).minor >= 19 and \
version.parse(np.__version__).micro < 4, \
"This script requires Numpy version >= 1.19.0 and < 1.19.4 !"
assert version.parse(click.__version__).major >= 7 and \
version.parse(click.__version__).minor >= 1, \
"This script requires Click 7.1.0 or above!"
# ------------------------------------------------------------------------------
# Load Default Pictures
# ------------------------------------------------------------------------------
_PATH_2_DATA = path.join(_CURRENT_DIRECTORY, "../../data/")
_IMG_ORIG_NAME = "img05.jpg"
_IMG_NOISE_NAME = "img05_noise.jpg"
_IMG_HARRIS_NAME = "Harris.jpg"
_FULL_PATH_ORIG = path.join(_PATH_2_DATA, _IMG_ORIG_NAME)
_FULL_PATH_NOISE = path.join(_PATH_2_DATA, _IMG_NOISE_NAME)
_FULL_PATH_HARRIS = path.join(_PATH_2_DATA, _IMG_HARRIS_NAME)
# ------------------------------------------------------------------------------
# Functions
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Harris Corner Detector
# ------------------------------------------------------------------------------
# PASSED
@click.command()
@click.option("--image", prompt="Path",
default=_FULL_PATH_ORIG,
type=str,
help="The path to the image")
@click.option("--bsize",
default=3,
type=int,
help="Neighborhood size")
@click.option("--ksize",
default=3,
type=int,
help="Aperture parameter for the Sobel operator")
@click.option("--k",
default=0.06,
type=float,
help="Harris detector free parameter")
@click.option("--threshold",
default=0.02,
type=float,
help="values above threshold*max(R) are considered corners.")
@click.option("--filter_params",
default=str([0]),
help="Tuple with initial filtering parameters. If None no "
"filter will be applied. If a 2 element tuple the a "
"Gaussian Blur will be applied with ksize=(filter_params["
"0], filter_params[1]).")
@click.option("--save",
default="output_HarrisCornerDetector",
type=str,
help="The save name(s) of the output figure(s)")
@click.option("--dpi",
default=None,
type=int,
help="Quality of the figure window generated. If None its the "
"default 100 dpi.")
@click.option("--num",
default=None,
type=int,
help="Number of the figure window generated. If None its "
"cumulative.")
def harris_detector(image, bsize, ksize, k, threshold, filter_params, save,
dpi, num):
image = util.load_image_RGB(image)
filter_params = json.loads(filter_params)
if len(filter_params) == 2:
print("Applying Gaussian Filter")
image = cv.GaussianBlur(src=image,
ksize=(filter_params[0], filter_params[1]),
sigmaX=0,
sigmaY=0)
gray_image = cv.cvtColor(src=image, code=cv.COLOR_RGB2GRAY)
harris_image = np.copy(image)
mask = cv.cornerHarris(src=gray_image,
blockSize=bsize,
ksize=ksize,
k=k)
harris_image[mask > threshold * np.max(mask)] = [255, 0, 0]
# Convert from Float 64 to Unsigned Int 8
# Also needs to be converted from np.array to list
harris_image = list(np.uint8(np.abs(harris_image)))
# Plots the images.
fig = util.plotImages([image, harris_image],
["Orig Image", "Harris Output"],
show=True,
main_title="Harris Corner Detector - cv.cornerHarris"
f"\nblock size = {bsize}"
f"\nsobel aperture = {ksize}"
f"\nharris param = {k}",
cols=2,
num=num,
dpi=dpi)
# Saves the figure.
if save != "None":
fig.savefig(save)
# Wait for a key press to close figures
input("Press Enter to continue...")
# ------------------------------------------------------------------------------
# Harris Corner Detector Animation
# ------------------------------------------------------------------------------
# PASSED
@click.command()
@click.option("--image", prompt="Path",
default=_FULL_PATH_ORIG,
type=str,
help="The path to the image")
@click.option("--bsizes",
default=str(list(range(1, 16, 2))),
help="Neighborhood size")
@click.option("--ksizes",
default=str(list(range(3, 16, 2))),
help="Aperture parameter for the Sobel operator")
@click.option("--ks",
default=str(list(np.array([-0.08, -0.04, -0.02, 0,
0.02, 0.04, 0.08]))),
help="Harris detector free parameter")
@click.option("--threshold",
default=0.01,
type=float,
help="Values below threshold*max(SobelMagnitude) are set to 0.")
@click.option("--save",
default="output_HarrisCornerDetector_Animation",
type=str,
help="The save name(s) of the output figure(s)")
def harris_detector_animate(image, bsizes, ksizes, ks, threshold, save):
image = util.load_image_RGB(image)
gray_image = cv.cvtColor(src=image, code=cv.COLOR_RGB2GRAY)
bsizes = json.loads(bsizes)
ksizes = json.loads(ksizes)
ks = json.loads(ks)
bsizes = np.rint(bsizes).astype(int)
ksizes = np.rint(ksizes).astype(int)
ks = np.round(ks, 2)
harris_images = []
harris_titles = []
for bsize in bsizes:
for ksize in ksizes:
for k in ks:
# When the number its even add one to ensure ksize is odd
ksize = ksize + 1 if ksize % 2 == 0 else ksize
harris_image = np.copy(image)
mask = cv.cornerHarris(src=gray_image,
blockSize=bsize,
ksize=ksize,
k=k)
harris_image[mask > threshold * mask.max()] = [255, 0, 0]
harris_images.append(harris_image)
harris_titles.append(f"bsize={bsize}, ksize={ksize}, k={k}")
# Convert from Float 64 to Unsigned Int 8
# Also needs to be converted from np.array to list
harris_images = list(np.uint8(np.abs(harris_images)))
util.animateImages(images=harris_images,
titles=harris_titles,
save_name=save,
cmap="gray",
frame_interval=120,
verbose=True)
# ------------------------------------------------------------------------------
# Harris Corner Detector Block Size
# ------------------------------------------------------------------------------
# PASSED
@click.command()
@click.option("--image", prompt="Path",
default=_FULL_PATH_ORIG,
type=str,
help="The path to the image")
@click.option("--bsizes",
default=str(list(range(3, 16, 6))),
help="Neighborhood size")
@click.option("--ksize",
default=5,
type=int,
help="Aperture parameter for the Sobel operator")
@click.option("--k",
default=0.04,
type=float,
help="Harris detector free parameter")
@click.option("--threshold",
default=0.01,
type=float,
help="values above threshold*max(R) are considered corners.")
@click.option("--filter_params",
default=str([0]),
help="Tuple with initial filtering parameters. If None no "
"filter will be applied. If a 2 element tuple the a "
"Gaussian Blur will be applied with ksize=(filter_params["
"0], filter_params[1]).")
@click.option("--save",
default="output_HarrisCornerDetector_BlockSize",
type=str,
help="The save name(s) of the output figure(s)")
@click.option("--dpi",
default=None,
type=int,
help="Quality of the figure window generated. If None its the "
"default 100 dpi.")
@click.option("--num",
default=None,
type=int,
help="Number of the figure window generated. If None its "
"cumulative.")
def harris_detector_bsize(image, bsizes, ksize, k, threshold, filter_params,
save, dpi, num):
image = util.load_image_RGB(image)
bsizes = json.loads(bsizes)
bsizes = np.rint(bsizes).astype(int)
filter_params = json.loads(filter_params)
if len(filter_params) == 2:
print("Applying Gaussian Filter")
image = cv.GaussianBlur(src=image,
ksize=(filter_params[0], filter_params[1]),
sigmaX=0,
sigmaY=0)
gray_image = cv.cvtColor(src=image, code=cv.COLOR_RGB2GRAY)
harris_images_bsize = []
harris_images_titles = []
for bsize in bsizes:
harris_image = np.copy(image)
mask = cv.cornerHarris(src=gray_image,
blockSize=bsize,
ksize=ksize,
k=k)
harris_image[mask > threshold * mask.max()] = [255, 0, 0]
harris_images_bsize.append(harris_image)
harris_images_titles.append(f"block size = {bsize}")
harris_images = harris_images_bsize
harris_images.insert(0, image)
titles_images = harris_images_titles
titles_images.insert(0, "Orig Image")
# Convert from Float 64 to Unsigned Int 8
# Also needs to be converted from np.array to list
harris_images = list(np.uint8(np.abs(harris_images)))
# Plots the images.
fig = util.plotImages(harris_images,
titles_images,
show=True,
main_title="Harris Corner Detectorn - cv.cornerHarris"
f"\nsobel aperture = {ksize}"
f"\nharris parameter = {k}",
cmap="gray",
cols=2,
num=num,
dpi=dpi)
# Saves the figure.
if save != "None":
fig.savefig(save)
# Wait for a key press to close figures
input("Press Enter to continue...")
# ------------------------------------------------------------------------------
# Harris Corner Detector Sobel Aperture
# ------------------------------------------------------------------------------
# PASSED
@click.command()
@click.option("--image", prompt="Path",
default=_FULL_PATH_ORIG,
type=str,
help="The path to the image")
@click.option("--bsize",
default=3,
type=int,
help="Neighborhood size")
@click.option("--ksizes",
default=str(list(range(3, 16, 6))),
help="Aperture parameter for the Sobel operator")
@click.option("--k",
default=0.04,
type=float,
help="Harris detector free parameter")
@click.option("--threshold",
default=0.01,
type=float,
help="values above threshold*max(R) are considered corners.")
@click.option("--filter_params",
default=str([0]),
help="Tuple with initial filtering parameters. If None no "
"filter will be applied. If a 2 element tuple the a "
"Gaussian Blur will be applied with ksize=(filter_params["
"0], filter_params[1]).")
@click.option("--save",
default="output_HarrisCornerDetector_SobelAperture",
type=str,
help="The save name(s) of the output figure(s)")
@click.option("--dpi",
default=None,
type=int,
help="Quality of the figure window generated. If None its the "
"default 100 dpi.")
@click.option("--num",
default=None,
type=int,
help="Number of the figure window generated. If None its "
"cumulative.")
def harris_detector_ksize(image, bsize, ksizes, k, threshold, filter_params,
save, dpi, num):
image = util.load_image_RGB(image)
ksizes = json.loads(ksizes)
ksizes = np.rint(ksizes).astype(int)
filter_params = json.loads(filter_params)
if len(filter_params) == 2:
print("Applying Gaussian Filter")
image = cv.GaussianBlur(src=image,
ksize=(filter_params[0], filter_params[1]),
sigmaX=0,
sigmaY=0)
gray_image = cv.cvtColor(src=image, code=cv.COLOR_RGB2GRAY)
harris_images_ksize = []
harris_images_titles = []
for ksize in ksizes:
harris_image = np.copy(image)
mask = cv.cornerHarris(src=gray_image,
blockSize=bsize,
ksize=ksize,
k=k)
harris_image[mask > threshold * mask.max()] = [255, 0, 0]
harris_images_ksize.append(harris_image)
harris_images_titles.append(f"sobel aperture = {ksize}")
harris_images = harris_images_ksize
harris_images.insert(0, image)
titles_images = harris_images_titles
titles_images.insert(0, "Orig Image")
# Convert from Float 64 to Unsigned Int 8
# Also needs to be converted from np.array to list
harris_images = list(np.uint8(np.abs(harris_images)))
# Plots the images.
fig = util.plotImages(harris_images,
titles_images,
show=True,
main_title="Harris Corner Detector - cv.cornerHarris"
f"\nblock size = {bsize}"
f"\nharris parameter = {k}",
cmap="gray",
cols=2,
num=num,
dpi=dpi)
# Saves the figure.
if save != "None":
fig.savefig(save)
# Wait for a key press to close figures
input("Press Enter to continue...")
# ------------------------------------------------------------------------------
# Harris Corner Detector Harris Parameter
# ------------------------------------------------------------------------------
# PASSED
@click.command()
@click.option("--image", prompt="Path",
default=_FULL_PATH_ORIG,
type=str,
help="The path to the image")
@click.option("--bsize",
default=3,
type=int,
help="Neighborhood size")
@click.option("--ksize",
default=5,
type=int,
help="Aperture parameter for the Sobel operator")
@click.option("--ks",
default=str(list(np.array([-0.08, -0.02, 0, 0.02, 0.08]))),
help="Harris detector free parameter")
@click.option("--threshold",
default=0.01,
type=float,
help="values above threshold*max(R) are considered corners.")
@click.option("--filter_params",
default=str([0]),
help="Tuple with initial filtering parameters. If None no "
"filter will be applied. If a 2 element tuple the a "
"Gaussian Blur will be applied with ksize=(filter_params["
"0], filter_params[1]).")
@click.option("--save",
default="output_HarrisCornerDetector_HarrisParameter",
type=str,
help="The save name(s) of the output figure(s)")
@click.option("--dpi",
default=None,
type=int,
help="Quality of the figure window generated. If None its the "
"default 100 dpi.")
@click.option("--num",
default=None,
type=int,
help="Number of the figure window generated. If None its "
"cumulative.")
def harris_detector_k(image, bsize, ksize, ks, threshold, filter_params,
save, dpi, num):
image = util.load_image_RGB(image)
ks = json.loads(ks)
ks = np.round(ks, 2)
filter_params = json.loads(filter_params)
if len(filter_params) == 2:
print("Applying Gaussian Filter")
image = cv.GaussianBlur(src=image,
ksize=(filter_params[0], filter_params[1]),
sigmaX=0,
sigmaY=0)
gray_image = cv.cvtColor(src=image, code=cv.COLOR_RGB2GRAY)
harris_images_ks = []
harris_images_titles = []
for k in ks:
harris_image = np.copy(image)
mask = cv.cornerHarris(src=gray_image,
blockSize=bsize,
ksize=ksize,
k=k)
harris_image[mask > threshold * np.max(mask)] = [255, 0, 0]
harris_images_ks.append(harris_image)
harris_images_titles.append(f"harris parameter={k}")
harris_images = harris_images_ks
harris_images.insert(0, image)
titles_images = harris_images_titles
titles_images.insert(0, "Orig Image")
# Convert from Float 64 to Unsigned Int 8
# Also needs to be converted from np.array to list
harris_images = list(np.uint8(np.abs(harris_images)))
# Plots the images.
fig = util.plotImages(harris_images,
titles_images,
show=True,
main_title="Harris Corner Detector - cv.cornerHarris"
f"\n block size = {bsize}"
f"\nsobel aperture = {ksize}",
cmap="gray",
cols=3,
num=num,
dpi=dpi)
# Saves the figure.
if save != "None":
fig.savefig(save)
# Wait for a key press to close figures
input("Press Enter to continue...")
# region
@click.group()
def entry_point():
pass
entry_point.add_command(harris_detector)
entry_point.add_command(harris_detector_bsize)
entry_point.add_command(harris_detector_ksize)
entry_point.add_command(harris_detector_k)
entry_point.add_command(harris_detector_animate)
if __name__ == "__main__":
entry_point()
# endregion
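# ------------------------------------------------------------------------------
# Illustrative invocations (not part of the original file). Click derives the
# sub-command names from the function names; depending on the installed Click
# version they may keep underscores or use dashes, so the dashed forms below
# are an assumption:
#
#   python corner-extract.py harris-detector --bsize 3 --ksize 5 --k 0.04
#   python corner-extract.py harris-detector-bsize --bsizes "[3, 9, 15]"
#   python corner-extract.py harris-detector-animate --threshold 0.01
# ------------------------------------------------------------------------------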
| 37.096386
| 110
| 0.512968
|
bdf0286f8a544c7748f52237cb794903ea1de15b
| 714
|
py
|
Python
|
Scripts/python/scripts mundo 1/JOGO CURSO EM VIDEO/fase10.py
|
BrenoNAlmeida/Scripts-Escola
|
20d886d0401ef7f40a4a46e307eadbf5b1c0a5eb
|
[
"Apache-2.0"
] | null | null | null |
Scripts/python/scripts mundo 1/JOGO CURSO EM VIDEO/fase10.py
|
BrenoNAlmeida/Scripts-Escola
|
20d886d0401ef7f40a4a46e307eadbf5b1c0a5eb
|
[
"Apache-2.0"
] | null | null | null |
Scripts/python/scripts mundo 1/JOGO CURSO EM VIDEO/fase10.py
|
BrenoNAlmeida/Scripts-Escola
|
20d886d0401ef7f40a4a46e307eadbf5b1c0a5eb
|
[
"Apache-2.0"
] | null | null | null |
print('''\033[1;34mOS EXERCICIOS DESSA FASE SÃO : !
[28] - jogo do adivinha
[29] - Radar eletronico
[30] - Par ou impar
[31] - custo da viagem
[32] - Ano bissexto
[33] - Maior e menor valor
[34] - Almento de salario
[35] - Analizando triangulo\033[32m''')
opção3 = int(input('QUAL EXERCICIO VOCÊ QUER EXECUTAR ? '))
if opção3 == 28:
import exercicio028
elif opção3 == 29:
import exercicio029
elif opção3 == 30:
import exercicio030
elif opção3 == 31:
import exercicio031
elif opção3 == 32:
import exercicio032
elif opção3 == 33:
import exercicio033
elif opção3 == 34:
import exercicio034
elif opção3 == 35:
import exercicio035
| 26.444444
| 59
| 0.634454
|
182881b8e6afac3ed91530d110eaed27a944a4f5
| 278
|
py
|
Python
|
100doc/005-password_generator.py
|
ralexrivero/python_fundation
|
34a855db7380d3d91db6a8f02d97f287d038ef5f
|
[
"Apache-2.0"
] | 1
|
2021-09-19T04:09:48.000Z
|
2021-09-19T04:09:48.000Z
|
100doc/005-password_generator.py
|
ralexrivero/python_fundation
|
34a855db7380d3d91db6a8f02d97f287d038ef5f
|
[
"Apache-2.0"
] | null | null | null |
100doc/005-password_generator.py
|
ralexrivero/python_fundation
|
34a855db7380d3d91db6a8f02d97f287d038ef5f
|
[
"Apache-2.0"
] | null | null | null |
import string
import random
printable_chars = list(string.printable)
# keep every printable character up to the first whitespace
# (digits, letters and punctuation)
valid_chars = []
for i in printable_chars:
    if i == " ":
        break
    valid_chars.append(i)
# random.choice avoids the off-by-one in random.randint(0, len(valid_chars)),
# whose inclusive upper bound could raise IndexError
for i in range(0, 12):
    print(random.choice(valid_chars), end='')
print()
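# ---------------------------------------------------------------------------
# Illustrative alternative (not part of the original exercise): for passwords
# intended for real use, the standard-library `secrets` module provides a
# cryptographically secure choice with the same one-liner shape:
#
#   import secrets
#   print(''.join(secrets.choice(valid_chars) for _ in range(12)))
# ---------------------------------------------------------------------------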
| 16.352941
| 67
| 0.661871
|
a9e9673fb4dc7e5bb2960d3a4f73b0233b22b9a1
| 12,754
|
py
|
Python
|
payslip/south_migrations/0001_initial.py
|
andrejsab/django-payslip
|
14b7fdc12696bb1894a49467c88c361ac2a12c00
|
[
"MIT"
] | 22
|
2017-01-09T22:27:08.000Z
|
2021-05-07T10:26:00.000Z
|
payslip/south_migrations/0001_initial.py
|
andrejsab/django-payslip
|
14b7fdc12696bb1894a49467c88c361ac2a12c00
|
[
"MIT"
] | 1
|
2018-10-04T15:53:19.000Z
|
2018-11-10T09:06:22.000Z
|
payslip/south_migrations/0001_initial.py
|
andrejsab/django-payslip
|
14b7fdc12696bb1894a49467c88c361ac2a12c00
|
[
"MIT"
] | 21
|
2017-02-15T05:15:31.000Z
|
2021-04-17T09:55:25.000Z
|
# flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Company'
db.create_table('payslip_company', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('address', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('payslip', ['Company'])
# Adding M2M table for field extra_fields on 'Company'
db.create_table('payslip_company_extra_fields', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('company', models.ForeignKey(orm['payslip.company'], null=False)),
('extrafield', models.ForeignKey(orm['payslip.extrafield'], null=False))
))
db.create_unique('payslip_company_extra_fields', ['company_id', 'extrafield_id'])
# Adding model 'Employee'
db.create_table('payslip_employee', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='employees', to=orm['auth.User'])),
('company', self.gf('django.db.models.fields.related.ForeignKey')(related_name='employees', to=orm['payslip.Company'])),
('hr_number', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('address', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=1)),
))
db.send_create_signal('payslip', ['Employee'])
# Adding M2M table for field extra_fields on 'Employee'
db.create_table('payslip_employee_extra_fields', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('employee', models.ForeignKey(orm['payslip.employee'], null=False)),
('extrafield', models.ForeignKey(orm['payslip.extrafield'], null=False))
))
db.create_unique('payslip_employee_extra_fields', ['employee_id', 'extrafield_id'])
# Adding model 'ExtraFieldType'
db.create_table('payslip_extrafieldtype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal('payslip', ['ExtraFieldType'])
# Adding model 'ExtraField'
db.create_table('payslip_extrafield', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('field_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='extra_fields', to=orm['payslip.ExtraFieldType'])),
('value', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal('payslip', ['ExtraField'])
# Adding model 'PaymentType'
db.create_table('payslip_paymenttype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal('payslip', ['PaymentType'])
# Adding model 'Payment'
db.create_table('payslip_payment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('payment_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='payments', to=orm['payslip.PaymentType'])),
('employee', self.gf('django.db.models.fields.related.ForeignKey')(related_name='payments', to=orm['payslip.Employee'])),
('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=10, decimal_places=2)),
('date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 1, 5, 0, 0))),
('description', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal('payslip', ['Payment'])
# Adding M2M table for field extra_fields on 'Payment'
db.create_table('payslip_payment_extra_fields', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('payment', models.ForeignKey(orm['payslip.payment'], null=False)),
('extrafield', models.ForeignKey(orm['payslip.extrafield'], null=False))
))
db.create_unique('payslip_payment_extra_fields', ['payment_id', 'extrafield_id'])
def backwards(self, orm):
# Deleting model 'Company'
db.delete_table('payslip_company')
# Removing M2M table for field extra_fields on 'Company'
db.delete_table('payslip_company_extra_fields')
# Deleting model 'Employee'
db.delete_table('payslip_employee')
# Removing M2M table for field extra_fields on 'Employee'
db.delete_table('payslip_employee_extra_fields')
# Deleting model 'ExtraFieldType'
db.delete_table('payslip_extrafieldtype')
# Deleting model 'ExtraField'
db.delete_table('payslip_extrafield')
# Deleting model 'PaymentType'
db.delete_table('payslip_paymenttype')
# Deleting model 'Payment'
db.delete_table('payslip_payment')
# Removing M2M table for field extra_fields on 'Payment'
db.delete_table('payslip_payment_extra_fields')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'payslip.company': {
'Meta': {'object_name': 'Company'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['payslip.ExtraField']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'payslip.employee': {
'Meta': {'object_name': 'Employee'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'employees'", 'to': "orm['payslip.Company']"}),
'extra_fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['payslip.ExtraField']", 'null': 'True', 'blank': 'True'}),
'hr_number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'employees'", 'to': "orm['auth.User']"})
},
'payslip.extrafield': {
'Meta': {'object_name': 'ExtraField'},
'field_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extra_fields'", 'to': "orm['payslip.ExtraFieldType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'payslip.extrafieldtype': {
'Meta': {'object_name': 'ExtraFieldType'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'payslip.payment': {
'Meta': {'object_name': 'Payment'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 5, 0, 0)'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'employee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payments'", 'to': "orm['payslip.Employee']"}),
'extra_fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['payslip.ExtraField']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payments'", 'to': "orm['payslip.PaymentType']"})
},
'payslip.paymenttype': {
'Meta': {'object_name': 'PaymentType'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['payslip']
| 62.519608
| 182
| 0.598244
|
392c2ba16a6d8074314aa4267c6f4071047b1366
| 3,233
|
py
|
Python
|
tests/middleware/test_errors.py
|
phillipuniverse/starlette
|
b032e07f6a883c0de2445fd5953a323ec43a94ed
|
[
"BSD-3-Clause"
] | null | null | null |
tests/middleware/test_errors.py
|
phillipuniverse/starlette
|
b032e07f6a883c0de2445fd5953a323ec43a94ed
|
[
"BSD-3-Clause"
] | 17
|
2022-01-01T21:22:33.000Z
|
2022-03-07T10:33:37.000Z
|
tests/middleware/test_errors.py
|
sthagen/encode-starlette
|
ba31df75daac7cc3b5fcfea89939372768a166cb
|
[
"BSD-3-Clause"
] | 1
|
2022-03-25T10:05:17.000Z
|
2022-03-25T10:05:17.000Z
|
import pytest
from starlette.applications import Starlette
from starlette.background import BackgroundTask
from starlette.middleware.errors import ServerErrorMiddleware
from starlette.responses import JSONResponse, Response
from starlette.routing import Route
def test_handler(test_client_factory):
async def app(scope, receive, send):
raise RuntimeError("Something went wrong")
def error_500(request, exc):
return JSONResponse({"detail": "Server Error"}, status_code=500)
app = ServerErrorMiddleware(app, handler=error_500)
client = test_client_factory(app, raise_server_exceptions=False)
response = client.get("/")
assert response.status_code == 500
assert response.json() == {"detail": "Server Error"}
def test_debug_text(test_client_factory):
async def app(scope, receive, send):
raise RuntimeError("Something went wrong")
app = ServerErrorMiddleware(app, debug=True)
client = test_client_factory(app, raise_server_exceptions=False)
response = client.get("/")
assert response.status_code == 500
assert response.headers["content-type"].startswith("text/plain")
assert "RuntimeError: Something went wrong" in response.text
def test_debug_html(test_client_factory):
async def app(scope, receive, send):
raise RuntimeError("Something went wrong")
app = ServerErrorMiddleware(app, debug=True)
client = test_client_factory(app, raise_server_exceptions=False)
response = client.get("/", headers={"Accept": "text/html, */*"})
assert response.status_code == 500
assert response.headers["content-type"].startswith("text/html")
assert "RuntimeError" in response.text
def test_debug_after_response_sent(test_client_factory):
async def app(scope, receive, send):
response = Response(b"", status_code=204)
await response(scope, receive, send)
raise RuntimeError("Something went wrong")
app = ServerErrorMiddleware(app, debug=True)
client = test_client_factory(app)
with pytest.raises(RuntimeError):
client.get("/")
def test_debug_not_http(test_client_factory):
"""
DebugMiddleware should just pass through any non-http messages as-is.
"""
async def app(scope, receive, send):
raise RuntimeError("Something went wrong")
app = ServerErrorMiddleware(app)
with pytest.raises(RuntimeError):
client = test_client_factory(app)
with client.websocket_connect("/"):
pass # pragma: nocover
def test_background_task(test_client_factory):
accessed_error_handler = False
def error_handler(request, exc):
nonlocal accessed_error_handler
accessed_error_handler = True
def raise_exception():
raise Exception("Something went wrong")
async def endpoint(request):
task = BackgroundTask(raise_exception)
return Response(status_code=204, background=task)
app = Starlette(
routes=[Route("/", endpoint=endpoint)],
exception_handlers={Exception: error_handler},
)
client = test_client_factory(app, raise_server_exceptions=False)
response = client.get("/")
assert response.status_code == 204
assert accessed_error_handler
| 32.656566
| 73
| 0.718218
|
f13670e398952759140b671c0cf7e3ac1c81866d
| 4,577
|
py
|
Python
|
python/tvm/contrib/tf_op/module.py
|
heweiwill/incubator-tvm
|
5317afb79616454a27dedd4cf3b28dd0986aacdb
|
[
"Apache-2.0"
] | 5
|
2020-06-19T03:22:24.000Z
|
2021-03-17T22:16:48.000Z
|
python/tvm/contrib/tf_op/module.py
|
heweiwill/incubator-tvm
|
5317afb79616454a27dedd4cf3b28dd0986aacdb
|
[
"Apache-2.0"
] | 2
|
2020-07-08T12:34:59.000Z
|
2020-07-11T15:54:47.000Z
|
python/tvm/contrib/tf_op/module.py
|
heweiwill/incubator-tvm
|
5317afb79616454a27dedd4cf3b28dd0986aacdb
|
[
"Apache-2.0"
] | 2
|
2019-08-24T00:06:36.000Z
|
2022-03-03T02:07:27.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Module container of TensorFlow TVMDSO op"""
import tensorflow as tf
from tensorflow.python.framework import load_library
class OpModule:
"""Module container of TensorFlow TVMDSO op which wraps exported
TVM op implementation library to be called on TensorFlow side"""
def __init__(self, lib_path):
self.lib_path = lib_path
def func(self, name, output_dtype=None, output_shape=None):
"""Get tvm op function wrapped as TensorFlow tensor to tensor function
Parameters
----------
name: str
function name
output_dtype: str or TensorFlow datatype
Output datatype, default is float32
output_shape: List of integer/tf scalar tensor or tf shape tensor
            Output shape, defaults to the same shape as the first input
Returns
----------
Func object that acts as TensorFlow tensor to tensor function.
"""
return TensorFunc(self.lib_path, name, output_dtype, output_shape)
def __getitem__(self, func_name):
return self.func(func_name)
class TensorFunc:
"""Function object that acts as TensorFlow tensor to tensor function."""
def __init__(self, lib_path, func_name, output_dtype, output_shape):
self.lib_path = lib_path
self.func_name = func_name
self.output_dtype = output_dtype
# const(0) indicate invalid dynamic shape
self.dynamic_output_shape = tf.constant(0, tf.int64)
self.static_output_shape = None
self.has_static_output_shape = False # extra flag is required
if self._is_static_shape(output_shape):
self.static_output_shape = output_shape
self.has_static_output_shape = True
elif output_shape is not None:
self.dynamic_output_shape = self._pack_shape_tensor(output_shape)
self.module = load_library.load_op_library('tvm_dso_op.so')
self.tvm_dso_op = self.module.tvm_dso_op
def apply(self, *params):
return self.tvm_dso_op(params,
dynamic_output_shape=self.dynamic_output_shape,
static_output_shape=self.static_output_shape,
has_static_output_shape=self.has_static_output_shape,
lib_path=self.lib_path,
func_name=self.func_name,
output_dtype=self.output_dtype)
def __call__(self, *params):
return self.apply(*params)
def _is_static_shape(self, shape):
if shape is None or not isinstance(shape, list):
return False
for dim_value in shape:
if not isinstance(dim_value, int):
return False
if dim_value < 0:
raise Exception("Negative dimension is illegal: %d" % dim_value)
return True
def _pack_shape_tensor(self, shape):
if isinstance(shape, tf.Tensor):
if shape.dtype == tf.int32:
shape = tf.cast(shape, tf.int64)
elif isinstance(shape, list):
shape_dims = []
for dim_value in shape:
if isinstance(dim_value, int):
shape_dims.append(tf.constant(dim_value, tf.int64))
elif isinstance(dim_value, tf.Tensor) and dim_value.shape.rank == 0:
if dim_value.dtype == tf.int32:
dim_value = tf.cast(dim_value, tf.int64)
shape_dims.append(dim_value)
else:
raise TypeError("Input shape dimension is neither scalar tensor nor int")
shape = tf.stack(shape_dims)
else:
raise TypeError("Input shape is neither tensor nor list")
return shape
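# A minimal usage sketch (not part of the upstream module). The library path
# "tvm_addone_dll.so" and the exported function name "addone" are hypothetical
# placeholders for whatever the TVM export step actually produced:
#
#   mod = OpModule("tvm_addone_dll.so")
#   addone = mod.func("addone", output_dtype=tf.float32)
#   y = addone(tf.constant([1.0, 2.0, 3.0]))  # dispatches to the TVM DSO op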
| 40.149123
| 93
| 0.642124
|
1bdcf99bc7867589005ac772457741b6ae9eb59a
| 937
|
py
|
Python
|
src/tests/interactive/cleanup.py
|
Trundle/js_of_fstar
|
1c8a2a1b951cc119dd49d3f5d9817571e17aab51
|
[
"Apache-2.0"
] | 1
|
2017-05-14T21:45:17.000Z
|
2017-05-14T21:45:17.000Z
|
src/tests/interactive/cleanup.py
|
Trundle/js_of_fstar
|
1c8a2a1b951cc119dd49d3f5d9817571e17aab51
|
[
"Apache-2.0"
] | null | null | null |
src/tests/interactive/cleanup.py
|
Trundle/js_of_fstar
|
1c8a2a1b951cc119dd49d3f5d9817571e17aab51
|
[
"Apache-2.0"
] | null | null | null |
"""Cleanup interactive transcript received on standard input.
This mostly consists of pretty-printing JSON messages and sorting their
fields, to permit text-based comparisons against reference transcripts.
Usage: python2 cleanup.py [fname.clean] < [fname.dirty]
"""
import io
import json
import sys
def cleanup_one(line):
try:
return json.dumps(json.loads(line), ensure_ascii=False, sort_keys=True) + "\n"
except:
return line
def main():
# Writing to stdout converts newlines, which confuses diff on Windows, so
# write to a file instead. There's no reasonable way to do this in a Python
# 2/3 compatible way, so the following is Python-2 only.
lines = [line.decode("utf-8") for line in sys.stdin]
with open(sys.argv[1], mode="wb") as out:
for line in lines:
out.write(cleanup_one(line).encode("utf-8"))
out.flush()
if __name__ == '__main__':
main()
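# A rough Python-3 equivalent of main() (an assumption, not part of the
# original script): binary stdin is reachable as sys.stdin.buffer, which
# avoids the newline translation problem described above while keeping the
# byte-exact output file.
#
#   lines = [line.decode("utf-8") for line in sys.stdin.buffer]
#   with open(sys.argv[1], mode="wb") as out:
#       for line in lines:
#           out.write(cleanup_one(line).encode("utf-8"))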
| 30.225806
| 86
| 0.685165
|
c221e25030cd0c8a2b7898da8b8d18a047e80730
| 64,146
|
py
|
Python
|
Tests/test_strformat.py
|
aisk/ironpython3
|
d492fd811a0cee4d0a07cd46f02a29a3c90d964b
|
[
"Apache-2.0"
] | 1,872
|
2015-01-02T18:56:47.000Z
|
2022-03-31T07:34:39.000Z
|
Tests/test_strformat.py
|
aisk/ironpython3
|
d492fd811a0cee4d0a07cd46f02a29a3c90d964b
|
[
"Apache-2.0"
] | 675
|
2015-02-27T09:01:01.000Z
|
2022-03-31T14:03:25.000Z
|
Tests/test_strformat.py
|
aisk/ironpython3
|
d492fd811a0cee4d0a07cd46f02a29a3c90d964b
|
[
"Apache-2.0"
] | 278
|
2015-01-02T03:48:20.000Z
|
2022-03-29T20:40:44.000Z
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import _string
import sys
import unittest
from iptest import IronPythonTestCase, is_cli, is_cpython, is_netcoreapp21, long, run_test, skipUnlessIronPython
allChars = ''
for y in [chr(x) for x in range(256) if chr(x) != '[' and chr(x) != '.']:
allChars += y
class TestException(Exception): pass
class bad_str(object):
def __str__(self):
raise TestException('booh')
class StrFormatTest(IronPythonTestCase):
def test_formatter_parser_errors(self):
errors = [ ("{0!", "unmatched '{' in format spec" if is_cli else "end of string while looking for conversion specifier"),
("}a{", "Single '}' encountered in format string"),
("{0:0.1a", "unmatched '{' in format spec"),
("{0:{}", "unmatched '{' in format spec"),
("{0:aa{ab}", "unmatched '{' in format spec"),
("{0!}", "end of format while looking for conversion specifier" if is_cli else "unmatched '{' in format spec"),
("{0!", "unmatched '{' in format spec" if is_cli else "end of string while looking for conversion specifier"),
("{0!aa}", "expected ':' after format specifier" if is_cli else "expected ':' after conversion specifier"),
("{0.{!:{.}.}}", "expected ':' after format specifier" if is_cli else "unexpected '{' in field name"),
("{", "Single '{' encountered in format string"),
]
if is_cli: # https://github.com/IronLanguages/ironpython3/issues/867
errors += [
("{0!}}", "end of format while looking for conversion specifier"),
]
else:
_string.formatter_parser("{0!}}")
for format, errorMsg in errors:
self.assertRaisesMessage(ValueError, errorMsg, list, _string.formatter_parser(format))
def test_formatter_parser(self):
tests = [ ('{0.}', [('', '0.', '', None)]),
('{0:.{abc}}', [('', '0', '.{abc}', None)]),
('{0[]}', [('', '0[]', '', None)]),
('{0!!}', [('', '0', '', '!')]),
('{0:::!!::!::}', [('', '0', '::!!::!::', None)]),
('{0.]}', [('', '0.]', '', None)]),
('{0..}', [('', '0..', '', None)]),
('{{', [('{', None, None, None)]),
('}}', [('}', None, None, None)]),
('{{}}', [('{', None, None, None), ('}', None, None, None)]),
]
for format, expected in tests:
self.assertEqual(list(_string.formatter_parser(format)), expected)
tests = [
('{0.{:{.}.}}', [('', '0.{', '{.}.}', None)]),
('{0.{:.}.}', [('', '0.{', '.}.', None)]),
('{0.{!.:{.}.}}', [('', '0.{', '{.}.}', '.')]),
('{0.{!!:{.}.}}', [('', '0.{', '{.}.}', '!')]),
('{0[}', [('', '0[', '', None)]),
('{0.[}', [('', '0.[', '', None)]),
]
for format, expected in tests:
if is_cli: # https://github.com/IronLanguages/ironpython3/issues/867
self.assertEqual(list(_string.formatter_parser(format)), expected)
else:
with self.assertRaises(ValueError):
list(_string.formatter_parser(format))
def test_format_field_name_split_errors(self):
if is_cpython: #http://ironpython.codeplex.com/workitem/28224
temp = _string.formatter_field_name_split('') #Just ensure it doesn't throw
else:
self.assertRaisesMessage(ValueError, "empty field name", _string.formatter_field_name_split, '')
self.assertRaisesMessage(ValueError, "empty field name", _string.formatter_field_name_split, '[')
self.assertRaisesMessage(ValueError, "empty field name", _string.formatter_field_name_split, '.')
self.assertRaisesMessage(ValueError, "empty field name", _string.formatter_field_name_split, '[.abc')
self.assertRaisesMessage(ValueError, "empty field name", _string.formatter_field_name_split, '.abc')
errors = [ ("0[", "Missing ']' in format string"),
("abc.", "Empty attribute in format string"),
("abc[]", "Empty attribute in format string"),
]
for format, errorMsg in errors:
self.assertRaisesMessage(ValueError, errorMsg, list, _string.formatter_field_name_split(format)[1])
def test_format_field_name_split(self):
tests = [ ('0', [long(0), []]),
('abc.foo', ['abc', [(True, 'foo')]]),
('abc[2]', ['abc', [(False, long(2))]]),
('1[2]', [long(1), [(False, long(2))]]),
('1.abc', [long(1), [(True, 'abc')]]),
('abc 2.abc', ['abc 2', [(True, 'abc')]]),
('abc!2.abc', ['abc!2', [(True, 'abc')]]),
('].abc', [']', [(True, 'abc')]]),
("abc[[]", ['abc', [(False, '[')]] ),
("abc[[[]", ['abc', [(False, '[[')]] ),
]
if not is_cpython: #http://ironpython.codeplex.com/workitem/28331
tests.append(("abc[2]#x", ['abc', [(False, long(2))]] ))
tests.append([allChars, [allChars, []]])
tests.append([allChars + '.foo', [allChars, [(True, 'foo')]]])
tests.append([allChars + '[2]', [allChars, [(False, long(2))]]])
for format, expected in tests:
res = list(_string.formatter_field_name_split(format))
res[1] = list(res[1])
self.assertEqual(res, expected)
def test_format_arg_errors(self):
self.assertRaises(IndexError, '{0}'.format)
self.assertRaisesMessage(ValueError, "Empty attribute in format string", '{0.}'.format, 42)
self.assertRaisesMessage(ValueError, "Empty attribute in format string", '{0[]}'.format, 42)
self.assertRaisesMessage(ValueError, "Missing ']' in format string" if is_cli else "expected '}' before end of string", '{0[}'.format, 42)
self.assertRaises(IndexError if is_cli else ValueError, '{0[}'.format)
@skipUnlessIronPython()
def test_format_cli_interop(self):
# test classes implementing IFormattable where we pass
# the format spec through
import System
dt = System.DateTime(2008, 10, 26)
class x(object):
abc = dt
self.assertEqual(format(dt, 'MM-dd'), '10-26')
self.assertEqual('{0:MM-dd}'.format(dt), '10-26')
self.assertEqual('{abc:MM-dd}'.format(abc=dt), '10-26')
self.assertEqual('{0.abc:MM-dd}'.format(x), '10-26')
# test accessing a .NET attribute
self.assertEqual('{0.Year}'.format(dt), '2008')
# indexing into .NET dictionaries
strDict = System.Collections.Generic.Dictionary[str, object]()
strDict['abc'] = dt
self.assertEqual('{0[abc]:MM-dd}'.format(strDict), '10-26')
intDict = System.Collections.Generic.Dictionary[int, object]()
intDict[42] = dt
self.assertEqual('{0[42]:MM-dd}'.format(intDict), '10-26')
objDict = System.Collections.Generic.Dictionary[object, object]()
objDict[42], objDict['42'] = 'abc', 'def'
self.assertEqual('{0[42]}'.format(objDict), 'abc')
# import clr doesn't flow through
self.assertRaises(AttributeError, '{0.MaxValue}'.format, int)
def test_format_object_access(self):
class y(object):
bar = 23
class x(object):
def __getitem__(self, index):
return type(index).__name__ + ' ' + str(index)
abc = 42
baz = y
def __str__(self): return 'foo'
def __repr__(self): return 'bar'
self.assertEqual('{0.abc}'.format(x), '42')
self.assertEqual('{0.abc}xyz'.format(x), '42xyz')
self.assertEqual('{0[42]}'.format(x()), 'int 42')
self.assertEqual('{0[abc]}'.format(x()), 'str abc')
self.assertEqual('{0.baz.bar}'.format(x()), '23')
self.assertEqual('{0[abc]!r}'.format(x()), "'str abc'")
self.assertEqual('{0[abc]!s}'.format(x()), 'str abc')
self.assertEqual('{0!r}'.format(x()), 'bar')
self.assertEqual('{0!s}'.format(x()), 'foo')
self.assertEqual('{abc!s}'.format(abc = x()), 'foo')
self.assertEqual('{abc!r}'.format(abc = x()), 'bar')
def test_format(self):
class x(object):
def __format__(self, formatSpec):
return formatSpec
# computed format specs
self.assertEqual('{0:{1}}'.format(x(), 'abc'), 'abc')
self.assertRaisesMessage(ValueError, "Max string recursion exceeded", '{0:{1:{2}}}'.format, x(), x(), 'abc')
# built-in format method
self.assertEqual(format(x()), '')
self.assertEqual(format(x(), 'abc'), 'abc')
class x:
def __format__(self, *args):
return 'abc'
self.assertEqual(format(x(), ''), 'abc')
self.assertEqual('{0}'.format(x()), 'abc')
self.assertTrue('__format__' in type(x()).__dict__)
def test_format_errors(self):
class bad(object):
def __format__(self, *args):
return None
class bad2(object):
def __format__(self, *args):
return 42
self.assertRaisesMessage(TypeError, "bad.__format__ must return string, not NoneType" if is_cli else "__format__ method did not return string", '{0}'.format, bad())
self.assertRaisesMessage(TypeError, "bad2.__format__ must return string, not int" if is_cli else "__format__ method did not return string", '{0}'.format, bad2())
self.assertRaisesMessage(ValueError, "Unknown conversion specifier x", '{0!x}'.format, 'abc')
self.assertRaisesMessage(TypeError, "bad.__format__ must return string, not NoneType" if is_cli else "__format__ method did not return string", format, bad())
self.assertRaisesMessage(TypeError, "bad2.__format__ must return string, not int" if is_cli else "__format__ method did not return string", format, bad2())
def test_object__format__(self):
self.assertEqual(object.__format__("aaa", ""), "aaa")
with self.assertRaises(TypeError):
object.__format__("aaa", "6") # TypeError: non-empty format string passed to object.__format__
def test_str___format__(self):
x = "abc"
tests = [ ('', 'abc'),
('6', 'abc '),
('6s', 'abc '),
('<6', 'abc '),
('>6', ' abc'),
('^6', ' abc '),
('x^6', 'xabcxx'),
('x<6', 'abcxxx'),
('<<6', 'abc<<<'),
('x>6', 'xxxabc'),
('}^6', '}abc}}'),
('\0<6', 'abc\0\0\0'),
('.0', ''),
('.1', 'a'),
('5.2', 'ab '),
('5.2s', 'ab '),
]
for spec, result in tests:
self.assertEqual(str.__format__(x, spec), result)
def test_str___format___errors(self):
errors = [ ("+", "Sign not allowed in string format specifier"),
("=+", "Sign not allowed in string format specifier"),
('=10', "'=' alignment not allowed in string format specifier"),
("10r", "Unknown format code 'r' for object of type 'str'"),
("=+r", "Unknown format code 'r' for object of type 'str'"),
(".", "Format specifier missing precision"),
(".a", "Format specifier missing precision"),
]
# ensure only the s format type is recognized
for char in allChars:
if char != 's' and (char < '0' or char > '9'):
x = ord(char)
if char==',':
errors.append(('10' + char, "Cannot specify ',' with 's'."))
elif 0x20 < x < 0x80:
errors.append(('10' + char, "Unknown format code '%s' for object of type 'str'" % char))
else:
errors.append(('10' + char, "Unknown format code '\\x%x' for object of type 'str'" % x))
for errorFmt, errorMsg in errors:
self.assertRaisesMessage(ValueError, errorMsg, str().__format__, errorFmt)
if is_cli or sys.version_info >= (3, 5):
self.assertRaisesMessage(TypeError, 'unsupported format string passed to bad_str.__format__', bad_str().__format__, '+')
else:
# __str__ is called before processing spec
self.assertRaisesMessage(TestException, 'booh', bad_str().__format__, '+')
self.assertRaisesMessage(TestException, 'booh', bad_str().__format__, '=10')
self.assertRaisesMessage(TestException, 'booh', bad_str().__format__, '.')
def test_float___format__(self):
tests = []
if is_cpython: #In large part due to http://ironpython.codeplex.com/workitem/28206
tests+= [ (2.0, '6.1', ' 2e+00'),
(2.5, '6.1', ' 2e+00'),
(2.25, '6.1', ' 2e+00'),
(2.25, '6.2', ' 2.2'),
(23.0, '.1', '2e+01'),
(23.0, '.2', '2.3e+01'),
(230.5, '.3', '2.3e+02'),
(11230.54, '.5', '1.1231e+04'),
(111230.54, '.1', '1e+05'),
(100000.0, '.5', '1e+05'),
(230.5, '.3g', '230'),
(230.5, '.3n', '230'),
(0.0, '1.1', '0e+00'),
(0.0, '1.0', '0e+00'),
(1.0, '.0', '1e+00'),
(1.1, '.0', '1e+00'),
(1.1, '.1', '1e+00'),
(10.0, '.1', '1e+01'),
(10.0, '.0', '1e+01'),
(100000000000.0, '', '100000000000.0'),
(1000000000.12, '1.10', '1e+09'),
(1000000000.12, '1.3', '1e+09'),
(999999999999.9, '1.0', '1e+12'),
(999999999999.9, '1.2', '1e+12'),
(999999999999.0, '', '999999999999.0'),
(-999999999999.0, '', '-999999999999.0'),
(10e667, '+', '+inf'),
(-10e667, '+', '-inf'),
(10e667/10e667, '+', '+nan'),
(10e667, '-', 'inf'),
(-10e667, '-', '-inf'),
(10e667/10e667, '-', 'nan'),
(10e667, ' ', ' inf'),
(-10e667, ' ', '-inf'),
(10e667/10e667, ' ', ' nan'),
]
else:
tests+= [ (2.0, '6.1', ' 2.0'),
(2.5, '6.1', ' 2.0'),
(2.25, '6.1', ' 2.0'),
(2.25, '6.2', ' 2.2'),
(23.0, '.1', '2.0e+01'),
(23.0, '.2', '23.0'),
(230.5, '.3', '230.0'),
(11230.54, '.5', '11231.0'),
(111230.54, '.1', '1.0e+05'),
(100000.0, '.5', '1.0e+05'),
(230.5, '.3g', '230'),
(230.5, '.3n', '230'),
(0.0, '1.1', '0.0'),
(0.0, '1.0', '0.0'),
(1.0, '.0', '1.0'),
(1.1, '.0', '1.0'),
(1.1, '.1', '1.0'),
(10.0, '.1', '1.0e+01'),
(10.0, '.0', '1.0e+01'),
(100000000000.0, '', '100000000000.0'),
(1000000000.12, '1.10', '1000000000.0'),
(1000000000.12, '1.3', '1.0e+09'),
(999999999999.9, '1.0', '1.0e+12'),
(999999999999.9, '1.2', '1.0e+12'),
(999999999999.0, '', '999999999999.0'),
(-999999999999.0, '', '-999999999999.0'),
(10e667, '+', '+inf'),
(-10e667, '+', '-inf'),
(10e667/10e667, '+', '+nan'),
(10e667, '-', 'inf'),
(-10e667, '-', '-inf'),
(10e667/10e667, '-', 'nan'),
(10e667, ' ', ' inf'),
(-10e667, ' ', '-inf'),
(10e667/10e667, ' ', ' nan'),
]
tests+= [ (2.0, '', '2.0'),
(2.0, 'g', '2'),
(2.0, 'f', '2.000000'),
(2.5, '', '2.5'),
(2.5, 'g', '2.5'),
(2.0, '+', '+2.0'),
(2.0, '-', '2.0'),
(2.0, ' ', ' 2.0'),
(2.0, '<5', '2.0 '),
(2.0, '>5', ' 2.0'),
(2.0, '=5', ' 2.0'),
(2.0, '^6', ' 2.0 '),
(2.0, '6', ' 2.0'),
(2.0, 'x< 10.10', ' 2.0xxxxxx'),
(2.01, 'x< 10.10', ' 2.01xxxxx'),
(2.0, 'x> 10.10', 'xxxxxx 2.0'),
(2.0, 'x= 10.10', ' xxxxxx2.0'),
(2.0, 'x^ 10.10', 'xxx 2.0xxx'),
(2.0, 'x^ 9.10', 'xx 2.0xxx'),
(2.0, '\0^ 9.10', '\0\0 2.0\0\0\0'),
(2.23, '6.2', ' 2.2'),
(2.25, '6.3', ' 2.25'),
(2.123456789, '2.10', '2.123456789'),
(230.0, '.2', '2.3e+02'),
(230.1, '.2', '2.3e+02'),
(230.5, '.4', '230.5'),
(230.54, '.4', '230.5'),
(230.54, '.5', '230.54'),
(1230.54, '.5', '1230.5'),
(111230.54, '.5', '1.1123e+05'),
(111230.54, '.4', '1.112e+05'),
(111230.54, '.3', '1.11e+05'),
(111230.54, '.2', '1.1e+05'),
(23.0, 'e', '2.300000e+01'),
(23.0, '.6e', '2.300000e+01'),
(23.0, '.0e', '2e+01'),
(23.0, '.1e', '2.3e+01'),
(23.0, '.2e', '2.30e+01'),
(23.0, '.3e', '2.300e+01'),
(23.0, '.4e', '2.3000e+01'),
(230.0, '.2e', '2.30e+02'),
(230.1, '.2e', '2.30e+02'),
(230.5, '.3e', '2.305e+02'),
(230.5, '.4e', '2.3050e+02'),
(230.54, '.4e', '2.3054e+02'),
(230.54, '.5e', '2.30540e+02'),
(1230.54, '.5e', '1.23054e+03'),
(11230.54, '.5e', '1.12305e+04'),
(111230.54, '.5e', '1.11231e+05'),
(111230.54, '.4e', '1.1123e+05'),
(111230.54, '.3e', '1.112e+05'),
(111230.54, '.2e', '1.11e+05'),
(111230.54, '.1e', '1.1e+05'),
(23.0, 'E', '2.300000E+01'),
(23.0, '.6E', '2.300000E+01'),
(23.0, '.0E', '2E+01'),
(23.0, '.1E', '2.3E+01'),
(23.0, '.2E', '2.30E+01'),
(23.0, '.3E', '2.300E+01'),
(23.0, '.4E', '2.3000E+01'),
(230.0, '.2E', '2.30E+02'),
(230.1, '.2E', '2.30E+02'),
(230.5, '.3E', '2.305E+02'),
(230.5, '.4E', '2.3050E+02'),
(230.54, '.4E', '2.3054E+02'),
(230.54, '.5E', '2.30540E+02'),
(1230.54, '.5E', '1.23054E+03'),
(11230.54, '.5E', '1.12305E+04'),
(111230.54, '.5E', '1.11231E+05'),
(111230.54, '.4E', '1.1123E+05'),
(111230.54, '.3E', '1.112E+05'),
(111230.54, '.2E', '1.11E+05'),
(111230.54, '.1E', '1.1E+05'),
(23.0, 'F', '23.000000'),
(23.0, '.6F', '23.000000'),
(23.0, '.0F', '23'),
(23.0, '.1F', '23.0'),
(23.0, '.2F', '23.00'),
(23.0, '.3F', '23.000'),
(23.0, '.4F', '23.0000'),
(230.0, '.2F', '230.00'),
(230.1, '.2F', '230.10'),
(230.5, '.3F', '230.500'),
(230.5, '.4F', '230.5000'),
(230.54, '.4F', '230.5400'),
(230.54, '.5F', '230.54000'),
(1230.54, '.5F', '1230.54000'),
(11230.54, '.5F', '11230.54000'),
(111230.54, '.5F', '111230.54000'),
(111230.54, '.4F', '111230.5400'),
(111230.54, '.3F', '111230.540'),
(111230.54, '.2F', '111230.54'),
(111230.54, '.1F', '111230.5'),
(111230.55, '.1F', '111230.6'),
(-111230.55, '.1F', '-111230.6'),
(111230.55, '.1f', '111230.6'),
(-111230.55, '.1f', '-111230.6'),
(23.0, '%', '2300.000000%'),
(23.0, '.6%', '2300.000000%'),
(23.0, '.0%', '2300%'),
(23.0, '.1%', '2300.0%'),
(23.0, '.2%', '2300.00%'),
(23.0, '.3%', '2300.000%'),
(23.0, '.4%', '2300.0000%'),
(230.0, '.2%', '23000.00%'),
(230.1, '.2%', '23010.00%'),
(230.5, '.3%', '23050.000%'),
(230.5, '.4%', '23050.0000%'),
(230.54, '.4%', '23054.0000%'),
(230.54, '.5%', '23054.00000%'),
(1230.54, '.5%', '123054.00000%'),
(11230.54, '.5%', '1123054.00000%'),
(111230.54, '.5%', '11123054.00000%'),
(111230.54, '.4%', '11123054.0000%'),
(111230.54, '.3%', '11123054.000%'),
(111230.54, '.2%', '11123054.00%'),
(111230.54, '.1%', '11123054.0%'),
(111230.55, '.1%', '11123055.0%'),
(-111230.55, '.1%', '-11123055.0%'),
(23.0, '.1g', '2e+01'),
(23.0, '.0g', '2e+01'),
(230.0, '.1g', '2e+02'),
(23.0, '.2g', '23'),
(23.5, '.2g', '24'),
(23.4, '.2g', '23'),
(23.45, '.2g', '23'),
(230.0, '.2g', '2.3e+02'),
(230.1, '.2g', '2.3e+02'),
(230.5, '.4g', '230.5'),
(230.54, '.4g', '230.5'),
(230.54, '.5g', '230.54'),
(1230.54, '.5g', '1230.5'),
(11230.54, '.5g', '11231'),
(111230.54, '.5g', '1.1123e+05'),
(111230.54, '.4g', '1.112e+05'),
(111230.54, '.3g', '1.11e+05'),
(111230.54, '.2g', '1.1e+05'),
(111230.54, '.1g', '1e+05'),
(23.0, '.1n', '2e+01'),
(23.0, '.2n', '23'),
(230.0, '.2n', '2.3e+02'),
(230.1, '.2n', '2.3e+02'),
(230.5, '.4n', '230.5'),
(230.54, '.4n', '230.5'),
(230.54, '.5n', '230.54'),
(1230.54, '.5n', '1230.5'),
(11230.54, '.5n', '11231'),
(111230.54, '.5n', '1.1123e+05'),
(111230.54, '.4n', '1.112e+05'),
(111230.54, '.3n', '1.11e+05'),
(111230.54, '.2n', '1.1e+05'),
(111230.54, '.1n', '1e+05'),
(11231.54, 'n', '11231.5'),
(111230.54, 'n', '111231'),
(111230.54, 'g', '111231'),
(0.0, '', '0.0'),
(0.0, '1', '0.0'),
(1.1, '.2', '1.1'),
(1000000.0, '', '1000000.0'),
(10000000.0, '', '10000000.0'),
(100000000.0, '', '100000000.0'),
(1000000000.0, '', '1000000000.0'),
(10000000000.0, '', '10000000000.0'),
(1000000000000.0, '', '1000000000000.0'),
(1000000000000.0, 'g', '1e+12'),
(-1000000000000.0, '', '-1000000000000.0'),
(-1000000000000.0, 'g', '-1e+12'),
(-1000000000000.0, 'G', '-1E+12'),
(-1000000000000.0, '.1g', '-1e+12'),
(-1000000000000.0, '.1G', '-1E+12'),
(10e667, '', 'inf'),
(-10e667, '', '-inf'),
(10e667/10e667, '', 'nan'),
]
for value, spec, result in tests:
actual = value.__format__(spec)
self.assertEqual(actual, result, "value:{0}, spec:{1}, (expected) result:{2}, actual:{3}, expr:({0}).__format__('{1}')".format(value, spec, result, actual))
if is_netcoreapp21: return # https://github.com/IronLanguages/ironpython3/issues/751
# check locale specific formatting
import _locale
try:
if is_cli:
_locale.setlocale(_locale.LC_ALL, 'en_US')
else:
_locale.setlocale(_locale.LC_ALL, 'English_United States.1252')
tests = [
(1000.0, 'n', '1,000'),
(1000.12345, 'n', '1,000.12'),
(1000.5, 'n', '1,000.5'),
(100000.0, 'n', '100,000'),
(100000.0, '.5n', '1e+05'),
(100000.5, '.5n', '1e+05'),
(100000.5, '.7n', '100,000.5'),
]
if is_cpython: #http://ironpython.codeplex.com/workitem/28206
tests+= [
(100000.5, 'n', '100,000'),
(100000.5, '.6n', '100,000'),
]
else:
tests+= [
(100000.5, 'n', '100,000'),
(100000.5, '.6n', '100,000'),
]
for value, spec, result in tests:
actual = value.__format__(spec)
self.assertEqual(actual, result, "value:{0}, spec:{1}, (expected) result:{2}, actual:{3}, expr:({0}).__format__('{1}')".format(value, spec, result, actual))
finally:
# and restore it back...
_locale.setlocale(_locale.LC_ALL, 'C')
self.assertEqual(100000.0.__format__('n'), '100000')
def test_float___format___errors(self):
errors = []
okChars = set(['\0', '%', 'E', 'F', 'G', 'e', 'f', 'g', 'n', ','] + [chr(x) for x in range(ord('0'), ord('9') + 1)])
# verify the okChars are actually ok
for char in okChars:
2.0.__format__('10' + char)
for char in allChars:
if char not in okChars:
x = ord(char)
if 0x20 < x < 0x80:
errors.append((2.0, '10' + char, "Unknown format code '%s' for object of type 'float'" % char))
else:
errors.append((2.0, '10' + char, "Unknown format code '\\x%x' for object of type 'float'" % x))
for value, errorFmt, errorMsg in errors:
self.assertRaisesMessage(ValueError, errorMsg, value.__format__, errorFmt)
def test_int___format__(self):
tests = [
(0, '+', '+0'),
(0, ' ', ' 0'),
(0, '-', '0'),
(2, '', '2'),
(2, '+', '+2'),
(2, '-', '2'),
(2, ' ', ' 2'),
(2, '<5', '2 '),
(2, '05', '00002'),
(20000,'+4', '+20000'),
(2, '>5', ' 2'),
(2, '=5', ' 2'),
(2, '^6', ' 2 '),
(2, '6', ' 2'),
(2, 'x< 10', ' 2xxxxxxxx'),
(2, 'x> 10', 'xxxxxxxx 2'),
(2, 'x= 10', ' xxxxxxxx2'),
(2, 'x^ 10', 'xxxx 2xxxx'),
(2, 'x^ 9', 'xxx 2xxxx'),
(2, 'x<+10', '+2xxxxxxxx'),
(2, 'x>+10', 'xxxxxxxx+2'),
(2, 'x=+10', '+xxxxxxxx2'),
(2, 'x^+10', 'xxxx+2xxxx'),
(2, 'x^+9', 'xxx+2xxxx'),
(2, 'x<-10', '2xxxxxxxxx'),
(2, 'x>-10', 'xxxxxxxxx2'),
(2, 'x=-10', 'xxxxxxxxx2'),
(2, 'x^-10', 'xxxx2xxxxx'),
(2, 'x^-9', 'xxxx2xxxx'),
(-2, 'x<-10', '-2xxxxxxxx'),
(-2, 'x>-10', 'xxxxxxxx-2'),
(-2, 'x=-10', '-xxxxxxxx2'),
(-2, 'x^-10', 'xxxx-2xxxx'),
(-2, 'x^-9', 'xxx-2xxxx'),
(-2, 'x<+10', '-2xxxxxxxx'),
(-2, 'x>+10', 'xxxxxxxx-2'),
(-2, 'x=+10', '-xxxxxxxx2'),
(-2, 'x^+10', 'xxxx-2xxxx'),
(-2, 'x^+9', 'xxx-2xxxx'),
(-2, 'x< 10', '-2xxxxxxxx'),
(-2, 'x> 10', 'xxxxxxxx-2'),
(-2, 'x= 10', '-xxxxxxxx2'),
(-2, 'x^ 10', 'xxxx-2xxxx'),
(-2, 'x^ 9', 'xxx-2xxxx'),
(2, '\0^ 9', '\0\0\0 2\0\0\0\0'),
(2, 'c', '\x02'),
(2, '<5c', '\x02 '),
(2, '>5c', ' \x02'),
(2, '=5c', ' \x02'),
(2, '^6c', ' \x02 '),
(2, '6c', ' \x02'),
(3, 'b', '11'),
(3, '+b', '+11'),
(3, '-b', '11'),
(3, ' b', ' 11'),
(3, '<5b', '11 '),
(3, '>5b', ' 11'),
(3, '=5b', ' 11'),
(3, '^6b', ' 11 '),
(3, '6b', ' 11'),
(3, 'x< 010b', ' 11xxxxxxx'),
(3, '< 010b', ' 110000000'),
(3, 'x< 010b', ' 11xxxxxxx'),
(3, 'x< 10b', ' 11xxxxxxx'),
(3, 'x< 10b', ' 11xxxxxxx'),
(3, 'x> 10b', 'xxxxxxx 11'),
(3, 'x= 10b', ' xxxxxxx11'),
(3, 'x^ 10b', 'xxx 11xxxx'),
(3, 'x^ 9b', 'xxx 11xxx'),
(3, 'x<+10b', '+11xxxxxxx'),
(3, 'x>+10b', 'xxxxxxx+11'),
(3, 'x=+10b', '+xxxxxxx11'),
(3, 'x^+10b', 'xxx+11xxxx'),
(3, 'x^+9b', 'xxx+11xxx'),
(3, 'x<-10b', '11xxxxxxxx'),
(3, 'x>-10b', 'xxxxxxxx11'),
(3, 'x=-10b', 'xxxxxxxx11'),
(3, 'x^-10b', 'xxxx11xxxx'),
(3, 'x^-9b', 'xxx11xxxx'),
(-3, 'x<-10b', '-11xxxxxxx'),
(-3, 'x>-10b', 'xxxxxxx-11'),
(-3, 'x=-10b', '-xxxxxxx11'),
(-3, 'x^-10b', 'xxx-11xxxx'),
(-3, 'x^-9b', 'xxx-11xxx'),
(-3, 'x<+10b', '-11xxxxxxx'),
(-3, 'x>+10b', 'xxxxxxx-11'),
(-3, 'x=+10b', '-xxxxxxx11'),
(-3, 'x^+10b', 'xxx-11xxxx'),
(-3, 'x^+9b', 'xxx-11xxx'),
(-3, 'x< 10b', '-11xxxxxxx'),
(-3, 'x> 10b', 'xxxxxxx-11'),
(-3, 'x= 10b', '-xxxxxxx11'),
(-3, 'x^ 10b', 'xxx-11xxxx'),
(-3, 'x^ #10b', 'xx-0b11xxx'),
(-3, 'x^ 9b', 'xxx-11xxx'),
(3, '\0^ 9b', '\0\0\0 11\0\0\0'),
(-2147483648, 'b', '-10000000000000000000000000000000'),
(0, 'b', '0'),
(9, 'o', '11'),
(9, '+o', '+11'),
(9, '-o', '11'),
(9, ' o', ' 11'),
(9, '<5o', '11 '),
(9, '>5o', ' 11'),
(9, '=5o', ' 11'),
(9, '^6o', ' 11 '),
(9, '6o', ' 11'),
(9, 'x< 10o', ' 11xxxxxxx'),
(9, 'x> 10o', 'xxxxxxx 11'),
(9, 'x= 10o', ' xxxxxxx11'),
(9, 'x^ 10o', 'xxx 11xxxx'),
(9, 'x^ 9o', 'xxx 11xxx'),
(9, 'x<+10o', '+11xxxxxxx'),
(9, 'x>+10o', 'xxxxxxx+11'),
(9, 'x=+10o', '+xxxxxxx11'),
(9, 'x^+10o', 'xxx+11xxxx'),
(9, 'x^+9o', 'xxx+11xxx'),
(9, 'x<-10o', '11xxxxxxxx'),
(9, 'x>-10o', 'xxxxxxxx11'),
(9, 'x=-10o', 'xxxxxxxx11'),
(9, 'x^-10o', 'xxxx11xxxx'),
(9, 'x^-9o', 'xxx11xxxx'),
(-9, 'x<-10o', '-11xxxxxxx'),
(-9, 'x>-10o', 'xxxxxxx-11'),
(-9, 'x=-10o', '-xxxxxxx11'),
(-9, 'x^-10o', 'xxx-11xxxx'),
(-9, 'x^-9o', 'xxx-11xxx'),
(-9, 'x<+10o', '-11xxxxxxx'),
(-9, 'x>+10o', 'xxxxxxx-11'),
(-9, 'x=+10o', '-xxxxxxx11'),
(-9, 'x^+10o', 'xxx-11xxxx'),
(-9, 'x^+9o', 'xxx-11xxx'),
(-9, 'x< 10o', '-11xxxxxxx'),
(-9, 'x< #10o', '-0o11xxxxx'),
(-9, 'x> 10o', 'xxxxxxx-11'),
(-9, 'x= 10o', '-xxxxxxx11'),
(-9, 'x^ 10o', 'xxx-11xxxx'),
(-9, 'x^ 9o', 'xxx-11xxx'),
(9, '\0^ 9o', '\0\0\0 11\0\0\0'),
(-9, 'x^ 9o', 'xxx-11xxx'),
(-2147483648, 'o', '-20000000000'),
(-42, 'o', '-52'),
(42, 'o', '52'),
(0, 'o', '0'),
(-2147483648, 'X', '-80000000'),
(-2147483648, 'x', '-80000000'),
(-42, 'X', '-2A'),
(-42, 'x', '-2a'),
(42, 'X', '2A'),
(42, 'x', '2a'),
(2147483647, 'X', '7FFFFFFF'),
(0, 'x', '0'),
(2147483647, 'x', '7fffffff'),
(2147483647, '#x', '0x7fffffff'),
(2147483647, '#X', '0X7FFFFFFF'),
(2147483647, 'f', '2147483647.000000'),
(2147483647, '%', '214748364700.000000%'),
(999999, '-g', '999999'),
(999999, '+g', '+999999'),
(999999, ' g', ' 999999'),
(999999, 'g', '999999'),
(999999 , 'G', '999999'),
(-999999, 'g', '-999999'),
(-999999 , 'G', '-999999'),
(100000, 'g', '100000'),
(100000, 'G', '100000'),
(-1000000, 'g', '-1e+06'),
(-1000000, 'G', '-1E+06'),
(1000000, 'g', '1e+06'),
(1000000, 'G', '1E+06'),
(10000000, 'g', '1e+07'),
(10000000, 'G', '1E+07'),
(100000000, 'g', '1e+08'),
(100000000, 'G', '1E+08'),
(1000000, '10g', ' 1e+06'),
(1000000, '10G', ' 1E+06'),
(10000000, '10g', ' 1e+07'),
(10000000, '10G', ' 1E+07'),
(100000000, '10g', ' 1e+08'),
(100000000, '10G', ' 1E+08'),
(110000000, 'g', '1.1e+08'),
(110000000, 'G', '1.1E+08'),
(112000000, 'g', '1.12e+08'),
(112000000, 'G', '1.12E+08'),
(112300000, 'g', '1.123e+08'),
(112300000, 'G', '1.123E+08'),
(112340000, 'g', '1.1234e+08'),
(112340000, 'G', '1.1234E+08'),
(112345000, 'g', '1.12345e+08'),
(112345000, 'G', '1.12345E+08'),
(112345600, 'g', '1.12346e+08'),
(112345600, 'G', '1.12346E+08'),
(112345500, 'g', '1.12346e+08'),
(112345500, 'G', '1.12346E+08'),
(112345510, 'g', '1.12346e+08'),
(112345510, 'G', '1.12346E+08'),
(112345400, 'g', '1.12345e+08'),
(112345400, 'G', '1.12345E+08'),
(112345401, 'g', '1.12345e+08'),
(112345401, 'G', '1.12345E+08'),
(-112345000, 'g', '-1.12345e+08'),
(-112345000, 'G', '-1.12345E+08'),
(-112345600, 'g', '-1.12346e+08'),
(-112345600, 'G', '-1.12346E+08'),
(-112345500, 'g', '-1.12346e+08'),
(-112345500, 'G', '-1.12346E+08'),
(-112345510, 'g', '-1.12346e+08'),
(-112345510, 'G', '-1.12346E+08'),
(-112345400, 'g', '-1.12345e+08'),
(-112345400, 'G', '-1.12345E+08'),
(-112345401, 'g', '-1.12345e+08'),
(-112345401, 'G', '-1.12345E+08'),
(2147483647, 'g', '2.14748e+09'),
(2147483647, 'G', '2.14748E+09'),
(-2147483647, 'g', '-2.14748e+09'),
(-2147483647, 'G', '-2.14748E+09'),
(2147483647, 'e', '2.147484e+09'),
(100000, 'e', '1.000000e+05'),
(100000, 'E', '1.000000E+05'),
(10000000, 'E', '1.000000E+07'),
(100000000, 'e', '1.000000e+08'),
(100000000, 'E', '1.000000E+08'),
(110000000, 'e', '1.100000e+08'),
(110000000, 'E', '1.100000E+08'),
(112000000, 'e', '1.120000e+08'),
(112000000, 'E', '1.120000E+08'),
(112300000, 'e', '1.123000e+08'),
(112300000, 'E', '1.123000E+08'),
(112340000, 'e', '1.123400e+08'),
(112340000, 'E', '1.123400E+08'),
(112345000, 'e', '1.123450e+08'),
(112345000, 'E', '1.123450E+08'),
(1112345600, 'e', '1.112346e+09'),
(1112345600, 'E', '1.112346E+09'),
(1112345500, 'e', '1.112346e+09'),
(1112345500, 'E', '1.112346E+09'),
(1112345510, 'e', '1.112346e+09'),
(1112345510, 'E', '1.112346E+09'),
(1112345400, 'e', '1.112345e+09'),
(1112345400, 'E', '1.112345E+09'),
(1112345401, 'e', '1.112345e+09'),
(1112345401, 'E', '1.112345E+09'),
(100000, 'n', '100000'),
]
for value, spec, result in tests:
self.assertEqual(value.__format__(spec), result)
# check locale specific formatting
import _locale
try:
if is_cli:
_locale.setlocale(_locale.LC_ALL, 'en_US')
else:
_locale.setlocale(_locale.LC_ALL, 'English_United States.1252')
x = 100000
self.assertEqual(x.__format__('n'), '100,000')
finally:
# and restore it back...
_locale.setlocale(_locale.LC_ALL, 'C')
self.assertEqual(x.__format__('n'), '100000')
def test_int___format___errors(self):
errors = [
(ValueError, 2, '6.1', "Precision not allowed in integer format specifier"),
(ValueError, 2, '+c', "Sign not allowed with integer format specifier 'c'"),
(ValueError, 2, '-c', "Sign not allowed with integer format specifier 'c'"),
(ValueError, 2, ' c', "Sign not allowed with integer format specifier 'c'"),
(OverflowError, -2, 'c', "%c arg not in range(0x110000)"),
#(-2, 'c', ),
#(-2, '%', "Sign not allowed with integer format specifier 'c'"),
]
okChars = set(['%', 'E', 'F', 'G', 'X', 'x', 'b', 'c', 'd', 'o', 'e', 'f', 'g', 'n', ','] + [chr(x) for x in range(ord('0'), ord('9') + 1)])
# verify the okChars are actually ok
for char in okChars:
(2).__format__('10' + char)
for char in allChars:
if char not in okChars and (char < '0' or char > '9'):
x = ord(char)
if 0x20 < x < 0x80:
errors.append((ValueError, 2, '10' + char, "Unknown format code '%s' for object of type 'int'" % char))
else:
errors.append((ValueError, 2, '10' + char, "Unknown format code '\\x%x' for object of type 'int'" % x))
for error, value, errorFmt, errorMsg in errors:
self.assertRaisesMessage(error, errorMsg, value.__format__, errorFmt)
def test_long___format__(self):
tests = [
(long(0), '+', '+0'),
(long(0), ' ', ' 0'),
(long(0), '-', '0'),
(long(2), '', '2'),
(long(2), '+', '+2'),
(long(2), '-', '2'),
(long(2), ' ', ' 2'),
(long(2), '<5', '2 '),
(long(2), '>5', ' 2'),
(long(2), '=5', ' 2'),
(long(2), '^6', ' 2 '),
(long(2), '6', ' 2'),
(long(2), 'x< 10', ' 2xxxxxxxx'),
(long(2), 'x> 10', 'xxxxxxxx 2'),
(long(2), 'x= 10', ' xxxxxxxx2'),
(long(2), 'x^ 10', 'xxxx 2xxxx'),
(long(2), 'x^ 9', 'xxx 2xxxx'),
(long(2), 'x<+10', '+2xxxxxxxx'),
(long(2), 'x>+10', 'xxxxxxxx+2'),
(long(2), 'x=+10', '+xxxxxxxx2'),
(long(2), 'x^+10', 'xxxx+2xxxx'),
(long(2), 'x^+9', 'xxx+2xxxx'),
(long(2), 'x<-10', '2xxxxxxxxx'),
(long(2), 'x>-10', 'xxxxxxxxx2'),
(long(2), 'x=-10', 'xxxxxxxxx2'),
(long(2), 'x^-10', 'xxxx2xxxxx'),
(long(2), 'x^-9', 'xxxx2xxxx'),
(-long(2), 'x<-10', '-2xxxxxxxx'),
(-long(2), 'x>-10', 'xxxxxxxx-2'),
(-long(2), 'x=-10', '-xxxxxxxx2'),
(-long(2), 'x^-10', 'xxxx-2xxxx'),
(-long(2), 'x^-9', 'xxx-2xxxx'),
(-long(2), 'x<+10', '-2xxxxxxxx'),
(-long(2), 'x>+10', 'xxxxxxxx-2'),
(-long(2), 'x=+10', '-xxxxxxxx2'),
(-long(2), 'x^+10', 'xxxx-2xxxx'),
(-long(2), 'x^+9', 'xxx-2xxxx'),
(-long(2), 'x< 10', '-2xxxxxxxx'),
(-long(2), 'x> 10', 'xxxxxxxx-2'),
(-long(2), 'x= 10', '-xxxxxxxx2'),
(-long(2), 'x^ 10', 'xxxx-2xxxx'),
(-long(2), 'x^ 9', 'xxx-2xxxx'),
(long(2), '\0^ 9', '\0\0\0 2\0\0\0\0'),
(long(2), 'c', '\x02'),
(long(2), '<5c', '\x02 '),
(long(2), '>5c', ' \x02'),
(long(2), '=5c', ' \x02'),
(long(2), '^6c', ' \x02 '),
(long(2), '6c', ' \x02'),
(long(3), 'b', '11'),
(long(3), '+b', '+11'),
(long(3), '-b', '11'),
(long(3), ' b', ' 11'),
(long(3), '<5b', '11 '),
(long(3), '>5b', ' 11'),
(long(3), '=5b', ' 11'),
(long(3), '^6b', ' 11 '),
(long(3), '6b', ' 11'),
(long(3), 'x< 010b', ' 11xxxxxxx'),
(long(3), '< 010b', ' 110000000'),
(long(3), 'x< 010b', ' 11xxxxxxx'),
(long(3), 'x< 10b', ' 11xxxxxxx'),
(long(3), 'x< 10b', ' 11xxxxxxx'),
(long(3), 'x> 10b', 'xxxxxxx 11'),
(long(3), 'x= 10b', ' xxxxxxx11'),
(long(3), 'x^ 10b', 'xxx 11xxxx'),
(long(3), 'x^ 9b', 'xxx 11xxx'),
(long(3), 'x<+10b', '+11xxxxxxx'),
(long(3), 'x>+10b', 'xxxxxxx+11'),
(long(3), 'x=+10b', '+xxxxxxx11'),
(long(3), 'x^+10b', 'xxx+11xxxx'),
(long(3), 'x^+9b', 'xxx+11xxx'),
(long(3), 'x<-10b', '11xxxxxxxx'),
(long(3), 'x>-10b', 'xxxxxxxx11'),
(long(3), 'x=-10b', 'xxxxxxxx11'),
(long(3), 'x^-10b', 'xxxx11xxxx'),
(long(3), 'x^-9b', 'xxx11xxxx'),
(-long(3), 'x<-10b', '-11xxxxxxx'),
(-long(3), 'x>-10b', 'xxxxxxx-11'),
(-long(3), 'x=-10b', '-xxxxxxx11'),
(-long(3), 'x^-10b', 'xxx-11xxxx'),
(-long(3), 'x^-9b', 'xxx-11xxx'),
(-long(3), 'x<+10b', '-11xxxxxxx'),
(-long(3), 'x>+10b', 'xxxxxxx-11'),
(-long(3), 'x=+10b', '-xxxxxxx11'),
(-long(3), 'x^+10b', 'xxx-11xxxx'),
(-long(3), 'x^+9b', 'xxx-11xxx'),
(-long(3), 'x< 10b', '-11xxxxxxx'),
(-long(3), 'x> 10b', 'xxxxxxx-11'),
(-long(3), 'x= 10b', '-xxxxxxx11'),
(-long(3), 'x^ 10b', 'xxx-11xxxx'),
(-long(3), 'x^ #10b', 'xx-0b11xxx'),
(-long(3), 'x^ 9b', 'xxx-11xxx'),
(long(3), '\0^ 9b', '\0\0\0 11\0\0\0'),
(-long(2147483648), 'b', '-10000000000000000000000000000000'),
(long(0), 'b', '0'),
(long(9), 'o', '11'),
(long(9), '+o', '+11'),
(long(9), '-o', '11'),
(long(9), ' o', ' 11'),
(long(9), '<5o', '11 '),
(long(9), '>5o', ' 11'),
(long(9), '=5o', ' 11'),
(long(9), '^6o', ' 11 '),
(long(9), '6o', ' 11'),
(long(9), 'x< 10o', ' 11xxxxxxx'),
(long(9), 'x> 10o', 'xxxxxxx 11'),
(long(9), 'x= 10o', ' xxxxxxx11'),
(long(9), 'x^ 10o', 'xxx 11xxxx'),
(long(9), 'x^ 9o', 'xxx 11xxx'),
(long(9), 'x<+10o', '+11xxxxxxx'),
(long(9), 'x>+10o', 'xxxxxxx+11'),
(long(9), 'x=+10o', '+xxxxxxx11'),
(long(9), 'x^+10o', 'xxx+11xxxx'),
(long(9), 'x^+9o', 'xxx+11xxx'),
(long(9), 'x<-10o', '11xxxxxxxx'),
(long(9), 'x>-10o', 'xxxxxxxx11'),
(long(9), 'x=-10o', 'xxxxxxxx11'),
(long(9), 'x^-10o', 'xxxx11xxxx'),
(long(9), 'x^-9o', 'xxx11xxxx'),
(-long(9), 'x<-10o', '-11xxxxxxx'),
(-long(9), 'x>-10o', 'xxxxxxx-11'),
(-long(9), 'x=-10o', '-xxxxxxx11'),
(-long(9), 'x^-10o', 'xxx-11xxxx'),
(-long(9), 'x^-9o', 'xxx-11xxx'),
(-long(9), 'x<+10o', '-11xxxxxxx'),
(-long(9), 'x>+10o', 'xxxxxxx-11'),
(-long(9), 'x=+10o', '-xxxxxxx11'),
(-long(9), 'x^+10o', 'xxx-11xxxx'),
(-long(9), 'x^+9o', 'xxx-11xxx'),
(-long(9), 'x< 10o', '-11xxxxxxx'),
(-long(9), 'x< #10o', '-0o11xxxxx'),
(-long(9), 'x> 10o', 'xxxxxxx-11'),
(-long(9), 'x= 10o', '-xxxxxxx11'),
(-long(9), 'x^ 10o', 'xxx-11xxxx'),
(-long(9), 'x^ 9o', 'xxx-11xxx'),
(long(9), '\0^ 9o', '\0\0\0 11\0\0\0'),
(-long(9), 'x^ 9o', 'xxx-11xxx'),
(-long(2147483648), 'o', '-20000000000'),
(-long(42), 'o', '-52'),
(0, 'o', '0'),
(long(42), 'o', '52'),
(0, 'x', '0'),
(-long(2147483648), 'X', '-80000000'),
(-long(2147483648), 'x', '-80000000'),
(-long(42), 'X', '-2A'),
(-long(42), 'x', '-2a'),
(long(42), 'X', '2A'),
(long(42), 'x', '2a'),
(long(2147483647), 'X', '7FFFFFFF'),
(long(2147483647), 'x', '7fffffff'),
(long(2147483647), '#x', '0x7fffffff'),
(long(2147483647), '#X', '0X7FFFFFFF'),
(long(2147483647), 'f', '2147483647.000000'),
(long(2147483647), '%', '214748364700.000000%'),
(long(999999), '-g', '999999'),
(long(999999), '+g', '+999999'),
(long(999999), ' g', ' 999999'),
(long(999999), 'g', '999999'),
(long(999999), 'G', '999999'),
(-long(999999), 'g', '-999999'),
(-long(999999), 'G', '-999999'),
(long(100000), 'g', '100000'),
(long(100000), 'G', '100000'),
(-long(1000000), 'g', '-1e+06'),
(-long(1000000), 'G', '-1E+06'),
(long(1000000), 'g', '1e+06'),
(long(1000000), 'G', '1E+06'),
(long(10000000), 'g', '1e+07'),
(long(10000000), 'G', '1E+07'),
(long(100000000), 'g', '1e+08'),
(long(100000000), 'G', '1E+08'),
(long(1000000), '10g', ' 1e+06'),
(long(1000000), '10G', ' 1E+06'),
(long(10000000), '10g', ' 1e+07'),
(long(10000000), '10G', ' 1E+07'),
(long(10200000), '10G', ' 1.02E+07'),
(long(100000000), '10g', ' 1e+08'),
(long(100000000), '10G', ' 1E+08'),
(long(110000000), 'g', '1.1e+08'),
(long(110000000), 'G', '1.1E+08'),
(long(112000000), 'g', '1.12e+08'),
(long(112000000), 'G', '1.12E+08'),
(long(112300000), 'g', '1.123e+08'),
(long(112300000), 'G', '1.123E+08'),
(long(112340000), 'g', '1.1234e+08'),
(long(112340000), 'G', '1.1234E+08'),
(long(112345000), 'g', '1.12345e+08'),
(long(112345000), 'G', '1.12345E+08'),
(long(112345600), 'g', '1.12346e+08'),
(long(112345600), 'G', '1.12346E+08'),
(long(112345500), 'g', '1.12346e+08'),
(long(112345500), 'G', '1.12346E+08'),
(long(112345510), 'g', '1.12346e+08'),
(long(112345510), 'G', '1.12346E+08'),
(long(112345400), 'g', '1.12345e+08'),
(long(112345400), 'G', '1.12345E+08'),
(long(112345401), 'g', '1.12345e+08'),
(long(112345401), 'G', '1.12345E+08'),
(-long(112345000), 'g', '-1.12345e+08'),
(-long(112345000), 'G', '-1.12345E+08'),
(-long(112345600), 'g', '-1.12346e+08'),
(-long(112345600), 'G', '-1.12346E+08'),
(-long(112345500), 'g', '-1.12346e+08'),
(-long(112345500), 'G', '-1.12346E+08'),
(-long(112345510), 'g', '-1.12346e+08'),
(-long(112345510), 'G', '-1.12346E+08'),
(-long(112345400), 'g', '-1.12345e+08'),
(-long(112345400), 'G', '-1.12345E+08'),
(-long(112345401), 'g', '-1.12345e+08'),
(-long(112345401), 'G', '-1.12345E+08'),
(long(2147483647), 'g', '2.14748e+09'),
(long(2147483647), 'G', '2.14748E+09'),
(-long(2147483647), 'g', '-2.14748e+09'),
(-long(2147483647), 'G', '-2.14748E+09'),
(long(2147483647), 'e', '2.147484e+09'),
(long(100000), 'e', '1.000000e+05'),
(long(100000), 'E', '1.000000E+05'),
(long(10000000), 'E', '1.000000E+07'),
(long(100000000), 'e', '1.000000e+08'),
(long(100000000), 'E', '1.000000E+08'),
(long(110000000), 'e', '1.100000e+08'),
(long(110000000), 'E', '1.100000E+08'),
(long(112000000), 'e', '1.120000e+08'),
(long(112000000), 'E', '1.120000E+08'),
(long(112300000), 'e', '1.123000e+08'),
(long(112300000), 'E', '1.123000E+08'),
(long(112340000), 'e', '1.123400e+08'),
(long(112340000), 'E', '1.123400E+08'),
(long(112345000), 'e', '1.123450e+08'),
(long(112345000), 'E', '1.123450E+08'),
(long(1112345600), 'e', '1.112346e+09'),
(long(1112345600), 'E', '1.112346E+09'),
(long(1112345500), 'e', '1.112346e+09'),
(long(1112345500), 'E', '1.112346E+09'),
(long(1112345510), 'e', '1.112346e+09'),
(long(1112345510), 'E', '1.112346E+09'),
(long(1112345400), 'e', '1.112345e+09'),
(long(1112345400), 'E', '1.112345E+09'),
(long(1112345401), 'e', '1.112345e+09'),
(long(1112345401), 'E', '1.112345E+09'),
(long(111234540100), 'E', '1.112345E+11'),
(long(100000), 'n', '100000'),
]
for value, spec, result in tests:
self.assertEqual(value.__format__(spec), result)
# check locale specific formatting
import _locale
try:
if is_cli:
_locale.setlocale(_locale.LC_ALL, 'en_US')
else:
_locale.setlocale(_locale.LC_ALL, 'English_United States.1252')
self.assertEqual(long(100000).__format__('n'), '100,000')
self.assertEqual(long(100000000).__format__('n'), '100,000,000')
finally:
# and restore it back...
_locale.setlocale(_locale.LC_ALL, 'C')
self.assertEqual(long(100000).__format__('n'), '100000')
self.assertEqual(long(100000000).__format__('n'), '100000000')
def test_long___format___errors(self):
errors = [
(ValueError, long(2), '6.1', "Precision not allowed in integer format specifier"),
(ValueError, long(2), '+c', "Sign not allowed with integer format specifier 'c'"),
(ValueError, long(2), '-c', "Sign not allowed with integer format specifier 'c'"),
(ValueError, long(2), ' c', "Sign not allowed with integer format specifier 'c'"),
(OverflowError, -long(2), 'c', "%c arg not in range(0x110000)"),
(OverflowError, long(0x110000), 'c', "%c arg not in range(0x110000)"),
]
if is_cli: #http://ironpython.codeplex.com/workitem/28373
errors.append((OverflowError, sys.maxsize + 1, 'c', "long int too large to convert to int"))
else:
errors.append((OverflowError, sys.maxsize + 1, 'c', "Python int too large to convert to C long"))
okChars = set(['%', 'E', 'F', 'G', 'X', 'x', 'b', 'c', 'd', 'o', 'e', 'f', 'g', 'n', ','] + [chr(x) for x in range(ord('0'), ord('9') + 1)])
# verify the okChars are actually ok
for char in okChars:
(long(2)).__format__('10' + char)
for char in allChars:
if char not in okChars and (char < '0' or char > '9'):
x = ord(char)
if 0x20 < x < 0x80:
errors.append((ValueError, long(2), '10' + char, "Unknown format code '%s' for object of type 'int'" % char))
else:
errors.append((ValueError, long(2), '10' + char, "Unknown format code '\\x%x' for object of type 'int'" % x))
for exceptionType, value, errorFmt, errorMsg in errors:
self.assertRaisesMessage(exceptionType, errorMsg, value.__format__, errorFmt)
def test_builtin_types_that_implement_format(self):
import builtins
types = [getattr(builtins, typeName) for typeName in dir(builtins) if type(getattr(builtins, typeName)) is type]
formatTypes = list(set([builtinType.__name__ for builtinType in types if '__format__' in builtinType.__dict__]))
formatTypes.sort()
if is_cli:
# why does bool have __format__ in ipy?
self.assertEqual(formatTypes, ['bool', 'complex', 'float', 'int', 'object', 'str'])
else:
self.assertEqual(formatTypes, ['complex', 'float', 'int', 'object', 'str'])
def test_computed_format(self):
self.assertEqual("|{0:10}|".format("a"), "|a |")
self.assertEqual("|{0:*^10}|".format("a"), "|****a*****|")
self.assertEqual("|{0:*^{1}}|".format("a", 10), "|****a*****|")
self.assertEqual("{0:*{2}10}".format("a", "*", "^", "10"), "****a*****")
self.assertEqual("{0:{1}^{3}}".format("a", "*", "^", "10"), "****a*****")
self.assertEqual("{0:{1}{2}{3}}".format("a", "*", "^", "10"), "****a*****")
self.assertEqual("{0:{1}*^{2}}".format("a", "", "10"), "****a*****")
def test_none_format(self):
self.assertEqual("{0} {1}".format(None, 10), "None 10")
self.assertEqual("{0}".format(None), 'None')
run_test(__name__)
| 51.357886
| 172
| 0.348829
|
93e2e57cc3a09ab6661595d81fc2aa32ef0e6a01
| 29,293
|
py
|
Python
|
util/design/secded_gen.py
|
asb/opentitan
|
af68ff5041b10c81e97adc075a4d042f8ac7ab20
|
[
"Apache-2.0"
] | 1,375
|
2019-11-05T15:11:00.000Z
|
2022-03-28T17:50:43.000Z
|
util/design/secded_gen.py
|
asb/opentitan
|
af68ff5041b10c81e97adc075a4d042f8ac7ab20
|
[
"Apache-2.0"
] | 7,045
|
2019-11-05T16:05:45.000Z
|
2022-03-31T23:08:08.000Z
|
util/design/secded_gen.py
|
asb/opentitan
|
af68ff5041b10c81e97adc075a4d042f8ac7ab20
|
[
"Apache-2.0"
] | 428
|
2019-11-05T15:00:20.000Z
|
2022-03-28T15:34:57.000Z
|
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""SECDED encoder/decoder generator
The current version doesn't optimize fan-in. It uses the Hsiao code (a modified
version of Hamming code + parity). Please refer to https://arxiv.org/pdf/0803.1217.pdf
For some further background and info on the differences between Hamming and
Hsiao SECDED codes, refer to https://ieeexplore.ieee.org/document/8110065
"""
import argparse
import itertools
import logging as log
import math
import random
import hjson
import subprocess
from typing import List, Tuple
from pathlib import Path
COPYRIGHT = """// Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
//
"""
C_SRC_TOP = """
#include "secded_enc.h"
#include <stdbool.h>
#include <stdint.h>
// Calculates even parity for a 64-bit word
static uint8_t calc_parity(uint64_t word, bool invert) {
bool parity = false;
while (word) {
if (word & 1) {
parity = !parity;
}
word >>= 1;
}
return parity ^ invert;
}
"""
C_H_TOP = """
#ifndef OPENTITAN_HW_IP_PRIM_DV_PRIM_SECDED_SECDED_ENC_H_
#define OPENTITAN_HW_IP_PRIM_DV_PRIM_SECDED_SECDED_ENC_H_
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
// Integrity encode functions for varying bit widths matching the functionality
// of the RTL modules of the same name. Each takes an array of bytes in
// little-endian order and returns the calculated integrity bits.
"""
C_H_FOOT = """
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
#endif // OPENTITAN_HW_IP_PRIM_DV_PRIM_SECDED_SECDED_ENC_H_
"""
CODE_OPTIONS = {'hsiao': '',
'inv_hsiao': '_inv',
'hamming': '_hamming',
'inv_hamming': '_inv_hamming'}
# secded configurations
SECDED_CFG_FILE = "util/design/data/secded_cfg.hjson"
PROJ_ROOT = Path(__file__).parent.parent.parent
SECDED_CFG_PATH = Path(PROJ_ROOT) / SECDED_CFG_FILE
# The seed we use to initialise the PRNG when running the randomised algorithm
# to choose constants for Hsiao codes.
_RND_SEED = 123
def min_paritysize(k):
# SECDED --> Hamming distance 'd': 4
# 2^(m-1) should cover (m+k)
for m in range(2, 10):
if 2**m >= (k + m + 1):
return m + 1
return -1
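# Worked example (illustrative only): for k = 32 data bits the loop stops at
# m = 6, since 2**6 = 64 >= 32 + 6 + 1 = 39, and min_paritysize(32) returns
# m + 1 = 7 parity bits, i.e. the familiar (39, 32) SECDED configuration.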
def ideal_fanin(k, m):
"""Compute Ideal Max Fanin of any bit in the ecc codes."""
fanin = 0
needed = k
for select in range(3, m + 1, 2):
combinations = list(itertools.combinations(range(m), select))
if len(combinations) <= needed:
fanin += int(math.ceil(float(len(combinations) * select) / m))
needed -= len(combinations)
else:
fanin += int(math.ceil(float(needed * select) / m))
needed = 0
if not needed:
break
return fanin
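# Worked example (illustrative only): ideal_fanin(32, 7) first considers
# weight-3 parity columns; C(7, 3) = 35 combinations is more than the 32 data
# bits needed, so the 32 * 3 = 96 connections spread over 7 parity bits give
# an ideal maximum fanin of ceil(96 / 7) = 14.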
def calc_fanin(width, codes):
"""Sum the ones in a column"""
fanins = [0] * width
log.info("Calc Code: {}".format(codes))
for i in codes:
for e in i:
fanins[e] += 1
return fanins
def calc_bitmasks(k, m, codes, dec):
# Transform fanin indices into bitmask.
fanin_masks = [0] * m
for i, c in enumerate(codes):
for j in c:
fanin_masks[j] += 1 << i
# For decode ops, include ECC bit position.
if dec:
for j in range(m):
fanin_masks[j] += 1 << (k + j)
return fanin_masks
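# Worked example (illustrative only): with k = 3, m = 3 and
# codes = [(0, 1), (0, 2), (1, 2)], data bit i contributes 1 << i to every
# parity bit listed in codes[i], so the encode masks are [0b011, 0b101, 0b110].
# With dec=True each parity bit j also folds in its own position via
# 1 << (k + j), giving [0b001011, 0b010101, 0b100110].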
def print_secded_enum_and_util_fns(cfgs):
enum_vals = [" SecdedNone"]
parity_width_vals = []
data_width_vals = []
for cfg in cfgs:
k = cfg['k']
m = cfg['m']
n = k + m
suffix = CODE_OPTIONS[cfg['code_type']]
suffix = suffix.split('_')
suffix = [x.capitalize() for x in suffix]
formatted_suffix = ''.join(suffix)
enum_name = " Secded%s_%s_%s" % (formatted_suffix, n, k)
enum_vals.append(enum_name)
parity_width = " %s: return %s;" % (enum_name, m)
parity_width_vals.append(parity_width)
data_width = " %s: return %s;" % (enum_name, k)
data_width_vals.append(data_width)
enum_str = ",\n".join(enum_vals)
parity_width_fn_str = "\n".join(parity_width_vals)
data_width_fn_str = "\n".join(data_width_vals)
enum_str = '''
typedef enum int {{
{}
}} prim_secded_e;
function automatic int get_ecc_data_width(prim_secded_e ecc_type);
case (ecc_type)
{}
// Return a non-zero width to avoid VCS compile issues
default: return 32;
endcase
endfunction
function automatic int get_ecc_parity_width(prim_secded_e ecc_type);
case (ecc_type)
{}
default: return 0;
endcase
endfunction
'''.format(enum_str, data_width_fn_str, parity_width_fn_str)
return enum_str
def print_pkg_allzero(n, k, m, codes, suffix, codetype):
suffix = suffix.split('_')
suffix = [x.capitalize() for x in suffix]
suffix = ''.join(suffix)
invecc = 0
invcode = 0
if codetype in ["inv_hsiao", "inv_hamming"]:
for x in range(m):
invecc += (x % 2) << x
invcode = invecc << k
zerostr = f'''
parameter logic [{m-1}:0] Secded{suffix}{n}{k}ZeroEcc = {m}'h{invecc:0X};
parameter logic [{n-1}:0] Secded{suffix}{n}{k}ZeroWord = {n}'h{invcode:0X};
'''
return zerostr
def print_pkg_types(n, k, m, codes, suffix, codetype):
typename = "secded%s_%d_%d_t" % (suffix, n, k)
typestr = '''
typedef struct packed {{
logic [{}:0] data;
logic [{}:0] syndrome;
logic [1:0] err;
}} {};
'''.format((k - 1), (m - 1), typename)
return typestr
def print_fn(n, k, m, codes, suffix, codetype, inv=False):
enc_out = print_enc(n, k, m, codes, codetype)
dec_out = print_dec(n, k, m, codes, codetype, "function")
typename = "secded%s_%d_%d_t" % (suffix, n, k)
module_name = "prim_secded%s_%d_%d" % (suffix, n, k)
outstr = '''
function automatic logic [{}:0]
{}_enc (logic [{}:0] data_i);
logic [{}:0] data_o;
{} return data_o;
endfunction
function automatic {}
{}_dec (logic [{}:0] data_i);
logic [{}:0] data_o;
logic [{}:0] syndrome_o;
logic [1:0] err_o;
{} dec;
{}
dec.data = data_o;
dec.syndrome = syndrome_o;
dec.err = err_o;
return dec;
endfunction
'''.format((n - 1), module_name, (k - 1), (n - 1), enc_out,
typename, module_name, (n - 1), (k - 1), (m - 1), typename, dec_out)
return outstr
def print_enc(n, k, m, codes, codetype):
invert = 1 if codetype in ["inv_hsiao", "inv_hamming"] else 0
outstr = " data_o = {}'(data_i);\n".format(n)
format_str = " data_o[{}] = 1'b{} ^ ^(data_o & " + str(n) + "'h{:0" +\
str((n + 3) // 4) + "X});\n"
    # Print parity computation. If inverted encoding is turned on, we only
# invert every odd bit so that both all-one and all-zero encodings are not
# possible. This works for most encodings generated if the fanin is
# balanced (such as inverted Hsiao codes). However, since there is no
# guarantee, an FPV assertion is added to prove that all-zero and all-one
# encodings do not exist if an inverted code is used.
for j, mask in enumerate(calc_bitmasks(k, m, codes, False)):
outstr += format_str.format(j + k, invert & (j % 2), mask)
return outstr
def calc_syndrome(code):
log.info("in syndrome {}".format(code))
return sum(map((lambda x: 2**x), code))
def print_dec(n, k, m, codes, codetype, print_type="logic"):
outstr = ""
outstr += " // Syndrome calculation\n"
hexfmt = str(n) + "'h{:0" + str((n + 3) // 4) + "X}"
format_str = " syndrome_o[{}] = ^("
# Add ECC bit inversion if needed (see print_enc function).
if codetype in ["inv_hsiao", "inv_hamming"]:
invval = 0
for x in range(m):
invval += (x % 2) << x
format_str += "(data_i ^ " + hexfmt.format(invval << k) + ")"
else:
format_str += "data_i"
format_str += " & " + hexfmt + ");\n"
# Print syndrome computation
for j, mask in enumerate(calc_bitmasks(k, m, codes, True)):
outstr += format_str.format(j, mask)
outstr += "\n"
outstr += " // Corrected output calculation\n"
for i in range(k):
outstr += " data_o[%d] = (syndrome_o == %d'h%x) ^ data_i[%d];\n" % (
i, m, calc_syndrome(codes[i]), i)
outstr += "\n"
outstr += " // err_o calc. bit0: single error, bit1: double error\n"
# The Hsiao and Hamming syndromes are interpreted slightly differently.
if codetype in ["hamming", "inv_hamming"]:
outstr += " err_o[0] = syndrome_o[%d];\n" % (m - 1)
outstr += " err_o[1] = |syndrome_o[%d:0] & ~syndrome_o[%d];\n" % (
m - 2, m - 1)
else:
outstr += " err_o[0] = ^syndrome_o;\n"
outstr += " err_o[1] = ~err_o[0] & (|syndrome_o);\n"
return outstr
# return whether an integer is a power of 2
def is_pow2(n):
return (n & (n - 1) == 0) and n != 0
def is_odd(n):
return (n % 2) > 0
def verify(cfgs):
error = 0
for cfg in cfgs['cfgs']:
if (cfg['k'] <= 1 or cfg['k'] > 120):
error += 1
log.error("Current tool doesn't support the value k (%d)", cfg['k'])
if (cfg['m'] <= 1 or cfg['m'] > 20):
error += 1
log.error("Current tool doesn't support the value m (%d)", cfg['m'])
# Calculate 'm' (parity size)
min_m = min_paritysize(cfg['k'])
if (cfg['m'] < min_m):
error += 1
log.error("given \'m\' argument is smaller than minimum requirement " +
"using calculated minimum (%d)", min_m)
# Error check code selection
if (cfg['code_type'] not in CODE_OPTIONS):
error += 1
log.error("Invalid code {} selected, use one of {}".format(
cfg['code_type'], CODE_OPTIONS))
return error
def _ecc_pick_code(codetype: str, k: int) -> Tuple[int, List[int], int]:
# first check to see if bit width is supported among configuration
config = hjson.load(SECDED_CFG_PATH.open())
codes = None
bitmasks = None
m = None
for cfg in config['cfgs']:
if cfg['k'] == k and cfg['code_type'] == codetype:
m = cfg['m']
codes = gen_code(codetype, k, m)
bitmasks = calc_bitmasks(k, m, codes, False)
invert = 1 if codetype in ['inv_hsiao', 'inv_hamming'] else 0
return (m, bitmasks, invert)
# error if k not supported
raise Exception(f'ECC for length {k} of type {codetype} unsupported')
def _ecc_encode(k: int,
m: int, bitmasks: List[int], invert: int,
dataword: int) -> int:
assert 0 <= dataword < (1 << k)
# represent supplied dataword as a binary string
word_bin = format(dataword, '0' + str(k) + 'b')
codeword = word_bin
    # Note: the loop index shadows the parameter `k`; inside the loop it is the
    # parity-bit index (it drives the odd-bit inversion below).
    for k, mask in enumerate(bitmasks):
bit = 0
log.debug(f'codeword: {codeword}')
log.debug(f'mask: {hex(mask)}')
mask = (format(mask, '0' + str(k + m) + 'b'))
# reverse codeword for index selection
# This is because the LSB is the farthest entry in the string
codeword_rev = codeword[::-1]
for idx, f in enumerate(mask[::-1]):
if int(f):
bit ^= int(codeword_rev[idx])
# Add ECC bit inversion if needed (see print_enc function).
bit ^= (invert & k % 2)
codeword = str(bit) + codeword
return codeword
def ecc_encode(codetype: str, k: int, dataword: int) -> Tuple[int, int]:
log.info(f"Encoding ECC for {hex(dataword)}")
m, bitmasks, invert = _ecc_pick_code(codetype, k)
codeword = _ecc_encode(k, m, bitmasks, invert, dataword)
# Debug printouts
log.debug(f'original hex: {hex(dataword)}')
log.debug(f'codeword hex: {hex(int(codeword,2))}')
return int(codeword, 2), m
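# Usage sketch (assuming the 39/32 entry exists in secded_cfg.hjson):
#   codeword, m = ecc_encode('inv_hsiao', 32, 0xdeadbeef)
# would return the 39-bit codeword as an int together with m = 7 parity bits.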
def ecc_encode_some(codetype: str,
k: int,
datawords: int) -> Tuple[List[int], int]:
m, bitmasks, invert = _ecc_pick_code(codetype, k)
codewords = [int(_ecc_encode(k, m, bitmasks, invert, w), 2)
for w in datawords]
return codewords, m
def gen_code(codetype, k, m):
# The hsiao_code generator uses (pseudo)random values to pick good ECC
# constants. Rather than exposing the seed, we pick a fixed one here to
# ensure everything stays stable in future.
old_rnd_state = random.getstate()
random.seed(_RND_SEED)
try:
return globals()["_{}_code".format(codetype)](k, m)
finally:
random.setstate(old_rnd_state)
def generate(cfgs, args):
pkg_out_str = ""
pkg_type_str = ""
c_src_filename = args.c_outdir + "/" + "secded_enc.c"
c_h_filename = args.c_outdir + "/" + "secded_enc.h"
with open(c_src_filename, "w") as f:
f.write(COPYRIGHT)
f.write("// SECDED encode code generated by\n")
f.write(f"// util/design/secded_gen.py from {SECDED_CFG_FILE}\n\n")
f.write(C_SRC_TOP)
with open(c_h_filename, "w") as f:
f.write(COPYRIGHT)
f.write("// SECDED encode code generated by\n")
f.write(f"// util/design/secded_gen.py from {SECDED_CFG_FILE}\n")
f.write(C_H_TOP)
for cfg in cfgs['cfgs']:
log.debug("Working on {}".format(cfg))
k = cfg['k']
m = cfg['m']
n = k + m
codetype = cfg['code_type']
suffix = CODE_OPTIONS[codetype]
codes = gen_code(codetype, k, m)
# write out rtl files
write_enc_dec_files(n, k, m, codes, suffix, args.outdir, codetype)
# write out C files, only hsiao codes are supported
if codetype in ["hsiao", "inv_hsiao"]:
write_c_files(n, k, m, codes, suffix, c_src_filename, c_h_filename,
codetype)
# write out all-zero word values for all codes
pkg_type_str += print_pkg_allzero(n, k, m, codes, suffix, codetype)
# write out package typedefs
pkg_type_str += print_pkg_types(n, k, m, codes, suffix, codetype)
# print out functions
pkg_out_str += print_fn(n, k, m, codes, suffix, codetype)
if not args.no_fpv:
write_fpv_files(n, k, m, codes, suffix, args.fpv_outdir, codetype)
with open(c_h_filename, "a") as f:
f.write(C_H_FOOT)
format_c_files(c_src_filename, c_h_filename)
# create enum of various ECC types - useful for DV purposes in mem_bkdr_if
enum_str = print_secded_enum_and_util_fns(cfgs['cfgs'])
# write out package file
full_pkg_str = enum_str + pkg_type_str + pkg_out_str
write_pkg_file(args.outdir, full_pkg_str)
def _inv_hsiao_code(k, m):
return _hsiao_code(k, m)
# k = data bits
# m = parity bits
# generate hsiao code
def _hsiao_code(k, m):
# using itertools combinations, generate odd number of 1 in a row
    required_row = k  # k rows are needed; decreases each time a row is acquired
fanin_ideal = ideal_fanin(k, m)
log.info("Ideal Fan-In value: %d" % fanin_ideal)
# Each entry represents a row in below parity matrix
# Entry is tuple and the value inside is the position of ones
# e.g. (0,1,2) in m:=7
# row -> [1 1 1 0 0 0 0]
codes = []
# Find code matrix =======================================================
# This is main part to find the parity matrix.
# For example, find SECDED for 4bit message is to find 4x4 matrix as below
# | 1 0 0 0 x x x x |
# | 0 1 0 0 x x x x |
# | 0 0 1 0 x x x x |
# | 0 0 0 1 x x x x |
# Then message _k_ X matrix_code ==> original message with parity
#
    # Make each row have an even number of 1s including the I matrix.
    # This helps to calculate the syndrome at the decoding stage.
    # To reduce the max fan-in, start with the smallest number, 3.
    # The number means the number of ones in a row.
    # A small number of ones means a smaller fan-in overall.
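    # Illustrative instance: for k = 4, m = 4 the four weight-3 rows
    # (0,1,2), (0,1,3), (0,2,3), (1,2,3) already provide the required k rows,
    # and each parity column ends up with a fan-in of 3, matching ideal_fanin(4, 4).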
for step in range(3, m + 1, 2):
# starting from 3 as I matrix represents data
# Increased by 2 as number of 1 should be even in a row (odd excluding I)
# get the list of combinations [0, .., m-1] with `step`
# e.g. step := 3 ==> [(0,1,2), (0,1,3), ... ]
candidate = list(itertools.combinations(range(m), step))
if len(candidate) <= required_row:
            # we need more rounds; use all of them
codes.extend(candidate)
required_row -= len(candidate)
else:
# Find optimized fan-in ==========================================
# Calculate each row fan-in with current
fanins = calc_fanin(m, codes)
while required_row != 0:
# Let's shuffle
                # Shuffling randomizes the sequence --> it reduces the
                # fan-in since the codes are taken randomly at the end of the round
# TODO: There should be a clever way to find the subset without
# random retrying.
# Suggested this algorithm
# https://en.wikipedia.org/wiki/Assignment_problem
random.shuffle(candidate)
# Take a subset
subset = candidate[0:required_row]
subset_fanins = calc_fanin(m, subset)
# Check if it exceeds Ideal Fan-In
ideal = True
for i in range(m):
if fanins[i] + subset_fanins[i] > fanin_ideal:
# Exceeded. Retry
ideal = False
break
if ideal:
required_row = 0
# Append to the code matrix
codes.extend(subset)
if required_row == 0:
# Found everything!
break
log.info("Hsiao codes {}".format(codes))
return codes
def _inv_hamming_code(k, m):
return _hamming_code(k, m)
# n = total bits
# k = data bits
# m = parity bits
# generate hamming code
def _hamming_code(k, m):
n = k + m
# construct a list of code tuples.
# Tuple corresponds to each bit position and shows which parity bit it participates in
# Only the data bits are shown, the parity bits are not.
codes = []
for pos in range(1, n + 1):
# this is a valid parity bit position or the final parity bit
if (is_pow2(pos) or pos == n):
continue
else:
code = ()
for p in range(m):
# this is the starting parity position
parity_pos = 2**p
# back-track to the closest parity bit multiple and see if it is even or odd
# If even, we are in the skip phase, do not include
# If odd, we are in the include phase
parity_chk = int((pos - (pos % parity_pos)) / parity_pos)
# valid for inclusion or final parity bit that includes everything
if is_odd(parity_chk) or p == m - 1:
code = code + (p, )
codes.append(code)
# final parity bit includes all ECC bits
for p in range(m - 1):
codes.append((m - 1, ))
log.info("Hamming codes {}".format(codes))
return codes
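# Worked example (illustrative): for k = 4 the SECDED Hamming code uses m = 4
# (n = 8). The first data position is 3 (binary 011), so its tuple is
# (0, 1, 3): parity bits 0 and 1 plus the final overall parity bit m-1.
# The trailing (m-1,) entries make that final bit also cover the other ECC bits.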
def write_pkg_file(outdir, pkg_str):
with open(outdir + "/" + "prim_secded_pkg.sv", "w") as f:
outstr = '''{}// SECDED package generated by
// util/design/secded_gen.py from {}
package prim_secded_pkg;
{}
endpackage
'''.format(COPYRIGHT, SECDED_CFG_FILE, pkg_str)
f.write(outstr)
def bytes_to_c_type(num_bytes):
if num_bytes == 1:
return 'uint8_t'
elif num_bytes <= 2:
return 'uint16_t'
elif num_bytes <= 4:
return 'uint32_t'
elif num_bytes <= 8:
return 'uint64_t'
return None
def write_c_files(n, k, m, codes, suffix, c_src_filename, c_h_filename,
codetype):
in_bytes = math.ceil(k / 8)
out_bytes = math.ceil(m / 8)
if (k > 64):
log.warning(f"Cannot generate C encoder for k = {k}."
" The tool has no support for k > 64 for C encoder "
"generation")
return
in_type = bytes_to_c_type(in_bytes)
out_type = bytes_to_c_type(out_bytes)
assert in_type
assert out_type
assert codetype in ["hsiao", "inv_hsiao"]
invert = (codetype == "inv_hsiao")
with open(c_src_filename, "a") as f:
# Write out function prototype in src
f.write(f"\n{out_type} enc_secded{suffix}_{n}_{k}"
f"(const uint8_t bytes[{in_bytes}]) {{\n")
# Form a single word from the incoming byte data
f.write(f"{in_type} word = ")
f.write(" | ".join(
[f"(({in_type})bytes[{i}] << {i*8})" for i in range(in_bytes)]))
f.write(";\n\n")
# AND the word with the codes, calculating parity of each and combine
# into a single word of integrity bits
f.write("return ")
parity_bit_masks = enumerate(calc_bitmasks(k, m, codes, False))
# Add ECC bit inversion if needed (see print_enc function).
f.write(" | ".join(
[f"(calc_parity(word & 0x{mask:x}, "
f"{'true' if invert and (par_bit % 2) else 'false'}) << {par_bit})"
for par_bit, mask in parity_bit_masks]))
f.write(";\n}\n")
with open(c_h_filename, "a") as f:
# Write out function declaration in header
f.write(f"{out_type} enc_secded{suffix}_{n}_{k}"
f"(const uint8_t bytes[{in_bytes}]);\n")
def format_c_files(c_src_filename, c_h_filename):
    # Keep `result` defined so the exception handler below can reference it
    # safely even if subprocess.run itself raises (e.g. clang-format missing).
    result = None
    try:
# Call clang-format to in-place format generated C code. If there are
# any issues log a warning.
result = subprocess.run(['clang-format', '-i', c_src_filename,
c_h_filename], stderr=subprocess.PIPE,
universal_newlines=True)
result.check_returncode()
except Exception as e:
stderr = ''
if result:
stderr = '\n' + result.stderr
log.warning(f"Could not format generated C source: {e}{stderr}")
def write_enc_dec_files(n, k, m, codes, suffix, outdir, codetype):
enc_out = print_enc(n, k, m, codes, codetype)
module_name = "prim_secded%s_%d_%d" % (suffix, n, k)
with open(outdir + "/" + module_name + "_enc.sv", "w") as f:
outstr = '''{}// SECDED encoder generated by util/design/secded_gen.py
module {}_enc (
input [{}:0] data_i,
output logic [{}:0] data_o
);
always_comb begin : p_encode
{} end
endmodule : {}_enc
'''.format(COPYRIGHT, module_name, (k - 1), (n - 1), enc_out, module_name)
f.write(outstr)
dec_out = print_dec(n, k, m, codes, codetype)
with open(outdir + "/" + module_name + "_dec.sv", "w") as f:
outstr = '''{}// SECDED decoder generated by util/design/secded_gen.py
module {}_dec (
input [{}:0] data_i,
output logic [{}:0] data_o,
output logic [{}:0] syndrome_o,
output logic [1:0] err_o
);
always_comb begin : p_encode
{} end
endmodule : {}_dec
'''.format(COPYRIGHT, module_name, (n - 1), (k - 1), (m - 1),
dec_out, module_name)
f.write(outstr)
def write_fpv_files(n, k, m, codes, suffix, outdir, codetype):
module_name = "prim_secded%s_%d_%d" % (suffix, n, k)
with open(outdir + "/tb/" + module_name + "_tb.sv", "w") as f:
outstr = '''{}// SECDED FPV testbench generated by util/design/secded_gen.py
module {}_tb (
input clk_i,
input rst_ni,
input [{}:0] data_i,
output logic [{}:0] data_o,
output logic [{}:0] encoded_o,
output logic [{}:0] syndrome_o,
output logic [1:0] err_o,
input [{}:0] error_inject_i
);
{}_enc {}_enc (
.data_i,
.data_o(encoded_o)
);
{}_dec {}_dec (
.data_i(encoded_o ^ error_inject_i),
.data_o,
.syndrome_o,
.err_o
);
endmodule : {}_tb
'''.format(COPYRIGHT, module_name, (k - 1), (k - 1), (n - 1), (m - 1), (n - 1),
module_name, module_name, module_name, module_name, module_name)
f.write(outstr)
# Additional assertions for inverted codes.
if codetype in ["inv_hsiao", "inv_hamming"]:
inv_asserts = '''
// Check that all-one and all-zero data does not result in all-one or all-zero codewords
`ASSERT(AllZerosCheck_A, data_i == '0 |-> encoded_o != '0)
`ASSERT(AllOnesCheck_A, data_i == '1 |-> encoded_o != '1)
'''
else:
inv_asserts = ""
with open(outdir + "/vip/" + module_name + "_assert_fpv.sv", "w") as f:
outstr = '''{}// SECDED FPV assertion file generated by util/design/secded_gen.py
module {}_assert_fpv (
input clk_i,
input rst_ni,
input [{}:0] data_i,
input [{}:0] data_o,
input [{}:0] encoded_o,
input [{}:0] syndrome_o,
input [1:0] err_o,
input [{}:0] error_inject_i
);
// Inject a maximum of two errors simultaneously.
`ASSUME_FPV(MaxTwoErrors_M, $countones(error_inject_i) <= 2)
// This bounds the input data state space to make sure the solver converges.
`ASSUME_FPV(DataLimit_M, $onehot0(data_i) || $onehot0(~data_i))
// Single bit error detection
`ASSERT(SingleErrorDetect_A, $countones(error_inject_i) == 1 |-> err_o[0])
`ASSERT(SingleErrorDetectReverse_A, err_o[0] |-> $countones(error_inject_i) == 1)
// Double bit error detection
`ASSERT(DoubleErrorDetect_A, $countones(error_inject_i) == 2 |-> err_o[1])
`ASSERT(DoubleErrorDetectReverse_A, err_o[1] |-> $countones(error_inject_i) == 2)
// Single bit error correction (implicitly tests the syndrome output)
`ASSERT(SingleErrorCorrect_A, $countones(error_inject_i) < 2 |-> data_i == data_o)
// Basic syndrome check
`ASSERT(SyndromeCheck_A, |syndrome_o |-> $countones(error_inject_i) > 0)
`ASSERT(SyndromeCheckReverse_A, $countones(error_inject_i) > 0 |-> |syndrome_o)
{}
endmodule : {}_assert_fpv
'''.format(COPYRIGHT, module_name, (k - 1), (k - 1), (n - 1), (m - 1), (n - 1),
inv_asserts, module_name)
f.write(outstr)
with open(outdir + "/tb/" + module_name + "_bind_fpv.sv", "w") as f:
outstr = '''{}// SECDED FPV bind file generated by util/design/secded_gen.py
module {}_bind_fpv;
bind {}_tb
{}_assert_fpv {}_assert_fpv (
.clk_i,
.rst_ni,
.data_i,
.data_o,
.encoded_o,
.syndrome_o,
.err_o,
.error_inject_i
);
endmodule : {}_bind_fpv
'''.format(COPYRIGHT, module_name, module_name, module_name, module_name,
module_name)
f.write(outstr)
with open(outdir + "/" + module_name + "_fpv.core", "w") as f:
outstr = '''CAPI=2:
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
name: "lowrisc:fpv:{}_fpv:0.1"
description: "SECDED FPV target"
filesets:
files_formal:
depend:
- lowrisc:prim:all
- lowrisc:prim:secded
files:
- vip/{}_assert_fpv.sv
- tb/{}_tb.sv
- tb/{}_bind_fpv.sv
file_type: systemVerilogSource
targets:
default: &default_target
# note, this setting is just used
# to generate a file list for jg
default_tool: icarus
filesets:
- files_formal
toplevel:
- {}_tb
formal:
<<: *default_target
lint:
<<: *default_target
'''.format(module_name, module_name, module_name, module_name, module_name)
f.write(outstr)
def main():
parser = argparse.ArgumentParser(
prog="secded_gen",
description='''This tool generates Single Error Correction Double Error
Detection(SECDED) encoder and decoder modules in SystemVerilog.
''')
parser.add_argument('--no_fpv',
action='store_true',
help='Do not generate FPV testbench.')
parser.add_argument('--outdir',
default='hw/ip/prim/rtl/',
help='''
Output directory. The output file will be named
`prim_secded_<n>_<k>_enc/dec.sv` (default: %(default)s)
''')
parser.add_argument('--fpv_outdir',
default='hw/ip/prim/fpv/',
help='''
FPV output directory. The output files will have
the base name `prim_secded_<n>_<k>_*_fpv` (default: %(default)s)
''')
parser.add_argument('--c_outdir',
default='hw/ip/prim/dv/prim_secded',
help='''
C output directory. The output files are named secded_enc.c and
secded_enc.h
''')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose')
args = parser.parse_args()
if (args.verbose):
log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
else:
log.basicConfig(format="%(levelname)s: %(message)s")
with open(SECDED_CFG_FILE, 'r') as infile:
config = hjson.load(infile)
# Error checking
error = verify(config)
if (error):
exit(1)
# Generate outputs
generate(config, args)
if __name__ == "__main__":
main()
| 30.932418
| 92
| 0.594238
|
44e680def8a8c08539037ebb35fbd0376f4196b5
| 3,805
|
py
|
Python
|
first_project/first_app/views.py
|
andihaki/django
|
e6e2f2afbef4fb2442bf891490dc3013c25b4748
|
[
"BSD-3-Clause"
] | null | null | null |
first_project/first_app/views.py
|
andihaki/django
|
e6e2f2afbef4fb2442bf891490dc3013c25b4748
|
[
"BSD-3-Clause"
] | null | null | null |
first_project/first_app/views.py
|
andihaki/django
|
e6e2f2afbef4fb2442bf891490dc3013c25b4748
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import render
#login
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.contrib.auth.decorators import login_required
# class-based views
from django.shortcuts import render
from django.views.generic import View, TemplateView, ListView, DetailView
from django.views.generic import CreateView, UpdateView, DeleteView
from first_app.models import AccessRecord, Topic, Webpage
from first_app.models import UserProfileInfo
from first_app.forms import UserForm, UserProfileInfoForm
# for deleting pages
from django.urls import reverse_lazy
# Create your views here.
def index(request):
webpages_list = AccessRecord.objects.order_by('date')
    # data dictionary that will be injected into index.html
my_dict = {
'access_record': webpages_list,
}
return render(request, 'first_app/index.html', context=my_dict)
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('index'))
def idx(request):
return render(request, 'default/index.html')
def register(request):
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
profile_form = UserProfileInfoForm(data=request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
if 'profile_pic' in request.FILES:
profile.profile_pic = request.FILES['profile_pic']
profile.save()
registered = True
else:
print(user_form.errors, profile_form.errors)
else:
user_form = UserForm()
profile_form = UserProfileInfoForm()
data = {
'registered': registered,
'user_form': user_form,
'profile_form': profile_form,
}
return render(request, 'first_app/registration.html', context=data)
def user_login(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return HttpResponseRedirect(reverse('index'))
else:
return HttpResponse("Akun disable")
else:
            # No matching active user: avoid logging the raw password and make
            # sure the view still returns an HttpResponse.
            print('wrong username / password for user: {}'.format(username))
            return HttpResponse("Invalid login details supplied.")
else:
return render(request, 'first_app/login.html', {})
def other(request):
return render(request, 'default/other.html')
def relative(request):
return render(request, 'default/relative_url_templates.html')
# class-based views
from . import models
class IndexView(TemplateView):
template_name = 'first_app/index.html'
class SchoolListView(ListView):
    # if context_object_name is not defined,
    # then the context injected into the html = school_list
context_object_name = 'schools'
model = models.School
class SchoolDetailView(DetailView):
    # if context_object_name is not defined,
    # then the context injected into the html = school
context_object_name = 'school_detail'
model = models.School
template_name = 'first_app/school_detail.html'
class SchoolCreateView(CreateView):
fields = ('name', 'principal', 'location')
model = models.School
class SchoolUpdateView(UpdateView):
fields = ('name', 'principal')
model = models.School
class SchoolDeleteView(DeleteView):
model = models.School
success_url = reverse_lazy("first_app:index")
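# A minimal wiring sketch (hypothetical urls.py, not part of this file) for the
# class-based views above; names assume app_name = 'first_app', as implied by
# the reverse_lazy("first_app:index") call:
#
#   from django.urls import path
#   from . import views
#
#   app_name = 'first_app'
#   urlpatterns = [
#       path('', views.IndexView.as_view(), name='index'),
#       path('schools/', views.SchoolListView.as_view(), name='list'),
#       path('schools/<int:pk>/', views.SchoolDetailView.as_view(), name='detail'),
#       path('schools/create/', views.SchoolCreateView.as_view(), name='create'),
#       path('schools/<int:pk>/delete/', views.SchoolDeleteView.as_view(), name='delete'),
#   ]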
| 29.96063
| 73
| 0.689093
|
977ef2bb28678bb0a64482d6baf80992c5687159
| 3,452
|
py
|
Python
|
python/external/stacktracer.py
|
cschutijser/scion
|
054cef53b31a577ed224a090d6a4fd3883fd520b
|
[
"Apache-2.0"
] | 1
|
2018-03-18T14:46:34.000Z
|
2018-03-18T14:46:34.000Z
|
python/external/stacktracer.py
|
cschutijser/scion
|
054cef53b31a577ed224a090d6a4fd3883fd520b
|
[
"Apache-2.0"
] | 1
|
2020-03-20T01:28:56.000Z
|
2020-03-20T01:28:56.000Z
|
python/external/stacktracer.py
|
cschutijser/scion
|
054cef53b31a577ed224a090d6a4fd3883fd520b
|
[
"Apache-2.0"
] | 2
|
2020-03-14T16:03:27.000Z
|
2020-03-18T08:13:19.000Z
|
"""Stack tracer for multi-threaded applications.
Usage:
import stacktracer
stacktracer.start_trace("trace.html",interval=5,auto=True) # Set auto flag to always update file!
....
stacktracer.stop_trace()
"""
# Source: http://code.activestate.com/recipes/577334-how-to-debug-deadlocked-multi-threaded-programs/
from datetime import datetime, timezone
import sys
import threading
import traceback
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from lib.thread import thread_safety_net
# Taken from http://bzimmer.ziclix.com/2008/12/17/python-thread-dumps/
def stacktraces():
# First map thread IDs to thread names
id_name = {}
for thread in threading.enumerate():
id_name[thread.ident] = thread.name
code = []
for threadId, stack in sys._current_frames().items():
code.append("\n# ThreadID: %s, Name: %s" %
(threadId, id_name.get(threadId, "UNKNOWN")))
for filename, lineno, name, line in traceback.extract_stack(stack):
code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
code.append(" %s" % (line.strip()))
return highlight("\n".join(code), PythonLexer(), HtmlFormatter(
full=False,
# style="native",
noclasses=True,
))
# This part was made by nagylzs
import os
import time
import threading
class TraceDumper(threading.Thread):
"""Dump stack traces into a given file periodically."""
def __init__(self,fpath,interval,auto):
"""
@param fpath: File path to output HTML (stack trace file)
@param auto: Set flag (True) to update trace continuously.
Clear flag (False) to update only if file not exists.
(Then delete the file to force update.)
@param interval: In seconds: how often to update the trace file.
"""
assert(interval>0.1)
self.auto = auto
self.interval = interval
self.fpath = os.path.abspath(fpath)
self.stop_requested = threading.Event()
threading.Thread.__init__(self, name="TraceDumper")
def run(self):
thread_safety_net(self._run)
def _run(self):
while not self.stop_requested.isSet():
time.sleep(self.interval)
if self.auto or not os.path.isfile(self.fpath):
self.stacktraces()
def stop(self):
self.stop_requested.set()
self.join()
try:
if os.path.isfile(self.fpath):
os.unlink(self.fpath)
except:
pass
def stacktraces(self):
fout = open(self.fpath,"wb+")
dt = datetime.now(tz=timezone.utc)
try:
fout.write(bytes("Generated at %s" % dt.isoformat(' '), "UTF-8"))
fout.write(bytes(stacktraces(), "UTF-8"))
finally:
fout.close()
_tracer = None
def trace_start(fpath,interval=5,auto=True):
"""Start tracing into the given file."""
global _tracer
if _tracer is None:
_tracer = TraceDumper(fpath,interval,auto)
_tracer.setDaemon(True)
_tracer.start()
else:
raise Exception("Already tracing to %s"%_tracer.fpath)
def trace_stop():
"""Stop tracing."""
global _tracer
if _tracer is None:
raise Exception("Not tracing, cannot stop.")
else:
        _tracer.stop()
        _tracer = None
| 29.758621
| 101
| 0.626014
|
3be037a0b945f4a8b26f718aefbe0bb5eb3f27ef
| 1,532
|
py
|
Python
|
aulabase10.py
|
jeffhawk/pythontraining
|
1ba2efbdf0473114036bbd30ec64b54c7d706265
|
[
"MIT"
] | null | null | null |
aulabase10.py
|
jeffhawk/pythontraining
|
1ba2efbdf0473114036bbd30ec64b54c7d706265
|
[
"MIT"
] | null | null | null |
aulabase10.py
|
jeffhawk/pythontraining
|
1ba2efbdf0473114036bbd30ec64b54c7d706265
|
[
"MIT"
] | null | null | null |
#Global variable declarations
Arq = "biblios.txt"
Py = 'PySimpleGUI'
Oracx = 'cx_Oracle'
biblio = False
biblios = ['PySimpleGUI','cx_Oracle','os','ctypes','sys','string','pip','subprocess','system']
#python -m pip install cx_Oracle
atua = ''
i=0
#Importing the libraries
# #Check whether the libraries exist; otherwise ask whether to install them
try:
import os, ctypes, sys
import PySimpleGUI as psg
import string
import termcolor
import cx_Oracle
import pip
import subprocess
from os import system
biblio = True
except ImportError:
    # The failing import above may occur before `subprocess` is loaded, so
    # import what the recovery path needs here explicitly.
    import sys
    import subprocess
    #print (len(biblios))
    #system('cls')
print('Erro ao tentar importar bibliotecas necessárias!!')
for i in biblios:
print(i)
print('Erro ao tentar importar bibliotecas')
atua = input('Deseja instalar as bibliotecas necessárias? - S/N: ')
if atua == 's' or atua == 'S':
subprocess.check_call([sys.executable, '-m', 'pip', 'install', Py])
subprocess.check_call([sys.executable, '-m', 'pip', 'install', Oracx])
'''if not Py in sys.modules.keys():
pip.main(['install', Py])
if not Oracx in sys.modules.keys():
pip.main(['install', Oracx])'''
print('Instalação efetuada!')
biblio = True
else:
print('Bibliotecas não instaladas!')
biblio = False
#Subs, Modules and Classes
def Main():
if biblio == True:
print('Entrou no sistema, OK!')
else:
print('Decidiu não instalar e saiu!')
Main()
| 26.413793
| 94
| 0.625326
|
cfa3c5380c65c3879fb34faf48d1ea7904d85e49
| 2,921
|
py
|
Python
|
opendeep/utils/tests/test_batch.py
|
vitruvianscience/OpenDeep
|
e96efc449101094354b615cf15afe6d03644fc36
|
[
"Apache-2.0"
] | 252
|
2015-03-13T21:55:22.000Z
|
2021-09-06T21:37:38.000Z
|
opendeep/utils/tests/test_batch.py
|
afcarl/OpenDeep
|
e96efc449101094354b615cf15afe6d03644fc36
|
[
"Apache-2.0"
] | 16
|
2015-03-14T06:47:04.000Z
|
2016-09-23T19:13:35.000Z
|
opendeep/utils/tests/test_batch.py
|
afcarl/OpenDeep
|
e96efc449101094354b615cf15afe6d03644fc36
|
[
"Apache-2.0"
] | 68
|
2015-03-14T00:05:53.000Z
|
2020-06-04T13:36:13.000Z
|
from __future__ import division
import unittest
from opendeep.utils.batch import *
from opendeep.utils.misc import numpy_one_hot
import numpy
class TestBatch(unittest.TestCase):
def setUp(self):
# numpy array to test
self.np = numpy.eye(10)
# generator over word vectors to test
words = "Hello\nWorld\nThis\nIs\nA\nTest!".split("\n")
vocab = {char: n for char, n in zip(list(set(words)), range(len(set(words))))}
words = [vocab[x] for x in words]
self.words = numpy_one_hot(words, n_classes=len(vocab))
def testNumpyBatchSize(self):
for size in range(12):
test = True
try:
batch = numpy_minibatch(self.np, batch_size=size)
i = 0
for x in batch:
assert x.shape[0] <= size
i += 1
except AssertionError:
assert size == 0
test = False
if test:
iters = int(numpy.ceil(self.np.shape[0] / size))
assert i == iters, "iterations was %d, expected %d for batch_size %d" % (i, iters, size)
def testNumpyMinBatchSize(self):
batch = numpy_minibatch(self.np, batch_size=3, min_batch_size=2)
i = 0
for x in batch:
assert 2 <= x.shape[0] <= 3
i += 1
assert i == 3
try:
batch = numpy_minibatch(self.np, batch_size=2, min_batch_size=3)
raise AssertionError("Was able to create batch with invalid sizes.")
except Exception as e:
assert isinstance(e, AssertionError)
def testIterBatchSize(self):
for size in range(12):
gen = (row for row in self.words)
test = True
try:
batch = iterable_minibatch(gen, batch_size=size)
i = 0
for x in batch:
assert x.shape[0] <= size
i += 1
except AssertionError:
assert size == 0
test = False
if test:
iters = int(numpy.ceil(6. / size))
assert i == iters, "iterations was %d, expected %d for batch_size %d" % (i, iters, size)
def testIterMinBatchSize(self):
gen = (row for row in self.words)
batch = iterable_minibatch(gen, batch_size=4, min_batch_size=3)
i = 0
for x in batch:
assert 3 <= x.shape[0] <= 4
i += 1
assert i == 1
gen = (row for row in self.words)
try:
batch = iterable_minibatch(gen, batch_size=2, min_batch_size=3)
raise AssertionError("Was able to create batch with invalid sizes.")
except Exception as e:
assert isinstance(e, AssertionError)
def tearDown(self):
del self.np, self.words
if __name__ == '__main__':
unittest.main()
| 33.965116
| 104
| 0.539541
|
bb33fa600fac361d036585f650ffa7a922c3ed76
| 3,941
|
py
|
Python
|
commode_utils/metrics/sequential_f1.py
|
SpirinEgor/commode-utils
|
11b9a26b70544c26de5a4f4aa588723d906db707
|
[
"Apache-2.0"
] | null | null | null |
commode_utils/metrics/sequential_f1.py
|
SpirinEgor/commode-utils
|
11b9a26b70544c26de5a4f4aa588723d906db707
|
[
"Apache-2.0"
] | null | null | null |
commode_utils/metrics/sequential_f1.py
|
SpirinEgor/commode-utils
|
11b9a26b70544c26de5a4f4aa588723d906db707
|
[
"Apache-2.0"
] | 1
|
2021-09-24T22:59:27.000Z
|
2021-09-24T22:59:27.000Z
|
from typing import Optional, List
import torch
from torchmetrics import Metric
from commode_utils.metrics import ClassificationMetrics
class SequentialF1Score(Metric):
def __init__(self, pad_idx: int, eos_idx: int, ignore_idx: Optional[List[int]] = None, **kwargs):
"""Metric for computing f1 score on sequence of tokens.
This metric used in many works about code summarization.
:param pad_idx: index of PAD token, required for masking the end of sequence.
:param eos_idx: index of EOS token, required for masking the end of sequence.
:param ignore_idx: additional list of tokens to ignore.
"""
super().__init__(**kwargs)
self._pad_idx = pad_idx
self._eos_idx = eos_idx
self._ignore_idx = ignore_idx if ignore_idx is not None else []
self._ignore_idx += [self._pad_idx, self._eos_idx]
# Metric states
self.add_state("true_positive", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("false_positive", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("false_negative", default=torch.tensor(0), dist_reduce_fx="sum")
def _get_end_sequence_mask(self, tokens: torch.Tensor) -> torch.Tensor:
"""For each sequence, create a mask with all tokens after first PAD or EOS.
:param tokens: [seq len; batch size] tensor with tokens indexes.
:return: mask with the same shape as tokens.
"""
occurrence_mask = torch.bitwise_or((tokens == self._pad_idx), (tokens == self._eos_idx))
mask_max_value, mask_max_indices = torch.max(occurrence_mask, dim=0)
# if no pad token use len+1 position
mask_max_indices[~mask_max_value] = tokens.shape[0]
mask = torch.arange(tokens.shape[0], device=tokens.device).view(-1, 1) >= mask_max_indices
return mask
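    # Illustrative behaviour: for a single column of tokens [5, 3, <eos>, 7]
    # the first EOS/PAD occurrence is at index 2, so the returned mask column is
    # [False, False, True, True] and everything from the EOS onward is ignored.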
def update(self, predicted: torch.Tensor, target: torch.Tensor):
"""Calculated token occurrence statistic in the predicted tensor w.r.t. target tensor.
:param predicted: [pred seq len; batch size] -- tensor with predicted tokens
:param target: [target seq len; batch size] -- tensor with ground truth tokens
:return:
"""
batch_size = target.shape[1]
if predicted.shape[1] != batch_size:
raise ValueError(f"Wrong batch size for prediction (expected: {batch_size}, actual: {predicted.shape[1]})")
end_sequence_mask = self._get_end_sequence_mask(predicted)
predicted[end_sequence_mask] = self._pad_idx
for batch_idx in range(batch_size):
target_seq = [token for token in target[:, batch_idx] if token not in self._ignore_idx]
predicted_seq = [token for token in predicted[:, batch_idx] if token not in self._ignore_idx]
for predicted_token in predicted_seq:
if predicted_token in target_seq:
self.true_positive += 1
else:
self.false_positive += 1
for target_token in target_seq:
if target_token not in predicted_seq:
self.false_negative += 1
def compute(self) -> ClassificationMetrics:
"""Calculate precision, recall, and F1-score based on stored statistic.
:return: calculated metrics aggregated in data class
"""
precision = self.true_positive
if self.true_positive + self.false_positive > 0:
precision = self.true_positive / (self.true_positive + self.false_positive)
recall = self.true_positive
if self.true_positive + self.false_negative > 0:
recall = self.true_positive / (self.true_positive + self.false_negative)
f1_score = 2 * precision * recall
if precision + recall > 0:
f1_score /= precision + recall
return ClassificationMetrics(f1_score=f1_score, precision=precision, recall=recall)
| 46.916667
| 119
| 0.663537
|
80779a40c07ff7128683c0870c70ddac11cac1e4
| 13,149
|
py
|
Python
|
cto/jump.py
|
losenineai/CTO
|
6c328b2be7b10ed2b219c8f3e9f99842c9e3d1d0
|
[
"MIT"
] | 138
|
2021-10-07T21:35:13.000Z
|
2022-03-26T07:31:20.000Z
|
cto/jump.py
|
losenineai/CTO
|
6c328b2be7b10ed2b219c8f3e9f99842c9e3d1d0
|
[
"MIT"
] | 2
|
2021-10-10T07:27:28.000Z
|
2021-10-20T02:34:41.000Z
|
cto/jump.py
|
losenineai/CTO
|
6c328b2be7b10ed2b219c8f3e9f99842c9e3d1d0
|
[
"MIT"
] | 12
|
2021-10-08T19:38:02.000Z
|
2022-03-16T08:16:35.000Z
|
import ida_kernwin
import ida_lines
import ida_funcs
import ida_moves
import ida_name
import ida_ua
import ida_idaapi
import ida_xref
import idc
#import cto_utils
#ida_idaapi.require("cto_utils")
def jump_to_func_ptr_line_pos(text, func_name, w, tweak=0, add_x=False):
flag = False
idx = text.find(func_name)
#w = find_widget(wname)
if w is None:
return flag
# we are already at a certain point. but ida sometimes does not point to a function in the middle of the instruction.
# that's why I use position 0 of x first, and then move to the right position again.
if idx == 0:
pos, x, y = ida_kernwin.get_custom_viewer_place(w, 0)
orig_x = x
if not add_x:
orig_x = 0
ida_kernwin.jumpto(w, pos, 0, y)
pos, x, y = ida_kernwin.get_custom_viewer_place(w, 0)
ida_kernwin.jumpto(w, pos, orig_x+tweak, y)
flag = True
elif idx > 0:
pos, x, y = ida_kernwin.get_custom_viewer_place(w, 0)
if not add_x:
x = 0
ida_kernwin.jumpto(w, pos, x+idx+tweak, y)
flag = True
return flag
"""
def get_apiname_line(ea, wname=g_wname):
fn = idc.get_name(ea)
if not fn:
return None, None
fn, line, idx = get_funcname_line(ea)
if not fn:
return None, None
w = find_widget(wname)
if w is None:
return None, None
pos, x, y = ida_kernwin.get_custom_viewer_place(w, 0)
ida_kernwin.jumpto(w, pos, 0, y)
l = ida_kernwin.get_curline()
if l and l.find(fn) >= 0:
l_removed = ida_lines.tag_remove(l)
return fn, l_removed
return None, None
"""
def jump_to_line(ea, i, w):
#w = find_widget(wname)
if w is None:
return False
pos, x, y = ida_kernwin.get_custom_viewer_place(w, 0)
pos.lnnum = i
ida_kernwin.jumpto(w, pos, x, y)
return True
"""
def check_line(ea, i, fn, wname=g_wname):
w = find_widget(wname)
if w is None:
return None, -1
pos, x, y = ida_kernwin.get_custom_viewer_place(w, 0)
pos.lnnum = i
ida_kernwin.jumpto(w, pos, x, y)
l = ida_kernwin.get_curline()
l_removed = ida_lines.tag_remove(l)
l_removed_content = l_removed.split(";")[0]
idx = l_removed_content.find(fn)
if idx >= 0:
return l_removed_content, idx
return None, -1
"""
def get_line_no_decomp(text, vu, max_trial=200):
line = ""
lineno = -1
x = -1
lnnum = 0
y = 0
for i, l in enumerate(vu.cfunc.get_pseudocode()):
if ida_lines.tag_remove(l.line).startswith("//"):
y += 1
continue
#print(i, ida_lines.tag_remove(l.line))
ln = ida_lines.tag_remove(l.line)
ln = ln.split("//", 1)[0]
x = ln.find(text)
if x >= 0:
line = ln
lineno = i
break
if max_trial > 0 and i > max_trial:
break
lnnum += 1
y = 0
return line, lineno, lnnum, x, y
def get_funcname_line_decomp(ea, vu, max_trial=200):
func_decl_str = ida_lines.tag_remove(vu.cfunc.print_dcl())
f = ida_funcs.get_func(ea)
func_name = ida_funcs.get_func_name(f.start_ea)
func_name = ida_name.validate_name(func_name, ida_name.VNT_VISIBLE)
# for API thunk call (e.g. __WSADisSet -> _WSADisSet)
if func_decl_str.find(func_name) < 0 and func_name.startswith("_"):
func_name = func_name[1:]
line, lineno, lnnum, x, y = get_line_no_decomp(func_name, vu)
return line, lineno, lnnum, x, y
def get_line_no(ea, text, max_trial=200, chk_cmt=False):
_, disass = ida_lines.generate_disassembly(
ea,
max_trial, # maximum number of lines
False, # as_stack=False
True) # notags
for i, l in enumerate(disass):
if chk_cmt:
l_content = ''
if l.find(";") >= 0:
l_content = l.split(";")[1]
else:
l_content = l.split(";")[0]
#print(l_content)
idx = l_content.find(text)
if idx >= 0:
return l, i, idx
return "", -1, -1
def get_funcname_line(ea, w, max_trial=200):
f = ida_funcs.get_func(ea)
if f:
func_ea = f.start_ea
fn = idc.get_func_name(func_ea)
else:
fn = ida_name.get_name(ea)
if fn:
fn = ida_name.validate_name(fn, ida_name.VNT_VISIBLE)
#print(fn)
line, i, idx = get_line_no(ea, fn, max_trial)
if i < 0:
return None, None, -1
jump_to_line(ea, i, w)
if jump_to_func_ptr_line_pos(line, fn, w):
return fn, line, idx
return None, None, -1
def push_lochist_jump(w):
r = False
if w is None:
return False
loc = ida_moves.lochist_entry_t()
r = ida_kernwin.get_custom_viewer_location(loc, w)
if r:
loc.renderer_info().pos.cx = 0
r = ida_kernwin.custom_viewer_jump(w, loc, ida_kernwin.CVNF_JUMP)
return False
def jumpto_name_decomp(ea, w):
try:
import ida_hexrays
ida_hexrays.init_hexrays_plugin()
except ImportError:
return
vu = ida_hexrays.get_widget_vdui(w)
#print(vu.cfunc)
vu.get_current_item(ida_hexrays.USE_KEYBOARD)
ea = vu.cfunc.entry_ea
line, lineno, lnnum, x, y = get_funcname_line_decomp(ea, vu)
if line:
#print(line, lineno, lnnum, x, y)
pos, _x, _y = ida_kernwin.get_custom_viewer_place(w, 0)
#print(pos.lnnum, _x, _y)
idaplace = ida_kernwin.place_t_as_idaplace_t(pos)
idaplace.lnnum = lnnum
idaplace.ea = ea
ida_kernwin.jumpto(w, idaplace, x, y)
vu.refresh_cpos(ida_hexrays.USE_KEYBOARD)
##vu.refresh_ctext(ida_hexrays.USE_KEYBOARD)
#pos, _x, _y = ida_kernwin.get_custom_viewer_place(w, 0)
#print(pos.lnnum, _x, _y)
def jumpto_name(ea, w):
wt = ida_kernwin.get_widget_type(w)
if wt == ida_kernwin.BWN_DISASM:
jumpto_name_ida(ea, w)
elif wt == ida_kernwin.BWN_PSEUDOCODE:
jumpto_name_decomp(ea, w)
def jumpto_name_ida(ea, w):
flag = False
ida_kernwin.jumpto(ea)
func = ida_funcs.get_func(ea)
# for callee
if func:
#print("%x" % func.start_ea)
fn, line, idx = get_funcname_line(func.start_ea, w)
if idx >= 0:
if w is None:
return False
pos, x, y = ida_kernwin.get_custom_viewer_place(w, 0)
ida_kernwin.jumpto(w, pos, idx, 0)
flag = True
# for APIs and strings
else:
fn, line, idx = get_funcname_line(ea, w)
if fn:
flag = jump_to_func_ptr_line_pos(line, fn, w)
return flag
def jumpto_offset(ea, w):
flag = False
wt = ida_kernwin.get_widget_type(w)
if wt == ida_kernwin.BWN_DISASM:
flag = jumpto_offset_ida(ea, w)
elif wt == ida_kernwin.BWN_PSEUDOCODE:
pass
return flag
def jumpto_offset_ida(ea, w):
curr_line = ida_lines.tag_remove(ida_kernwin.get_curline())
search_key = " offset "
x = curr_line.find(search_key)
if x > 0:
x += len(search_key)
func_name = ida_name.extract_name(curr_line.split(search_key)[1], 0)
        # Sometimes the actual function name and the validated name mismatch.
        # To fix it, get the name and compare it with the validated one.
off_v = ida_xref.get_first_dref_from(ea)
fn = ida_name.get_name(off_v)
vfn = ida_name.validate_name(fn, ida_name.VNT_VISIBLE)
if vfn == func_name and fn != vfn:
func_name = fn
if func_name:
func_ea = ida_name.get_name_ea(ida_idaapi.BADADDR, func_name)
if func_ea != ida_idaapi.BADADDR:
pos, _x, y = ida_kernwin.get_custom_viewer_place(w, 0)
ida_kernwin.jumpto(w, pos, x, y)
return True
return False
def jumpto_opn(ea, opn, w):
flag = False
if opn >= 0:
tweak = 0
#ida_kernwin.jumpto(ea, opn, ida_kernwin.UIJMP_DONTPUSH)
ida_kernwin.jumpto(ea, opn)
wt = ida_kernwin.get_widget_type(w)
if wt == ida_kernwin.BWN_DISASM:
jumpto_opn_ida(ea, opn, w)
elif wt == ida_kernwin.BWN_PSEUDOCODE:
jumpto_opn_decomp(ea, opn, w)
return flag
def get_highlight_decomp(w):
vu = None
highlight = None
try:
import ida_hexrays
ida_hexrays.init_hexrays_plugin()
except ImportError:
return None, None
vu = ida_hexrays.get_widget_vdui(w)
#print(vu.cfunc)
vu.get_current_item(ida_hexrays.USE_KEYBOARD)
if vu.item.is_citem():
highlight = vu.item.e
return highlight, vu
def get_current_line_decomp(highlight, vu):
x, y = vu.cfunc.find_item_coords(highlight)
return ida_lines.tag_remove(list(vu.cfunc.get_pseudocode())[y].line)
def get_highlight_name_decomp(w):
highlight, vu = get_highlight_decomp(w)
if highlight and highlight.is_expr():
hl_str = highlight.print1(None)
return hl_str
return None
def jumpto_opn_decomp(ea, opn, w):
flag = False
try:
import ida_hexrays
ida_hexrays.init_hexrays_plugin()
except ImportError:
return False
highlight, vu = get_highlight_decomp(w)
if highlight and highlight.is_expr():
hl_str = highlight.print1(None)
#print(ida_lines.tag_remove(hl_str))
#print(type(highlight))
#print(highlight.op)
if highlight.op in [ida_hexrays.cot_call, ida_hexrays.cot_ptr, ida_hexrays.cot_ref, ida_hexrays.cot_cast, ida_hexrays.cot_idx]:
#print(type(highlight.x))
#print(highlight.x.op)
if highlight.x.op in [ida_hexrays.cot_cast, ida_hexrays.cot_idx]:
#print(highlight.x.x.op)
if highlight.x.x.op in [ida_hexrays.cot_idx]:
x, y = vu.cfunc.find_item_coords(highlight.x.x.x)
else:
x, y = vu.cfunc.find_item_coords(highlight.x.x)
else:
x, y = vu.cfunc.find_item_coords(highlight.x)
pos, _x, _y = ida_kernwin.get_custom_viewer_place(w, 0)
#print(pos.lnnum, _x, _y)
idaplace = ida_kernwin.place_t_as_idaplace_t(pos)
ida_kernwin.jumpto(w, idaplace, x, y)
flag = True
return flag
def jumpto_opn_ida(ea, opn, w):
flag = False
if True:
tweak = 0
op = idc.print_operand(ea, opn)
optype = idc.get_operand_type(ea, opn)
v = idc.get_operand_value(ea, opn)
f = ida_funcs.get_func(ea)
func_flags = idc.get_func_attr(ea, idc.FUNCATTR_FLAGS)
func_name = ida_funcs.get_func_name(ea)
if func_flags & ida_funcs.FUNC_THUNK:
func_name = ida_name.get_name(v)
elif not f:
func_name = ida_name.get_name(ea)
fn = ""
if func_name:
fn = ida_lines.tag_remove(func_name)
if optype in [ida_ua.o_displ, ida_ua.o_phrase, ida_ua.o_mem]:
# IDA's jumpto API does not point to the first character
# if an operand starts with "[" like "lea rax, [rdi+0B0h]".
# This is a tweak value for it.
if op and op[0] == '[':
tweak = -1
# for an applied structure member
if op.find(".") >= 0:
fn = op.rsplit(".", 1)[1]
# for a stack variable name or a non-applied structure member
elif op.find("+") >= 0:
fn = op.rsplit("+", 1)[1]
if fn.find("]") >= 0:
fn = fn.split("]")[0]
if fn.find(")") >= 0:
fn = fn.split(")")[0]
elif op.find("[") >= 0:
fn = op.rsplit("[", 1)[1]
if fn.find("]") >= 0:
fn = fn.split("]")[0]
elif op.find(":") >= 0:
fn = op.rsplit(":", 1)[1]
else:
fn = op
# mov [ebp+lpMultiByteStr], (offset MultiByteStr+40h)
elif optype in [ida_ua.o_imm]:
if op.find(".") >= 0:
fn = op.rsplit(".", 1)[1]
elif op.find("offset ") >= 0:
fn = op.rsplit("offset ", 1)[1]
if fn.find("+") >= 0:
fn = fn.split("+")[0]
else:
fn = op
# for offset
else:
fn = ida_name.get_name(v)
if fn:
fn = ida_lines.tag_remove(fn)
if fn:
fn = ida_name.validate_name(fn, ida_name.VNT_VISIBLE)
flag = jump_to_func_ptr_line_pos(op, fn, w, tweak, add_x=True)
return flag
| 32.708955
| 136
| 0.560423
|
26b85edb07f2e25639420a10fb1cd8534119810a
| 3,916
|
py
|
Python
|
xDeepFM/modules.py
|
jingxiufenghua/rec-model
|
23204f70fc1bf384d3cdd0cc85e43117d3394074
|
[
"MIT"
] | 1,323
|
2020-08-24T02:34:25.000Z
|
2022-03-31T06:03:28.000Z
|
xDeepFM/modules.py
|
yiLinMaster/Recommender-System-with-TF2.0
|
cfc7b3fbd4ba2d9157a78938e6bdaeba7df82822
|
[
"MIT"
] | 65
|
2020-08-25T06:07:41.000Z
|
2022-03-18T20:10:53.000Z
|
xDeepFM/modules.py
|
yiLinMaster/Recommender-System-with-TF2.0
|
cfc7b3fbd4ba2d9157a78938e6bdaeba7df82822
|
[
"MIT"
] | 395
|
2020-08-24T00:57:08.000Z
|
2022-03-31T12:41:13.000Z
|
"""
Created on May 19, 2021
modules of xDeepFM: Linear, DNN, CIN
@author: Ziyao Geng(zggzy1996@163.com)
"""
import tensorflow as tf
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Dropout, Dense, Layer
class DNN(Layer):
def __init__(self, hidden_units, dnn_dropout=0., dnn_activation='relu'):
"""DNN
        :param hidden_units: A list. Numbers of units in each hidden layer.
:param dnn_dropout: A scalar. dropout number.
:param dnn_activation: A string. activation function.
"""
super(DNN, self).__init__()
self.dnn_network = [Dense(units=unit, activation=dnn_activation) for unit in hidden_units]
self.dropout = Dropout(dnn_dropout)
def call(self, inputs, **kwargs):
x = inputs
for dnn in self.dnn_network:
x = dnn(x)
x = self.dropout(x)
return x
class Linear(Layer):
def __init__(self, feature_length, w_reg=1e-6):
"""
Linear Part
:param feature_length: A scalar. The length of features.
:param w_reg: A scalar. The regularization coefficient of parameter w.
"""
super(Linear, self).__init__()
self.feature_length = feature_length
self.w_reg = w_reg
def build(self, input_shape):
self.w = self.add_weight(name="w",
shape=(self.feature_length, 1),
regularizer=l2(self.w_reg),
trainable=True)
def call(self, inputs, **kwargs):
result = tf.reduce_sum(tf.nn.embedding_lookup(self.w, inputs), axis=1) # (batch_size, 1)
return result
class CIN(Layer):
def __init__(self, cin_size, l2_reg=1e-4):
"""CIN
        :param cin_size: A list. [H_1, H_2 ,..., H_k], the number of feature maps in each CIN layer.
:param l2_reg: A scalar. L2 regularization.
"""
super(CIN, self).__init__()
self.cin_size = cin_size
self.l2_reg = l2_reg
def build(self, input_shape):
# get the number of embedding fields
self.embedding_nums = input_shape[1]
# a list of the number of CIN
self.field_nums = [self.embedding_nums] + self.cin_size
# filters
self.cin_W = {
'CIN_W_' + str(i): self.add_weight(
name='CIN_W_' + str(i),
shape=(1, self.field_nums[0] * self.field_nums[i], self.field_nums[i + 1]),
initializer='random_normal',
regularizer=l2(self.l2_reg),
trainable=True)
for i in range(len(self.field_nums) - 1)
}
def call(self, inputs, **kwargs):
dim = inputs.shape[-1]
hidden_layers_results = [inputs]
# split dimension 2 for convenient calculation
split_X_0 = tf.split(hidden_layers_results[0], dim, 2) # dim * (None, field_nums[0], 1)
for idx, size in enumerate(self.cin_size):
split_X_K = tf.split(hidden_layers_results[-1], dim, 2) # dim * (None, filed_nums[i], 1)
result_1 = tf.matmul(split_X_0, split_X_K, transpose_b=True) # (dim, None, field_nums[0], field_nums[i])
result_2 = tf.reshape(result_1, shape=[dim, -1, self.embedding_nums * self.field_nums[idx]])
result_3 = tf.transpose(result_2, perm=[1, 0, 2]) # (None, dim, field_nums[0] * field_nums[i])
result_4 = tf.nn.conv1d(input=result_3, filters=self.cin_W['CIN_W_' + str(idx)], stride=1,
padding='VALID')
result_5 = tf.transpose(result_4, perm=[0, 2, 1]) # (None, field_num[i+1], dim)
hidden_layers_results.append(result_5)
final_results = hidden_layers_results[1:]
result = tf.concat(final_results, axis=1) # (None, H_1 + ... + H_K, dim)
result = tf.reduce_sum(result, axis=-1) # (None, dim)
return result
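# Shape sketch (illustrative numbers): with 39 embedded fields of dimension 8
# and cin_size=[128, 128], CIN maps an input of shape (None, 39, 8) through
# hidden layers of shape (None, 128, 8) and returns a tensor of shape
# (None, 256) after the final sum pooling over the embedding dimension.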
| 36.943396
| 117
| 0.592952
|
44ff5bc6eec5c08e2b851a22c0cbece0ad51f035
| 3,359
|
py
|
Python
|
examples/ldc2.py
|
BIMAU/fvm
|
fef6e8c577848b105e04273e1357a5e279d26aba
|
[
"Apache-2.0"
] | 1
|
2021-11-19T17:16:38.000Z
|
2021-11-19T17:16:38.000Z
|
examples/ldc2.py
|
BIMAU/fvm
|
fef6e8c577848b105e04273e1357a5e279d26aba
|
[
"Apache-2.0"
] | 4
|
2021-05-20T16:33:19.000Z
|
2021-06-03T13:52:47.000Z
|
examples/ldc2.py
|
BIMAU/fvm
|
fef6e8c577848b105e04273e1357a5e279d26aba
|
[
"Apache-2.0"
] | 5
|
2021-05-02T13:24:11.000Z
|
2021-09-29T12:10:27.000Z
|
import numpy
import matplotlib.pyplot as plt
from fvm import Continuation
from fvm import Interface
from fvm import plot_utils
from fvm import utils
class Data:
def __init__(self):
self.mu = []
self.value = []
def append(self, mu, value):
self.mu.append(mu)
self.value.append(value)
def main():
''' An example of performing a continuation for a 2D lid-driven cavity and detecting a bifurcation point'''
dim = 2
dof = 3
nx = 32
ny = nx
nz = 1
n = dof * nx * ny * nz
# Define the problem
parameters = {'Problem Type': 'Lid-driven Cavity',
# Problem parameters
'Reynolds Number': 1,
'Lid Velocity': 0,
# Use a stretched grid
'Grid Stretching Factor': 1.5,
# Set a maximum step size ds
'Maximum Step Size': 500,
# Give back extra output (this is also more expensive)
'Verbose': False}
interface = Interface(parameters, nx, ny, nz, dim, dof)
# Define a point of interest
poi = (nx // 2 - 1, ny // 4 - 1)
print('Looking at point ({}, {})'.format(interface.discretization.x[poi[0]],
interface.discretization.y[poi[1]]))
continuation = Continuation(interface, parameters)
# Compute an initial guess
x0 = numpy.zeros(n)
x0 = continuation.continuation(x0, 'Lid Velocity', 0, 1, 1)[0]
# Store data for computing the bifurcation diagram using postprocessing
data = Data()
parameters['Postprocess'] = lambda x, mu: data.append(mu, utils.get_u_value(x, poi[0], poi[1], 0, interface))
# Perform an initial continuation to Reynolds number 7000 without detecting bifurcation points
ds = 100
target = 7000
x, mu = continuation.continuation(x0, 'Reynolds Number', 0, target, ds)
parameters['Newton Tolerance'] = 1e-12
parameters['Destination Tolerance'] = 1e-4
parameters['Detect Bifurcation Points'] = True
parameters['Maximum Step Size'] = 100
parameters['Eigenvalue Solver'] = {}
parameters['Eigenvalue Solver']['Target'] = 3j
parameters['Eigenvalue Solver']['Tolerance'] = 1e-9
parameters['Eigenvalue Solver']['Number of Eigenvalues'] = 5
# Now detect the bifurcation point
target = 10000
x2, mu2 = continuation.continuation(x, 'Reynolds Number', mu, target, ds)
# Compute the unstable branch after the bifurcation
parameters['Detect Bifurcation Points'] = False
parameters['Maximum Step Size'] = 2000
target = 10000
parameters['Newton Tolerance'] = 1e-4
x3, mu3 = continuation.continuation(x2, 'Reynolds Number', mu2, target, ds)
# Plot a bifurcation diagram. Note that this is a bit of a mess, because we
    # have to go back and forth when converging onto a target
plt.plot(data.mu, data.value)
plt.show()
# Add a perturbation based on the eigenvector
interface.set_parameter('Reynolds Number', mu2)
_, v = interface.eigs(x2, True)
v = v[:, 0].real
# Plot the velocity magnitude
plot_utils.plot_velocity_magnitude(v, interface)
# Plot the pressure
v = plot_utils.create_state_mtx(v, nx, ny, nz, dof)
plot_utils.plot_value(v[:, :, 0, 2], interface)
if __name__ == '__main__':
main()
| 31.392523
| 113
| 0.628461
|
d4825674739daee4ae751896282e2301a98e6113
| 2,020
|
py
|
Python
|
setup.py
|
yoloseem/sqlalchemy-imageattach
|
385ea31463530fbbb5dafc51313a4982c84dd581
|
[
"MIT"
] | null | null | null |
setup.py
|
yoloseem/sqlalchemy-imageattach
|
385ea31463530fbbb5dafc51313a4982c84dd581
|
[
"MIT"
] | null | null | null |
setup.py
|
yoloseem/sqlalchemy-imageattach
|
385ea31463530fbbb5dafc51313a4982c84dd581
|
[
"MIT"
] | null | null | null |
import os.path
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from setuptools.command.test import test
from sqlalchemy_imageattach.version import VERSION
def readme():
try:
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
return f.read()
except (IOError, OSError):
return ''
class pytest(test):
def finalize_options(self):
test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
from pytest import main
errno = main(self.test_args)
raise SystemExit(errno)
install_requires = [
'SQLAlchemy >= 0.8.0',
'Wand >= 0.3.0'
]
setup(
name='SQLAlchemy-ImageAttach',
version=VERSION,
description='SQLAlchemy extension for attaching images to entity objects',
long_description=readme(),
url='https://github.com/crosspop/sqlalchemy-imageattach',
author='Hong Minhee',
author_email='minhee' '@' 'dahlia.kr',
license='MIT License',
packages=find_packages(exclude=['tests']),
install_requires=install_requires,
tests_require=['pytest >= 2.3.0', 'WebOb'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python :: Implementation :: Stackless',
'Topic :: Database :: Front-Ends',
'Topic :: Multimedia :: Graphics'
],
cmdclass={'test': pytest}
)
| 28.857143
| 78
| 0.639604
|
6f629ede0ad3a6ad9bf614ae4b3bb078e06d8285
| 16,017
|
py
|
Python
|
serialmonitor.py
|
francescozoccheddu/SerialMonitor
|
66dd983d35be28774c8f4721c46fdca7ed737edb
|
[
"MIT"
] | null | null | null |
serialmonitor.py
|
francescozoccheddu/SerialMonitor
|
66dd983d35be28774c8f4721c46fdca7ed737edb
|
[
"MIT"
] | null | null | null |
serialmonitor.py
|
francescozoccheddu/SerialMonitor
|
66dd983d35be28774c8f4721c46fdca7ed737edb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
''' Copyright (c) 2017 Francesco Zoccheddu
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. '''
#Welcome
print("Serial monitor")
print("Copyright (c) 2017 Francesco Zoccheddu")
def qt(msg):
return "'" + str(msg) + "'"
#Imports
try:
import sys
import argparse
import serial
import signal
import collections
import serial.tools.list_ports
except ImportError as err:
print()
print("Module import error")
print("You may need to install " + qt(err.name) + " module")
raise SystemExit()
class SerialStream:
def __init__(self):
self.ptr = 0
def read(self, sw):
byte = sw.peek(self.ptr)
self.ptr += 1
return byte
def trim(self, index):
self.ptr -= index
def getIndex(self):
return self.ptr
class SerialWrapper:
def __init__(self, port, baudrate, bytesize, parity, stopbits, timeout, swflowctl, rtscts, dsrdtr):
self.ser = serial.Serial(port, baudrate, bytesize, parity, stopbits, timeout, swflowctl, rtscts, dsrdtr)
self.buf = []
def push(self):
byte = self.ser.read()
self.buf += [byte]
def peek(self, index):
while index >= len(self.buf):
self.push()
return self.buf[index]
def pop(self, count):
self.buf = self.buf[count:]
def close(self):
if self.ser is not None:
self.ser.close()
self.ser = None
return True
return False
class Session:
@staticmethod
def genEscapeHandlers():
class EscapeHandler:
def __init__(self, char, description):
self.char = char
self.description = description
def process(self, stream, session):
raise NotImplementedError("Abstract EscapeHandler")
def getDescription(self):
return self.description
def getChar(self):
return self.char
handlers = []
#Binary byte
class BinByteEscapeHandler(EscapeHandler):
def process(self, stream, session):
return str(session.intToBin(session.byteToInt(stream.read(session.sw))))
handlers += [BinByteEscapeHandler("b", "print next byte as binary string")]
#Hex byte
class HexByteEscapeHandler(EscapeHandler):
def process(self, stream, session):
return str(session.intToHex(session.byteToInt(stream.read(session.sw))))
handlers += [HexByteEscapeHandler("h", "print next byte as hexadecimal string")]
#Decimal byte
class DecimalByteEscapeHandler(EscapeHandler):
def process(self, stream, session):
return str(session.byteToInt(stream.read(session.sw)))
handlers += [DecimalByteEscapeHandler("d", "print next byte as decimal integer")]
#Decimal word
class DecimalWordEscapeHandler(EscapeHandler):
def process(self, stream, session):
return str((session.byteToInt(stream.read(session.sw)) << 8) | session.byteToInt(stream.read(session.sw)))
handlers += [DecimalWordEscapeHandler("w", "print next word as decimal integer")]
#Ascii byte
class AsciiByteEscapeHandler(EscapeHandler):
def process(self, stream, session):
return str(chr(session.byteToInt(stream.read(session.sw))))
handlers += [AsciiByteEscapeHandler("a", "print next byte as ascii char")]
#Discard byte
class DiscardByteEscapeHandler(EscapeHandler):
def process(self, stream, session):
stream.read(session.sw)
return ""
handlers += [DiscardByteEscapeHandler("x", "discard next byte")]
#Recursive escape
class RecursiveByteEscapeHandler(EscapeHandler):
def process(self, stream, session):
resc = str(chr(session.byteToInt(stream.read(session.sw))))
if resc == self.getChar():
return "<RECESC>"
return session.processEscape(resc, stream)
handlers += [RecursiveByteEscapeHandler("e", "use next byte as ascii escape char")]
#New line
class NewlineEscapeHandler(EscapeHandler):
def process(self, stream, session):
return "\n"
handlers += [NewlineEscapeHandler("n", "print new line")]
#Tab
class TabEscapeHandler(EscapeHandler):
def process(self, stream, session):
return "\t"
handlers += [TabEscapeHandler("t", "print tab")]
return handlers
escapeHandlers = genEscapeHandlers.__func__()
@staticmethod
def printEscapeHandlers():
print("Available format chars:")
for h in Session.escapeHandlers:
print(" " + h.getChar() + " " + h.getDescription())
def __init__(self, sw, escape, byteorder, formats, buffer):
self.sw = sw
self.escape = escape
self.byteorder = byteorder
self.formats = formats if formats is not None else [escape + "a"]
self.buffer = buffer
self.streams = []
for f in self.formats:
stream = SerialStream()
self.streams += [stream]
def byteToInt(self, byte):
return int.from_bytes(byte, byteorder=self.byteorder)
def intToBin(self, integer):
return bin(integer).lstrip("0b").zfill(8)
def intToHex(self, integer):
return hex(integer).lstrip("0x").zfill(2)
def processEscape(self, escape, stream):
for h in Session.escapeHandlers:
if escape == h.getChar():
return h.process(stream, self)
return "<BADESC>"
def read(self):
buf = ""
for f, s in zip(self.formats, self.streams):
toks = f.split(self.escape)
buf += toks[0]
toks = toks[1:]
for t in toks:
if len(t) > 0:
buf += self.processEscape(t[0], s)
buf += t[1:]
else:
buf += self.processEscape(self.escape, s)
if self.buffer:
minInd = None
for s in self.streams:
if minInd is None or minInd > s.getIndex():
minInd = s.getIndex()
for s in self.streams:
s.trim(minInd)
self.sw.pop(minInd)
else:
maxInd = None
for s in self.streams:
if maxInd is None or maxInd < s.getIndex():
maxInd = s.getIndex()
s.trim(s.getIndex())
self.sw.pop(maxInd)
return buf
def parseArgs():
def checkPositive(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
return ivalue
def checkChar(value):
svalue = str(value)
if len(svalue) != 1:
raise argparse.ArgumentTypeError("%s is an invalid char value" % value)
return svalue
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#Output group
oGroup = parser.add_argument_group("output settings")
#File
oGroup.add_argument("-of", "--ofile", type=argparse.FileType('w'), action="append", help="output to file")
#Char limit
default = 65535
oGroup.add_argument("-om", "--omax", type=checkPositive, default=default, help="output to file formatted line limit")
#Cursor
oGroup.add_argument("-ll", "--llimit", type=checkPositive, help="print a fixed number of lines (requires ANSI escape support)")
#Format group
fGroup = parser.add_argument_group("format settings")
#Format string
fGroup.add_argument("-f", "--format", type=str, action='append', help="custom format strings")
#Escape char
default = "%"
fGroup.add_argument("-e", "--escape", type=checkChar, default=default, help="format escape char")
#Escape char
default = "big"
choices = "big", "little"
fGroup.add_argument("-bo", "--byteorder", type=str, default=default, choices=choices, help="format byte order")
#Bufferize
fGroup.add_argument("-fb", "--fbuffer", action="store_true", help="allow asynchronous format strings with buffer")
#Help
fGroup.add_argument("-fl", "--flist", action="store_true", help="list format chars")
#Connection group
cGroup = parser.add_argument_group("connection settings")
#List
clGroup = cGroup.add_mutually_exclusive_group()
clGroup.add_argument("-l", "--list", action="store_true", help="list available ports")
clGroup.add_argument("-le", "--listex", action="store_true", help="list available ports and print descriptions")
#Port
cGroup.add_argument("-p", "--port", type=str, help="port to connect to")
#Baud rate
default = 9600
cGroup.add_argument("-b", "--baudrate", type=checkPositive, default=default, help="set baud rate")
#Byte size
default = 8
choices = [5, 6, 7, 8]
cGroup.add_argument("-bs", "--bytesize", type=int, choices=choices, default=default, help="set byte size")
#Parity bits
default = "NONE"
choices = ["NONE", "EVEN", "ODD", "SPACE", "MARK"]
cGroup.add_argument("-pb", "--parity", choices=choices, default=default, help="set parity bits")
#Stop bits
default = "ONE"
choices = ["ONE", "ONE_POINT_FIVE", "TWO"]
cGroup.add_argument("-sb", "--stopbits", choices=choices, default=default, help="set stop bits")
#Timeout
default = 1
cGroup.add_argument("-t", "--timeout", type=checkPositive, default=default, help="set timeout")
#Software flow control
cGroup.add_argument("-sfc", "--swflowctl", action="store_true", help="enable software flow control")
#RTS/CTS
cGroup.add_argument("-rc", "--rtscts", action="store_true", help="enable RTS/CTS")
#DSR/DTR
cGroup.add_argument("-dd", "--dsrdtr", action="store_true", help="enable DSR/DTR")
return parser.parse_args()
def main():
args = parseArgs()
if args.flist:
print()
Session.printEscapeHandlers()
if args.list or args.listex:
print()
ports = serial.tools.list_ports.comports()
if len(ports) > 0:
print("Avaliable ports:")
for p in ports:
if args.listex:
print(p.device + "\t" + p.description)
else:
print(p.device)
else:
print("No port available")
if (args.fbuffer):
print()
print("Warning: Format buffer enabled")
print("This may cause high memory consumption")
if args.port is not None:
print()
ports = serial.tools.list_ports.comports()
available = False
for p in ports:
if args.port == p.device:
available = True
break
if available:
print("Port " + qt(args.port) + " available")
sw = None
try:
sw = SerialWrapper(args.port, args.baudrate, args.bytesize, getattr(serial, "PARITY_" + args.parity), getattr(serial, "STOPBITS_" + args.stopbits), args.timeout, args.swflowctl, args.rtscts, args.dsrdtr)
print("Connection to port " + qt(args.port) + " opened")
except (ValueError, serial.SerialException) as err:
print("Error happened while connecting to port " + qt(args.port) + ":")
print(err)
if sw is not None:
session = Session(sw, args.escape, args.byteorder, args.format, args.fbuffer)
if args.ofile is not None:
history = collections.deque([], maxlen=args.omax)
if args.llimit is not None:
sys.stdout.write('\033[s')
sys.stdout.flush()
lastl = []
try:
running = True
def quitSignal(signal, frame):
nonlocal running
if running:
running = False
else:
print()
raise SystemExit("Aborted by keyboard")
signal.signal(signal.SIGINT, quitSignal)
signal.signal(signal.SIGTERM, quitSignal)
while running:
line = session.read()
if args.ofile is not None:
history.extend(line)
if args.llimit is not None:
lastl += [line]
if len(lastl) >= args.llimit:
sys.stdout.write('\033[u')
sys.stdout.flush()
for l in lastl:
sys.stdout.write(l)
sys.stdout.flush()
lastl = []
else:
sys.stdout.write(line)
sys.stdout.flush()
except serial.SerialException as err:
print()
print("Error happened while reading from port " + qt(args.port) + ":")
print(err)
if sw.close():
print()
print("Connection to port " + qt(args.port) + " closed")
if args.ofile is not None:
print()
print("Writing " + str(len(history)) + " formatted lines to output file" + ("s" if len(args.ofile) > 1 else ""))
for f in args.ofile:
try:
for l in history:
f.write(l)
f.close()
print("File " + qt(f.name) + " succesfully closed")
except IOError as err:
print("Error while writing file " + qt(f.name))
print(err)
else:
print("Port " + qt(args.port) + " not available")
if __name__ == "__main__":
main()
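# --- Illustrative note (editor-added, not part of the original script) ---
# Hedged example invocations based on the argparse options defined above;
# the port name and baud rate are assumptions for illustration only:
#
#     python serialmonitor.py -l                    # list available ports
#     python serialmonitor.py -fl                   # list format escape chars
#     python serialmonitor.py -p COM3 -b 115200 -f "%d%n"
#         # connect at 115200 baud and print each received byte as a decimal
#         # integer followed by a newline ("%" is the default escape char)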
| 37.335664
| 220
| 0.54336
|
3cba129486a570219fe36a5e8bcd62f6bd5de872
| 1,606
|
py
|
Python
|
2_faceCrop.py
|
k-m-irfan/MER_dataset_cleaning
|
02e6168f0f08f81ead93b0628f30ad0449862cca
|
[
"MIT"
] | 1
|
2022-03-26T18:18:56.000Z
|
2022-03-26T18:18:56.000Z
|
2_faceCrop.py
|
k-m-irfan/MER_dataset_cleaning
|
02e6168f0f08f81ead93b0628f30ad0449862cca
|
[
"MIT"
] | null | null | null |
2_faceCrop.py
|
k-m-irfan/MER_dataset_cleaning
|
02e6168f0f08f81ead93b0628f30ad0449862cca
|
[
"MIT"
] | null | null | null |
# face detection
# to remove multiple face images, non face images and corrupt/non convertible images
import os
import cv2
import mediapipe as mp
sourceDir = r'D:\MER\Dataset\Expressions\duplicate_deleted\Surprise' # Source folder (raw string avoids backslash escape issues)
targetDir = r'D:\MER\Dataset\Expressions\Face_crop\Surprise' # Target Folder (raw string avoids backslash escape issues)
imageCount = 0 # for file naming
errorCount = 0
findFace = mp.solutions.face_detection.FaceDetection()
def faceBox(frame):#face bounding box
try:
frameRGB = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
except:
return []
results = findFace.process(frameRGB)
myFaces = []
if results.detections != None:
for face in results.detections:
bBox = face.location_data.relative_bounding_box
myFaces.append(bBox)
return myFaces
for sRoot,sDirs,sFiles in os.walk(sourceDir):
break
for sourceName in sFiles:
sourceFile = sourceDir+'\\'+sourceName
image = cv2.imread(sourceFile)
faceBoxes = faceBox(image)
if len(faceBoxes) == 1:
imageCount+=1
fileName = 'surprise'+str(imageCount)+'.jpg' #change file name here
height,width,channel = image.shape
x,y,w,h = int(faceBoxes[0].xmin*width),int(faceBoxes[0].ymin*height),int(faceBoxes[0].width*width),int(faceBoxes[0].height*height)
# cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),1)
croppedImage = image[y:y+h,x:x+w]
try:
cv2.imwrite(targetDir+'\\'+fileName,croppedImage)
except:
errorCount+=1
print(errorCount)
continue
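# --- Illustrative note (editor-added, not part of the original script) ---
# A minimal, hedged single-image sanity check using the same faceBox() helper;
# the sample path below is an assumption for illustration only:
#
#     img = cv2.imread(r'D:\MER\sample.jpg')
#     boxes = faceBox(img)
#     print(len(boxes), "face(s) detected")  # the loop above keeps only images with exactly one box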
| 34.913043
| 139
| 0.648194
|
4fd53fede7bb7dae8ea0ba6a674bdae8895eb1c8
| 46
|
py
|
Python
|
face_align_example/src/face/__init__.py
|
O0laf/packaging
|
c3f5c0ba62909eb0b72251710c3d853c2b1bf8f3
|
[
"MIT"
] | null | null | null |
face_align_example/src/face/__init__.py
|
O0laf/packaging
|
c3f5c0ba62909eb0b72251710c3d853c2b1bf8f3
|
[
"MIT"
] | null | null | null |
face_align_example/src/face/__init__.py
|
O0laf/packaging
|
c3f5c0ba62909eb0b72251710c3d853c2b1bf8f3
|
[
"MIT"
] | null | null | null |
from .face import *
from .align_trans import *
| 23
| 26
| 0.76087
|
c31595893601f839fd732c00d1e6e1b212e3613f
| 862
|
py
|
Python
|
th/spacy2/tokenizer_exceptions.py
|
t-pimpisa/spaCy
|
a26173d13a3d341822188fec8c410a6d9668f31d
|
[
"MIT"
] | 10
|
2017-10-19T17:34:10.000Z
|
2021-04-16T03:06:56.000Z
|
th/spacy2/tokenizer_exceptions.py
|
t-pimpisa/spaCy
|
a26173d13a3d341822188fec8c410a6d9668f31d
|
[
"MIT"
] | 1
|
2017-09-19T05:44:50.000Z
|
2017-09-27T09:16:15.000Z
|
th/spacy2/tokenizer_exceptions.py
|
t-pimpisa/spaCy
|
a26173d13a3d341822188fec8c410a6d9668f31d
|
[
"MIT"
] | 5
|
2018-03-01T11:46:19.000Z
|
2021-01-03T16:23:26.000Z
|
# encoding: utf8
from __future__ import unicode_literals
from ...symbols import *
TOKENIZER_EXCEPTIONS = {
"ม.ค.": [
{ORTH: "ม.ค.", LEMMA: "มกราคม"}
],
"ก.พ.": [
{ORTH: "ก.พ.", LEMMA: "กุมภาพันธ์"}
],
"มี.ค.": [
{ORTH: "มี.ค.", LEMMA: "มีนาคม"}
],
"เม.ย.": [
{ORTH: "เม.ย.", LEMMA: "เมษายน"}
],
"พ.ค.": [
{ORTH: "พ.ค.", LEMMA: "พฤษภาคม"}
],
"มิ.ย.": [
{ORTH: "มิ.ย.", LEMMA: "มิถุนายน"}
],
"ก.ค.": [
{ORTH: "ก.ค.", LEMMA: "กรกฎาคม"}
],
"ส.ค.": [
{ORTH: "ส.ค.", LEMMA: "สิงหาคม"}
],
"ก.ย.": [
{ORTH: "ก.ย.", LEMMA: "กันยายน"}
],
"ต.ค.": [
{ORTH: "ต.ค.", LEMMA: "ตุลาคม"}
],
"พ.ย.": [
{ORTH: "พ.ย.", LEMMA: "พฤศจิกายน"}
],
"ธ.ค.": [
{ORTH: "ธ.ค.", LEMMA: "ธันวาคม"}
]
}
| 19.590909
| 43
| 0.36891
|
a3f85263dd4cb1997337bb1e2d2305b83d572f86
| 840
|
py
|
Python
|
keras_declarative/hyperparams/__init__.py
|
mrphys/keras-declarative
|
551b3cd01af38cf293027e92829c61b773e33d0f
|
[
"Apache-2.0"
] | null | null | null |
keras_declarative/hyperparams/__init__.py
|
mrphys/keras-declarative
|
551b3cd01af38cf293027e92829c61b773e33d0f
|
[
"Apache-2.0"
] | null | null | null |
keras_declarative/hyperparams/__init__.py
|
mrphys/keras-declarative
|
551b3cd01af38cf293027e92829c61b773e33d0f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Params package definition."""
# pylint: disable=g-multiple-import
from keras_declarative.hyperparams.base_config import *
from keras_declarative.hyperparams.oneof import *
from keras_declarative.hyperparams.params_dict import *
| 42
| 74
| 0.783333
|
5a8323f288b66df8fcf0f247c1769210e12cc017
| 16,709
|
py
|
Python
|
dask_ml/wrappers.py
|
souravsingh/dask-ml
|
37eca7d335509c2a4aa9332aa454f57092318487
|
[
"BSD-3-Clause"
] | null | null | null |
dask_ml/wrappers.py
|
souravsingh/dask-ml
|
37eca7d335509c2a4aa9332aa454f57092318487
|
[
"BSD-3-Clause"
] | null | null | null |
dask_ml/wrappers.py
|
souravsingh/dask-ml
|
37eca7d335509c2a4aa9332aa454f57092318487
|
[
"BSD-3-Clause"
] | null | null | null |
"""Meta-estimators for parallelizing estimators using the scikit-learn API."""
import logging
import dask.array as da
import dask.dataframe as dd
import dask.delayed
import numpy as np
import sklearn.base
import sklearn.metrics
from sklearn.utils.validation import check_is_fitted
from dask_ml.utils import _timer
from ._partial import fit
from ._utils import copy_learned_attributes
from .metrics import check_scoring, get_scorer
logger = logging.getLogger(__name__)
class ParallelPostFit(sklearn.base.BaseEstimator):
"""Meta-estimator for parallel predict and transform.
Parameters
----------
estimator : Estimator
The underlying estimator that is fit.
scoring : string or callable, optional
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique)
strings or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a
single value. Metric functions returning a list/array of values
can be wrapped into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
.. warning::
If None, the estimator's default scorer (if available) is used.
Most scikit-learn estimators will convert large Dask arrays to
a single NumPy array, which may exhaust the memory of your worker.
You probably want to always specify `scoring`.
Notes
-----
.. warning::
This class is not appropriate for parallel or distributed *training*
on large datasets. For that, see :class:`Incremental`, which provides
distributed (but sequential) training. If you're doing distributed
hyperparameter optimization on larger-than-memory datasets, see
:class:`dask_ml.model_selection.IncrementalSearch`.
This estimator does not parallelize the training step. This simply calls
    the underlying estimator's ``fit`` method and copies over the
learned attributes to ``self`` afterwards.
It is helpful for situations where your training dataset is relatively
small (fits on a single machine) but you need to predict or transform
a much larger dataset. ``predict``, ``predict_proba`` and ``transform``
will be done in parallel (potentially distributed if you've connected
to a ``dask.distributed.Client``).
Note that many scikit-learn estimators already predict and transform in
parallel. This meta-estimator may still be useful in those cases when your
dataset is larger than memory, as the distributed scheduler will ensure the
data isn't all read into memory at once.
See Also
--------
Incremental
dask_ml.model_selection.IncrementalSearch
Examples
--------
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> import sklearn.datasets
>>> import dask_ml.datasets
    Make a small 1,000 sample, 2 class training dataset and fit normally.
>>> X, y = sklearn.datasets.make_classification(n_samples=1000,
... random_state=0)
>>> clf = ParallelPostFit(estimator=GradientBoostingClassifier(),
... scoring='accuracy')
>>> clf.fit(X, y)
ParallelPostFit(estimator=GradientBoostingClassifier(...))
>>> clf.classes_
array([0, 1])
Transform and predict return dask outputs for dask inputs.
>>> X_big, y_big = dask_ml.datasets.make_classification(n_samples=100000,
    ...                                                     random_state=0)
    >>> clf.predict(X_big)
dask.array<predict, shape=(10000,), dtype=int64, chunksize=(1000,)>
Which can be computed in parallel.
    >>> clf.predict_proba(X_big).compute()
array([[0.99141094, 0.00858906],
[0.93178389, 0.06821611],
[0.99129105, 0.00870895],
...,
[0.97996652, 0.02003348],
[0.98087444, 0.01912556],
[0.99407016, 0.00592984]])
"""
def __init__(self, estimator=None, scoring=None):
self.estimator = estimator
self.scoring = scoring
@property
def _postfit_estimator(self):
# The estimator instance to use for postfit tasks like score
return self.estimator
def fit(self, X, y=None, **kwargs):
"""Fit the underlying estimator.
Parameters
----------
X, y : array-like
**kwargs
Additional fit-kwargs for the underlying estimator.
Returns
-------
self : object
"""
logger.info("Starting fit")
with _timer("fit", _logger=logger):
result = self.estimator.fit(X, y, **kwargs)
# Copy over learned attributes
copy_learned_attributes(result, self)
copy_learned_attributes(result, self.estimator)
return self
def partial_fit(self, X, y=None, **kwargs):
logger.info("Starting partial_fit")
with _timer("fit", _logger=logger):
result = self.estimator.partial_fit(X, y, **kwargs)
# Copy over learned attributes
copy_learned_attributes(result, self)
copy_learned_attributes(result, self.estimator)
return self
def transform(self, X):
"""Transform block or partition-wise for dask inputs.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
If the underlying estimator does not have a ``transform`` method, then
an ``AttributeError`` is raised.
Parameters
----------
X : array-like
Returns
-------
transformed : array-like
"""
transform = self._check_method("transform")
if isinstance(X, da.Array):
return X.map_blocks(transform)
elif isinstance(X, dd._Frame):
return _apply_partitionwise(X, transform)
else:
return transform(X)
def score(self, X, y):
"""Returns the score on the given data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
return self.estimator.score(X, y)
"""
scoring = self.scoring
if not scoring:
if type(self._postfit_estimator).score == sklearn.base.RegressorMixin.score:
scoring = "r2"
elif (
type(self._postfit_estimator).score
== sklearn.base.ClassifierMixin.score
):
scoring = "accuracy"
else:
scoring = self.scoring
if scoring:
if not dask.is_dask_collection(X) and not dask.is_dask_collection(y):
scorer = sklearn.metrics.get_scorer(scoring)
else:
scorer = get_scorer(scoring)
return scorer(self, X, y)
else:
return self._postfit_estimator.score(X, y)
def predict(self, X):
"""Predict for X.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
Parameters
----------
X : array-like
Returns
-------
y : array-like
"""
predict = self._check_method("predict")
if isinstance(X, da.Array):
return X.map_blocks(predict, dtype="int", drop_axis=1)
elif isinstance(X, dd._Frame):
return _apply_partitionwise(X, predict)
else:
return predict(X)
def predict_proba(self, X):
"""Predict for X.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
If the underlying estimator does not have a ``predict_proba``
method, then an ``AttributeError`` is raised.
Parameters
----------
X : array or dataframe
Returns
-------
y : array-like
"""
predict_proba = self._check_method("predict_proba")
if isinstance(X, da.Array):
# XXX: multiclass
return X.map_blocks(
predict_proba, dtype="float", chunks=(X.chunks[0], len(self.classes_))
)
elif isinstance(X, dd._Frame):
return _apply_partitionwise(X, predict_proba)
else:
return predict_proba(X)
def _check_method(self, method):
"""Check if self.estimator has 'method'.
Raises
------
AttributeError
"""
estimator = self._postfit_estimator
if not hasattr(estimator, method):
msg = "The wrapped estimator '{}' does not have a '{}' method.".format(
estimator, method
)
raise AttributeError(msg)
return getattr(estimator, method)
class Incremental(ParallelPostFit):
"""Metaestimator for feeding Dask Arrays to an estimator blockwise.
This wrapper provides a bridge between Dask objects and estimators
implementing the ``partial_fit`` API. These *incremental learners* can
train on batches of data. This fits well with Dask's blocked data
structures.
.. note::
This meta-estimator is not appropriate for hyperparameter optimization
on larger-than-memory datasets. For that, see
       :class:`dask_ml.model_selection.IncrementalSearch`.
See the `list of incremental learners`_ in the scikit-learn documentation
for a list of estimators that implement the ``partial_fit`` API. Note that
`Incremental` is not limited to just these classes, it will work on any
estimator implementing ``partial_fit``, including those defined outside of
scikit-learn itself.
Calling :meth:`Incremental.fit` with a Dask Array will pass each block of
the Dask array or arrays to ``estimator.partial_fit`` *sequentially*.
Like :class:`ParallelPostFit`, the methods available after fitting (e.g.
:meth:`Incremental.predict`, etc.) are all parallel and delayed.
The ``estimator_`` attribute is a clone of `estimator` that was actually
used during the call to ``fit``. All attributes learned during training
are available on ``Incremental`` directly.
.. _list of incremental learners: http://scikit-learn.org/stable/modules/scaling_strategies.html#incremental-learning # noqa
Parameters
----------
estimator : Estimator
        Any object supporting the scikit-learn ``partial_fit`` API.
scoring : string or callable, optional
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique)
strings or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a
single value. Metric functions returning a list/array of values
can be wrapped into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
.. warning::
If None, the estimator's default scorer (if available) is used.
Most scikit-learn estimators will convert large Dask arrays to
a single NumPy array, which may exhaust the memory of your worker.
You probably want to always specify `scoring`.
random_state : int or numpy.random.RandomState, optional
Random object that determines how to shuffle blocks.
shuffle_blocks : bool, default True
Determines whether to call ``partial_fit`` on a randomly selected chunk
of the Dask arrays (default), or to fit in sequential order. This does
not control shuffle between blocks or shuffling each block.
Attributes
----------
estimator_ : Estimator
A clone of `estimator` that was actually fit during the ``.fit`` call.
See Also
--------
ParallelPostFit
dask_ml.model_selection.IncrementalSearch
Examples
--------
>>> from dask_ml.wrappers import Incremental
>>> from dask_ml.datasets import make_classification
>>> import sklearn.linear_model
>>> X, y = make_classification(chunks=25)
>>> est = sklearn.linear_model.SGDClassifier()
>>> clf = Incremental(est, scoring='accuracy')
>>> clf.fit(X, y, classes=[0, 1])
When used inside a grid search, prefix the underlying estimator's
parameter names with ``estimator__``.
>>> from sklearn.model_selection import GridSearchCV
>>> param_grid = {"estimator__alpha": [0.1, 1.0, 10.0]}
>>> gs = GridSearchCV(clf, param_grid)
>>> gs.fit(X, y, classes=[0, 1])
"""
def __init__(
self, estimator=None, scoring=None, shuffle_blocks=True, random_state=None
):
self.shuffle_blocks = shuffle_blocks
self.random_state = random_state
super(Incremental, self).__init__(estimator=estimator, scoring=scoring)
@property
def _postfit_estimator(self):
check_is_fitted(self, "estimator_")
return self.estimator_
def _fit_for_estimator(self, estimator, X, y, **fit_kwargs):
check_scoring(estimator, self.scoring)
if not dask.is_dask_collection(X) and not dask.is_dask_collection(y):
result = estimator.partial_fit(X=X, y=y, **fit_kwargs)
else:
result = fit(
estimator,
X,
y,
random_state=self.random_state,
shuffle_blocks=self.shuffle_blocks,
**fit_kwargs
)
copy_learned_attributes(result, self)
self.estimator_ = result
return self
def fit(self, X, y=None, **fit_kwargs):
estimator = sklearn.base.clone(self.estimator)
self._fit_for_estimator(estimator, X, y, **fit_kwargs)
return self
def partial_fit(self, X, y=None, **fit_kwargs):
"""Fit the underlying estimator.
If this estimator has not been previously fit, this is identical to
:meth:`Incremental.fit`. If it has been previously fit,
``self.estimator_`` is used as the starting point.
Parameters
----------
X, y : array-like
**kwargs
Additional fit-kwargs for the underlying estimator.
Returns
-------
self : object
"""
estimator = getattr(self, "estimator_", None)
if estimator is None:
estimator = sklearn.base.clone(self.estimator)
return self._fit_for_estimator(estimator, X, y, **fit_kwargs)
def _first_block(dask_object):
"""Extract the first block / partition from a dask object
"""
if isinstance(dask_object, da.Array):
if dask_object.ndim > 1 and dask_object.numblocks[-1] != 1:
raise NotImplementedError(
"IID estimators require that the array "
"blocked only along the first axis. "
"Rechunk your array before fitting."
)
shape = (dask_object.chunks[0][0],)
if dask_object.ndim > 1:
shape = shape + (dask_object.chunks[1][0],)
return da.from_delayed(
dask_object.to_delayed().flatten()[0], shape, dask_object.dtype
)
if isinstance(dask_object, dd._Frame):
return dask_object.get_partition(0)
else:
return dask_object
def _apply_partitionwise(X, func):
"""Apply a prediction partition-wise to a dask.dataframe"""
sample = func(X._meta_nonempty)
if sample.ndim <= 1:
p = ()
else:
p = (sample.shape[1],)
if isinstance(sample, np.ndarray):
blocks = X.to_delayed()
arrays = [
da.from_delayed(
dask.delayed(func)(block), shape=(np.nan,) + p, dtype=sample.dtype
)
for block in blocks
]
return da.concatenate(arrays)
else:
return X.map_partitions(func, meta=sample)
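# --- Illustrative note (editor-added, not part of the original module) ---
# A hedged end-to-end sketch of ParallelPostFit on dask inputs, mirroring the
# class docstring above; the dataset size, chunking, and choice of estimator
# are assumptions for illustration only:
#
#     import dask_ml.datasets
#     from sklearn.linear_model import LogisticRegression
#
#     X, y = dask_ml.datasets.make_classification(n_samples=100_000, chunks=10_000)
#     clf = ParallelPostFit(LogisticRegression(), scoring="accuracy")
#     clf.fit(X.compute(), y.compute())   # training happens on in-memory data
#     yhat = clf.predict(X)               # lazy, block-wise dask array
#     yhat.compute()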
| 33.755556
| 129
| 0.628284
|
4ae726f74d4ff82903218cdfdd2a12cd8562a50b
| 1,359
|
py
|
Python
|
pymc3/distributions/dist_math.py
|
MichielCottaar/pymc3
|
f37198653e7d09881e7bc411cbd10fffbab442c2
|
[
"Apache-2.0"
] | 1
|
2020-09-29T12:32:32.000Z
|
2020-09-29T12:32:32.000Z
|
pymc3/distributions/dist_math.py
|
MichielCottaar/pymc3
|
f37198653e7d09881e7bc411cbd10fffbab442c2
|
[
"Apache-2.0"
] | null | null | null |
pymc3/distributions/dist_math.py
|
MichielCottaar/pymc3
|
f37198653e7d09881e7bc411cbd10fffbab442c2
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Mar 7, 2011
@author: johnsalvatier
'''
from __future__ import division
import theano.tensor as t
from theano.tensor import (
sum, switch, log, exp, sqrt,
eq, neq, lt, gt, le, ge, all, any,
cast, round, arange, max, min,
maximum, minimum, floor, ceil,
zeros_like, ones, ones_like,
concatenate, constant, argmax,
erf, gamma)
from theano.tensor import as_tensor_variable
from numpy import pi, inf, nan
import numpy as np
from .special import gammaln, multigammaln
from theano.printing import Print
from .distribution import *
def bound(logp, *conditions):
"""
Bounds a log probability density with several conditions
Parameters
----------
logp : float
    *conditions : booleans
Returns
-------
logp if all conditions are true
-inf if some are false
"""
return switch(alltrue(conditions), logp, -inf)
def alltrue(vals):
ret = 1
for c in vals:
ret = ret * (1 * c)
return ret
def logpow(x, m):
"""
Calculates log(x**m) since m*log(x) will fail when m, x = 0.
"""
return switch(eq(x, 0) & eq(m, 0), 0, m * log(x))
def factln(n):
return gammaln(n + 1)
def idfn(x):
return x
def std_cdf(x):
"""
Calculates the standard normal cumulative distribution function.
"""
return 0.5 + 0.5*erf(x / sqrt(2.))
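# --- Illustrative note (editor-added, not part of the original module) ---
# A minimal, hedged sketch of how `bound` is typically used inside a logp
# implementation: the density is valid only under parameter constraints, and
# violating inputs are mapped to -inf. The symbol names below (`value`, `mu`,
# `sd`) are assumptions for illustration only:
#
#     def normal_logp(value, mu, sd):
#         return bound(
#             -0.5 * ((value - mu) / sd) ** 2 - log(sd) - 0.5 * log(2 * pi),
#             sd > 0,
#         )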
| 18.616438
| 68
| 0.628403
|
59f8d7fbe4cbb6c4383138680a51e1fbc5666343
| 1,992
|
py
|
Python
|
API/v1/__init__.py
|
MisakaMikoto0502/XenXenXenSe
|
58a4d288dd2ef3f09ee0062b542b50f0b11d1c43
|
[
"MIT"
] | null | null | null |
API/v1/__init__.py
|
MisakaMikoto0502/XenXenXenSe
|
58a4d288dd2ef3f09ee0062b542b50f0b11d1c43
|
[
"MIT"
] | null | null | null |
API/v1/__init__.py
|
MisakaMikoto0502/XenXenXenSe
|
58a4d288dd2ef3f09ee0062b542b50f0b11d1c43
|
[
"MIT"
] | null | null | null |
from xmlrpc.client import Fault
from fastapi import APIRouter, Depends, HTTPException
from XenAPI.XenAPI import Failure
from XenGarden.session import create_session
from API.v1.Common import xenapi_failure_jsonify
from API.v1.Console import console_router
from API.v1.GuestMetrics import guest_router
from API.v1.Host import host_router
from API.v1.Network import network_router
from API.v1.PIF import pif_router
from API.v1.root import root_router
from API.v1.SR import sr_router
from API.v1.VBD import vbd_router
from API.v1.VDI import vdi_router
from API.v1.VIF import vif_router
from API.v1.VM import vm_router
from app.settings import Settings
v1_router = APIRouter()
v1_router.include_router(root_router)
# === Condition Checker ===
async def verify_cluster_id(cluster_id: str):
# KeyError Handling
try:
session = create_session(
cluster_id, get_xen_clusters=Settings.get_xen_clusters()
)
session.xenapi.session.logout()
except KeyError as key_error:
raise HTTPException(
status_code=404, detail=f"{key_error} cluster does not exist"
)
except Failure as xenapi_error:
raise HTTPException(
status_code=500, detail=xenapi_failure_jsonify(xenapi_error)
)
except Fault as xml_rpc_error:
raise HTTPException(
status_code=int(xml_rpc_error.faultCode),
detail=xml_rpc_error.faultString,
)
# === API Functions ===
_api_router = APIRouter(dependencies=[Depends(verify_cluster_id)])
_api_router.include_router(host_router)
_api_router.include_router(console_router)
_api_router.include_router(sr_router)
_api_router.include_router(vbd_router)
_api_router.include_router(vdi_router)
_api_router.include_router(vif_router)
_api_router.include_router(pif_router)
_api_router.include_router(network_router)
_api_router.include_router(vm_router)
_api_router.include_router(guest_router)
# === Add ===
v1_router.include_router(_api_router)
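# --- Illustrative note (editor-added, not part of the original module) ---
# A hedged sketch of mounting this router in a FastAPI application; the app
# module layout and URL prefix are assumptions for illustration only:
#
#     from fastapi import FastAPI
#     from API.v1 import v1_router
#
#     app = FastAPI()
#     app.include_router(v1_router, prefix="/v1")
#     # routes included via _api_router run verify_cluster_id() first, because
#     # it is declared as a router-level dependency above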
| 30.646154
| 73
| 0.771084
|