hexsha stringlengths 40-40 | size int64 5-2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3-248 | max_stars_repo_name stringlengths 5-125 | max_stars_repo_head_hexsha stringlengths 40-78 | max_stars_repo_licenses listlengths 1-10 | max_stars_count int64 1-191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24-24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24-24 ⌀ | max_issues_repo_path stringlengths 3-248 | max_issues_repo_name stringlengths 5-125 | max_issues_repo_head_hexsha stringlengths 40-78 | max_issues_repo_licenses listlengths 1-10 | max_issues_count int64 1-67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24-24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24-24 ⌀ | max_forks_repo_path stringlengths 3-248 | max_forks_repo_name stringlengths 5-125 | max_forks_repo_head_hexsha stringlengths 40-78 | max_forks_repo_licenses listlengths 1-10 | max_forks_count int64 1-105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24-24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24-24 ⌀ | content stringlengths 5-2.06M | avg_line_length float64 1-1.02M | max_line_length int64 3-1.03M | alphanum_fraction float64 0-1 | count_classes int64 0-1.6M | score_classes float64 0-1 | count_generators int64 0-651k | score_generators float64 0-1 | count_decorators int64 0-990k | score_decorators float64 0-1 | count_async_functions int64 0-235k | score_async_functions float64 0-1 | count_documentation int64 0-1.04M | score_documentation float64 0-1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
675c672977a46ec740f1a913b376d847b4aabb59 | 4,212 | py | Python | examples/turbulent_condensate/run.py | chrisjbillington/parpde | 4f882cbbb9ad6c57814e4422e9ba063fa27886a0 | ["BSD-2-Clause"] | null | null | null | examples/turbulent_condensate/run.py | chrisjbillington/parpde | 4f882cbbb9ad6c57814e4422e9ba063fa27886a0 | ["BSD-2-Clause"] | null | null | null | examples/turbulent_condensate/run.py | chrisjbillington/parpde | 4f882cbbb9ad6c57814e4422e9ba063fa27886a0 | ["BSD-2-Clause"] | null | null | null |
# An example of a turbulent BEC in a harmonic trap. The groundstate is found
# and then some vortices randomly printed about with a phase printing. Some
# evolution in imaginary time is then performed to smooth things out before
# evolving the BEC in time.
# Run with 'mpirun -n <N CPUs> python run_example.py'
from __future__ import division, print_function
import sys
# sys.path.insert(0, '../..') # The location of the modules we need to import
import numpy as np
from parPDE import Simulator2D, LAPLACIAN
from parPDE.BEC2D import BEC2D
def get_number_and_trap(rhomax, R):
"""Gives the 2D normalisation constant and trap frequency required for the
specified maximum density and radius of a single-component condensate in
the Thomas-Fermi approximation"""
N = pi * rhomax * R**2 / 2
omega = np.sqrt(2 * g * rhomax / (m * R**2))
return N, omega
# Constants:
pi = np.pi
hbar = 1.054571726e-34 # Reduced Planck's constant
a_0 = 5.29177209e-11 # Bohr radius
u = 1.660539e-27 # unified atomic mass unit
m = 86.909180*u # 87Rb atomic mass
a = 98.98*a_0 # 87Rb |2,2> scattering length
g = 4*pi*hbar**2*a/m # 87Rb self interaction constant
rhomax = 2.5e14 * 1e6 # Desired peak condensate density
R = 7.5e-6 # Desired condensate radius
mu = g * rhomax # Approximate chemical potential for desired max density
# (assuming all population is in mF=+1 or mF=-1)
N_2D, omega = get_number_and_trap(rhomax, R) # 2D normalisation constant and trap frequency
# required for specified radius and peak density
# Space:
nx_global = ny_global = 256
x_max_global = y_max_global = 10e-6
simulator = Simulator2D(-x_max_global, x_max_global, -y_max_global, y_max_global, nx_global, ny_global,
periodic_x=True, periodic_y=True, operator_order=6)
bec2d = BEC2D(simulator, natural_units=False, use_ffts=True)
x = simulator.x
y = simulator.y
dx = simulator.dx
dy = simulator.dy
r2 = x**2.0 + y**2.0
r = np.sqrt(r2)
# A Harmonic trap:
V = 0.5 * m * omega**2 * R**2.0 * (r/R)**2
dispersion_timescale = dx**2 * m / (pi * hbar)
chemical_potential_timescale = 2*pi*hbar/mu
potential_timescale = 2*pi*hbar/V.max()
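# These timescales set the numerics below: the evolve() calls use
# dt = dispersion_timescale/2, and the imaginary-time smoothing runs for one
# chemical-potential period.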
K = -hbar**2/(2*m)*LAPLACIAN
def H(t, psi):
"""The Hamiltonian for single-component wavefunction psi. Returns the
kinetic term as an OperatorSum instance, and the local terms separately."""
H_local_lin = V
H_local_nonlin = g * abs(psi)**2
return K, H_local_lin, H_local_nonlin
if __name__ == '__main__':
# The initial Thomas-Fermi guess:
psi = rhomax * (1 - (x**2 + y**2) / R**2)
psi[psi < 0] = 0
psi = np.sqrt(psi)
# Find the groundstate:
psi = bec2d.find_groundstate(H, mu, psi, relaxation_parameter=1.7, convergence=1e-13,
output_interval=100, output_directory='groundstate', convergence_check_interval=10)
# psi is real so far, convert it to complex:
psi = np.array(psi, dtype=complex)
# Print some vortices, seeding the pseudorandom number generator so that
# MPI processes all agree on where the vortices are:
np.random.seed(42)
for i in range(30):
sign = np.sign(np.random.normal())
x_vortex = np.random.normal(0, scale=R)
y_vortex = np.random.normal(0, scale=R)
        psi[:] *= np.exp(sign * 1j*np.arctan2(x - x_vortex, y - y_vortex))  # phase winding about (x_vortex, y_vortex)
# Smooth it a bit in imaginary time:
psi = bec2d.evolve(dt=dispersion_timescale/2, t_final=chemical_potential_timescale,
H=H, psi=psi, mu=mu, method='rk4', imaginary_time=True,
output_interval=100, output_directory='smoothing')
# And evolve it in time for 10ms:
psi = bec2d.evolve(dt=dispersion_timescale/2, t_final=10e-3,
H=H, psi=psi, mu=mu, method='rk4', imaginary_time=False,
output_interval=100, output_directory='evolution')
| 39.364486 | 116 | 0.626068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,514 | 0.359449 |
675c80c6427e7f597a41119f9db761e49256c6ca | 3,918 | py | Python | src/Test_Sfepy_NavierStokes.py | somu15/Small_Pf_code | 35f3d28faab2aa80f2332499f5e7ab19b040eabe | ["MIT"] | null | null | null | src/Test_Sfepy_NavierStokes.py | somu15/Small_Pf_code | 35f3d28faab2aa80f2332499f5e7ab19b040eabe | ["MIT"] | null | null | null | src/Test_Sfepy_NavierStokes.py | somu15/Small_Pf_code | 35f3d28faab2aa80f2332499f5e7ab19b040eabe | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 28 09:33:53 2020
@author: dhulls
"""
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct, Struct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.postprocess.probes_vtk import ProbeFromFile, Probe
import numpy as np
helps = {
'show' : 'show the results figure',
}
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/3d/fluid_mesh.inp')
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field_1 = Field.from_args(name='3_velocity', dtype=nm.float64, shape=3, region=omega, approx_order=1)
field_2 = Field.from_args(name='pressure', dtype=nm.float64, shape=1, region=omega, approx_order=1)
region_0 = domain.create_region(name='Walls1', select='vertices in (y < -0.049)', kind='facet')
region_1 = domain.create_region(name='Walls2', select='vertices in (y > 0.049)', kind='facet')
region_2 = domain.create_region(name='Inlet', select='vertices in (x < -0.499)', kind='facet')
region_3 = domain.create_region(name='Outlet', select='vertices in (x > 0.499)', kind='facet')
ebc_1 = EssentialBC(name='Walls1', region=region_0, dofs={'u.[0,1,2]' : 0.0})
ebc_2 = EssentialBC(name='Walls2', region=region_1, dofs={'u.[0,1,2]' : 0.0})
ebc_3 = EssentialBC(name='Inlet', region=region_2, dofs={'u.0' : 1.0, 'u.[1,2]' : 0.0})
ebc_4 = EssentialBC(name='Outlet', region=region_3, dofs={'p':0.0, 'u.[1,2]' : 0.0})
viscosity = Material(name='viscosity', value=1.25e-3)
variable_1 = FieldVariable('u', 'unknown', field_1)
variable_2 = FieldVariable(name='v', kind='test', field=field_1, primary_var_name='u')
variable_3 = FieldVariable(name='p', kind='unknown', field=field_2)
variable_4 = FieldVariable(name='q', kind='test', field=field_2, primary_var_name='p')
integral_1 = Integral('i1', order=2)
integral_2 = Integral('i2', order=3)
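# Weak-form terms of the steady incompressible Navier-Stokes equations:
# t1 viscous diffusion, t2 convection, t3 pressure gradient, t4 continuity.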
t1 = Term.new(name='dw_div_grad(viscosity.value, v, u)',
integral=integral_2, region=omega, viscosity=viscosity, v=variable_2, u=variable_1)
t2 = Term.new(name='dw_convect(v, u)',
integral=integral_2, region=omega, v=variable_2, u=variable_1)
t3 = Term.new(name='dw_stokes(v, p)',
integral=integral_1, region=omega, v=variable_2, p=variable_3)
t4 = Term.new(name='dw_stokes(u, q)',
integral=integral_1, region=omega, u=variable_1, q=variable_4)
eq1 = Equation('balance', t1+t2-t3)
eq2 = Equation('incompressibility', t4)
eqs = Equations([eq1,eq2])
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({'i_max' : 20, 'eps_a' : 1e-8, 'eps_r' : 1.0, 'macheps' : 1e-16, 'lin_red' : 1e-2, 'ls_red' : 0.1, 'ls_red_warp' : 0.001, 'ls_on' : 0.99999, 'ls_min' : 1e-5, 'check' : 0, 'delta' : 1e-6}, lin_solver=ls, status=nls_status)
pb = Problem('Navier-Stokes', equations=eqs)
pb.set_bcs(ebcs=Conditions([ebc_1, ebc_2, ebc_3]))
pb.set_solver(nls)
status = IndexedStruct()
state = pb.solve(status=status, save_results=True)
out = state.create_output_dict()
pb.save_state('Navier_Stokes.vtk', out=out)
view = Viewer('Navier_Stokes.vtk')
view(rel_scaling=2,
is_scalar_bar=True, is_wireframe=True)
| 41.242105 | 234 | 0.70342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 840 | 0.214395 |
675d0fb6b3b1a21973d25abe79cafce5b94844f8 | 4,063 | py | Python | nd_customization/api/lab_test.py | libermatic/nd_customization | 4ee14c661651b09ef16aaf64952ceedc67bb602d | ["MIT"] | null | null | null | nd_customization/api/lab_test.py | libermatic/nd_customization | 4ee14c661651b09ef16aaf64952ceedc67bb602d | ["MIT"] | 10 | 2018-11-12T21:53:56.000Z | 2019-04-27T06:24:13.000Z | nd_customization/api/lab_test.py | libermatic/nd_customization | 4ee14c661651b09ef16aaf64952ceedc67bb602d | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
import json
from frappe.utils import now, cint
from functools import partial
from toolz import compose
@frappe.whitelist()
def deliver_result(lab_test, revert=0, delivery_time=None):
doc = frappe.get_doc("Lab Test", lab_test)
if doc and doc.docstatus == 1:
if cint(revert):
doc.delivery_time = None
else:
doc.delivery_time = delivery_time or now()
doc.save()
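# Collects the names of subsection rows from a template's child tables:
# keep rows flagged is_subsection, then pull test_event (or particulars).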
_get_subsections = compose(
    list,  # materialize: map/filter return lazy iterators on Python 3
    partial(map, lambda x: x.get("test_event") or x.get("particulars")),
    partial(filter, lambda x: cint(x.is_subsection) == 1),
)
def change_test_loading(doc, template):
if template.test_template_type == "Compound":
subsections = _get_subsections(template.normal_test_templates)
if subsections:
for item in doc.normal_test_items:
if item.test_name in subsections:
frappe.db.set_value(
"Normal Test Items", item.name, "require_result_value", 0
)
elif item.test_name and not item.test_event:
frappe.db.set_value(
"Normal Test Items", item.name, "test_name", None
)
frappe.db.set_value(
"Normal Test Items", item.name, "test_event", item.test_name
)
if template.test_template_type == "Descriptive":
subsections = _get_subsections(template.special_test_template)
if subsections:
for item in doc.special_test_items:
if item.test_particulars in subsections:
frappe.db.set_value(
"Special Test Items", item.name, "require_result_value", 0
)
if template.test_template_type == "Grouped":
for item in doc.normal_test_items:
if item.test_name and item.template and item.template != doc.template:
test_comment = frappe.db.get_value(
"Lab Test Template", item.template, "test_comment"
)
if test_comment:
frappe.db.set_value(
"Normal Test Items", item.name, "test_comment", test_comment
)
def load_result_format(lab_test, template, prescription, invoice):
from erpnext.healthcare.doctype.lab_test.lab_test import load_result_format
load_result_format(lab_test, template, prescription, invoice)
change_test_loading(lab_test, template)
@frappe.whitelist()
def create_invoice(company, patient, lab_tests, prescriptions):
from erpnext.healthcare.doctype.lab_test.lab_test import create_invoice
si_name = create_invoice(company, patient, lab_tests, prescriptions)
test_ids = json.loads(lab_tests)
if test_ids:
si = frappe.get_doc("Sales Invoice", si_name)
si.patient = patient
find_item = _find_item(si.items)
for test_id in test_ids:
test = frappe.get_doc("Lab Test", test_id)
item_code = frappe.db.get_value("Lab Test Template", test.template, "item")
item = find_item(item_code)
item.reference_dt = "Lab Test"
item.reference_dn = test_id
item.lab_test_result_date = test.result_date
si.save()
return si_name
def _find_item(items):
def fn(item_code):
for item in items:
if item.item_code == item_code:
return item
return fn
@frappe.whitelist()
def link_invoice(lab_test, sales_invoice):
test_doc = frappe.get_doc("Lab Test", lab_test)
invoice_doc = frappe.get_doc("Sales Invoice", sales_invoice)
if test_doc.docstatus == 2 or invoice_doc.docstatus == 2:
frappe.throw("Cannot link cancelled documents.")
if test_doc.patient != invoice_doc.patient:
frappe.throw("Lab Test and Sales Invoice belong to different Patients.")
frappe.db.set_value("Lab Test", lab_test, "invoice", sales_invoice)
| 36.603604 | 87 | 0.628846 | 0 | 0 | 0 | 0 | 1,614 | 0.397243 | 0 | 0 | 496 | 0.122077 |
675e10e80e4d185d1bb67fc4f8ca4f7d8148f472 | 2,283 | py | Python | build-flask-app.py | Abdur-rahmaanJ/build-flask-app | 476d1f0e0c505a60acadde13397b2787f49bd7dc | ["MIT"] | 1 | 2020-02-24T04:09:25.000Z | 2020-02-24T04:09:25.000Z | build-flask-app.py | Abdur-rahmaanJ/build-flask-app | 476d1f0e0c505a60acadde13397b2787f49bd7dc | ["MIT"] | null | null | null | build-flask-app.py | Abdur-rahmaanJ/build-flask-app | 476d1f0e0c505a60acadde13397b2787f49bd7dc | ["MIT"] | 1 | 2020-07-15T05:03:18.000Z | 2020-07-15T05:03:18.000Z |
#!/usr/bin/env python3
from scripts.workflow import get_app_name, is_name_valid
from scripts.workflow import get_args, is_args_valid
from scripts.workflow import create_dir, create_app, create_templates_folder, create_static_folder, create_dockerfile
from scripts.manual import print_manual
from scripts.messages import empty_name, success_msg, failure_msg
import sys
app_name = get_app_name()
args = get_args()
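# get_args() returns all CLI tokens including the app name; drop it so only option flags remain.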
args.remove(app_name)
# validate name of app!!
if (is_name_valid(app_name)):
# validate all arguments first!!
if(is_args_valid(args)):
# Create folder named app_name
create_dir(app_name)
# Arguments
debugger_mode = False
import_css_js = False
use_docker = False
if '-d' in args or '--debugger' in args:
debugger_mode = True
print("- Debugger mode on")
print(" |__ added debug=True")
else:
print("- Debugger mode off")
if '-cj' in args or '--css-js' in args:
import_css_js = True
create_static_folder(app_name)
print("- Css and Js mode on")
print(" |__ import static/stylesheet/style.css")
print(" |__ import static/js/app.css")
else:
print("- Css and Js mode off")
if '-dc' in args or '--docker-container' in args:
use_docker = True
print("- Docker mode on")
print(' |__ cd %s' % app_name)
print(' |__ \"docker-compose up -d\" to start app')
else:
print("- Docker mode off")
# create templates folder to hold index.html
create_templates_folder(app_name, import_css_js)
# create app.py in root directory(app_name)
create_app(app_name, debugger_mode)
# move application to docker container;
if (use_docker):
# generate Dockerfile
create_dockerfile(app_name)
success_msg(app_name)
else:
print('Unknown argument detected! Please check the help section\n')
print_manual()
failure_msg(app_name)
else:
if (app_name == '-h' or app_name == '--help'):
print_manual()
else:
print('Please choose another app name')
failure_msg(app_name)
| 31.708333 | 117 | 0.616732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 706 | 0.309242 |
675e7374895c08103fdfc9d9f90f2f45da303fe7 | 2,960 | py | Python | stacker/assembler.py | unrahul/stacker | f94e9e6ad9351fd8fa94bef4ae0c4ed0afc8305d | ["Apache-2.0"] | null | null | null | stacker/assembler.py | unrahul/stacker | f94e9e6ad9351fd8fa94bef4ae0c4ed0afc8305d | ["Apache-2.0"] | 11 | 2020-01-23T16:45:07.000Z | 2020-02-08T16:53:22.000Z | stacker/assembler.py | unrahul/stacker | f94e9e6ad9351fd8fa94bef4ae0c4ed0afc8305d | ["Apache-2.0"] | 2 | 2020-01-29T18:18:20.000Z | 2020-01-29T19:55:25.000Z |
import os
from pathlib import Path
from jinja2 import Template
import parser
from utils import write_to_file
from utils import mkdir_p
parser.init()
# parse and assign to vars
spec = parser.spec
def _concat(part: str) -> str:
    """helper to append a newline to each template slice."""
    return "{}\n".format(part)
def slices_filename_content_hash() -> dict:
"""create a dict of filename: content for slices"""
docker_slices = {}
path = Path.cwd().joinpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "slices")
)
for file in path.iterdir():
docker_slices[file.name] = file.read_text()
return docker_slices
def concat_slices(component: str = "tensorflow", flavor: str = "mkl") -> str:
"""concatenate templates based on the what user want"""
docker_slices = slices_filename_content_hash()
names = ["os.dockerfile"]
dockerfile = ""
if component == "tensorflow" and flavor == "mkl":
names.append("tensorflow.dockerfile")
names.append("horovod.dockerfile")
if component == "pytorch" and flavor == "mkl":
names.append("pytorch.dockerfile")
names.append("horovod.dockerfile")
for name in names:
dockerfile += _concat(docker_slices[name])
return "".join(dockerfile)
def insert_template_values(dockerfile: str, kwargs: dict):
dockerfile = Template(dockerfile)
dockerfile = dockerfile.render(**kwargs)
return dockerfile
def generate_dockerfile(os: str, framework: str, file_name: str = "Dockerfile"):
"""generate and write to dir dockerfiles per `os` and `framework`"""
dlrs = spec["stack"]["dlrs"]
os_version = dlrs[os]["version"]
pkgs = dlrs[os]["os_pkgs"]
tf_version = dlrs[os]["tensorflow"]["mkl"]["version"]
hvd_version = dlrs[os]["horovod"]["version"]
torch_version = dlrs[os]["pytorch"]["mkl"]["version"]
pkg_installer = "apt-get install -y" if os == "ubuntu" else "swupd bundle-add"
kwargs = {
"os": "{}:{}".format(os, os_version),
"pkg_install": "{} {}".format(pkg_installer, " ".join(pkgs)),
"tf_version": tf_version,
"hvd_version": hvd_version,
"torch_version": torch_version,
}
dockerfile_template = concat_slices(framework)
dockerfile = insert_template_values(dockerfile_template, kwargs)
write_to_file(file_name, dockerfile)
def generate_all_dockerfiles(generate: bool = True, build: bool = False) -> None:
"""generate all dockerfiles for all frameworks and OSes"""
if generate:
base_dir = "./dockerfiles"
for framework in ["pytorch", "tensorflow"]:
for _os in ["ubuntu", "clearlinux"]:
save_to_dir = mkdir_p(os.path.join(base_dir, _os, framework))
save_to_file = os.path.join(save_to_dir, "Dockerfile")
generate_dockerfile(_os, framework, save_to_file)
if build:
        # TODO(unrahul): build the dockerfiles
pass
| 33.636364 | 82 | 0.65777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 805 | 0.271959 |
675e9236debc2ccf610756e1ddfa6942ba31102c | 621 | py | Python | akshare/fx/cons.py | PKUuu/akshare | 03967312b6c8afdec32e081fb23ae5916b674936 | ["MIT"] | 1 | 2020-05-14T13:20:48.000Z | 2020-05-14T13:20:48.000Z | akshare/fx/cons.py | 13767849/akshare | 5b7e4daaa80b1ccaf3f5a980a1205848e2e8570d | ["MIT"] | null | null | null | akshare/fx/cons.py | 13767849/akshare | 5b7e4daaa80b1ccaf3f5a980a1205848e2e8570d | ["MIT"] | 2 | 2020-09-23T08:50:14.000Z | 2020-09-28T09:57:07.000Z |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: Albert King
date: 2019/10/20 10:58
contact: jindaxiang@163.com
desc: foreign exchange (FX) configuration file
"""
# headers
SHORT_HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36'
}
# url
FX_SPOT_URL = "http://www.chinamoney.com.cn/r/cms/www/chinamoney/data/fx/rfx-sp-quot.json"
FX_SWAP_URL = "http://www.chinamoney.com.cn/r/cms/www/chinamoney/data/fx/rfx-sw-quot.json"
FX_PAIR_URL = "http://www.chinamoney.com.cn/r/cms/www/chinamoney/data/fx/cpair-quot.json"
# payload
SPOT_PAYLOAD = {
"t": {}
}
| 29.571429 | 134 | 0.698873 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 527 | 0.832543 |
675ffb2c535d8805575601fc596c61d52191a22a | 1,283 | py | Python | entropylab/tests/test_issue_204.py | qguyk/entropy | e43077026c83fe84de022cf8636b2c9d42f1d330 | ["BSD-3-Clause"] | null | null | null | entropylab/tests/test_issue_204.py | qguyk/entropy | e43077026c83fe84de022cf8636b2c9d42f1d330 | ["BSD-3-Clause"] | null | null | null | entropylab/tests/test_issue_204.py | qguyk/entropy | e43077026c83fe84de022cf8636b2c9d42f1d330 | ["BSD-3-Clause"] | 1 | 2022-03-29T11:47:31.000Z | 2022-03-29T11:47:31.000Z |
import os
from datetime import datetime
import pytest
from entropylab import ExperimentResources, SqlAlchemyDB, PyNode, Graph
@pytest.mark.skipif(
datetime.utcnow() > datetime(2022, 6, 25),
reason="Please remove after two months have passed since the fix was merged",
)
def test_issue_204(initialized_project_dir_path, capsys):
# arrange
# remove DB files because when they are present, issue does not occur
db_files = [".entropy/params.db", ".entropy/entropy.db", ".entropy/entropy.hdf5"]
for file in db_files:
full_path = os.path.join(initialized_project_dir_path, file)
if os.path.exists(full_path):
os.remove(full_path)
# experiment to run
experiment_resources = ExperimentResources(
SqlAlchemyDB(initialized_project_dir_path)
)
def root_node():
print("root node")
# error that should be logged to stderr:
print(a)
return {}
node0 = PyNode(label="root_node", program=root_node)
experiment = Graph(resources=experiment_resources, graph={node0}, story="run_a")
# act
try:
experiment.run()
except RuntimeError:
pass
# assert
captured = capsys.readouterr()
assert "message: name 'a' is not defined" in captured.err
| 26.729167 | 85 | 0.681216 | 0 | 0 | 0 | 0 | 1,152 | 0.897896 | 0 | 0 | 346 | 0.26968 |
67607806f4f757a440672ca409795cb6fc24a8c8 | 97 | py | Python | src/__init__.py | PY-GZKY/fconversion | f1da069ac258444c8a6b2a5fe77d0e1295a0d4e4 | ["Apache-2.0"] | 1 | 2022-02-11T09:39:08.000Z | 2022-02-11T09:39:08.000Z | src/__init__.py | PY-GZKY/fconversion | f1da069ac258444c8a6b2a5fe77d0e1295a0d4e4 | ["Apache-2.0"] | null | null | null | src/__init__.py | PY-GZKY/fconversion | f1da069ac258444c8a6b2a5fe77d0e1295a0d4e4 | ["Apache-2.0"] | null | null | null |
from .file_core import FileEngine
from src.utils.utils import *
from .version import __version__
| 24.25 | 33 | 0.824742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
676261a506ad81b93b8c0f929316b27e9a10621d | 169 | py | Python | app/app/calc.py | benning55/recipe-app-api | a63366c7bb576fefbc755fe873731d2edf3e74d2 | ["MIT"] | null | null | null | app/app/calc.py | benning55/recipe-app-api | a63366c7bb576fefbc755fe873731d2edf3e74d2 | ["MIT"] | null | null | null | app/app/calc.py | benning55/recipe-app-api | a63366c7bb576fefbc755fe873731d2edf3e74d2 | ["MIT"] | null | null | null |
#
# def add(x, y):
# """
# Add Number Together
# """
# return x+y
#
#
# def subtract(x, y):
# """
# Subtract y from x
# """
# return x-y
| 12.071429 | 25 | 0.402367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.923077 |
676411e3c65abd02fa317570d558db02833381e4 | 7,673 | py | Python | open_anafi/lib/indicator_tools.py | Cour-des-comptes/open-anafi-backend | 1d3ebcfe7b46315e91618f540ef1c95b4e20d9af | ["MIT"] | 7 | 2020-01-10T09:34:52.000Z | 2020-01-27T13:51:12.000Z | open_anafi/lib/indicator_tools.py | Cour-des-comptes/open-anafi-backend | 1d3ebcfe7b46315e91618f540ef1c95b4e20d9af | ["MIT"] | 6 | 2020-01-26T20:38:07.000Z | 2022-02-10T12:12:53.000Z | open_anafi/lib/indicator_tools.py | Cour-des-comptes/open-anafi-backend | 1d3ebcfe7b46315e91618f540ef1c95b4e20d9af | ["MIT"] | 4 | 2020-01-27T16:44:31.000Z | 2021-02-11T16:52:26.000Z |
from open_anafi.models import Indicator, IndicatorParameter, IndicatorLibelle
from open_anafi.serializers import IndicatorSerializer
from .frame_tools import FrameTools
from open_anafi.lib import parsing_tools
from open_anafi.lib.ply.parsing_classes import Indic
import re
from django.db import transaction
from django.core.exceptions import ObjectDoesNotExist
class IndicatorTools:
@staticmethod
def calculate_max_depth(indicator):
"""Calculates the depth of an indicator (the max depth of all its parameters)
:param indicator: The indicator to evaluate
:type indicator: class:`open_anafi.models.Indicator`
"""
depth = 0
for parameter in indicator.parameters.all():
if parameter.depth > depth:
depth = parameter.depth
indicator.max_depth = depth
indicator.save()
@staticmethod
def update_depth(indicator):
"""Updates the depth of an indicator after an update.
Recursively updates all the affected indicators/frames
:param indicator: The indicator to evaluate
:type indicator: class:`open_anafi.models.Indicator`
"""
parameters = IndicatorParameter.objects.filter(original_equation__contains=indicator.name)
indicators_to_update = list(set([param.indicator for param in parameters]))
frames_to_update = list(indicator.frames.all())
if len(indicators_to_update) > 0:
for indic in indicators_to_update:
for frame in indic.frames.all(): frames_to_update.append(frame)
# For each indicator, we update the depth of all the parameters, then we calculate the max depth of the indicator
for param in indic.parameters.all(): IndicatorParameterTools.calculate_depth(param)
IndicatorTools.calculate_max_depth(indic)
for indic in indicators_to_update: IndicatorTools.update_depth(indic)
# We update the depth of the frames
frames_to_update = list(set(frames_to_update))
if len(frames_to_update) > 0:
for frame in frames_to_update: FrameTools.calculate_depth(frame)
#This method can be optimized
@staticmethod
def update_indicator(equation, description, id, libelle=None):
"""Update an indicator.
Note that we cannot modify the indicator's name.
:param equation: The updated equation (updated or not)
:type equation: str
:param description: The updated description
:type description: str
:param id: The indicator's id
:type id: int
:param libelle: An extra libelle for the indicator
:type libelle: str
:return: The updated indicator
:rtype: class:`open_anafi.models.Indicator`
"""
indic = Indicator.objects.get(id=id)
if libelle is not None:
indicator_libelle = IndicatorLibelle.objects.filter(indicator=indic)
if len(indicator_libelle) > 1:
raise Exception('Cet indicateur possède plusieurs libellés')
elif len(indicator_libelle) == 0:
indicator_libelle = IndicatorLibelle.objects.create(libelle=libelle, indicator=indic)
indicator_libelle.save()
else:
indicator_libelle = indicator_libelle[0]
indicator_libelle.libelle = libelle
indicator_libelle.save()
        if description is not None:
with transaction.atomic():
indic.description = description
indic.save()
if equation is not None:
#
with transaction.atomic():
backup_indicator = IndicatorSerializer(indic).data
old_params = IndicatorParameter.objects.filter(indicator=indic)
old_params_ids = [ p.id for p in old_params].copy()
if len(backup_indicator.get('libelles')) > 1:
raise Exception('Cet indicateur possède plusieurs libellés')
parsing_tools.update_formula(equation, indic)
for parameter in IndicatorParameter.objects.filter(id__in=old_params_ids):
parameter.delete()
indic = Indicator.objects.get(name=backup_indicator.get('name'))
indic.save()
IndicatorTools.update_depth(indic)
return indic.name
@staticmethod
def check_equation_element(element):
if type(element) is Indic:
try:
Indicator.objects.get(name=element.name)
except ObjectDoesNotExist:
raise Exception(f"L'indicateur {element.name} n'existe pas.")
@staticmethod
def check_equation(equation):
try:
parsed_indicator = parsing_tools.parse_equation(equation)
for eq in parsed_indicator:
if type(eq['tree']) is tuple:
for element in eq['tree']:
IndicatorTools.check_equation_element(element)
else:
IndicatorTools.check_equation_element(eq['tree'])
except Exception as e:
raise Exception(f"Erreur dans la formule : {str(e)}")
@staticmethod
def check_indicator_usages_in_formulas(indicator):
"""
Checks if an indicator is part of a formula of any other indicator.
Used to check if an indicator is safe to remove.
:param indicator: The indicator to check
:type indicator: :class:`open_anafi.models.Indicator`
"""
result = [indicator_parameter.indicator.name for indicator_parameter in
IndicatorParameter.objects.filter(original_equation__icontains=indicator.name)]
return result
class IndicatorParameterTools:
@staticmethod
def calculate_depth(indicator_parameter):
"""Calculates the depth of an indicator parameter,
given that all the indicators present in its equation already exist and have the correct depth.
:param indicator_parameter: The indicator parameter to evaluate
:type indicator_parameter: class:`open_anafi.models.IndicatorParameter`
"""
depth = 0
indicators = IndicatorParameterTools.extract_indicators_from_equation(indicator_parameter.original_equation)
if len(indicators) == 0:
indicator_parameter.depth = 1
indicator_parameter.save()
for indicator in indicators:
if indicator.max_depth > depth:
depth = indicator.max_depth
indicator_parameter.depth = depth + 1
indicator_parameter.save()
@staticmethod
def extract_indicators_from_equation(equation):
"""Retrieves all the indicator objects contained in a equation
:param equation: An equation according to the defined language
:type equation: str
:return: The list of all the indicator objects present in the equation
:rtype: list of class:`open_anafi.models.Indicator`
"""
        exp = re.compile(r'[\-+/*^(\[)\]]')
        is_indicator = re.compile(r'[A-Z0-9]+(_[A-Z0-9]+)+')
split_equation = list(filter(None, map(str.strip, exp.split(equation))))
indicators = []
for item in split_equation:
if not is_indicator.match(item) : continue
try:
indic = Indicator.objects.get(name = item)
indicators.append(indic)
except ObjectDoesNotExist:
raise Exception(f"L'indicateur {item} n'existe pas.")
return indicators
| 37.985149 | 129 | 0.639776 | 7,309 | 0.952065 | 0 | 0 | 7,177 | 0.93487 | 0 | 0 | 2,360 | 0.307412 |
67656a05cc2aa8785f99e903c16b411d139ad81d | 3,576 | py | Python | src/python/commands/LikeImpl.py | plewis/phycas | 9f5a4d9b2342dab907d14a46eb91f92ad80a5605 | ["MIT"] | 3 | 2015-09-24T23:12:57.000Z | 2021-04-12T07:07:01.000Z | src/python/commands/LikeImpl.py | plewis/phycas | 9f5a4d9b2342dab907d14a46eb91f92ad80a5605 | ["MIT"] | null | null | null | src/python/commands/LikeImpl.py | plewis/phycas | 9f5a4d9b2342dab907d14a46eb91f92ad80a5605 | ["MIT"] | 1 | 2015-11-23T10:35:43.000Z | 2015-11-23T10:35:43.000Z |
import os,sys,math,random
from phycas import *
from MCMCManager import LikelihoodCore
from phycas.utilities.PhycasCommand import *
from phycas.readnexus import NexusReader
from phycas.utilities.CommonFunctions import CommonFunctions
class LikeImpl(CommonFunctions):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
To be written.
"""
def __init__(self, opts):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Initializes the LikeImpl object by assigning supplied phycas object
to a data member variable.
"""
CommonFunctions.__init__(self, opts)
self.starting_tree = None
self.taxon_labels = None
self.data_matrix = None
self.ntax = None
self.nchar = None
self.reader = NexusReader()
self.npatterns = [] # Will hold the actual number of patterns for each subset after data file has been read
def _loadData(self, matrix):
self.data_matrix = matrix
if matrix is None:
self.taxon_labels = []
self.ntax = 0
self.nchar = 0 # used for Gelfand-Ghosh simulations only
else:
self.taxon_labels = matrix.taxa
self.ntax = self.data_matrix.getNTax()
self.nchar = self.data_matrix.getNChar() # used for Gelfand-Ghosh simulations only
self.phycassert(len(self.taxon_labels) == self.ntax, "Number of taxon labels does not match number of taxa.")
def getStartingTree(self):
if self.starting_tree is None:
try:
tr_source = self.opts.tree_source
tr_source.setActiveTaxonLabels(self.taxon_labels)
i = iter(tr_source)
self.starting_tree = i.next()
except:
self.stdout.error("A tree could not be obtained from the tree_source")
raise
return self.starting_tree
def run(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Computes the log-likelihood based on the current tree and current
model.
"""
ds = self.opts.data_source
mat = ds and ds.getMatrix() or None
self.phycassert(self.opts.data_source is not None, "specify data_source before calling like()")
self._loadData(mat)
self.starting_tree = self.getStartingTree()
if self.opts.preorder_edgelens is not None:
self.starting_tree.replaceEdgeLens(self.opts.preorder_edgelens)
print '@@@@@@@@@@ self.starting_tree.makeNewick() =',self.starting_tree.makeNewick()
core = LikelihoodCore(self)
core.setupCore()
core.prepareForLikelihood()
if self.opts.store_site_likes:
core.likelihood.storeSiteLikelihoods(True)
self.opts.pattern_counts = None
self.opts.char_to_pattern = None
self.opts.site_likes = None
self.opts.site_uf = None
else:
core.likelihood.storeSiteLikelihoods(False)
lnL = core.calcLnLikelihood()
if self.opts.store_site_likes:
self.opts.pattern_counts = core.likelihood.getPatternCounts()
self.opts.char_to_pattern = core.likelihood.getCharIndexToPatternIndex()
self.opts.site_likes = core.likelihood.getSiteLikelihoods()
self.opts.site_uf = core.likelihood.getSiteUF()
return lnL
| 39.733333 | 131 | 0.576622 | 3,341 | 0.934284 | 0 | 0 | 0 | 0 | 0 | 0 | 837 | 0.23406 |
67659e478a5e5c7c61b17fe40c449153891a0e5c | 291 | py | Python | app/models.py | dangger/awesome-flask-todo | 8eb2ec5357a028a76015035940d6f7844623ff98 | ["MIT"] | null | null | null | app/models.py | dangger/awesome-flask-todo | 8eb2ec5357a028a76015035940d6f7844623ff98 | ["MIT"] | null | null | null | app/models.py | dangger/awesome-flask-todo | 8eb2ec5357a028a76015035940d6f7844623ff98 | ["MIT"] | null | null | null |
from app import db
import datetime
from flask_mongoengine.wtf import model_form
class Todo(db.Document):
content = db.StringField(required=True, max_length=20)
    time = db.DateTimeField(default=datetime.datetime.now)  # pass the callable so the timestamp is evaluated per document
status = db.IntField(default=0)
TodoForm = model_form(Todo)
| 26.454545 | 60 | 0.766323 | 180 | 0.618557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6767a8053401b419268988cde796fcad2ed726b3 | 157 | py | Python | Python/Mundo01/teste/teste2.py | eStev4m/CursoPython | 8b52a618e67c80d66518ef91c1d4596a2bfddc22 | ["MIT"] | null | null | null | Python/Mundo01/teste/teste2.py | eStev4m/CursoPython | 8b52a618e67c80d66518ef91c1d4596a2bfddc22 | ["MIT"] | null | null | null | Python/Mundo01/teste/teste2.py | eStev4m/CursoPython | 8b52a618e67c80d66518ef91c1d4596a2bfddc22 | ["MIT"] | null | null | null |
dia = int(input('Dia = '))
mes = str(input('Mês = '))
ano = int(input('Ano = '))
print('Você nasceu no dia {} de {} de {}. Correto?' .format(dia, mes, ano))
| 31.4 | 75 | 0.56051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.446541 |
6767ec882d17e62fa49469a5e7630e14c022c42d | 16,089 | py | Python | firebirdsql/services.py | dand-oss/pyfirebirdsql | 1b8148f8937929cdd74774fef2611dd55ea6a757 | ["BSD-2-Clause"] | 31 | 2015-03-28T09:43:53.000Z | 2022-02-27T18:20:06.000Z | firebirdsql/services.py | dand-oss/pyfirebirdsql | 1b8148f8937929cdd74774fef2611dd55ea6a757 | ["BSD-2-Clause"] | 24 | 2015-01-16T03:00:33.000Z | 2022-02-08T00:06:05.000Z | firebirdsql/services.py | dand-oss/pyfirebirdsql | 1b8148f8937929cdd74774fef2611dd55ea6a757 | ["BSD-2-Clause"] | 21 | 2015-01-15T23:00:26.000Z | 2020-11-04T08:30:13.000Z |
##############################################################################
# Copyright (c) 2009-2021, Hajime Nakagami<nakagami@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python DB-API 2.0 module for Firebird.
##############################################################################
from firebirdsql.consts import * # noqa
from firebirdsql.utils import * # noqa
from firebirdsql.fbcore import Connection
class Services(Connection):
def sweep(self, database_name, callback=None):
        spb = bs([isc_action_svc_repair])
s = self.str_to_bytes(database_name)
spb += bs([isc_spb_dbname]) + int_to_bytes(len(s), 2) + s
optionMask = 0
        optionMask |= isc_spb_rpr_sweep_db
spb += bs([isc_spb_options]) + int_to_bytes(optionMask, 4)
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
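        # Poll the service with info item 0x3e (isc_info_svc_line) until the
        # end-of-output marker 0x3e 0x00 0x00 0x01 arrives, forwarding each
        # returned line of text to the callback.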
while True:
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
if callback:
ln = bytes_to_int(buf[1:3])
callback(self.bytes_to_str(buf[3:3+ln]))
def bringOnline(self, database_name, callback=None):
spb = bs([isc_action_svc_properties])
s = self.str_to_bytes(database_name)
spb += bs([isc_spb_dbname]) + int_to_bytes(len(s), 2) + s
optionMask = 0
        optionMask |= 0x0200  # isc_spb_prp_db_online: bring the database online
spb += bs([isc_spb_options]) + int_to_bytes(optionMask, 4)
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
while True:
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
if callback:
ln = bytes_to_int(buf[1:3])
callback(self.bytes_to_str(buf[3:3+ln]))
def shutdown(
self, database_name, timeout=0, shutForce=True,
shutDenyNewAttachments=False, shutDenyNewTransactions=False,
callback=None
):
spb = bs([isc_action_svc_properties])
s = self.str_to_bytes(database_name)
spb += bs([isc_spb_dbname]) + int_to_bytes(len(s), 2) + s
if shutForce:
spb += bs([isc_spb_prp_shutdown_db]) + int_to_bytes(timeout, 4)
if shutDenyNewAttachments:
spb += bs([isc_spb_prp_deny_new_attachments]) + int_to_bytes(timeout, 4)
if shutDenyNewTransactions:
spb += bs([isc_spb_prp_deny_new_transactions]) + int_to_bytes(timeout, 4)
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
while True:
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
if callback:
ln = bytes_to_int(buf[1:3])
callback(self.bytes_to_str(buf[3:3+ln]))
def repair(
self, database_name,
readOnlyValidation=True, ignoreChecksums=False,
killUnavailableShadows=False, mendDatabase=False,
validateDatabase=False, validateRecordFragments=False, callback=None
):
spb = bs([isc_action_svc_repair])
s = self.str_to_bytes(database_name)
spb += bs([isc_spb_dbname]) + int_to_bytes(len(s), 2) + s
optionMask = 0
if readOnlyValidation:
optionMask |= isc_spb_rpr_check_db
if ignoreChecksums:
optionMask |= isc_spb_rpr_ignore_checksum
if killUnavailableShadows:
optionMask |= isc_spb_rpr_kill_shadows
if mendDatabase:
optionMask |= isc_spb_rpr_mend_db
if validateDatabase:
optionMask |= isc_spb_rpr_validate_db
if validateRecordFragments:
optionMask |= isc_spb_rpr_full
spb += bs([isc_spb_options]) + int_to_bytes(optionMask, 4)
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
while True:
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
if callback:
ln = bytes_to_int(buf[1:3])
callback(self.bytes_to_str(buf[3:3+ln]))
def backup_database(
self, database_name, backup_filename,
transportable=True, metadataOnly=False, garbageCollect=True,
ignoreLimboTransactions=False, ignoreChecksums=False,
convertExternalTablesToInternalTables=True, expand=False, callback=None
):
spb = bs([isc_action_svc_backup])
s = self.str_to_bytes(database_name)
spb += bs([isc_spb_dbname]) + int_to_bytes(len(s), 2) + s
s = self.str_to_bytes(backup_filename)
spb += bs([isc_spb_bkp_file]) + int_to_bytes(len(s), 2) + s
optionMask = 0
if ignoreChecksums:
optionMask |= isc_spb_bkp_ignore_checksums
if ignoreLimboTransactions:
optionMask |= isc_spb_bkp_ignore_limbo
if metadataOnly:
optionMask |= isc_spb_bkp_metadata_only
if not garbageCollect:
optionMask |= isc_spb_bkp_no_garbage_collect
if not transportable:
optionMask |= isc_spb_bkp_non_transportable
if convertExternalTablesToInternalTables:
optionMask |= isc_spb_bkp_convert
if expand:
optionMask |= isc_spb_bkp_expand
spb += bs([isc_spb_options]) + int_to_bytes(optionMask, 4)
if callback:
spb += bs([isc_spb_verbose])
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
while True:
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
if callback:
ln = bytes_to_int(buf[1:3])
callback(self.bytes_to_str(buf[3:3+ln]))
def restore_database(
self, restore_filename, database_name,
replace=False, create=False, deactivateIndexes=False,
doNotRestoreShadows=False, doNotEnforceConstraints=False,
commitAfterEachTable=False, useAllPageSpace=False, pageSize=None,
cacheBuffers=None, callback=None
):
spb = bs([isc_action_svc_restore])
s = self.str_to_bytes(restore_filename)
spb += bs([isc_spb_bkp_file]) + int_to_bytes(len(s), 2) + s
s = self.str_to_bytes(database_name)
spb += bs([isc_spb_dbname]) + int_to_bytes(len(s), 2) + s
optionMask = 0
if replace:
optionMask |= isc_spb_res_replace
if create:
optionMask |= isc_spb_res_create
if deactivateIndexes:
optionMask |= isc_spb_res_deactivate_idx
if doNotRestoreShadows:
optionMask |= isc_spb_res_no_shadow
if doNotEnforceConstraints:
optionMask |= isc_spb_res_no_validity
if commitAfterEachTable:
optionMask |= isc_spb_res_one_at_a_time
if useAllPageSpace:
optionMask |= isc_spb_res_use_all_space
spb += bs([isc_spb_options]) + int_to_bytes(optionMask, 4)
if pageSize:
spb += bs([isc_spb_res_page_size]) + int_to_bytes(pageSize, 4)
if cacheBuffers:
spb += bs([isc_spb_res_buffers]) + int_to_bytes(cacheBuffers, 4)
if callback:
spb += bs([isc_spb_verbose])
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
while True:
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
if callback:
ln = bytes_to_int(buf[1:3])
callback(self.bytes_to_str(buf[3:3+ln]))
def trace_start(self, name=None, cfg=None, callback=None):
spb = bs([isc_action_svc_trace_start])
if name:
s = self.str_to_bytes(name)
spb += bs([isc_spb_trc_name]) + int_to_bytes(len(s), 2) + s
if cfg:
s = self.str_to_bytes(cfg)
spb += bs([isc_spb_trc_cfg]) + int_to_bytes(len(s), 2) + s
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
while True:
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
ln = bytes_to_int(buf[1:2])
if callback:
callback(self.bytes_to_str(buf[3:3+ln]))
def trace_stop(self, id, callback=None):
id = int(id)
spb = bs([isc_action_svc_trace_stop])
spb += bs([isc_spb_trc_id]) + int_to_bytes(id, 4)
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
ln = bytes_to_int(buf[1:2])
if callback:
callback(self.bytes_to_str(buf[3:3+ln]))
def trace_suspend(self, id, callback=None):
id = int(id)
spb = bs([isc_action_svc_trace_suspend])
spb += bs([isc_spb_trc_id]) + int_to_bytes(id, 4)
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
ln = bytes_to_int(buf[1:2])
if callback:
callback(self.bytes_to_str(buf[3:3+ln]))
def trace_resume(self, id, callback=None):
id = int(id)
spb = bs([isc_action_svc_trace_resume])
spb += bs([isc_spb_trc_id]) + int_to_bytes(id, 4)
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
ln = bytes_to_int(buf[1:2])
if callback:
callback(self.bytes_to_str(buf[3:3+ln]))
def trace_list(self, callback=None):
spb = bs([isc_action_svc_trace_list])
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
while True:
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
ln = bytes_to_int(buf[1:2])
if callback:
callback(self.bytes_to_str(buf[3:3+ln]))
def _getIntegerVal(self, item_id):
self._op_service_info(bs([]), bs([item_id]))
(h, oid, buf) = self._op_response()
assert byte_to_int(buf[0]) == item_id
return byte_to_int(buf[1])
def _getStringVal(self, item_id):
self._op_service_info(bs([]), bs([item_id]))
(h, oid, buf) = self._op_response()
assert byte_to_int(buf[0]) == item_id
ln = bytes_to_int(buf[1:3])
return self.bytes_to_str(buf[3:3+ln])
def _getSvrDbInfo(self):
self._op_service_info(bs([]), bs([isc_info_svc_svr_db_info]))
(h, oid, buf) = self._op_response()
assert byte_to_int(buf[0]) == isc_info_svc_svr_db_info
db_names = []
i = 1
while i < len(buf) and byte_to_int(buf[i]) != isc_info_flag_end:
if byte_to_int(buf[i]) == isc_spb_num_att:
num_attach = bytes_to_int(buf[i+1:i+5])
i += 5
elif byte_to_int(buf[i]) == isc_spb_num_db:
bytes_to_int(buf[7:11]) # db_num
i += 5
elif byte_to_int(buf[i]) == isc_spb_dbname:
ln = bytes_to_int(buf[i+1:i+3])
db_name = self.bytes_to_str(buf[i+3:i+3+ln])
db_names.append(db_name)
i += 3 + ln
return (num_attach, db_names)
def _getLogLines(self, spb):
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
logs = ''
while True:
self._op_service_info(bs([]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
ln = bytes_to_int(buf[1:2])
logs += self.bytes_to_str(buf[3:3+ln]) + '\n'
return logs
def getServiceManagerVersion(self):
return self._getIntegerVal(isc_info_svc_version)
def getServerVersion(self):
return self._getStringVal(isc_info_svc_server_version)
def getArchitecture(self):
return self._getStringVal(isc_info_svc_implementation)
def getHomeDir(self):
return self._getStringVal(isc_info_svc_get_env)
def getSecurityDatabasePath(self):
return self._getStringVal(isc_info_svc_user_dbpath)
def getLockFileDir(self):
return self._getStringVal(isc_info_svc_get_env_lock)
def getCapabilityMask(self):
return self._getIntegerVal(isc_info_svc_capabilities)
def getMessageFileDir(self):
return self._getStringVal(isc_info_svc_get_env_msg)
def getConnectionCount(self):
return self._getSvrDbInfo()[0]
def getAttachedDatabaseNames(self):
return self._getSvrDbInfo()[1]
def getLog(self):
spb = bs([isc_action_svc_get_fb_log])
return self._getLogLines(spb)
def getStatistics(
self, dbname, showOnlyDatabaseLogPages=False,
showOnlyDatabaseHeaderPages=False,
showUserDataPages=True,
showUserIndexPages=True,
showSystemTablesAndIndexes=False
):
optionMask = 0
if showUserDataPages:
optionMask |= isc_spb_sts_data_pages
if showOnlyDatabaseLogPages:
optionMask |= isc_spb_sts_db_log
if showOnlyDatabaseHeaderPages:
optionMask |= isc_spb_sts_hdr_pages
if showUserIndexPages:
optionMask |= isc_spb_sts_idx_pages
if showSystemTablesAndIndexes:
optionMask |= isc_spb_sts_sys_relations
spb = bs([isc_spb_res_length])
s = self.str_to_bytes(dbname)
spb += bs([isc_spb_dbname]) + int_to_bytes(len(s), 2) + s
spb += bs([isc_spb_options]) + int_to_bytes(optionMask, 4)
return self._getLogLines(spb)
def connect(**kwargs):
kwargs['is_services'] = True
return Services(**kwargs)
| 38.675481 | 85 | 0.604139 | 14,306 | 0.889179 | 0 | 0 | 0 | 0 | 0 | 0 | 1,576 | 0.097955 |
6768a012fa3b71acafcce223de6b3ec16122e616 | 763 | py | Python | source/utils/converters.py | GoBoopADog/maelstrom | fce79fa964578dfee5d7beb4ec440deec5f8f25d | ["MIT"] | 2 | 2021-03-02T15:37:01.000Z | 2021-04-21T10:45:32.000Z | source/utils/converters.py | GoBoopADog/maelstrom | fce79fa964578dfee5d7beb4ec440deec5f8f25d | ["MIT"] | 1 | 2021-02-28T20:26:04.000Z | 2021-03-01T17:55:55.000Z | source/utils/converters.py | GoBoopADog/maelstrom | fce79fa964578dfee5d7beb4ec440deec5f8f25d | ["MIT"] | 4 | 2021-02-28T04:08:03.000Z | 2021-09-05T17:16:44.000Z |
from discord.ext import commands
from typing import Union
from types import ModuleType
from .context import Context
class SourceConverter(commands.Converter):
"""A Converter that converts a string to a Command, Cog or Extension."""
async def convert(
self, ctx: Context, argument: str
) -> Union[commands.Command, commands.Cog, ModuleType]:
if command := ctx.bot.get_command(argument):
if command.name == "help":
return ctx.bot.help_command
return command
if cog := ctx.bot.get_cog(argument):
return cog
if extension := ctx.bot.extensions.get(argument):
return extension
raise commands.BadArgument("Not a valid Command, Cog nor Extension.")
| 30.52 | 77 | 0.655308 | 643 | 0.842726 | 0 | 0 | 0 | 0 | 518 | 0.678899 | 119 | 0.155963 |
6769164b195db417c53c603f5e118948e48af7f8 | 8,230 | py | Python | credstuffer/db/creator.py | bierschi/credstuffer | 1a37aef30654028885d0d2caa456f38f58af4def | ["MIT"] | null | null | null | credstuffer/db/creator.py | bierschi/credstuffer | 1a37aef30654028885d0d2caa456f38f58af4def | ["MIT"] | null | null | null | credstuffer/db/creator.py | bierschi/credstuffer | 1a37aef30654028885d0d2caa456f38f58af4def | ["MIT"] | 1 | 2020-10-05T12:10:32.000Z | 2020-10-05T12:10:32.000Z |
import logging
from credstuffer.db.connector import DBConnector
from credstuffer.exceptions import DBCreatorError
class Database:
""" class Database to build a sql string for Database creation
USAGE:
Database(name="web")
"""
def __init__(self, name):
self.logger = logging.getLogger('credstuffer')
self.logger.info('create class Database')
self.name = name
def __str__(self):
""" string representation of database creation
:return: sql string for database creation
"""
return "create database {}".format(self.name)
def __repr__(self):
""" string representation of database object
:return: sql string for database creation
"""
return "create database {}".format(self.name)
class Schema:
""" class Schema to build a sql string for Schema creation
USAGE:
Schema(name="credstuffer")
"""
def __init__(self, name):
self.logger = logging.getLogger('credstuffer')
self.name = name
self.sql_schema = "create schema if not exists {}".format(self.name)
def __str__(self):
""" string representation of schema creation
:return: sql string for schema creation
"""
return self.sql_schema
def __repr__(self):
""" string representation of schema object
:return: sql string for schema creation
"""
return self.sql_schema
class Table:
""" class Table to build a sql string for Table creation
USAGE:
Table("page", Column('id', 'int', False, False), schema="web")
Table("page")
"""
def __init__(self, name, *columns, schema=None):
self.logger = logging.getLogger('credstuffer')
self.name = name
self.schema = schema
self.sql_table = ""
if schema is None:
self.sql_table = "create table if not exists {} ".format(self.name)
else:
self.sql_table = "create table if not exists {}.{} ".format(self.schema, self.name)
if len(columns) == 0:
self.sql_table = self.sql_table + "()"
elif len(columns) == 1:
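            # repr of a 1-tuple is "(col,)"; strip the trailing comma to get "(col)"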
self.sql_table = self.sql_table + str(columns).replace(',', '')
elif len(columns) > 1:
self.sql_table = self.sql_table + str(columns)
def __call__(self, *columns):
""" implicit method to invoke table instances to create new sql strings with variable *columns objects
:param columns: objects of type Column()
:return: sql table creation string
"""
if len(columns) == 1:
self.sql_table = self.sql_table + str(columns).replace(',', '')
return self.sql_table
elif len(columns) > 1:
self.sql_table = self.sql_table + str(columns)
return self.sql_table
def __str__(self):
""" string representation of table creation
:return: sql string for table creation
"""
return self.sql_table
def __repr__(self):
""" string representation of table object
:return: sql string for table creation
"""
return self.sql_table
class Column:
""" class Column to build a sql string for Column creation
USAGE:
Column(name='id', type='int', not_null=False, prim_key=True)
"""
def __init__(self, name, type, not_null=False, prim_key=False, exist_table=False, table_name=None, schema=None):
self.logger = logging.getLogger('credstuffer')
self.name = name
self.type = type
self.not_null = not_null
self.prim_key = prim_key
self.exist_table = exist_table
self.table_name = table_name
self.schema = schema
if self.schema is None:
self.alter_table = "alter table {} ".format(self.table_name)
else:
self.alter_table = "alter table {}.{} ".format(self.schema, self.table_name)
self.name_type_str = "{} {} ".format(self.name, self.type)
self.add_column_str = "add column "
self.primary_key_str = "primary key "
self.not_null_str = "not null "
self.if_not_exists = "if not exists "
self.sql_primary_key = self.name_type_str + self.primary_key_str
self.sql_not_null = self.name_type_str + self.not_null_str
self.sql_exist_table = self.alter_table + self.add_column_str + self.if_not_exists + self.name_type_str
self.sql_exist_table_prim_key = self.sql_exist_table + self.primary_key_str
        self.sql_exist_table_not_null = self.sql_exist_table + self.not_null_str
def __str__(self):
""" string representation of column creation
:return: sql string for column creation
"""
if self.prim_key and not self.not_null and not self.exist_table:
return self.sql_primary_key
if self.not_null and not self.prim_key and not self.exist_table:
return self.sql_not_null
if self.exist_table and not self.prim_key and not self.not_null:
return self.sql_exist_table
if self.exist_table and self.prim_key:
return self.sql_exist_table_prim_key
if self.exist_table and self.not_null:
            return self.sql_exist_table_not_null
else:
return "{} {} ".format(self.name, self.type)
def __repr__(self):
""" string representation of column object
:return: sql string for column creation
"""
if self.prim_key and not self.not_null and not self.exist_table:
return self.sql_primary_key
if self.not_null and not self.prim_key and not self.exist_table:
return self.sql_not_null
if self.exist_table and not self.prim_key and not self.not_null:
return self.sql_exist_table
if self.exist_table and self.prim_key:
return self.sql_exist_table_prim_key
if self.exist_table and self.not_null:
            return self.sql_exist_table_not_null
else:
return "{} {} ".format(self.name, self.type)
class DBCreator(DBConnector):
""" class DBCreator to build database, table or column
USAGE:
creator = DBCreator()
creator.connect(host, port, username, password, dbname, minConn=1, maxConn=10)
creator.build(obj=Database(name="web"))
creator.build(obj=Table("gps", Column('did', 'text'), Column('ts', 'text')))
"""
def __init__(self):
self.logger = logging.getLogger('credstuffer')
self.logger.info('create class DBCreator')
# init connector base class
super().__init__()
def build(self, obj):
""" build object depending on given object 'obj'
"""
if isinstance(obj, Database):
self.__database(obj)
elif isinstance(obj, Schema):
self.__schema(obj)
elif isinstance(obj, Table):
self.__table(obj)
elif isinstance(obj, Column):
self.__column(obj)
else:
raise DBCreatorError("Provide either a Database, Schema, Table or Column object")
def __database(self, database_obj):
""" creates a database
:param database_obj: database object
"""
with self.get_cursor(autocommit=True) as cursor:
cursor.execute(str(database_obj))
def __schema(self, schema_obj):
"""creates a Schema
:param schema_obj: schema object
"""
with self.get_cursor() as cursor:
cursor.execute(str(schema_obj))
def __table(self, table_obj):
""" creates a table
:param table_obj: table object
"""
with self.get_cursor() as cursor:
cursor.execute(str(table_obj))
def __column(self, column_obj):
""" creates a column
:param column_obj: column object
"""
# only possible in existing table
if column_obj.exist_table:
with self.get_cursor() as cursor:
cursor.execute(str(column_obj))
else:
raise DBCreatorError("Creation of column object is only possible in existing tables")
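# Illustrative sketch (not from the original project): the SQL strings that the
# Column variants above produce, following the attribute concatenation in __init__.
#     str(Column('id', 'int', prim_key=True))   -> "id int primary key "
#     str(Column('ts', 'text', not_null=True))  -> "ts text not null "
#     str(Column('did', 'text', exist_table=True, table_name='gps', schema='web'))
#         -> "alter table web.gps add column if not exists did text "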
| 31.776062
| 116
| 0.615188
| 8,100
| 0.984204
| 0
| 0
| 0
| 0
| 0
| 0
| 2,872
| 0.348967
|
67692e8a3e167b8004f399714ed1c11e30cf9ebb
| 897
|
py
|
Python
|
src/poetry/core/masonry/builder.py
|
DavidVujic/poetry-core
|
d7b5572aabc762f138e4d15f461f13a28c8258d6
|
[
"MIT"
] | null | null | null |
src/poetry/core/masonry/builder.py
|
DavidVujic/poetry-core
|
d7b5572aabc762f138e4d15f461f13a28c8258d6
|
[
"MIT"
] | null | null | null |
src/poetry/core/masonry/builder.py
|
DavidVujic/poetry-core
|
d7b5572aabc762f138e4d15f461f13a28c8258d6
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from poetry.core.poetry import Poetry
class Builder:
def __init__(self, poetry: Poetry) -> None:
from poetry.core.masonry.builders.sdist import SdistBuilder
from poetry.core.masonry.builders.wheel import WheelBuilder
self._poetry = poetry
self._formats = {
"sdist": SdistBuilder,
"wheel": WheelBuilder,
}
def build(self, fmt: str, executable: str | Path | None = None) -> None:
if fmt in self._formats:
builders = [self._formats[fmt]]
elif fmt == "all":
builders = list(self._formats.values())
else:
raise ValueError(f"Invalid format: {fmt}")
for builder in builders:
builder(self._poetry, executable=executable).build()
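# Usage sketch (illustrative, not part of this module): a Poetry instance can be
# obtained from poetry.core's Factory and handed to Builder; fmt="all" runs the
# sdist and wheel builders in turn.
#     from poetry.core.factory import Factory
#     poetry = Factory().create_poetry(Path("."))
#     Builder(poetry).build("wheel")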
| 27.181818
| 76
| 0.630992
| 738
| 0.822742
| 0
| 0
| 0
| 0
| 0
| 0
| 43
| 0.047938
|
676b193319b9f06972fcafcb462e36e367c9d59d
| 659
|
py
|
Python
|
migrations/versions/429d596c43a7_users_country.py
|
bilginfurkan/Anonimce
|
7d73c13ae8d5c873b6863878370ad83ec9ee5acc
|
[
"Apache-2.0"
] | 2
|
2021-02-15T12:56:58.000Z
|
2021-02-21T12:38:47.000Z
|
migrations/versions/429d596c43a7_users_country.py
|
bilginfurkan/Anonimce
|
7d73c13ae8d5c873b6863878370ad83ec9ee5acc
|
[
"Apache-2.0"
] | null | null | null |
migrations/versions/429d596c43a7_users_country.py
|
bilginfurkan/Anonimce
|
7d73c13ae8d5c873b6863878370ad83ec9ee5acc
|
[
"Apache-2.0"
] | null | null | null |
"""users.country
Revision ID: 429d596c43a7
Revises: 77e0c0edaa04
Create Date: 2020-10-23 21:26:55.598146
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '429d596c43a7'
down_revision = '77e0c0edaa04'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('country', sa.String(length=4), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'country')
# ### end Alembic commands ###
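# Applying this revision with the standard Alembic CLI (run from the project
# root that contains alembic.ini):
#     alembic upgrade 429d596c43a7     # adds users.country
#     alembic downgrade 77e0c0edaa04   # drops it again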
| 22.724138
| 84
| 0.691958
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 392
| 0.594841
|
676bce0736ccad204cb3cef87d200632b75f487f
| 4,535
|
py
|
Python
|
tweet_processor.py
|
cristynhoward/connectfour
|
a6727cbe47696a0a3dd278a3929d81dc6e158999
|
[
"MIT"
] | 1
|
2018-06-28T09:45:59.000Z
|
2018-06-28T09:45:59.000Z
|
tweet_processor.py
|
cristynhoward/connectfour
|
a6727cbe47696a0a3dd278a3929d81dc6e158999
|
[
"MIT"
] | null | null | null |
tweet_processor.py
|
cristynhoward/connectfour
|
a6727cbe47696a0a3dd278a3929d81dc6e158999
|
[
"MIT"
] | null | null | null |
""" Module for processing mentions of the bot via the Twitter API.
"""
from ConnectFourGame import *
from databasehelpers import *
from helpers import *
from minimax import *
def process_mentions():
""" Scan through recent mentions and send them to be processed.
"""
api = get_twitter_api()
first = True
since_id = get_read_since()
newest_tweet_id = None
for tweet in limit_handled(tweepy.Cursor(api.mentions_timeline).items()):
if int(tweet.id_str) <= int(since_id): # if tweet has already been processed...
if first is True: # & we haven't seen any other tweets yet:
log("No new mentions to process.")
else: # we have processed other tweets, thus:
log("Processed mentions from " + str(since_id) + " to " + str(newest_tweet_id) + ".")
set_read_since(newest_tweet_id)
return
if first is True: # Collect ID of first tweet processed.
newest_tweet_id = tweet.id_str
first = False
if tweet.in_reply_to_status_id is None: # Check if mention starts a game thread.
result_newgame = try_newgame(tweet)
if result_newgame is not None:
record_outgoing_tweet(result_newgame)
else: # Check if mention is a valid play on an existing game thread.
doc = get_active_game(str(tweet.in_reply_to_status_id))
if doc is not None:
result_game = try_playturn(tweet, doc)
if result_game is not None:
record_outgoing_tweet(result_game)
remove_active_game(str(tweet.in_reply_to_status_id))
def try_newgame(tweet):
""" Process a single attempted new game.
:param tweet: The tweet to be processed as new game.
:type tweet: Tweepy.Status, dict
:return: The resulting new game, or None if no new game made.
:rtype: None, ConnectFourGame
"""
if tweet.in_reply_to_status_id is None: # not reply to another tweet
if tweet.text.split(" ")[1] == "new": # second word is 'new'
user1 = tweet.user.screen_name
# TWO PLAYER GAME
if len(tweet.entities[u'user_mentions']) > 1:
user2 = tweet.entities[u'user_mentions'][1][u'screen_name']
newgame = ConnectFourGame.new_game(get_next_game_id(), user1, user2, int(tweet.id_str))
log("Created two player game: " + newgame.game_to_string())
return newgame
            # ONE PLAYER GAME (guard against tweets shorter than three words)
            if len(tweet.text.split(" ")) > 2 and tweet.text.split(" ")[2] == "singleplayer":
user2 = " mimimax_ai_alpha"
newgame = ConnectFourGame.new_game(get_next_game_id(), user1, user2, int(tweet.id_str))
newgame.play_turn(int(tweet.id_str), minimax(newgame, 3))
log("Created one player game: " + newgame.game_to_string())
return newgame
def try_playturn(tweet, doc):
""" Process a single tweet as an attempted move on an open game.
:param tweet: The tweet to be processed as an attempted move on an open game.
:type tweet: Tweepy.Status, dict
:param doc: The database item storing the game onto which the turn is played.
:type doc: dict
:return: The resulting game after the move is played, or None if move not played.
:rtype: ConnectFourGame, None
"""
game = ConnectFourGame.game_from_string(doc["game"])
active_user = game.user2
if game.user1_is_playing == 1:
active_user = game.user1
move_index = 2
if game.user1 == game.user2 or game.user2 == " mimimax_ai_alpha":
move_index = 1
tweet_text = tweet.text.split(" ")
if len(tweet_text) >= move_index + 1:
column_played = tweet_text[move_index]
if any(column_played == s for s in ["1", "2", "3", "4", "5", "6", "7"]):
if (tweet.user.screen_name == active_user) & game.can_play(int(column_played)):
# PLAY TURN
game.play_turn(int(tweet.id_str), int(column_played))
log(active_user + " played a " + column_played + " resulting in game: " + game.game_to_string())
if game.user2 == ' mimimax_ai_alpha':
ai_move = minimax(game, 3)
game.play_turn(int(tweet.id_str), ai_move)
log("mimimax_ai_v1 played a " + str(ai_move) + " resulting in game: " + game.game_to_string())
return game
if __name__ == '__main__':
process_mentions()
| 39.780702
| 114
| 0.613892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,521
| 0.335391
|
676c11480ace0b3ea4cda5237879a07a2c1fe362
| 3,448
|
py
|
Python
|
code/seasonality.py
|
geangohn/RecSys
|
f53d0322fed414caa820cebf23bef5a0a9237517
|
[
"MIT"
] | 2
|
2019-07-22T09:42:25.000Z
|
2021-03-31T09:29:29.000Z
|
code/seasonality.py
|
geangohn/RecSys
|
f53d0322fed414caa820cebf23bef5a0a9237517
|
[
"MIT"
] | null | null | null |
code/seasonality.py
|
geangohn/RecSys
|
f53d0322fed414caa820cebf23bef5a0a9237517
|
[
"MIT"
] | null | null | null |
import pandas as pd
def get_seasonality_weekly(bills, date_column='dates', group_column='level_4_name',
regular_only=False, promo_fact_column=None):
bills['week'] = pd.to_datetime(bills[date_column]).dt.week
bills['year'] = pd.to_datetime(bills[date_column]).dt.year
    # - Group by week and year and sum, then group by week and average (in case the data are uneven across years).
if not regular_only:
num_per_week = bills.groupby([group_column, 'week', 'year'])[group_column].count().reset_index(name='num_sold')
num_per_week = num_per_week.groupby([group_column, 'week'])['num_sold'].mean().reset_index(name='num_sold')
else:
        # - Keep only regular (non-promo) sales; count the sales and the number of PLUs sold regularly in each week.
num_per_week = bills[bills[promo_fact_column] == 0].groupby([group_column, 'week', 'year']).agg(
{group_column: 'count', 'PLU_ID': 'nunique'})
num_per_week = num_per_week.rename(columns={group_column: 'total_sold', 'PLU_ID': 'unique_plu'}).reset_index()
        # - Average the regular sales count and the regular PLU count per week across years.
num_per_week = num_per_week.groupby([group_column, 'week'])[['total_sold', 'unique_plu']].mean().reset_index()
        # - Divide the regular sales count by the regular PLU count (so even if many
        # - items in the category are on promo, we still get a sensible figure).
        # - The +10 below is a regularization term.
num_per_week['num_sold'] = num_per_week['total_sold'] / (num_per_week['unique_plu']+10)
num_per_week.drop(['total_sold', 'unique_plu'], axis=1, inplace=True)
    # - Build a table that contains every category with all 52 weeks for each.
new_table = pd.concat(
[pd.DataFrame({group_column: x, 'week': [x + 1 for x in range(52)]}) for x in bills[group_column].unique()])
    # - Join the actual sales onto it, filling weeks without sales with zeros.
new_table = new_table.merge(num_per_week, on=[group_column, 'week'], how='left').fillna(0)
    # - Add the total amount sold over the whole period.
total_sold = new_table.groupby([group_column])['num_sold'].sum().reset_index(name='total_sold')
new_table = new_table.merge(total_sold, on=group_column, how='left')
    # - Add the amounts sold in the previous and the next week.
new_table['num_sold_prev'] = new_table.sort_values('week').groupby([group_column]).num_sold.shift(1)
new_table['num_sold_next'] = new_table.sort_values('week').groupby([group_column]).num_sold.shift(-1)
    # - Handle the boundary conditions (weeks 52 and 1 of the year wrap around).
plu_52_week_sales = dict(new_table[new_table['week'] == 52].set_index([group_column])['num_sold'])
plu_1_week_sales = dict(new_table[new_table['week'] == 1].set_index([group_column])['num_sold'])
new_table.loc[new_table['week'] == 1, 'num_sold_prev'] = new_table[new_table['week'] == 1][group_column].map(
lambda x: plu_52_week_sales[x])
new_table.loc[new_table['week'] == 52, 'num_sold_next'] = new_table[new_table['week'] == 52][group_column].map(
lambda x: plu_1_week_sales[x])
    # - Compute the three-week rolling average, normalized by total sales.
new_table['rolling_average'] = (new_table['num_sold_prev'] + new_table['num_sold'] + new_table['num_sold_next']) / \
(3 * new_table['total_sold'])
return new_table[[group_column, 'week', 'rolling_average']]
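# Minimal usage sketch (illustrative: the toy data and column names below are
# assumptions matching the defaults above, and it relies on a pandas version
# that still provides Series.dt.week, as the function itself does):
if __name__ == '__main__':
    demo_bills = pd.DataFrame({
        'dates': ['2018-01-01', '2018-01-08', '2018-01-15'] * 2,
        'level_4_name': ['dairy'] * 3 + ['bakery'] * 3,
    })
    print(get_seasonality_weekly(demo_bills).head())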
| 74.956522
| 120
| 0.690255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,967
| 0.482108
|
676d15fe9000290c81a06864a2972f44722d480f
| 1,729
|
py
|
Python
|
discord_api/applications.py
|
tuna2134/discord-api.py
|
0e5e9f469d852f81e6fc0b561c54a78ea6fe8fcb
|
[
"MIT"
] | 10
|
2021-11-30T06:22:20.000Z
|
2021-12-16T00:36:14.000Z
|
discord_api/applications.py
|
tuna2134/discord-api.py
|
0e5e9f469d852f81e6fc0b561c54a78ea6fe8fcb
|
[
"MIT"
] | 5
|
2021-12-03T10:21:15.000Z
|
2022-01-18T11:08:48.000Z
|
discord_api/applications.py
|
tuna2134/discord-api.py
|
0e5e9f469d852f81e6fc0b561c54a78ea6fe8fcb
|
[
"MIT"
] | 3
|
2021-12-10T08:34:28.000Z
|
2022-01-21T11:59:46.000Z
|
from typing import List
from .command import Command, ApiCommand
class Application:
def __init__(self, client):
self.client = client
self.http = client.http
self.__commands = []
async def fetch_commands(self) -> List[ApiCommand]:
"""
        Fetch the application commands registered with the Discord API.
Returns
-------
List[Command] : list of command.
"""
datas = await self.http.fetch_commands()
return [ApiCommand.from_dict(self, data) for data in datas]
def _check_command(self, command, api):
if command.description != api.description:
return True
else:
return False
async def setup_command(self) -> None:
"""
        Set up the application commands.
"""
apis = await self.fetch_commands()
cmds = []
for command in self.__commands:
cmds.append(command.name)
update = False
for api in apis:
if api.name in cmds:
if api.name == command.name:
break
else:
await api.delete()
else:
update = True
if update:
data = await self.http.add_command(command)
def add_command(self, command:Command) -> None:
"""
        Add a Discord application command.
Examples
--------
```python
from discord_api import Client, Command
client = Client()
client.add_command(Command(name = "ping", description = "pong"))
client.run("ToKeN")
```
"""
self.__commands.append(command)
| 28.816667
| 72
| 0.520532
| 1,686
| 0.97513
| 0
| 0
| 0
| 0
| 947
| 0.547715
| 534
| 0.308849
|
676d466a108d99b100b2c3a5a8c5c61b4428733b
| 280
|
py
|
Python
|
SinglePackage/tests/test_single.py
|
CJosephides/PythonApplicationStructures
|
b82385f7a35f3097eac08011d24d9d1429cee171
|
[
"RSA-MD"
] | 1
|
2019-02-05T11:45:11.000Z
|
2019-02-05T11:45:11.000Z
|
SinglePackage/tests/test_single.py
|
CJosephides/PythonApplicationStructures
|
b82385f7a35f3097eac08011d24d9d1429cee171
|
[
"RSA-MD"
] | null | null | null |
SinglePackage/tests/test_single.py
|
CJosephides/PythonApplicationStructures
|
b82385f7a35f3097eac08011d24d9d1429cee171
|
[
"RSA-MD"
] | null | null | null |
from unittest import TestCase, main
from single_package.single import Single
class SingleTests(TestCase):
def setUp(self):
self.single = Single()
def test_Single(self):
self.assertIsInstance(self.single, Single)
if __name__ == "__main__":
main()
| 17.5
| 50
| 0.692857
| 160
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.035714
|
676d7655b19bd0498b46ef17e54ab70538bcef0d
| 1,563
|
py
|
Python
|
tests/spot/sub_account/test_sub_account_deposit_address.py
|
Banging12/binance-connector-python
|
dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b
|
[
"MIT"
] | 512
|
2021-06-15T08:52:44.000Z
|
2022-03-31T09:49:53.000Z
|
tests/spot/sub_account/test_sub_account_deposit_address.py
|
Banging12/binance-connector-python
|
dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b
|
[
"MIT"
] | 75
|
2021-06-20T13:49:50.000Z
|
2022-03-30T02:45:31.000Z
|
tests/spot/sub_account/test_sub_account_deposit_address.py
|
Banging12/binance-connector-python
|
dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b
|
[
"MIT"
] | 156
|
2021-06-18T11:56:36.000Z
|
2022-03-29T16:34:22.000Z
|
import responses
from tests.util import random_str
from tests.util import mock_http_response
from binance.spot import Spot as Client
from binance.lib.utils import encoded_string
from binance.error import ParameterRequiredError
mock_item = {"key_1": "value_1", "key_2": "value_2"}
key = random_str()
secret = random_str()
params = {
"email": "alice@test.com",
"coin": "BNB",
"network": "BNB",
"recvWindow": 1000,
}
def test_sub_account_deposit_address_without_email():
"""Tests the API endpoint to get deposit address without email"""
params = {"email": "", "coin": "BNB", "network": "BNB", "recvWindow": 1000}
client = Client(key, secret)
client.sub_account_deposit_address.when.called_with(**params).should.throw(
ParameterRequiredError
)
def test_sub_account_deposit_address_without_coin():
"""Tests the API endpoint to get deposit address without coin"""
params = {
"email": "alice@test.com",
"coin": "",
"network": "BNB",
"recvWindow": 1000,
}
client = Client(key, secret)
client.sub_account_deposit_address.when.called_with(**params).should.throw(
ParameterRequiredError
)
@mock_http_response(
responses.GET,
"/sapi/v1/capital/deposit/subAddress\\?" + encoded_string(params),
mock_item,
200,
)
def test_sub_account_deposit_address():
"""Tests the API endpoint to get deposit address"""
client = Client(key, secret)
response = client.sub_account_deposit_address(**params)
response.should.equal(mock_item)
| 26.05
| 79
| 0.690339
| 0
| 0
| 0
| 0
| 363
| 0.232246
| 0
| 0
| 415
| 0.265515
|
676e003414de3f2f5ddecf2d26540316287d4189
| 6,232
|
py
|
Python
|
tools/telemetry/telemetry/results/page_test_results.py
|
Fusion-Rom/android_external_chromium_org
|
d8b126911c6ea9753e9f526bee5654419e1d0ebd
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2020-01-25T09:58:49.000Z
|
2020-01-25T09:58:49.000Z
|
tools/telemetry/telemetry/results/page_test_results.py
|
Fusion-Rom/android_external_chromium_org
|
d8b126911c6ea9753e9f526bee5654419e1d0ebd
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/telemetry/telemetry/results/page_test_results.py
|
Fusion-Rom/android_external_chromium_org
|
d8b126911c6ea9753e9f526bee5654419e1d0ebd
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2020-11-04T06:34:36.000Z
|
2020-11-04T06:34:36.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import traceback
from telemetry import value as value_module
from telemetry.results import page_run
from telemetry.results import progress_reporter as progress_reporter_module
from telemetry.value import failure
from telemetry.value import skip
class PageTestResults(object):
def __init__(self, output_stream=None, output_formatters=None,
progress_reporter=None, trace_tag=''):
"""
Args:
output_stream: The output stream to use to write test results.
output_formatters: A list of output formatters. The output
formatters are typically used to format the test results, such
as CsvOutputFormatter, which output the test results as CSV.
progress_reporter: An instance of progress_reporter.ProgressReporter,
to be used to output test status/results progressively.
trace_tag: A string to append to the buildbot trace
name. Currently only used for buildbot.
"""
# TODO(chrishenry): Figure out if trace_tag is still necessary.
super(PageTestResults, self).__init__()
self._output_stream = output_stream
self._progress_reporter = (
progress_reporter if progress_reporter is not None
else progress_reporter_module.ProgressReporter())
self._output_formatters = (
output_formatters if output_formatters is not None else [])
self._trace_tag = trace_tag
self._current_page_run = None
self._all_page_runs = []
self._representative_value_for_each_value_name = {}
self._all_summary_values = []
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
for k, v in self.__dict__.items():
if isinstance(v, collections.Container):
v = copy.copy(v)
setattr(result, k, v)
return result
@property
def all_page_specific_values(self):
values = []
for run in self._all_page_runs:
values += run.values
if self._current_page_run:
values += self._current_page_run.values
return values
@property
def all_summary_values(self):
return self._all_summary_values
@property
def current_page(self):
assert self._current_page_run, 'Not currently running test.'
return self._current_page_run.page
@property
def current_page_run(self):
assert self._current_page_run, 'Not currently running test.'
return self._current_page_run
@property
def all_page_runs(self):
return self._all_page_runs
@property
def pages_that_succeeded(self):
"""Returns the set of pages that succeeded."""
pages = set(run.page for run in self.all_page_runs)
pages.difference_update(self.pages_that_failed)
return pages
@property
def pages_that_failed(self):
"""Returns the set of failed pages."""
failed_pages = set()
for run in self.all_page_runs:
if run.failed:
failed_pages.add(run.page)
return failed_pages
@property
def failures(self):
values = self.all_page_specific_values
return [v for v in values if isinstance(v, failure.FailureValue)]
@property
def skipped_values(self):
values = self.all_page_specific_values
return [v for v in values if isinstance(v, skip.SkipValue)]
def _GetStringFromExcInfo(self, err):
return ''.join(traceback.format_exception(*err))
def WillRunPage(self, page):
assert not self._current_page_run, 'Did not call DidRunPage.'
self._current_page_run = page_run.PageRun(page)
self._progress_reporter.WillRunPage(self)
def DidRunPage(self, page, discard_run=False): # pylint: disable=W0613
"""
Args:
page: The current page under test.
discard_run: Whether to discard the entire run and all of its
associated results.
"""
assert self._current_page_run, 'Did not call WillRunPage.'
self._progress_reporter.DidRunPage(self)
if not discard_run:
self._all_page_runs.append(self._current_page_run)
self._current_page_run = None
def WillAttemptPageRun(self, attempt_count, max_attempts):
"""To be called when a single attempt on a page run is starting.
This is called between WillRunPage and DidRunPage and can be
called multiple times, one for each attempt.
Args:
attempt_count: The current attempt number, start at 1
(attempt_count == 1 for the first attempt, 2 for second
attempt, and so on).
max_attempts: Maximum number of page run attempts before failing.
"""
self._progress_reporter.WillAttemptPageRun(
self, attempt_count, max_attempts)
# Clear any values from previous attempts for this page run.
self._current_page_run.ClearValues()
def AddValue(self, value):
assert self._current_page_run, 'Not currently running test.'
self._ValidateValue(value)
# TODO(eakuefner/chrishenry): Add only one skip per pagerun assert here
self._current_page_run.AddValue(value)
self._progress_reporter.DidAddValue(value)
def AddSummaryValue(self, value):
assert value.page is None
self._ValidateValue(value)
self._all_summary_values.append(value)
def _ValidateValue(self, value):
assert isinstance(value, value_module.Value)
if value.name not in self._representative_value_for_each_value_name:
self._representative_value_for_each_value_name[value.name] = value
representative_value = self._representative_value_for_each_value_name[
value.name]
assert value.IsMergableWith(representative_value)
def PrintSummary(self):
self._progress_reporter.DidFinishAllTests(self)
for output_formatter in self._output_formatters:
output_formatter.Format(self)
def FindPageSpecificValuesForPage(self, page, value_name):
values = []
for value in self.all_page_specific_values:
if value.page == page and value.name == value_name:
values.append(value)
return values
def FindAllPageSpecificValuesNamed(self, value_name):
values = []
for value in self.all_page_specific_values:
if value.name == value_name:
values.append(value)
return values
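# Typical lifecycle sketch (illustrative; `page` and `value` stand for a
# telemetry page and Value instance and are not defined here):
#     results = PageTestResults()
#     results.WillRunPage(page)
#     results.AddValue(value)
#     results.DidRunPage(page)
#     results.PrintSummary()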
| 33.869565
| 75
| 0.72914
| 5,789
| 0.928915
| 0
| 0
| 1,372
| 0.220154
| 0
| 0
| 1,736
| 0.278562
|
676fd905727818efa8eda82566b5e796e9f06ce8
| 11,273
|
py
|
Python
|
src/utils/gradcam.py
|
xmuyzz/IVContrast
|
f3100e54f1808e1a796acd97ef5d23d0a2fd4f6c
|
[
"MIT"
] | 3
|
2022-02-23T09:05:45.000Z
|
2022-02-23T20:18:18.000Z
|
src/utils/gradcam.py
|
xmuyzz/IVContrast
|
f3100e54f1808e1a796acd97ef5d23d0a2fd4f6c
|
[
"MIT"
] | null | null | null |
src/utils/gradcam.py
|
xmuyzz/IVContrast
|
f3100e54f1808e1a796acd97ef5d23d0a2fd4f6c
|
[
"MIT"
] | null | null | null |
import os
import cv2
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model, load_model
#---------------------------------------------------------------------------------
# get data
#---------------------------------------------------------------------------------
def data(input_channel, i, val_save_dir, test_save_dir):
### load train data based on input channels
if run_type == 'val':
if input_channel == 1:
fn = 'val_arr_1ch.npy'
elif input_channel == 3:
fn = 'val_arr_3ch.npy'
data = np.load(os.path.join(pro_data_dir, fn))
df = pd.read_csv(os.path.join(val_save_dir, 'val_pred_df.csv'))
elif run_type == 'test':
if input_channel == 1:
fn = 'test_arr_1ch.npy'
elif input_channel == 3:
fn = 'test_arr_3ch.npy'
data = np.load(os.path.join(pro_data_dir, fn))
df = pd.read_csv(os.path.join(test_save_dir, 'test_pred_df.csv'))
elif run_type == 'exval':
if input_channel == 1:
fn = 'exval_arr_1ch.npy'
elif input_channel == 3:
fn = 'exval_arr_3ch.npy'
data = np.load(os.path.join(pro_data_dir, fn))
df = pd.read_csv(os.path.join(exval_save_dir, 'exval_pred_df.csv'))
### load label
y_true = df['label']
y_pred_class = df['y_pred_class']
y_pred = df['y_pred']
ID = df['fn']
### find the ith image to show grad-cam map
img = data[i, :, :, :]
img = img.reshape((1, 192, 192, 3))
label = y_true[i]
pred_index = y_pred_class[i]
y_pred = y_pred[i]
ID = ID[i]
return img, label, pred_index, y_pred, ID
#------------------------------------------------------------------------------------
# find last conv layer
#-----------------------------------------------------------------------------------
def find_target_layer(model, saved_model):
# find the final conv layer by looping layers in reverse order
for layer in reversed(model.layers):
# check to see if the layer has a 4D output
if len(layer.output_shape) == 4:
return layer.name
raise ValueError("Could not find 4D layer. Cannot apply GradCAM.")
#----------------------------------------------------------------------------------
# calculate gradient class actiavtion map
#----------------------------------------------------------------------------------
def compute_heatmap(model, saved_model, image, pred_index, last_conv_layer):
"""
construct our gradient model by supplying (1) the inputs
to our pre-trained model, (2) the output of the (presumably)
final 4D layer in the network, and (3) the output of the
softmax activations from the model
"""
gradModel = Model(
inputs=[model.inputs],
outputs=[model.get_layer(last_conv_layer).output, model.output]
)
# record operations for automatic differentiation
with tf.GradientTape() as tape:
"""
cast the image tensor to a float-32 data type, pass the
image through the gradient model, and grab the loss
associated with the specific class index
"""
print(pred_index)
inputs = tf.cast(image, tf.float32)
print(image.shape)
last_conv_layer_output, preds = gradModel(inputs)
print(preds)
print(preds.shape)
# class_channel = preds[:, pred_index]
class_channel = preds
# use automatic differentiation to compute the gradients
grads = tape.gradient(class_channel, last_conv_layer_output)
"""
This is a vector where each entry is the mean intensity of the gradient
over a specific feature map channel
"""
pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
"""
We multiply each channel in the feature map array
by "how important this channel is" with regard to the top predicted class
then sum all the channels to obtain the heatmap class activation
"""
last_conv_layer_output = last_conv_layer_output[0]
heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
heatmap = tf.squeeze(heatmap)
# For visualization purpose, we will also normalize the heatmap between 0 & 1
heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
heatmap = heatmap.numpy()
return heatmap
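# In symbols (an illustrative summary of the code above): with A^k the k-th
# feature map of the chosen conv layer and y the model output used as the
# class channel,
#     alpha_k = mean over (i, j) of  d y / d A^k_ij     (pooled_grads)
#     L       = relu(sum_k alpha_k * A^k)               (heatmap, scaled to [0, 1])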
#------------------------------------------------------------------------------------
# save gradcam heat map
#-----------------------------------------------------------------------------------
def save_gradcam(image, heatmap, val_gradcam_dir, test_gradcam_dir, alpha, i):
# print('heatmap:', heatmap.shape)
# Rescale heatmap to a range 0-255
heatmap = np.uint8(255 * heatmap)
# Use jet colormap to colorize heatmap
jet = cm.get_cmap("jet")
# Use RGB values of the colormap
jet_colors = jet(np.arange(256))[:, :3]
jet_heatmap = jet_colors[heatmap]
# resize heatmap
jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)
jet_heatmap0 = jet_heatmap.resize(re_size)
jet_heatmap1 = keras.preprocessing.image.img_to_array(jet_heatmap0)
# print('jet_heatmap:', jet_heatmap1.shape)
# resize background CT image
img = image.reshape((192, 192, 3))
img = keras.preprocessing.image.array_to_img(img)
img0 = img.resize(re_size)
img1 = keras.preprocessing.image.img_to_array(img0)
# print('img shape:', img1.shape)
# Superimpose the heatmap on original image
superimposed_img = jet_heatmap1 * alpha + img1
superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img)
# Save the superimposed image
if run_type == 'val':
save_dir = val_gradcam_dir
elif run_type == 'test':
save_dir = test_gradcam_dir
elif run_type == 'exval':
save_dir = exval_gradcam_dir
fn1 = str(conv_n) + '_' + str(i) + '_' + 'gradcam.png'
fn2 = str(conv_n) + '_' + str(i) + '_' + 'heatmap.png'
fn3 = str(conv_n) + '_' + str(i) + '_' + 'heatmap_raw.png'
fn4 = str(i) + '_' + 'CT.png'
superimposed_img.save(os.path.join(save_dir, fn1))
# jet_heatmap0.save(os.path.join(save_dir, fn2))
# jet_heatmap.save(os.path.join(save_dir, fn3))
# img0.save(os.path.join(save_dir, fn4))
if __name__ == '__main__':
train_img_dir = '/media/bhkann/HN_RES1/HN_CONTRAST/train_img_dir'
val_save_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/val'
test_save_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/test'
exval_save_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/exval'
val_gradcam_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/val/gradcam'
test_gradcam_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/test/gradcam'
exval_gradcam_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/test/gradcam'
pro_data_dir = '/home/bhkann/zezhong/git_repo/IV-Contrast-CNN-Project/pro_data'
model_dir = '/mnt/aertslab/USERS/Zezhong/contrast_detection/model'
input_channel = 3
re_size = (192, 192)
i = 72
crop = True
alpha = 0.9
saved_model = 'ResNet_2021_07_18_06_28_40'
show_network = False
conv_n = 'conv5'
run_type = 'val'
#---------------------------------------------------------
# run main function
#--------------------------------------------------------
if run_type == 'val':
save_dir = val_save_dir
elif run_type == 'test':
save_dir = test_save_dir
## load model and find conv layers
model = load_model(os.path.join(model_dir, saved_model))
# model.summary()
list_i = [100, 105, 110, 115, 120, 125]
for i in list_i:
image, label, pred_index, y_pred, ID = data(
input_channel=input_channel,
i=i,
val_save_dir=val_save_dir,
test_save_dir=test_save_dir
)
conv_list = ['conv2', 'conv3', 'conv4', 'conv5']
conv_list = ['conv4']
for conv_n in conv_list:
if conv_n == 'conv2':
last_conv_layer = 'conv2_block3_1_conv'
elif conv_n == 'conv3':
last_conv_layer = 'conv3_block4_1_conv'
elif conv_n == 'conv4':
last_conv_layer = 'conv4_block6_1_conv'
elif conv_n == 'conv5':
last_conv_layer = 'conv5_block3_out'
heatmap = compute_heatmap(
model=model,
saved_model=saved_model,
image=image,
pred_index=pred_index,
last_conv_layer=last_conv_layer
)
save_gradcam(
image=image,
heatmap=heatmap,
val_gradcam_dir=val_gradcam_dir,
test_gradcam_dir=test_gradcam_dir,
alpha=alpha,
i=i
)
print('label:', label)
print('ID:', ID)
print('y_pred:', y_pred)
print('prediction:', pred_index)
print('conv layer:', conv_n)
# if last_conv_layer is None:
# last_conv_layer = find_target_layer(
# model=model,
# saved_model=saved_model
# )
# print(last_conv_layer)
#
# if show_network == True:
# for idx in range(len(model.layers)):
# print(model.get_layer(index = idx).name)
# # compute the guided gradients
# castConvOutputs = tf.cast(convOutputs > 0, "float32")
# castGrads = tf.cast(grads > 0, "float32")
# guidedGrads = castConvOutputs * castGrads * grads
# # the convolution and guided gradients have a batch dimension
# # (which we don't need) so let's grab the volume itself and
# # discard the batch
# convOutputs = convOutputs[0]
# guidedGrads = guidedGrads[0]
#
# # compute the average of the gradient values, and using them
# # as weights, compute the ponderation of the filters with
# # respect to the weights
# weights = tf.reduce_mean(guidedGrads, axis=(0, 1))
# cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)
#
# # grab the spatial dimensions of the input image and resize
# # the output class activation map to match the input image
# # dimensions
## (w, h) = (image.shape[2], image.shape[1])
## heatmap = cv2.resize(cam.numpy(), (w, h))
# heatmap = cv2.resize(heatmap.numpy(), (64, 64))
# # normalize the heatmap such that all values lie in the range
## # [0, 1], scale the resulting values to the range [0, 255],
## # and then convert to an unsigned 8-bit integer
# numer = heatmap - np.min(heatmap)
# eps = 1e-8
# denom = (heatmap.max() - heatmap.min()) + eps
# heatmap = numer / denom
# heatmap = (heatmap * 255).astype("uint8")
# colormap=cv2.COLORMAP_VIRIDIS
# heatmap = cv2.applyColorMap(heatmap, colormap)
# print('heatmap shape:', heatmap.shape)
## img = image[:, :, :, 0]
## print('img shape:', img.shape)
# img = image.reshape((64, 64, 3))
# print(img.shape)
# output = cv2.addWeighted(img, 0.5, heatmap, 0.5, 0)
#
#
# return heatmap, output
| 37.327815
| 86
| 0.593808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,739
| 0.509093
|
677019eb7c18145cccb4dc9a2d50f339eddc7e89
| 5,038
|
py
|
Python
|
start.py
|
xylovedd/yangyang
|
4cb99491c0f046da9a39f7c916e0c85cb473c002
|
[
"Apache-2.0"
] | 20
|
2019-11-14T02:53:53.000Z
|
2022-03-26T02:44:04.000Z
|
start.py
|
janlle/12306
|
73b1d5423492013447ebdbbfcc6f1fe3a719ee0b
|
[
"Apache-2.0"
] | 9
|
2019-11-17T09:16:37.000Z
|
2022-03-12T00:07:14.000Z
|
start.py
|
xylovedd/yangyang
|
4cb99491c0f046da9a39f7c916e0c85cb473c002
|
[
"Apache-2.0"
] | 7
|
2019-12-05T09:26:09.000Z
|
2020-11-15T15:13:16.000Z
|
# coding:utf-8
"""
start rob task good luck!
> python start.py
"""
import datetime
import time
from sys import version_info
import threadpool
import ticket_config as config
from config.stations import check_station_exists
from train.login import Login
from train.order import Order
from train.ticket import Ticket
from util.app_util import current_date, validate_date_str, current_hour, current_timestamp, datetime_str_timestamp, \
validate_time_str
from util.logger import Logger
log = Logger('INFO')
if __name__ == '__main__':
if version_info.major != 3 or version_info.minor != 6:
log.error("请使用Python3.6版本运行此程序")
# Checking config information
if not validate_date_str(config.DATE):
        log.error('The departure date format is invalid')
exit(0)
today = datetime.datetime.strptime(current_date(), '%Y-%m-%d')
depart_day = datetime.datetime.strptime(config.DATE, '%Y-%m-%d')
difference = (depart_day - today).days
if difference > 29 or difference < 0:
        log.error('The departure date is outside the 12306 presale window')
exit(0)
if not check_station_exists(config.FROM_STATION) or not check_station_exists(config.TO_STATION):
        log.error('Station does not exist')
exit(0)
if config.SELL_TIME != '':
if not validate_time_str(config.SELL_TIME):
            log.error('The ticket sale start time format is invalid')
exit(0)
login = Login()
while True:
hour = current_hour()
if hour > 22 or hour < 6:
time.sleep(1.5)
continue
else:
login.login()
order = Order(None)
if not order.search_unfinished_order():
break
count = 0
# Sell time
if config.SELL_TIME != '':
start_time = datetime_str_timestamp(config.DATE + ' ' + config.SELL_TIME)
        log.info('Waiting for tickets to go on sale...')
while True:
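            # 2505600 s = 29 days: presumably this shifts "now" forward by the
            # 12306 presale window, so the loop exits once sales open.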
current_time = current_timestamp() + 2505600
if start_time - current_time < 0:
break
log.info('Starting...')
while True:
ticket_list = Ticket.search_stack(from_station=config.FROM_STATION, to_station=config.TO_STATION,
train_date=config.DATE)
        # Keep only bookable tickets ('预订' is 12306's "bookable" status)
ticket_list = list(filter(lambda x: x.sell_time == '预订', ticket_list))
if len(ticket_list) < 1:
            log.info('No bookable tickets at the moment')
continue
count += 1
if config.SEAT_TYPE:
ticket_list = [i for i in ticket_list if i.train_no in config.TRAINS_NO]
Ticket.show_tickets(ticket_list)
seat_level_all = [([0] * len(config.TRAINS_NO)) for i in range(len(config.SEAT_TYPE))]
for j, ticket in enumerate(ticket_list):
ticket_seat = ticket.get_seat_level(config.SEAT_TYPE)
for i, seat in enumerate(ticket_seat):
seat_level_all[i][j] = seat
# Choose a ticket that you can order
usable_ticket = {}
for i in seat_level_all:
for j in i:
train_no = j['train_no']
usable = j['usable']
seat_type = j['type']
if usable == '--' or usable == 'no' or usable == '*':
usable = 0
elif usable == 'yes':
usable = 21
usable = int(usable)
if usable > 0:
usable_ticket = {'train_no': train_no, 'type': seat_type, 'seat_count': usable}
break
else:
continue
break
if usable_ticket:
order_ticket = None
for ticket in ticket_list:
if ticket.train_no == usable_ticket['train_no']:
order_ticket = ticket
break
order_ticket.seat_type = usable_ticket['type']
order_ticket.seat_count = usable_ticket['seat_count']
order = Order(order_ticket)
order.submit()
log.info(order)
            log.info('Ticket order submitted successfully, please wait...')
order.order_callback()
break
else:
            log.warning('No suitable ticket found, retrying the query, attempt: {}'.format(count))
time.sleep(1)
break
def start_thread_pool():
    # Unused placeholder: threadpool.makeRequests needs a real callable and an
    # argument list before this pool can do any work.
    pool = threadpool.ThreadPool(10)
    reqs = threadpool.makeRequests(None, None)
    [pool.putRequest(req) for req in reqs]
    pool.wait()
| 37.044118
| 117
| 0.502382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 659
| 0.126197
|
6770f980c35e8599c5cad58c26a50fad3654f206
| 2,769
|
py
|
Python
|
frameworks/PHP/cakephp/setup.py
|
idlewan/FrameworkBenchmarks
|
f187ec69752f369d84ef5a262efaef85c3a6a5ab
|
[
"BSD-3-Clause"
] | null | null | null |
frameworks/PHP/cakephp/setup.py
|
idlewan/FrameworkBenchmarks
|
f187ec69752f369d84ef5a262efaef85c3a6a5ab
|
[
"BSD-3-Clause"
] | null | null | null |
frameworks/PHP/cakephp/setup.py
|
idlewan/FrameworkBenchmarks
|
f187ec69752f369d84ef5a262efaef85c3a6a5ab
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess
import sys
import os
import setup_util
from os.path import expanduser
def start(args, logfile, errfile):
fwroot = args.fwroot
setup_util.replace_text("cakephp/app/Config/database.php", "'host' => '.*',", "'host' => '" + args.database_host + "',")
setup_util.replace_text("cakephp/app/Config/core.php", "'REDISSERVER'", "'" + args.database_host + "'")
setup_util.replace_text("cakephp/deploy/cake", "\".*\/FrameworkBenchmarks/cakephp", "\"%s" % args.troot)
setup_util.replace_text("cakephp/deploy/cake", "Directory .*\/FrameworkBenchmarks/cakephp", "Directory %s" % args.troot)
setup_util.replace_text("cakephp/deploy/nginx.conf", "root .*\/FrameworkBenchmarks/cakephp", "root %s" % args.troot)
try:
if os.name == 'nt':
setup_util.replace_text("cakephp/app/Config/core.php", "'Redis'", "'Wincache'")
subprocess.check_call('icacls "C:\\FrameworkBenchmarks\\cakephp" /grant "IIS_IUSRS:(OI)(CI)F"', shell=True, stderr=errfile, stdout=logfile)
subprocess.check_call('appcmd add site /name:PHP /bindings:http/*:8080: /physicalPath:"C:\\FrameworkBenchmarks\\cakephp\\app\\webroot"', shell=True, stderr=errfile, stdout=logfile)
return 0
#subprocess.check_call("sudo cp cake/deploy/cake /etc/apache2/sites-available/", shell=True)
#subprocess.check_call("sudo a2ensite cake", shell=True)
subprocess.check_call("sudo chown -R www-data:www-data cakephp", shell=True, stderr=errfile, stdout=logfile)
# Sudo needed to switch to correct user
# This is a bit tricky as sudo normally resets the PATH for security
# To work around that in this one case, we use the full
# path to the php-fpm binary we setup in bash_profile.sh
subprocess.check_call("sudo $PHP_FPM --fpm-config $FWROOT/config/php-fpm.conf -g $TROOT/deploy/php-fpm.pid", shell=True, stderr=errfile, stdout=logfile)
subprocess.check_call("sudo /usr/local/nginx/sbin/nginx -c $TROOT/deploy/nginx.conf", shell=True, stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
def stop(logfile, errfile):
try:
if os.name == 'nt':
subprocess.call('appcmd delete site PHP', shell=True, stderr=errfile, stdout=logfile)
return 0
subprocess.call("sudo /usr/local/nginx/sbin/nginx -s stop", shell=True, stderr=errfile, stdout=logfile)
subprocess.call("sudo kill -QUIT $( cat $TROOT/deploy/php-fpm.pid )", shell=True, stderr=errfile, stdout=logfile)
#subprocess.check_call("sudo a2dissite cake", shell=True)
#subprocess.check_call("sudo /etc/init.d/apache2 stop", shell=True)
subprocess.check_call("sudo chown -R $USER:$USER cakephp", shell=True, stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
| 57.6875
| 186
| 0.717226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,412
| 0.509931
|
67719e766692980e9b9fa0f337632160d3b1343e
| 624
|
py
|
Python
|
Functions/parsetool.py
|
AlessandroChen/KindleHelper
|
7b102fec44e80585ba7a4b425429f11f0c2ca4e1
|
[
"Apache-2.0"
] | 19
|
2019-02-23T02:17:28.000Z
|
2022-03-17T16:27:10.000Z
|
Functions/parsetool.py
|
AlessandroChen/KindleHelper
|
7b102fec44e80585ba7a4b425429f11f0c2ca4e1
|
[
"Apache-2.0"
] | 1
|
2019-05-05T09:11:22.000Z
|
2019-06-15T04:48:29.000Z
|
Functions/parsetool.py
|
AlessandroChen/KindleHelper
|
7b102fec44e80585ba7a4b425429f11f0c2ca4e1
|
[
"Apache-2.0"
] | 3
|
2019-06-09T01:53:48.000Z
|
2019-09-09T07:04:51.000Z
|
import os, stat
def addPermission(Filename):
    # Add the execute-by-owner bit to the file's current permissions
    os.chmod(Filename, os.stat(Filename).st_mode | stat.S_IXUSR)
def transform(content):
    # Replace the first space of each double space with a newline, copying
    # every other character through unchanged
    name = ''
    for i in range(0, len(content)):
        if content[i] == ' ' and i + 1 < len(content) and content[i + 1] == ' ':
            name += '\n'
        else:
            name += content[i]
    return name
def getContent(name):
    # Return the quoted value after the first "content" key, assuming the
    # pattern content="VALUE" or content='VALUE' (i + 9 skips over 'content="')
    j = 0
    for i in range(0, len(name)):
        if name[i:i+7] == 'content':
            j = i + 9
            for j in range(i + 9, len(name)):
                if name[j] == "'" or name[j] == '"':
                    break
            return name[i+9:j]
| 26
| 65
| 0.464744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.043269
|
6772f47be90751a8ab2cbacfba1c7b99baa2b64a
| 102
|
py
|
Python
|
caiman/models.py
|
Rockstreet/usman_min
|
c15145a444cbc913a1349b69dffc0b8a45e38dbb
|
[
"MIT"
] | null | null | null |
caiman/models.py
|
Rockstreet/usman_min
|
c15145a444cbc913a1349b69dffc0b8a45e38dbb
|
[
"MIT"
] | null | null | null |
caiman/models.py
|
Rockstreet/usman_min
|
c15145a444cbc913a1349b69dffc0b8a45e38dbb
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
| 10.2
| 65
| 0.784314
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
677367dc85c6f920d38d59e7cc33a0e5eafc5a8c
| 6,987
|
py
|
Python
|
Code/utils.py
|
minna-ust/SemanticMapGeneration
|
ab50ed853552713d4d4447b4c1d44e0b8f147318
|
[
"BSD-3-Clause"
] | 8
|
2020-01-15T02:49:35.000Z
|
2021-11-26T08:29:50.000Z
|
Code/utils.py
|
Hezip/SemanticMapGeneration
|
98920045c1da5812f6691e6eb75bcc3413406035
|
[
"BSD-3-Clause"
] | null | null | null |
Code/utils.py
|
Hezip/SemanticMapGeneration
|
98920045c1da5812f6691e6eb75bcc3413406035
|
[
"BSD-3-Clause"
] | 6
|
2020-03-05T06:40:24.000Z
|
2022-02-16T04:56:38.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Common utility functions
Created on Sun May 27 16:37:42 2018
@author: chen
"""
import math
import cv2
import os
from imutils import paths
import numpy as np
import scipy.ndimage
def rotate_cooridinate(cooridinate_og,rotate_angle,rotate_center):
"""
calculate the coordinates after rotation
"""
rotate_angle = rotate_angle*(math.pi/180)
rotated_x = (cooridinate_og[0]-rotate_center[0])*math.cos(rotate_angle)\
-(cooridinate_og[1]-rotate_center[1])*math.sin(rotate_angle)+rotate_center[0]
rotated_y = (cooridinate_og[0]-rotate_center[0])*math.sin(rotate_angle)\
+(cooridinate_og[1]-rotate_center[1])*math.cos(rotate_angle)+rotate_center[1]
rotated_coordinate = np.array([rotated_x,rotated_y])
rotated_coordinate = np.round(rotated_coordinate).astype(np.int)
return rotated_coordinate
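# In matrix form (illustrative): for a point p, center c and angle t (radians),
#     p' = R(t) (p - c) + c,   R(t) = [[cos t, -sin t], [sin t, cos t]]
# which is exactly the pair of expressions computed above.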
def mkdir(path):
"""
create new folder automatically
"""
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
def load_data(path):
"""
load data from specified folder
"""
print("[INFO] loading images...")
imgs = []
# grab the image paths and randomly shuffle them
imagePaths = sorted(list(paths.list_images(path)))
for imagePath in imagePaths:
# load the image, pre-process it, and store it in the data list
image = cv2.imread(imagePath,cv2.IMREAD_GRAYSCALE)
imgs.append(image)
return imgs
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def normfun(x,sigma):
"""
function of normal distribution
"""
mu = 45
pdf = np.exp(-((x - mu)**2)/(2*sigma**2)) / (sigma * np.sqrt(2*np.pi))
return pdf
def calc_box(box,x_gap,y_gap,rotate_angle,center):
"""
calculate the size of the required surrounding environment for doorway segmentation
box: four corners' coordinates of doorway
x_gap: remained space in the vertical way
y_gap: remained space in the horizontal way
"""
door_box = np.array([box[0][::-1]+[y_gap,x_gap],box[1][::-1]+[y_gap,-x_gap],
box[2][::-1]-[y_gap,x_gap],box[3][::-1]-[y_gap,-x_gap]])
rotated_box = []
for coordinate in door_box:
box_coordinate = rotate_cooridinate(coordinate,rotate_angle,center)
rotated_box.append(box_coordinate)
rotated_box = np.array(rotated_box)
box = [np.min(rotated_box[:,0]),np.min(rotated_box[:,1]),np.max(rotated_box[:,0]),np.max(rotated_box[:,1])]
return box
def calc_IoU(candidateBound, groundTruthBounds):
"""
calculate the intersection over union
"""
cx1 = candidateBound[0]
cy1 = candidateBound[1]
cx2 = candidateBound[2]
cy2 = candidateBound[3]
gx1 = groundTruthBounds[:,0]
gy1 = groundTruthBounds[:,1]
gx2 = groundTruthBounds[:,2]
gy2 = groundTruthBounds[:,3]
carea = (cx2 - cx1) * (cy2 - cy1)
garea = (gx2 - gx1) * (gy2 - gy1)
x1 = np.maximum(cx1, gx1)
y1 = np.maximum(cy1, gy1)
x2 = np.minimum(cx2, gx2)
y2 = np.minimum(cy2, gy2)
w = np.maximum(0, x2 - x1)
h = np.maximum(0, y2 - y1)
area = w * h
ious = area / (carea + garea - area)
return ious
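# Worked example (illustrative): candidate [0, 0, 2, 2] against ground truth
# [1, 1, 3, 3] gives intersection 1 * 1 = 1 and union 4 + 4 - 1 = 7, so the
# IoU is 1/7 ~= 0.143.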
def overlapp(candidateBound, groundTruthBounds):
"""
calculate the proportion of prediction to groundtruth
"""
cx1 = candidateBound[0]
cy1 = candidateBound[1]
cx2 = candidateBound[2]
cy2 = candidateBound[3]
gx1 = groundTruthBounds[:,0]
gy1 = groundTruthBounds[:,1]
gx2 = groundTruthBounds[:,2]
gy2 = groundTruthBounds[:,3]
garea = (gx2 - gx1) * (gy2 - gy1)
x1 = np.maximum(cx1, gx1)
y1 = np.maximum(cy1, gy1)
x2 = np.minimum(cx2, gx2)
y2 = np.minimum(cy2, gy2)
w = np.maximum(0, x2 - x1)
h = np.maximum(0, y2 - y1)
area = w * h
reious = area / garea
return reious
def calc_corner(door_center,door_size,door_depth,side):
"""
calculate the corners' coordinates from the centroid, size and depth of doorway
door_corners_inside is a list of coordinates of corners close to the corridor
door_corners_outside is a list of coordinates of corners close to the room
"""
door_corners_inside = [door_center-np.array([np.int(door_size/2),0]),
door_center+np.array([door_size-np.int(door_size/2),0])]
door_corners_outside = [x-np.array([0,np.power(-1,side)*door_depth[side]])
for x in door_corners_inside]
door_corners_outside = np.array(door_corners_outside)
return door_corners_inside,door_corners_outside
def draw_door(mask,complete_map,door,door_depth,side):
"""
label the doorway on the mask and add some error inside the doorway region
"""
door_size = abs(door[1,0]-door[0,0])
door_area_inside = door+np.array([0,np.power(-1,side)*door_depth[side]])
# label the doorway on the mask
cv2.rectangle(mask,tuple(door[0][::-1]),tuple(door_area_inside[1][::-1]),255,-1)
# add a small point to emulate the error in the doorway region
if door_size>20:
if np.random.randint(4)==0:
if side ==0:
pt_center = [np.random.randint(door[0,0]+4,door[1,0]-3),np.random.randint(door[0,1],door_area_inside[0,1])]
else:
pt_center = [np.random.randint(door[0,0]+3,door[1,0]-2),np.random.randint(door_area_inside[0,1],door[0,1])]
cv2.circle(complete_map,tuple(pt_center[::-1]),np.random.choice([1,2,3]),0,-1)
return door_size
def room_division(room_space,num_room):
"""
assign the lengths of rooms according to the length of corridor and number of rooms
room_space: coordinates of corridor's side
num_room: the number of rooms on one side
rooms: a list of the coordinates belonging to different rooms
    rooms_corners: a list of only the top and bottom coordinates of different rooms
"""
rooms = []
rooms_corners=[]
a = num_room
thickness = np.random.randint(2,5)
length = room_space.shape[0]-(num_room-1)*thickness
start_point = 0
for i in range(num_room-1):
room_size = np.random.randint(length/(a+0.7),length/(a-0.7))
room = room_space[start_point:start_point+room_size,:]
rooms.append(room)
start_point +=room_size+thickness
room = room_space[start_point:,:]
rooms.append(room)
rooms = [room.astype(np.int) for room in rooms]
for x in rooms:
rooms_corner = np.concatenate((x[0,:][np.newaxis,:],x[-1,:][np.newaxis,:]),axis = 0)
rooms_corners.append(rooms_corner)
return rooms,rooms_corners
def calc_gradient(gmap):
"""
calculate the gradient of image to find the contour
"""
kernel = np.array([[1,1,1],[1,-8,1],[1,1,1]])
img = gmap.astype(np.int16)
gradient = scipy.ndimage.correlate(img,kernel,mode = 'constant',cval =127)
return gradient
| 32.347222
| 123
| 0.640046
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,683
| 0.240876
|
6773e2cae4ca1a7fe539b33cf15047934bd21fc6
| 1,225
|
py
|
Python
|
py_git/working_with_github/main.py
|
gabrieldemarmiesse/my_work_environment
|
6175afbee154d0108992259633a1c89e560fd12f
|
[
"MIT"
] | 1
|
2021-02-27T19:34:43.000Z
|
2021-02-27T19:34:43.000Z
|
py_git/working_with_github/main.py
|
gabrieldemarmiesse/my_work_environment
|
6175afbee154d0108992259633a1c89e560fd12f
|
[
"MIT"
] | null | null | null |
py_git/working_with_github/main.py
|
gabrieldemarmiesse/my_work_environment
|
6175afbee154d0108992259633a1c89e560fd12f
|
[
"MIT"
] | null | null | null |
import os
import sys
from subprocess import CalledProcessError
from working_with_github.utils import run
def checkout_pr():
user, branch = sys.argv[1].split(":")
_checkout_pr(user, branch)
def _checkout_pr(user, branch):
run(f"git remote add {user} git@github.com:{user}/addons.git", fail_ok=True)
run(f"git fetch {user}")
try:
run(f"git checkout -b {user}_{branch} {user}/{branch}")
except CalledProcessError:
run(f"git checkout {user}_{branch}")
run(f"git pull")
run(f"git branch --set-upstream-to {user}/{branch}", fail_ok=True)
def setup_oss():
repo = sys.argv[1]
assert "/" in repo
org, repo = repo.split("/")
url_upstream = f"https://github.com/{org}/{repo}.git"
url_origin = f"git@github.com:gabrieldemarmiesse/{repo}.git"
run(f"git clone {url_origin}")
os.chdir(f"./{repo}")
run(f"git remote add upstream {url_upstream}")
run("git fetch upstream")
run("git branch --set-upstream-to upstream/master")
run("git pull")
run("git push origin master")
def update_pr():
user, branch = sys.argv[1].split(":")
_checkout_pr(user, branch)
run("git merge master")
run(f"git push {user} HEAD:{branch}")
| 26.630435
| 80
| 0.646531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 539
| 0.44
|
67752967909d812410a7c0a4e3e611d417d432d0
| 4,144
|
py
|
Python
|
main.py
|
Lee-Kevin/Danboard
|
28b4b0ecada4f29a7106bb3af38f608c0bd681b2
|
[
"MIT"
] | null | null | null |
main.py
|
Lee-Kevin/Danboard
|
28b4b0ecada4f29a7106bb3af38f608c0bd681b2
|
[
"MIT"
] | null | null | null |
main.py
|
Lee-Kevin/Danboard
|
28b4b0ecada4f29a7106bb3af38f608c0bd681b2
|
[
"MIT"
] | null | null | null |
import logging
import time
import re
import serial
from threading import Thread, Event
from respeaker import Microphone
from respeaker import BingSpeechAPI
from respeaker import PixelRing,pixel_ring
BING_KEY = '95e4fe8b3a324389be4595bd1813121c'
ser = serial.Serial('/dev/ttyS1',115200,timeout=0)
data=[0xAA,0x01,0x64,0x55]
data1=[0xAA,0x01,0x00,0x55]
data2=[0xAA,0x01,0x00,0x55,0xAA,0x00,0x00,0x55]
data3=[0xAA,0x01,0x64,0x55,0xAA,0x00,0x64,0x55]
lefthand = [0xAA,0x00,0x32,0x55]
righthand = [0xAA,0x01,0x32,0x55]
nodhead = [0xAA,0x02,0x32,0x55]
shakehead = [0xAA,0x03,0x32,0x55]
wakeup = [0xAA,0x02,0x64,0x55,0xAA,0x03,0x64,0x55]
origin = [lefthand,righthand,nodhead,shakehead]
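# Assumed frame layout (inferred from the constants above, not documented in
# the original): a 0xAA header byte, one command/servo id byte, one parameter
# byte (position or speed), and a 0x55 tail byte; `wakeup` is two such frames
# concatenated.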
def robot(action):
if action == "LeftHand":
data[1] = 0x00
data1[1] = 0x00
for i in range(0,3):
ser.write(data)
time.sleep(.4)
ser.write(data1)
time.sleep(.4)
ser.write(lefthand)
elif action == "RightHand":
data[1] = 0x01
data1[1] = 0x01
for i in range(0,3):
ser.write(data)
time.sleep(.4)
ser.write(data1)
time.sleep(.4)
ser.write(righthand)
elif action == "NodHead":
data[1] = 0x02
data1[1] = 0x02
for i in range(0,3):
ser.write(data)
time.sleep(.4)
ser.write(data1)
time.sleep(.4)
ser.write(nodhead)
elif action == "ShakeHead":
data[1] = 0x03
data1[1] = 0x03
for i in range(0,3):
ser.write(data)
time.sleep(.4)
ser.write(data1)
time.sleep(.4)
ser.write(shakehead)
elif action == "ShakeHand":
for i in range(0,3):
ser.write(data2)
time.sleep(.5)
ser.write(data3)
time.sleep(.5)
elif action == "WakeUp":
ser.write(wakeup)
time.sleep(.5)
for i in range(0,4):
ser.write(origin[i])
def task(quit_event):
mic = Microphone(quit_event=quit_event)
bing = BingSpeechAPI(BING_KEY)
while not quit_event.is_set():
if mic.wakeup('respeaker'):
print('Wake up')
pixel_ring.listen()
robot("WakeUp")
time.sleep(.1)
data = mic.listen()
try:
pixel_ring.wait()
text = bing.recognize(data, language='en-US')
# spi.write('answer\n')
print('\nBing:' + text.encode('utf-8'))
if re.search(r'shake', text) and re.search(r'left hand', text):
robot("LeftHand")
print("Shake Left hand")
elif re.search(r'shake',text) and re.search(r'right hand',text):
robot("RightHand")
print("Shake right hand")
elif re.search(r'shake.*(head).*',text):
robot("ShakeHead")
print("Shake head")
elif re.search(r'head',text) or re.search(r'had',text):
robot("NodHead")
print("Nod head")
elif re.search(r'hand',text) :
robot("ShakeHand")
print("Shake hand")
elif re.search(r'hello',text):
robot("RightHand")
print("Hello")
else:
print("Other")
except Exception as e:
print("\nCould not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
# if text:
# print('Recognized %s' % text)
pixel_ring.off()
def main():
logging.basicConfig(level=logging.DEBUG)
quit_event = Event()
thread = Thread(target=task, args=(quit_event,))
thread.start()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
print('Quit')
quit_event.set()
ser.close()
break
thread.join()
if __name__ == '__main__':
main()
| 30.925373
| 129
| 0.516651
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 550
| 0.132722
|
677586a1690b5ab7c02ad679b07e602f0cadd49c
| 1,063
|
py
|
Python
|
apis/vote_message/account_voteCredit.py
|
DerWalundDieKatze/Yumekui
|
cb3174103ced7474ce6d1abd774b399557dcaf4f
|
[
"Apache-2.0"
] | null | null | null |
apis/vote_message/account_voteCredit.py
|
DerWalundDieKatze/Yumekui
|
cb3174103ced7474ce6d1abd774b399557dcaf4f
|
[
"Apache-2.0"
] | null | null | null |
apis/vote_message/account_voteCredit.py
|
DerWalundDieKatze/Yumekui
|
cb3174103ced7474ce6d1abd774b399557dcaf4f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
'''
@author: caroline
@license: (C) Copyright 2019-2022, Node Supply Chain Manager Corporation Limited.
@contact: caroline.fang.cc@gmail.com
@software: pycharm
@file: account_voteCredit.py
@time: 2020/1/8 11:23 AM
@desc:
'''
from apis.API import request_Api
def voteCredit(api_name, params):
'''
    Cast a vote.
curl -H "Content-Type: application/json" -X post --data '{"jsonrpc":"2.0","method":"account_voteCredit","params":["0x300fc5a14e578be28c64627c0e7e321771c58cd4","0x0ad472fd967eb77fb6e36ec40901790065155d5e","0xf4240","0x110","0x30000"],"id":1}' http://127.0.0.1:15645
:param api_name:
    :param params: vote parameters: from address and to address, amount to vote, gas price, fee
    :return: transaction hash
'''
try:
result = request_Api(api_name, params)
print("投票api返回值为{}".format(result))
except Exception as e:
print("投票api返回错误:{}".format(e))
if __name__ == '__main__':
api_name = "account_voteCredit"
params = ["0xaD3dC2D8aedef155eabA42Ab72C1FE480699336c", "0xef32f718642426fba949b42e3aff6c56fe08b23c", "0xf4240", "0x110", "0x30000"]
voteCredit(api_name, params)
| 31.264706
| 265
| 0.74318
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 862
| 0.759471
|
67761a50a32aba1e5e8aa2095f886f17d951b648
| 1,582
|
py
|
Python
|
src/pla.py
|
socofels/ML_base_alg
|
2f84a2a35b0217d31cbcd39a881ab5eb2eff1772
|
[
"MIT"
] | null | null | null |
src/pla.py
|
socofels/ML_base_alg
|
2f84a2a35b0217d31cbcd39a881ab5eb2eff1772
|
[
"MIT"
] | null | null | null |
src/pla.py
|
socofels/ML_base_alg
|
2f84a2a35b0217d31cbcd39a881ab5eb2eff1772
|
[
"MIT"
] | null | null | null |
import numpy as np
from matplotlib import pyplot as plt
def sign(y_pred):
y_pred = (y_pred >= 0) * 2 - 1
return y_pred
def plot(x, w):
plt.scatter(x[:, 1][pos_index], x[:, 2][pos_index], marker="P")
plt.scatter(x[:, 1][neg_index], x[:, 2][neg_index], marker=0)
x = [-1, 100]
y = -(w[0] + w[1] * x) / w[2]
plt.plot(x, y)
plt.show()
# When no perfect separator exists, stop after at most epochs (default 100) iterations
def pla(x, y,epochs=100):
w = np.random.random((x.shape[1], 1))
plot(x, w)
best_w =w
for i in range(epochs):
if not (sign(y)==sign(np.dot(x, w))).all():
for index in np.where(sign(y)!=sign(np.dot(x, w)))[0]:
y_pred = sign(np.dot(x[index,:], w))
y_true = sign(y[index])
                # If the predicted y differs from the true y, update w by adding y*x
if not y_pred == y_true:
temp = x[index,:] * y[index]
temp = temp.reshape(-1, 1)
w = w + temp
plot(x, w)
if np.sum(sign(y) == sign(np.dot(x, w))) > np.sum(sign(y) == sign(np.dot(x, best_w))):
best_w = w
else:
break
return best_w
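# PLA update rule in symbols (illustrative): for a misclassified sample (x, y)
# with y in {-1, +1}, set w <- w + y * x, which tilts the hyperplane w^T x = 0
# toward classifying that sample correctly.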
np.random.seed(3)
shape = (100, 2)
x = (np.random.random((shape[0], shape[1])) * 100).astype(int)
x = np.c_[np.ones((shape[0], 1)), x]
w = np.array([-5, -2, 2])
w = w.reshape(-1, 1)
y = np.dot(x, w)
pos_index = np.where(y > 10)[0]
neg_index = np.where(y < 10)[0]
plt.scatter(x[:, 1][pos_index], x[:, 2][pos_index], marker="P")
plt.scatter(x[:, 1][neg_index], x[:, 2][neg_index], marker=0)
plt.show()
best_w = pla(x, y,100)
print(best_w)
| 28.763636
| 98
| 0.506953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 0.069782
|
6776496cc3fbe1aa360c8eaeeea056808934a9e1
| 5,974
|
py
|
Python
|
pupa/scrape/vote_event.py
|
azban/pupa
|
158378e19bcc322796aa4fb766784cbd4fd08413
|
[
"BSD-3-Clause"
] | 62
|
2015-01-08T05:46:46.000Z
|
2022-01-31T03:27:14.000Z
|
pupa/scrape/vote_event.py
|
azban/pupa
|
158378e19bcc322796aa4fb766784cbd4fd08413
|
[
"BSD-3-Clause"
] | 199
|
2015-01-10T03:19:37.000Z
|
2021-05-21T20:34:58.000Z
|
pupa/scrape/vote_event.py
|
azban/pupa
|
158378e19bcc322796aa4fb766784cbd4fd08413
|
[
"BSD-3-Clause"
] | 35
|
2015-03-09T19:41:42.000Z
|
2021-06-22T20:01:35.000Z
|
from ..utils import _make_pseudo_id
from .base import BaseModel, cleanup_list, SourceMixin
from .bill import Bill
from .popolo import pseudo_organization
from .schemas.vote_event import schema
from pupa.exceptions import ScrapeValueError
import re
class VoteEvent(BaseModel, SourceMixin):
_type = 'vote_event'
_schema = schema
def __init__(self, *, motion_text, start_date, classification, result,
legislative_session=None, identifier='',
bill=None, bill_chamber=None, bill_action=None,
organization=None, chamber=None
):
super(VoteEvent, self).__init__()
self.legislative_session = legislative_session
self.motion_text = motion_text
self.motion_classification = cleanup_list(classification, [])
self.start_date = start_date
self.result = result
self.identifier = identifier
self.bill_action = bill_action
self.set_bill(bill, chamber=bill_chamber)
if isinstance(bill, Bill) and not self.legislative_session:
self.legislative_session = bill.legislative_session
if not self.legislative_session:
raise ScrapeValueError('must set legislative_session or bill')
self.organization = pseudo_organization(organization, chamber, 'legislature')
self.votes = []
self.counts = []
def __str__(self):
return '{0} - {1} - {2}'.format(self.legislative_session, self.start_date,
self.motion_text)
def set_bill(self, bill_or_identifier, *, chamber=None):
if not bill_or_identifier:
self.bill = None
elif isinstance(bill_or_identifier, Bill):
if chamber:
raise ScrapeValueError("set_bill takes no arguments when using a `Bill` object")
self.bill = bill_or_identifier._id
else:
if chamber is None:
chamber = 'legislature'
kwargs = {'identifier': bill_or_identifier,
'from_organization__classification': chamber,
'legislative_session__identifier': self.legislative_session
}
self.bill = _make_pseudo_id(**kwargs)
def vote(self, option, voter, *, note=''):
self.votes.append({"option": option, "voter_name": voter,
"voter_id": _make_pseudo_id(name=voter), 'note': note})
def yes(self, name, *, id=None, note=''):
return self.vote('yes', name, note=note)
def no(self, name, *, id=None, note=''):
return self.vote('no', name, note=note)
def set_count(self, option, value):
for co in self.counts:
if co['option'] == option:
co['value'] = value
break
else:
self.counts.append({'option': option, 'value': value})
class OrderVoteEvent:
""" A functor for applying order to voteEvents.
A single OrderVoteEvent instance should be used for all bills in a scrape.
The vote events of each bill must be processed in chronological order,
but the processing of bills may be interleaved (needed in e.g. NH).
Currently, it only fudges midnight dates (start_date and end_date)
by adding the event sequence number in seconds
to the start_date and end_date (if they are well-formed string dates)
In the future, when there is an 'order' field on voteEvents,
it should fill that as well.
This fails softly and silently;
if a valid string date is not found in start_date or end_date, the date is not touched.
This assumes that times are reported as local time, not UTC.
A UTC time that is local midnight will not be touched.
Sometimes one chamber reports the time of a vote,
but the other chamber reports only the date. This is handled.
See the unit tests for examples and more behavior.
"""
_midnight = r'\d\d\d\d-\d\d-\d\dT00:00:00.*'
_timeless = r'\d\d\d\d-\d\d-\d\d'
class OrderBillVoteEvent:
""" Order VoteEvents for a single bill
"""
def __init__(self):
self.order = 0 # voteEvent sequence number. 1st voteEvent is 1.
def __call__(self, voteEvent):
self.order += 1
voteEvent.start_date = self._adjust_date(voteEvent.start_date)
if hasattr(voteEvent, 'end_date'):
voteEvent.end_date = self._adjust_date(voteEvent.end_date)
def _adjust_date(self, date):
if not isinstance(date, str):
return date
if re.fullmatch(OrderVoteEvent._timeless, date):
d2 = date + 'T00:00:00'
elif re.fullmatch(OrderVoteEvent._midnight, date):
d2 = date
else:
return date
assert self.order <= 60*60
mins = '{:02d}'.format(self.order // 60)
secs = '{:02d}'.format(self.order % 60)
# yyyy-mm-ddThh:mm:dd+05:00
# 0123456789012345678
return d2[:14] + mins + ':' + secs + d2[19:]
def __init__(self):
self.orderers = {}
def __call__(self, session_id, bill_id, voteEvent):
"""
Record order of voteEvent within bill.
The "order" field is not yet implemented; this fudges voteEvent start_date and end_date.
See OrderVoteEvent docstring for details.
:param session_id: session id
:param bill_id: an identifier for the vote's bill
that is at least unique within the session.
:param voteEvent:
:return: None
"""
bill_orderer = self.orderers.get((session_id, bill_id))
if not bill_orderer:
bill_orderer = self.OrderBillVoteEvent()
self.orderers[(session_id, bill_id)] = bill_orderer
bill_orderer(voteEvent)
| 37.810127
| 99
| 0.610311
| 5,720
| 0.957482
| 0
| 0
| 0
| 0
| 0
| 0
| 2,053
| 0.343656
|
6776771ca007095afc605ceffe189d17a91d3508
| 2,472
|
py
|
Python
|
Q/questionnaire/models/models_publications.py
|
ES-DOC/esdoc-questionnaire
|
9301eda375c4046323265b37ba96d94c94bf8b11
|
[
"MIT"
] | null | null | null |
Q/questionnaire/models/models_publications.py
|
ES-DOC/esdoc-questionnaire
|
9301eda375c4046323265b37ba96d94c94bf8b11
|
[
"MIT"
] | 477
|
2015-01-07T18:22:27.000Z
|
2017-07-17T15:05:48.000Z
|
Q/questionnaire/models/models_publications.py
|
ES-DOC/esdoc-questionnaire
|
9301eda375c4046323265b37ba96d94c94bf8b11
|
[
"MIT"
] | null | null | null |
####################
# ES-DOC CIM Questionnaire
# Copyright (c) 2017 ES-DOC. All rights reserved.
#
# University of Colorado, Boulder
# http://cires.colorado.edu/
#
# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].
####################
from django.db import models
from django.conf import settings
import os
from Q.questionnaire import APP_LABEL, q_logger
from Q.questionnaire.q_fields import QVersionField
from Q.questionnaire.q_utils import EnumeratedType, EnumeratedTypeList
from Q.questionnaire.q_constants import *
###################
# local constants #
###################
PUBLICATION_UPLOAD_DIR = "publications"
PUBLICATION_UPLOAD_PATH = os.path.join(APP_LABEL, PUBLICATION_UPLOAD_DIR)
class QPublicactionFormat(EnumeratedType):
def __str__(self):
return "{0}".format(self.get_type())
QPublicationFormats = EnumeratedTypeList([
QPublicactionFormat("CIM2_XML", "CIM2 XML"),
])
####################
# the actual class #
####################
class QPublication(models.Model):
class Meta:
app_label = APP_LABEL
abstract = False
unique_together = ("name", "version")
verbose_name = "Questionnaire Publication"
verbose_name_plural = "Questionnaire Publications"
name = models.UUIDField(blank=False)
created = models.DateTimeField(auto_now_add=True, editable=False)
modified = models.DateTimeField(auto_now=True, editable=False)
version = QVersionField(blank=False)
format = models.CharField(max_length=LIL_STRING, blank=False, choices=[(pf.get_type(), pf.get_name()) for pf in QPublicationFormats])
model = models.ForeignKey("QModelRealization", blank=False, null=False, related_name="publications")
content = models.TextField()
def __str__(self):
return "{0}_{1}".format(self.name, self.get_version_major())
def get_file_path(self):
file_name = "{0}.xml".format(str(self))
file_path = os.path.join(
settings.MEDIA_ROOT,
PUBLICATION_UPLOAD_PATH,
self.model.project.name,
file_name
)
return file_path
def write(self):
publication_path = self.get_file_path()
if not os.path.exists(os.path.dirname(publication_path)):
os.makedirs(os.path.dirname(publication_path))
with open(publication_path, "w") as f:
f.write(self.content)
| 29.783133
| 137
| 0.666667
| 1,533
| 0.620146
| 0
| 0
| 0
| 0
| 0
| 0
| 581
| 0.235032
|
67779dcfb1a4b8df315b4a6173872f0c4446530e
| 3,902
|
py
|
Python
|
tests/management/commands/test_create_command.py
|
kaozdl/django-extensions
|
bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36
|
[
"MIT"
] | null | null | null |
tests/management/commands/test_create_command.py
|
kaozdl/django-extensions
|
bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36
|
[
"MIT"
] | null | null | null |
tests/management/commands/test_create_command.py
|
kaozdl/django-extensions
|
bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import shutil
from django.conf import settings
from django.core.management import call_command
from django.test import TestCase
from six import StringIO
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class CreateCommandTests(TestCase):
"""Tests for create_command command."""
def setUp(self): # noqa
self.management_command_path = os.path.join(
settings.BASE_DIR, 'tests/testapp/management')
self.command_template_path = os.path.join(
settings.BASE_DIR, 'django_extensions/conf/command_template')
self.files = [
'__init__.py',
'commands/__init__.py',
'commands/sample.py',
]
def tearDown(self): # noqa
shutil.rmtree(self.management_command_path,
ignore_errors=True)
shutil.rmtree(os.path.join(self.command_template_path, '.hidden'),
ignore_errors=True)
test_pyc_path = os.path.join(self.command_template_path, 'test.pyc')
if os.path.isfile(test_pyc_path):
os.remove(test_pyc_path)
def _create_management_command_with_empty_files(self):
os.mkdir(self.management_command_path)
os.mkdir(os.path.join(self.management_command_path, 'commands'))
for f in self.files:
os.mknod(os.path.join(self.management_command_path, f))
def _create__pycache__in_command_template_directory(self):
os.mknod(os.path.join(self.command_template_path, 'test.pyc'))
def _create_hidden_directory_in_command_template_directory(self):
os.mkdir(os.path.join(self.command_template_path, '.hidden'))
@patch('sys.stdout', new_callable=StringIO)
def test_should_print_management_command_files_only_on_dry_run(self, m_stdout): # noqa
call_command('create_command', 'testapp', '--dry-run', verbosity=2)
for f in self.files:
filepath = os.path.join(self.management_command_path, f)
self.assertIn(filepath, m_stdout.getvalue())
self.assertFalse(os.path.isfile(filepath))
@patch('sys.stdout', new_callable=StringIO)
def test_should_create_management_command_files_and_print_filepaths(self, m_stdout): # noqa
call_command('create_command', 'testapp', verbosity=2)
for f in self.files:
filepath = os.path.join(self.management_command_path, f)
self.assertIn(filepath, m_stdout.getvalue())
self.assertTrue(os.path.isfile(filepath))
@patch('sys.stdout', new_callable=StringIO)
def test_should_print_that_filepaths_already_exists(self, m_stdout): # noqa
self._create_management_command_with_empty_files()
call_command('create_command', 'testapp', verbosity=2)
for f in self.files:
filepath = os.path.join(self.management_command_path, f)
self.assertIn(
'{} already exists'.format(filepath), m_stdout.getvalue())
self.assertTrue(os.path.isfile(filepath))
self.assertEqual(os.path.getsize(filepath), 0)
@patch('sys.stderr', new_callable=StringIO)
@patch('django_extensions.management.commands.create_command._make_writeable') # noqa
def test_should_print_error_on_OSError_exception(self, m__make_writeable, m_stderr): # noqa
m__make_writeable.side_effect = OSError
self._create__pycache__in_command_template_directory()
self._create_hidden_directory_in_command_template_directory()
call_command('create_command', 'testapp')
for f in self.files:
filepath = os.path.join(self.management_command_path, f)
self.assertIn("Notice: Couldn't set permission bits on {}. You're probably using an uncommon filesystem setup. No problem.\n".format(filepath), # noqa
m_stderr.getvalue())
| 41.073684
| 163
| 0.686315
| 3,622
| 0.928242
| 0
| 0
| 2,168
| 0.555613
| 0
| 0
| 639
| 0.163762
|
6777f51fd9e946ab36c26ec73ae09aa80a69635c
| 4,032
|
py
|
Python
|
pca.py
|
mghaffarynia/PCA
|
4f6a041b56bcba0d772c696dc83500b83fbc0215
|
[
"Apache-2.0"
] | null | null | null |
pca.py
|
mghaffarynia/PCA
|
4f6a041b56bcba0d772c696dc83500b83fbc0215
|
[
"Apache-2.0"
] | null | null | null |
pca.py
|
mghaffarynia/PCA
|
4f6a041b56bcba0d772c696dc83500b83fbc0215
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
from cvxopt import matrix
from cvxopt import solvers
import math
def read_csv_input(filename):
df = pd.read_csv(filename, header = None).to_numpy()
y = df[:, [-1]]
X = df[:, range(df.shape[1]-1)]
return X, y
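# Annotation (a reading of opt() below, not from the original author): it builds
# a linear soft-margin SVM as a quadratic program in cvxopt's standard form
#   min (1/2) z'Pz + q'z   s.t.  Gz <= h,   with z = [w; b; xi],
# i.e. minimize (1/2)||w||^2 + c*sum(xi) subject to y_i*(w.x_i + b) >= 1 - xi_i
# and xi_i >= 0.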
def opt(X, y, c):
m, n = X.shape
P_top = np.concatenate((np.identity(n), np.zeros((n, m+1))), axis=1)
P = matrix(np.concatenate((P_top, np.zeros((m+1, n+m+1))), axis=0))
np_q = np.concatenate((np.zeros((n+1, 1)), np.ones((m, 1))*c), axis=0)
q = matrix(np_q)
G_top_left = (-1)*y*np.concatenate((X, np.ones((m, 1))), axis=1)
G_top = np.concatenate((G_top_left, (-1)*np.identity(m)), axis=1)
G_down = np.concatenate((np.zeros((m, n+1)), np.identity(m)*-1), axis=1)
np_G = np.concatenate((G_top, G_down), axis = 0)
G = matrix(np_G)
np_h = np.concatenate((np.ones((m, 1))*(-1), np.zeros((m, 1))), axis=0)
h = matrix(np_h)
solvers.options['show_progress'] = False
sol = np.array(solvers.qp(P, q, G, h)['x'])
return sol[:n, :], sol[n][0]
def normalize(X):
means = np.mean(X, axis=0)
return (X-means), means
def get_eigen(W):
cov = np.dot(W, W.T)
lambdas, v = np.linalg.eig(cov)
return lambdas, v
def predict(w, b, X):
predicts = np.where(np.dot(X, w) + b < 0, -1, 1)
return predicts
def accuracy(prediction, y):
return np.sum(np.where(prediction*y <= 0, 0, 1))/y.shape[0]*100
def print_errors(accuracies, c_list):
print("\t c:", end='')
for c in c_list:
print(f"{c:6}", end=' ')
print()
for k, accs in enumerate(accuracies):
print(f"\tk:{k+1}", end='\t')
for acc in accs:
print(f"{100-acc:6.2f}", end=' ')
print()
print()
def pi_j(lambdas, v, k):
print(f"v shape is : {v.shape}")
sorted_indices = np.argsort(lambdas)
sq = np.multiply(v[:, sorted_indices[::-1][:k]], v[:, sorted_indices[::-1][:k]])
norm = (1/k)*(np.sum(sq, axis=1, keepdims=True))
print(f"The shape of pi_j matrix is : {norm.shape}")
s = int(k * math.log(k))
    samples = np.random.choice(norm.shape[0], s)  # sample s row indices (norm has shape (n, 1), so shape[0] is the population size)
return norm, samples
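# Annotation: pi_j computes approximate leverage scores over the rows of v from
# the top-k eigenvectors (the mean of squared loadings, which sums to 1) and
# draws s = k*log(k) indices; note that it is not called from main() below.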
def main():
# np.set_printoptions(linewidth=np.inf)
X, y = read_csv_input("madelon.data")
m, n = X.shape
X_train, y_train = X[:int(0.6*m), :], y[:int(0.6*m), :]
X_val, y_val = X[int(0.6*m):int(0.9*m) , :], y[int(0.6*m):int(0.9*m) , :]
X_test, y_test = X[int(0.9*m): , :], y[int(0.9*m): , :]
# X_train, y_train = read_csv_input("sonar_train.csv")
# X_val, y_val = read_csv_input("sonar_valid.csv")
# X_test, y_test = read_csv_input("sonar_test.csv")
# m, n = X_train.shape
X_train_normalized, means = normalize(X_train)
X_val_normalized = (X_val-means)
X_test_normalized = (X_test-means)
lambdas, v = get_eigen(X_train_normalized.T)
sorted_indices = np.argsort(lambdas)
print("Top six eigenvalues are:")
print("\t", lambdas[sorted_indices[::-1][:6]])
accuracies_train, accuracies_val, accuracies_test = [], [], []
K, c_list = 100, [0.001, 0.01, 0.1, 1, 1e12]
projections_train = np.dot(X_train_normalized, v[:, sorted_indices[::-1][:K]])
projections_val = np.dot(X_val_normalized, v[:, sorted_indices[::-1][:K]])
projections_test = np.dot(X_test_normalized, v[:, sorted_indices[::-1][:K]])
for k in range(K):
accs_train, accs_val, accs_test = [], [], []
for c in c_list:
w, b = opt(projections_train[:, :k+1], y_train, c)
predictions_train = predict(w, b, projections_train[:, :k+1])
accs_train.append(accuracy(predictions_train, y_train))
predictions_val = predict(w, b, projections_val[:, :k+1])
accs_val.append(accuracy(predictions_val, y_val))
predictions_test = predict(w, b, projections_test[:, :k+1])
accs_test.append(accuracy(predictions_test, y_test))
accuracies_train.append(accs_train)
accuracies_val.append(accs_val)
accuracies_test.append(accs_test)
print("Train errors:")
print_errors(accuracies_train, c_list)
print("Validation errors:")
print_errors(accuracies_val, c_list)
print("Test errors:")
print_errors(accuracies_test, c_list)
main()
| 33.6
| 82
| 0.640377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 458
| 0.113591
|
6778560530351b13b5aa71d380046a6c4d5f1c9f
| 307
|
py
|
Python
|
pyadds/__init__.py
|
wabu/pyadds
|
a09ac4ca89a809fecffe4e9f63b29b20df7c2872
|
[
"MIT"
] | null | null | null |
pyadds/__init__.py
|
wabu/pyadds
|
a09ac4ca89a809fecffe4e9f63b29b20df7c2872
|
[
"MIT"
] | null | null | null |
pyadds/__init__.py
|
wabu/pyadds
|
a09ac4ca89a809fecffe4e9f63b29b20df7c2872
|
[
"MIT"
] | null | null | null |
class AnythingType(set):
def __contains__(self, other):
return True
def intersection(self, other):
return other
def union(self, other):
return self
def __str__(self):
return '*'
def __repr__(self):
return "Anything"
Anything = AnythingType()
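# Usage (annotation): `x in Anything` is True for every x, and the set algebra
# treats Anything as the universal set, e.g. Anything.intersection({1, 2})
# returns {1, 2} while Anything.union({1, 2}) returns Anything.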
| 17.055556
| 34
| 0.602606
| 279
| 0.908795
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 0.042345
|
6778c22f5231a134154a3cc716c3a2ed3620a01a
| 626
|
py
|
Python
|
lookup.py
|
apinkney97/IP2Location-Python
|
5841dcdaf826f7f0ef3e26e91524319552f4c7f8
|
[
"MIT"
] | 90
|
2015-01-21T01:15:56.000Z
|
2022-02-25T05:12:16.000Z
|
lookup.py
|
Guantum/IP2Location-Python
|
dfa5710cd527ddbd446bbd2206242de6c62758fc
|
[
"MIT"
] | 17
|
2015-11-09T12:48:44.000Z
|
2022-03-21T00:29:00.000Z
|
lookup.py
|
Guantum/IP2Location-Python
|
dfa5710cd527ddbd446bbd2206242de6c62758fc
|
[
"MIT"
] | 36
|
2016-01-12T11:33:56.000Z
|
2021-10-02T12:34:39.000Z
|
import os, IP2Location, sys, ipaddress
# database = IP2Location.IP2Location(os.path.join("data", "IPV6-COUNTRY.BIN"), "SHARED_MEMORY")
database = IP2Location.IP2Location(os.path.join("data", "IPV6-COUNTRY.BIN"))
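# Usage sketch (assumed invocation; requires the IP2Location BIN file under ./data):
#   python lookup.py 8.8.8.8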
try:
ip = sys.argv[1]
    if ip == '':
print ('You cannot enter an empty IP address.')
sys.exit(1)
else:
try:
ipaddress.ip_address(ip)
except ValueError:
print ('Invalid IP address')
sys.exit(1)
rec = database.get_all(ip)
print (rec)
except IndexError:
print ("Please enter an IP address to continue.")
database.close()
| 25.04
| 95
| 0.618211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 221
| 0.353035
|
677a3f9b4fdf1b1623975d077e5ac1590631e821
| 1,927
|
py
|
Python
|
ADTs/ADT_of_staff.py
|
hitachinsk/DataStructure
|
91214dd56d9c0493458e8a36af27a46b0a2fdc03
|
[
"MIT"
] | null | null | null |
ADTs/ADT_of_staff.py
|
hitachinsk/DataStructure
|
91214dd56d9c0493458e8a36af27a46b0a2fdc03
|
[
"MIT"
] | null | null | null |
ADTs/ADT_of_staff.py
|
hitachinsk/DataStructure
|
91214dd56d9c0493458e8a36af27a46b0a2fdc03
|
[
"MIT"
] | null | null | null |
import ADT_of_person as AP
import datetime as dm
#ADT Staff()
# Staff(self, str name, str sex, tuple birthday, tuple entey_date, int salary, str position)
# name(self)
# sex(self)
# en_year(self)
# salary(self)
# set_salary(self, new_salary)
# position(self)
# set_position(self, new_position)
# birthday(self)
# detail(self)
class Staff(AP.Person):
_id_num = 0
@classmethod
def _id_gen(cls, birthday):
cls._id_num += 1
birth_year = dm.date(*birthday).year
return '0{:04}{:05}'.format(birth_year, cls._id_num)
def __init__(self, name, sex, birthday, entry_date, salary, position):
if not isinstance(name, str) or sex not in ('male', 'female') or\
not isinstance(salary, int):
raise AP.PersonValueError()
        try:
            birth = dm.date(*birthday)
            entry = dm.date(*entry_date)
        except (TypeError, ValueError):
            raise AP.PersonValueError()
self._name = name
self._sex = sex
self._birthday = birth
self._entry_date = entry
self._position = position
self._salary = salary
def name(self):
return self._name
def sex(self):
return self._sex
    def en_year(self):
        return self._entry_date.year
    def salary(self):
        return self._salary
def set_salary(self, new_salary):
if not isinstance(new_salary, int):
raise TypeError
        self._salary = new_salary
def position(self):
return self._position
def set_position(self, new_position):
self._position = new_position
def birthday(self):
return self._birthday
def detail(self):
        return ','.join((super().detail(),
'entry_date' + str(self._entry_date),
'position' + str(self._position),
'salary' + str(self._salary)))
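# Usage sketch (hypothetical values; assumes ADT_of_person provides Person.detail()):
#   s = Staff('Alice', 'female', (1990, 5, 1), (2015, 9, 1), 30000, 'engineer')
#   print(s.detail())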
| 28.338235
| 95
| 0.570317
| 1,562
| 0.810586
| 0
| 0
| 179
| 0.092891
| 0
| 0
| 357
| 0.185262
|
677a7514628e1106435199d272ca3cc1956ae53f
| 5,734
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/tests/completion_integration/test_handlers.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/tests/completion_integration/test_handlers.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/tests/completion_integration/test_handlers.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Test signal handlers for completion.
"""
from datetime import datetime
from unittest.mock import patch
import ddt
import pytest
from completion import handlers
from completion.models import BlockCompletion
from completion.test_utils import CompletionSetUpMixin
from django.test import TestCase
from pytz import utc
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from lms.djangoapps.grades.api import signals as grades_signals
from openedx.core.djangolib.testing.utils import skip_unless_lms
class CustomScorableBlock(XBlock):
"""
A scorable block with a custom completion strategy.
"""
has_score = True
has_custom_completion = True
completion_mode = XBlockCompletionMode.COMPLETABLE
class ExcludedScorableBlock(XBlock):
"""
A scorable block that is excluded from completion tracking.
"""
has_score = True
has_custom_completion = False
completion_mode = XBlockCompletionMode.EXCLUDED
@ddt.ddt
@skip_unless_lms
class ScorableCompletionHandlerTestCase(CompletionSetUpMixin, TestCase):
"""
Test the signal handler
"""
COMPLETION_SWITCH_ENABLED = True
def setUp(self):
super().setUp()
self.block_key = self.context_key.make_usage_key(block_type='problem', block_id='red')
def call_scorable_block_completion_handler(self, block_key, score_deleted=None):
"""
Call the scorable completion signal handler for the specified block.
Optionally takes a value to pass as score_deleted.
"""
if score_deleted is None:
params = {}
else:
params = {'score_deleted': score_deleted}
handlers.scorable_block_completion(
sender=self,
user_id=self.user.id,
course_id=str(self.context_key),
usage_id=str(block_key),
weighted_earned=0.0,
weighted_possible=3.0,
modified=datetime.utcnow().replace(tzinfo=utc),
score_db_table='submissions',
**params
)
@ddt.data(
(True, 0.0),
(False, 1.0),
(None, 1.0),
)
@ddt.unpack
def test_handler_submits_completion(self, score_deleted, expected_completion):
self.call_scorable_block_completion_handler(self.block_key, score_deleted)
completion = BlockCompletion.objects.get(
user=self.user,
context_key=self.context_key,
block_key=self.block_key,
)
assert completion.completion == expected_completion
@XBlock.register_temp_plugin(CustomScorableBlock, 'custom_scorable')
def test_handler_skips_custom_block(self):
custom_block_key = self.context_key.make_usage_key(block_type='custom_scorable', block_id='green')
self.call_scorable_block_completion_handler(custom_block_key)
completion = BlockCompletion.objects.filter(
user=self.user,
context_key=self.context_key,
block_key=custom_block_key,
)
assert not completion.exists()
@XBlock.register_temp_plugin(ExcludedScorableBlock, 'excluded_scorable')
def test_handler_skips_excluded_block(self):
excluded_block_key = self.context_key.make_usage_key(block_type='excluded_scorable', block_id='blue')
self.call_scorable_block_completion_handler(excluded_block_key)
completion = BlockCompletion.objects.filter(
user=self.user,
context_key=self.context_key,
block_key=excluded_block_key,
)
assert not completion.exists()
def test_handler_skips_discussion_block(self):
discussion_block_key = self.context_key.make_usage_key(block_type='discussion', block_id='blue')
self.call_scorable_block_completion_handler(discussion_block_key)
completion = BlockCompletion.objects.filter(
user=self.user,
context_key=self.context_key,
block_key=discussion_block_key,
)
assert not completion.exists()
def test_signal_calls_handler(self):
with patch('completion.handlers.BlockCompletion.objects.submit_completion') as mock_handler:
grades_signals.PROBLEM_WEIGHTED_SCORE_CHANGED.send_robust(
sender=self,
user_id=self.user.id,
course_id=str(self.context_key),
usage_id=str(self.block_key),
weighted_earned=0.0,
weighted_possible=3.0,
modified=datetime.utcnow().replace(tzinfo=utc),
score_db_table='submissions',
)
mock_handler.assert_called()
@skip_unless_lms
class DisabledCompletionHandlerTestCase(CompletionSetUpMixin, TestCase):
"""
Test that disabling the ENABLE_COMPLETION_TRACKING waffle switch prevents
the signal handler from submitting a completion.
"""
COMPLETION_SWITCH_ENABLED = False
def setUp(self):
super().setUp()
self.block_key = self.context_key.make_usage_key(block_type='problem', block_id='red')
def test_disabled_handler_does_not_submit_completion(self):
handlers.scorable_block_completion(
sender=self,
user_id=self.user.id,
course_id=str(self.context_key),
usage_id=str(self.block_key),
weighted_earned=0.0,
weighted_possible=3.0,
modified=datetime.utcnow().replace(tzinfo=utc),
score_db_table='submissions',
)
with pytest.raises(BlockCompletion.DoesNotExist):
BlockCompletion.objects.get(
user=self.user,
context_key=self.context_key,
block_key=self.block_key
)
| 34.751515
| 109
| 0.678758
| 5,145
| 0.897279
| 0
| 0
| 4,750
| 0.828392
| 0
| 0
| 767
| 0.133764
|
677b8b180da6f57636a31d49b5e83be1a6466cab
| 907
|
py
|
Python
|
objects/moving_wall.py
|
krzysztofarendt/ballroom
|
7e99d14278e71be873edaf415e7253e87bc81724
|
[
"MIT"
] | null | null | null |
objects/moving_wall.py
|
krzysztofarendt/ballroom
|
7e99d14278e71be873edaf415e7253e87bc81724
|
[
"MIT"
] | 1
|
2020-04-05T16:46:16.000Z
|
2020-04-05T16:46:16.000Z
|
objects/moving_wall.py
|
krzysztofarendt/ballroom
|
7e99d14278e71be873edaf415e7253e87bc81724
|
[
"MIT"
] | null | null | null |
from typing import Tuple
import pygame
import numpy as np
from .wall import Wall
class MovingWall(Wall):
def __init__(self,
top: int = 0,
left: int = 0,
bottom: int = 1,
right: int = 1):
super().__init__(top, left, bottom, right)
def update(self, left, top, right, bottom):
# Change size -> new surface
width = right - left
height = bottom - top
self.surf = pygame.Surface((width, height)).convert_alpha()
self.surf.fill((0, 255, 0, 90))
# Mask used for collision detection
self.mask = pygame.mask.from_surface(self.surf, 50)
# Rectangle and initial position
self.rect = self.surf.get_rect()
# New rectangle to fit in
new_rect = pygame.Rect(left, top, width, height)
# Fit
self.rect = self.rect.fit(new_rect)
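# Usage sketch (illustrative numbers; assumes a pygame display is initialized
# elsewhere):
#   wall = MovingWall(top=0, left=0, bottom=20, right=120)
#   wall.update(left=10, top=0, right=130, bottom=20)  # slide the wall 10 px right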
| 25.194444
| 67
| 0.566703
| 821
| 0.905182
| 0
| 0
| 0
| 0
| 0
| 0
| 125
| 0.137817
|
677b969f256bb511f2d6671783f23985dd593352
| 1,962
|
py
|
Python
|
src/example/4.Color_sensor/color_sensor.light_up.py
|
rundhall/ESP-LEGO-SPIKE-Simulator
|
dc83b895ff2aac5cf2fe576d0ba98426fea60827
|
[
"MIT"
] | null | null | null |
src/example/4.Color_sensor/color_sensor.light_up.py
|
rundhall/ESP-LEGO-SPIKE-Simulator
|
dc83b895ff2aac5cf2fe576d0ba98426fea60827
|
[
"MIT"
] | null | null | null |
src/example/4.Color_sensor/color_sensor.light_up.py
|
rundhall/ESP-LEGO-SPIKE-Simulator
|
dc83b895ff2aac5cf2fe576d0ba98426fea60827
|
[
"MIT"
] | null | null | null |
'''
light_up(light_1, light_2, light_3)

Sets the brightness of the individual lights on the Color Sensor.

This causes the Color Sensor to change modes, which can affect your program in unexpected ways. For example, the Color Sensor can't read colors when it's in light up mode.

Parameters
light_1
    The desired brightness of light 1.
    Type: integer (a positive or negative whole number, including 0)
    Values: 0 to 100% ("0" is off, and "100" is full brightness.)
    Default: 100%
light_2
    The desired brightness of light 2.
    Type: integer (a positive or negative whole number, including 0)
    Values: 0 to 100% ("0" is off, and "100" is full brightness.)
    Default: 100%
light_3
    The desired brightness of light 3.
    Type: integer (a positive or negative whole number, including 0)
    Values: 0 to 100% ("0" is off, and "100" is full brightness.)
    Default: 100%

Errors
TypeError
    light_1, light_2, or light_3 is not an integer.
RuntimeError
    The sensor has been disconnected from the Port.

Example
'''
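# A minimal usage sketch (assumes the standard LEGO SPIKE `spike` module with a
# Color Sensor on port 'E'; change the port letter to match your setup):
from spike import ColorSensor

color_sensor = ColorSensor('E')
color_sensor.light_up(100, 100, 100)  # all three lights at full brightness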
| 22.044944
| 171
| 0.761468
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 94
| 0.04791
|
677c27e42ac69be12805363f7ae3e1fa6d495b1b
| 7,711
|
py
|
Python
|
utils.py
|
gbene/pydip
|
e16647c46611f597910a10651b38cd62191a9eaf
|
[
"MIT"
] | null | null | null |
utils.py
|
gbene/pydip
|
e16647c46611f597910a10651b38cd62191a9eaf
|
[
"MIT"
] | null | null | null |
utils.py
|
gbene/pydip
|
e16647c46611f597910a10651b38cd62191a9eaf
|
[
"MIT"
] | null | null | null |
'''
Script by: Gabriele Bendetti
date: 25/06/2021
Utility functions. This file keeps the main script more organized. It contains:
+ Random plane orientation generator that can be used to practice plane attitude interpretation
+ Random fold generator
+ Plotter
+ Data converter from pandas dataframe to dict following the format used in plane_plot
'''
import numpy as np
import matplotlib.pyplot as plt
import mplstereonet
import obspy.imaging.beachball as bb
import mplstereonet as mpl
def random_plane_gen(sets=1, n_planes=1):
r_dipdir = np.random.randint(0,361,sets) #random dipdir
r_dip = np.random.randint(0,91,sets) #random dip
r_std = np.random.randint(5,20,sets) #random std
    planes_dict = {x:{'s':0,'d':0} for x in range(sets)}
for nset,dd,d,std in zip(list(range(sets)),r_dipdir,r_dip,r_std):
planes_dict[nset]['s'] = np.abs(np.round(np.random.normal(dd,std,n_planes),2))
planes_dict[nset]['d'] = np.abs(np.round(np.random.normal(d,std,n_planes),2))
#print(f'set {nset}:{(dd+90)%360}/{d}')
#print(planes_dict)
return planes_dict
def random_folds_gen(sets=1,n_planes=1):
def axial_plane_finder(profile_plane,limb_s,limb_d):
'''
        The axial plane can be defined in the pi plot by calculating the bisector of the interlimb angle. The bisector is the pole of the axial plane. To find the bisector position (as a pole) we can calculate the angle between the two poles and compare it with the interlimb angle. The angle value can then be added to pole2_index to find the index of the bisector pole.
        caveat: this method needs prior knowledge of the approximate interlimb angle.
'''
angle = int(np.degrees(mpl.angular_distance(mpl.pole(limb_s[0],limb_d[0]),mpl.pole(limb_s[1],limb_d[1]))))
if not np.isclose(angle,i,atol=5):
angle = int(180-np.degrees(mpl.angular_distance(mpl.pole(limb_s[0],limb_d[0]),mpl.pole(limb_s[1],limb_d[1]))))
bisector_s,bisector_d = s[int(pole2_index+(angle/2))],d[int(pole2_index+(angle/2))]
return bisector_s,bisector_d
'''
    The axial plane inclination and the hinge line plunge are correlated by the Fleuty diagram.
    This is a triangle with ax_angle as the base and plunge as the height. By choosing an axial plane inclination at random, the plunge of the hinge line can be calculated by
    h = b*tan(alpha)
    where alpha is the inclination angle of the hypotenuse with respect to the horizontal.
    If alpha is 0 --> the fold is horizontal
    If alpha is 45 --> the fold is reclined (plunge = ax_inclination)
'''
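    # Worked number (annotation, not from the original author): with ax_angle = 60
    # and alpha = 45 deg, plunge = 60*tan(45 deg) = 60, i.e. a reclined fold.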
r_ax_angle = np.random.uniform(0,1,sets)*90
alpha = np.deg2rad(np.random.randint(0,46,sets))
plunge = r_ax_angle*np.tan(alpha)
r_fold_profile_dd = np.random.randint(0,361,sets) #dip direction of the profile plane
r_fold_profile_s = (r_fold_profile_dd-90)%360
r_fold_profile_d = np.abs(plunge-90) #dip of the profile plane. This implicitly controls the plunge of the hinge line
'''
    To plot the position of a pole we can use the functions mpl.plane and geographic2pole. The first function gives an array of point coordinates (lat, lon) on the profile plane (the angular resolution is given by the [segments] parameter). After obtaining the array, the (lat, lon) values are converted to strike and dip with the function mpl.geographic2pole, giving all the possible poles lying on the profile plane.
    With the list of possible poles it is just a matter of obtaining the array indices.
    The first step (seg) determines the index based only on the interlimb angle. This is calculated as interlimb/2 (because the interlimb is the angle between the two poles or planes). Because of this, in the index for the second plane seg is negative (symmetrical).
    The second step (seg_rot) modifies the index based on the inclination of the axial plane. This is calculated by subtracting 90° from the ax_angle value (this is because the ax_inclination in the Fleuty diagram is measured from the vertical).
    These are the values that determine the indices (pole1_index and pole2_index) for the generator limb poles.
'''
r_i = np.random.randint(0,181,sets) # interlimb angle
r_std = np.random.randint(5,15,sets)
folds_dict = {x:{'s':0,'d':0,'axial_s':0,'axial_d':0} for x in range(sets)}
for nset,i,ax_angle,fold_profile_s,fold_profile_d,std in zip(list(range(sets)),r_i,r_ax_angle,r_fold_profile_s, r_fold_profile_d, r_std):
seg = int((i/2)) #segment that represent the fold limb given an angle from the center
seg_rot = int(90-ax_angle) # quantity to add to the segment to rotate the limbs given an axial plane inclination
pole1_index = seg+seg_rot
pole2_index = (-seg-1)+seg_rot
lon,lat = mpl.plane(fold_profile_s,fold_profile_d,segments=181)
s,d= mpl.geographic2pole(lon,lat)
fold_limb_s,fold_limb_d = [*s[pole1_index],*s[pole2_index]],[*d[pole1_index],*d[pole2_index]]
bisector_s,bisector_d = axial_plane_finder((fold_profile_s,fold_profile_d),fold_limb_s,fold_limb_d) #this is the axial plane
ndist_limb_dd = np.array([np.random.normal(fold_limb_s[0],std,n_planes),np.random.normal(fold_limb_s[1],std,n_planes)])
ndist_limb_d = np.array([np.random.normal(fold_limb_d[0],std,n_planes),np.random.normal(fold_limb_d[1],std,n_planes)])
folds_dict[nset]['s'] = np.abs(np.round(ndist_limb_dd,2))
folds_dict[nset]['d'] = np.abs(np.round(ndist_limb_d,2))
folds_dict[nset]['axial_s'] = bisector_s
folds_dict[nset]['axial_d'] = bisector_d
'''
    By defining the generator limbs we can create two sets of measurements with a random normal distribution to give a more "natural" feel to the dataset.
'''
return folds_dict
def plane_plot(self,planes_dict,show_planes=1,show_poles=0,show_axial=0,show_hinge=0):
set_color = ['r','g','b','k','m','c'] #plot with different colors depending on the set (0:red, 1:green, ...)
self.plot.fig.clf()
self.plot.ax = self.plot.fig.add_subplot(111, projection='stereonet')
for sets in planes_dict:
for i in range(len(planes_dict[sets]['s'])):
if show_planes:
self.plot.ax.plane(planes_dict[sets]['s'][i],planes_dict[sets]['d'][i],f'{set_color[sets]}-')
if show_poles:
self.plot.ax.pole(planes_dict[sets]['s'][i],planes_dict[sets]['d'][i],f'{set_color[sets]}o')
if show_axial:
self.plot.ax.plane(planes_dict[sets]['axial_s'],
planes_dict[sets]['axial_d'],f'{set_color[sets]}--')
if show_hinge:
self.plot.ax.pole(planes_dict[sets]['axial_s'],
planes_dict[sets]['axial_d'],f'{set_color[sets]}x')
self.plot.ax.grid()
self.plot.draw()
def random_focal_plot(self,color='k'):
self.plot.fig.clf()
for axis in [221,222,223,224]:
self.plot.ax = self.plot.fig.add_subplot(axis, aspect='equal')
self.plot.ax.axison = False
s = np.random.randint(360)
d = np.random.randint(90)
r = np.random.randint(-90,181)
beach = bb.beach([s,d,r],size=100,facecolor='k',linewidth=1)
self.plot.ax.add_collection(beach)
self.plot.ax.autoscale_view(tight=False, scalex=True, scaley=True)
self.plot.draw()
# Convert CSV in dictionary with valid format such as {nset:{dd:[..],d:[..]},..}
def csv_convert(imported_path):
import pandas as pd
imported_data = pd.read_csv(imported_path)
sets = imported_data['Set'].nunique() #take unique set values in dataframe
nrows = len(imported_data.index)
planes_dict = {x:{'dd':[],'d':[]} for x in range(sets)} #NOTE: not very good solution because it doesn't care about the file data. x always starts from 0.
#print(planes_dict)
for _,v in imported_data.iterrows():
dd,d,s = v.values
#print(dd,d,s)
planes_dict[s]['dd'].append(dd)
planes_dict[s]['d'].append(d)
return planes_dict, nrows
| 36.372642
| 436
| 0.727143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,615
| 0.46875
|
677ca1e5c9f7d3101dacf177a4ff6c8f860424e0
| 3,574
|
py
|
Python
|
debug/free_transition_vi_lofar_dr2_realdata.py
|
Joshuaalbert/bayes_filter
|
2997d60d8cf07f875e42c0b5f07944e9ab7e9d33
|
[
"Apache-2.0"
] | null | null | null |
debug/free_transition_vi_lofar_dr2_realdata.py
|
Joshuaalbert/bayes_filter
|
2997d60d8cf07f875e42c0b5f07944e9ab7e9d33
|
[
"Apache-2.0"
] | 3
|
2019-02-21T16:00:53.000Z
|
2020-03-31T01:33:00.000Z
|
debug/free_transition_vi_lofar_dr2_realdata.py
|
Joshuaalbert/bayes_filter
|
2997d60d8cf07f875e42c0b5f07944e9ab7e9d33
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import os
from bayes_filter import logging
from bayes_filter.filters import FreeTransitionVariationalBayes
from bayes_filter.feeds import DatapackFeed, IndexFeed
from bayes_filter.misc import make_example_datapack, maybe_create_posterior_solsets, get_screen_directions
from bayes_filter.datapack import DataPack, _load_array_file
import numpy as np
if __name__ == '__main__':
output_folder = os.path.join(os.path.abspath('test_filter_vi_P126+65'), 'run15')
os.makedirs(output_folder, exist_ok=True)
# datapack = make_example_datapack(5, 10, 2, name=os.path.join(output_folder, 'test_data.h5'), gain_noise=0.3,
# index_n=1, obs_type='DTEC', clobber=True,
# kernel_hyperparams={'variance': 3.5 ** 2, 'lengthscales': 15., 'a': 250.,
# 'b': 100., 'timescale': 50.})
datapack = DataPack('/net/lofar1/data1/albert/imaging/data/P126+65_compact_raw/P126+65_full_compact_raw.h5')
datapack.current_solset = 'sol000'
actual_antenna_labels, _ = datapack.antennas
antenna_labels, antennas = _load_array_file(DataPack.lofar_array)
antennas = np.stack([antennas[list(antenna_labels).index(a.astype(antenna_labels.dtype)),:] for a in actual_antenna_labels],axis=0)
datapack.set_antennas(antenna_labels, antennas)
patch_names, _ = datapack.directions
_, screen_directions = datapack.get_directions(patch_names)
screen_directions = get_screen_directions('/home/albert/ftp/image.pybdsm.srl.fits', max_N=None)
maybe_create_posterior_solsets(datapack, 'sol000', posterior_name='posterior', screen_directions=screen_directions)
# config = tf.ConfigProto(allow_soft_placement = True)
sess = tf.Session(graph=tf.Graph())#,config=config)
# from tensorflow.python import debug as tf_debug
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
with sess:
with tf.device('/device:CPU:0'):
logging.info("Setting up the index and datapack feeds.")
datapack_feed = DatapackFeed(datapack,
selection={'ant': list(range(1,7,2)) + list(range(45, 62, 1)),'dir':None, 'pol':slice(0,1,1), 'time':slice(0,None,1)},
solset='sol000',
postieror_name='posterior',
index_n=1)
logging.info("Setting up the filter.")
free_transition = FreeTransitionVariationalBayes(datapack_feed=datapack_feed, output_folder=output_folder)
free_transition.init_filter()
filter_op = free_transition.filter(
parallel_iterations=10,
kernel_params={'resolution': 4, 'fed_kernel': 'M52', 'obs_type': 'DTEC'},
num_parallel_filters=10,
solver_params=dict(iters=200,
learning_rate=0.1,
gamma=0.3,
stop_patience=6),
num_mcmc_param_samples_learn=50,
num_mcmc_param_samples_infer=100,
minibatch_size=None,
y_sigma=0.1)
logging.info("Initializing the filter")
sess.run(free_transition.initializer)
# print(sess.run([free_transition.full_block_size, free_transition.datapack_feed.time_feed.slice_size, free_transition.datapack_feed.index_feed.step]))
logging.info("Running the filter")
sess.run(filter_op)
| 56.730159
| 159
| 0.640179
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,105
| 0.309177
|
677d0d25d6f511de2789f723ba24d4b56d61d93f
| 13,237
|
py
|
Python
|
train.py
|
Aoi-hosizora/NER-BiLSTM-CRF-Affix-PyTorch
|
2ab7f218c11854f75b3fbb626f257672baaf7572
|
[
"MIT"
] | null | null | null |
train.py
|
Aoi-hosizora/NER-BiLSTM-CRF-Affix-PyTorch
|
2ab7f218c11854f75b3fbb626f257672baaf7572
|
[
"MIT"
] | null | null | null |
train.py
|
Aoi-hosizora/NER-BiLSTM-CRF-Affix-PyTorch
|
2ab7f218c11854f75b3fbb626f257672baaf7572
|
[
"MIT"
] | null | null | null |
import argparse
import json
import matplotlib.pyplot as plt
import numpy as np
import pickle
import time
import torch
from torch import optim
from typing import Tuple, List, Dict
import dataset
from model import BiLSTM_CRF
import utils
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_train', type=str, default='./data/eng.train')
parser.add_argument('--dataset_val', type=str, default='./data/eng.testa')
parser.add_argument('--dataset_test', type=str, default='./data/eng.testb')
parser.add_argument('--pretrained_glove', type=str, default='./data/glove.6B.100d.txt')
parser.add_argument('--output_mapping', type=str, default='./output/mapping.pkl')
parser.add_argument('--output_affix_list', type=str, default='./output/affix_list.json')
parser.add_argument('--use_crf', type=int, default=1)
parser.add_argument('--add_cap_feature', type=int, default=1)
parser.add_argument('--add_affix_feature', type=int, default=1)
parser.add_argument('--use_gpu', type=int, default=1)
parser.add_argument('--model_path', type=str, default='./model')
parser.add_argument('--graph_path', type=str, default='./output')
parser.add_argument('--eval_path', type=str, default='./evaluate/temp')
parser.add_argument('--eval_script', type=str, default='./evaluate/conlleval.pl')
args = parser.parse_args()
args.use_crf = args.use_crf != 0
args.add_cap_feature = args.add_cap_feature != 0
args.add_affix_feature = args.add_affix_feature != 0
args.use_gpu = args.use_gpu != 0
args.use_gpu = args.use_gpu and torch.cuda.is_available()
return args
def load_datasets(train_path: str, val_path: str, test_path: str, pretrained_glove: str, output_mapping: str, output_affix_list: str):
train_sentences = dataset.load_sentences(train_path)[:14000]
val_sentences = dataset.load_sentences(val_path)[:1500] # <<<
test_sentences = dataset.load_sentences(test_path)[-1500:] # <<<
dico_words, _, _ = dataset.word_mapping(train_sentences)
_, char_to_id, _ = dataset.char_mapping(train_sentences)
_, tag_to_id, id_to_tag = dataset.tag_mapping(train_sentences)
_, word_to_id, _, word_embedding = dataset.load_pretrained_embedding(dico_words.copy(), pretrained_glove, word_dim=100)
train_data = dataset.prepare_dataset(train_sentences, word_to_id, char_to_id, tag_to_id)
val_data = dataset.prepare_dataset(val_sentences, word_to_id, char_to_id, tag_to_id)
test_data = dataset.prepare_dataset(test_sentences, word_to_id, char_to_id, tag_to_id)
prefix_dicts, suffix_dicts = dataset.add_affix_to_datasets(train_data, val_data, test_data)
with open(output_mapping, 'wb') as f:
mappings = {'word_to_id': word_to_id, 'tag_to_id': tag_to_id, 'char_to_id': char_to_id, 'word_embedding': word_embedding}
pickle.dump(mappings, f)
with open(output_affix_list, 'w') as f:
json.dump([prefix_dicts, suffix_dicts], f, indent=2)
print('Datasets status:')
print('#train_data: {} / #val_data: {} / #test_data: {}'.format(len(train_data), len(val_data), len(test_data)))
print('#word_to_id: {}, #char_to_id: {}, #tag_to_id: {}, #prefix_dicts: {}, #suffix_dicts: {}, '.format(len(word_to_id), len(char_to_id), len(tag_to_id), len(prefix_dicts), len(suffix_dicts)))
print('#prefixes_2/3/4: [{}, {}, {}], #suffixes_2/3/4: [{}, {}, {}]'.format(len(prefix_dicts[1]), len(prefix_dicts[2]), len(prefix_dicts[3]), len(suffix_dicts[1]), len(suffix_dicts[2]), len(suffix_dicts[3])))
return (train_data, val_data, test_data), (word_to_id, char_to_id, tag_to_id, id_to_tag), word_embedding, (prefix_dicts, suffix_dicts)
def train(model: BiLSTM_CRF, device: str, train_data: List[dataset.Data], val_data: List[dataset.Data], test_data: List[dataset.Data], model_path: str, graph_path: str, **kwargs):
start_timestamp = time.time()
lr = 0.015
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
total_loss_log = 0
total_loss_plot = 0
losses_plot, accuracies_plots, f1scores_plots = [], [[], []], [[], []]
train_count = 0
epochs = 10
batches = len(train_data)
log_every = 100
save_every = int(batches / 2)
plot_every = 100
eval_every = 700
print('\nStart training, totally {} epochs, {} batches...'.format(epochs, batches))
for epoch in range(0, epochs):
for batch, index in enumerate(np.random.permutation(batches)):
model.train()
train_count += 1
data = train_data[index]
words_in = torch.LongTensor(data.words).to(device)
chars_mask = torch.LongTensor(data.chars_mask).to(device)
chars_length = data.chars_length
chars_d = data.chars_d
caps = torch.LongTensor(data.caps).to(device)
tags = torch.LongTensor(data.tags).to(device)
words_prefixes = torch.LongTensor(data.words_prefix_ids).to(device)
words_suffixes = torch.LongTensor(data.words_suffix_ids).to(device)
feats = model(words_in=words_in, chars_mask=chars_mask, chars_length=chars_length, chars_d=chars_d, caps=caps, words_prefixes=words_prefixes, words_suffixes=words_suffixes)
loss = model.calc_loss(feats, tags) / len(data.words)
total_loss_log += loss.item()
total_loss_plot += loss.item()
model.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
optimizer.step()
if train_count % log_every == 0:
avg_loss_log = total_loss_log / log_every
total_loss_log = 0
print('{} Epoch: {}/{}, batch: {}/{}, train loss: {:.4f}, time: {}'.format(
utils.time_string(), epoch + 1, epochs, batch + 1, batches, avg_loss_log, utils.time_since(start_timestamp, (epoch * batches + batch) / (epochs * batches))))
if train_count % plot_every == 0:
avg_loss_plot = total_loss_plot / plot_every
total_loss_plot = 0
losses_plot.append(avg_loss_plot)
if (train_count % save_every == 0) or (batch == batches - 1 and epoch == epochs - 1):
                torch.save(model, '{}/savepoint_epoch{}_batch{}.pth'.format(model_path, epoch + 1, batch + 1))
if train_count % eval_every == 0:
print('\n{} Evaluating on validating dataset (epoch {}/{}, batch {}/{})...'.format(utils.time_string(), epoch + 1, epochs, batch + 1, batches))
acc1, _, _, f1_score1 = evaluate(model=model, device=device, dataset=val_data, **kwargs)
print('\n{} Evaluating on testing dataset (epoch {}/{}, batch {}/{})...'.format(utils.time_string(), epoch + 1, epochs, batch + 1, batches))
acc2, _, _, f1_score2 = evaluate(model=model, device=device, dataset=test_data, **kwargs)
accuracies_plots[0].append(acc1)
accuracies_plots[1].append(acc2)
f1scores_plots[0].append(f1_score1)
f1scores_plots[1].append(f1_score2)
print("\nContinue training...")
# end batch
# Referred from https://github.com/ZhixiuYe/NER-pytorch.
new_lr = lr / (1 + 0.05 * train_count / len(train_data))
utils.adjust_learning_rate(optimizer, lr=new_lr)
# end epoch
end_timestamp = time.time()
start_time_str = utils.time_string(start_timestamp)
end_time_str = utils.time_string(end_timestamp)
print('Start time: {}, end time: {}, totally spent time: {:d}min'.format(start_time_str, end_time_str, int((end_timestamp - start_timestamp) / 60)))
with open("{}/plots.log".format(graph_path), 'w') as f:
f.write("time: {}\n\n".format(end_time_str))
f.write("loss:\n[{}]\n\n".format(', '.join([str(i) for i in losses_plot])))
f.write("acc1:\n[{}]\n\n".format(', '.join([str(i) for i in accuracies_plots[0]])))
f.write("acc2:\n[{}]\n\n".format(', '.join([str(i) for i in accuracies_plots[1]])))
f.write("f1:\n[{}]\n\n".format(', '.join([str(i) for i in f1scores_plots[0]])))
f.write("f2:\n[{}]\n\n".format(', '.join([str(i) for i in f1scores_plots[1]])))
epochs = list(range(1, len(losses_plot) + 1))
plt.plot(epochs, losses_plot)
plt.legend(['Training'])
plt.xlabel('Index')
plt.ylabel('Loss')
plt.savefig('{}/loss.pdf'.format(graph_path))
plt.clf()
epochs = list(range(1, len(accuracies_plots[0]) + 1))
plt.plot(epochs, accuracies_plots[0], 'b')
plt.plot(epochs, accuracies_plots[1], 'r')
plt.legend(['eng.testa', 'eng.testb'])
plt.xlabel('Index')
plt.ylabel('Accuracy')
plt.savefig('{}/acc.pdf'.format(graph_path))
plt.clf()
epochs = list(range(1, len(f1scores_plots[0]) + 1))
plt.plot(epochs, f1scores_plots[0], 'b')
plt.plot(epochs, f1scores_plots[1], 'r')
plt.legend(['eng.testa', 'eng.testb'])
plt.xlabel('Index')
plt.ylabel('F1-score')
plt.savefig('{}/f1-score.pdf'.format(graph_path))
print("graphs have been saved to {}".format(graph_path))
def evaluate(model: BiLSTM_CRF, device: str, dataset: List[dataset.Data], tag_to_id: Dict[str, int], id_to_tag: Dict[int, str], eval_path: str, eval_script: str) -> Tuple[float, float, float, float]:
prediction = []
confusion_matrix = torch.zeros((len(tag_to_id) - 2, len(tag_to_id) - 2))
model.eval()
for data in dataset:
words_in = torch.LongTensor(data.words).to(device)
chars_mask = torch.LongTensor(data.chars_mask).to(device)
chars_length = data.chars_length
chars_d = data.chars_d
caps = torch.LongTensor(data.caps).to(device)
words_prefixes = torch.LongTensor(data.words_prefix_ids).to(device)
words_suffixes = torch.LongTensor(data.words_suffix_ids).to(device)
feats = model(words_in=words_in, chars_mask=chars_mask, chars_length=chars_length, chars_d=chars_d, caps=caps, words_prefixes=words_prefixes, words_suffixes=words_suffixes)
_, predicted_ids = model.decode_targets(feats)
for (word, true_id, pred_id) in zip(data.str_words, data.tags, predicted_ids):
line = ' '.join([word, id_to_tag[true_id], id_to_tag[pred_id]])
prediction.append(line)
confusion_matrix[true_id, pred_id] += 1
prediction.append('')
eval_lines, acc, pre, rec, f1 = utils.evaluate_by_perl_script(prediction=prediction, eval_path=eval_path, eval_script=eval_script)
print('Accuracy: {:.4f}, precision: {:.4f}, recall: {:.4f}, f1-score: {:.4f}'.format(acc, pre, rec, f1))
print('Detailed result:')
for i, line in enumerate(eval_lines):
print(line)
print('Confusion matrix:')
print(("{: >2}{: >9}{: >15}%s{: >9}" % ("{: >9}" * confusion_matrix.size(0))).format(
"ID", "NE", "Total",
*([id_to_tag[i] for i in range(confusion_matrix.size(0))] + ["Percent"])
))
for i in range(confusion_matrix.size(0)):
print(("{: >2}{: >9}{: >15}%s{: >9}" % ("{: >9}" * confusion_matrix.size(0))).format(
str(i), id_to_tag[i], str(confusion_matrix[i].sum()),
*([confusion_matrix[i][j] for j in range(confusion_matrix.size(0))] +
["%.3f" % (confusion_matrix[i][i] * 100. / max(1, confusion_matrix[i].sum()))])
))
return acc, pre, rec, f1
def main():
args = parse_args()
device = 'cuda' if args.use_gpu else 'cpu'
(train_data, val_data, test_data), (word_to_id, char_to_id, tag_to_id, id_to_tag), word_embedding, (prefix_dicts, suffix_dicts) = load_datasets(
train_path=args.dataset_train,
val_path=args.dataset_val,
test_path=args.dataset_test,
pretrained_glove=args.pretrained_glove,
output_mapping=args.output_mapping,
output_affix_list=args.output_affix_list,
)
model = BiLSTM_CRF(
vocab_size=len(word_to_id),
tag_to_id=tag_to_id,
pretrained_embedding=word_embedding,
word_embedding_dim=100,
char_count=len(char_to_id),
char_embedding_dim=50,
cap_feature_count=4,
cap_embedding_dim=10,
prefix_counts=[len(prefix_dicts[1]) + 1, len(prefix_dicts[2]) + 1, len(prefix_dicts[3]) + 1],
suffix_counts=[len(suffix_dicts[1]) + 1, len(suffix_dicts[2]) + 1, len(suffix_dicts[3]) + 1],
prefix_embedding_dims=[16, 16, 16],
suffix_embedding_dims=[16, 16, 16],
char_lstm_hidden_size=25,
output_lstm_hidden_size=200,
dropout_p=0.5,
device=device,
use_crf=args.use_crf,
add_cap_feature=args.add_cap_feature,
add_affix_feature=args.add_affix_feature,
)
model.to(device)
train(
model=model,
device=device,
train_data=train_data,
val_data=val_data,
test_data=test_data,
model_path=args.model_path,
graph_path=args.graph_path,
**{
'tag_to_id': tag_to_id,
'id_to_tag': id_to_tag,
'eval_path': args.eval_path,
'eval_script': args.eval_script,
},
)
if __name__ == '__main__':
main()
| 46.939716
| 212
| 0.64637
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,746
| 0.131903
|
677d56032178efeb016755dc92a217e0030b9013
| 926
|
py
|
Python
|
utils/exceptions.py
|
acatiadroid/util-bot
|
2a91aa4335c4a844f5335d70cb7c7c32dd8010be
|
[
"MIT"
] | 1
|
2021-06-02T18:59:34.000Z
|
2021-06-02T18:59:34.000Z
|
utils/exceptions.py
|
acatiadroid/util-bot
|
2a91aa4335c4a844f5335d70cb7c7c32dd8010be
|
[
"MIT"
] | null | null | null |
utils/exceptions.py
|
acatiadroid/util-bot
|
2a91aa4335c4a844f5335d70cb7c7c32dd8010be
|
[
"MIT"
] | 1
|
2021-05-22T19:53:43.000Z
|
2021-05-22T19:53:43.000Z
|
from pymongo.errors import PyMongoError
class IdNotFound(PyMongoError):
"""Raised when _id was not found in the database collection."""
def __init__(self, *args):
if args:
self.message = args[0]
else:
self.message = self.__doc__
def __str__(self):
return self.message
class plural:
def __init__(self, value):
self.value = value
def __format__(self, format_spec):
v = self.value
singular, sep, plural = format_spec.partition('|')
plural = plural or f'{singular}s'
if abs(v) != 1:
return f'{v} {plural}'
return f'{v} {singular}'
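# Usage (annotation): f'{plural(3):box|boxes}' -> '3 boxes'; f'{plural(1):box}' -> '1 box'.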
def human_join(seq, delim=', ', final='or'):
size = len(seq)
if size == 0:
return ''
if size == 1:
return seq[0]
if size == 2:
return f'{seq[0]} {final} {seq[1]}'
return delim.join(seq[:-1]) + f' {final} {seq[-1]}'
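# Usage (annotation): human_join(['a', 'b', 'c']) -> 'a, b or c';
# human_join(['a', 'b'], final='and') -> 'a and b'.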
| 22.047619
| 67
| 0.552916
| 616
| 0.665227
| 0
| 0
| 0
| 0
| 0
| 0
| 171
| 0.184665
|
677f07bacda33862018d0c3f5ae887b33c4fb2d4
| 45,205
|
py
|
Python
|
envs/flatland/utils/gym_env_wrappers.py
|
netceteragroup/Flatland-Challenge
|
4292e8aa778d264d025ad6d32926840864b22a21
|
[
"MIT"
] | 4
|
2021-01-15T10:49:33.000Z
|
2021-12-31T08:11:35.000Z
|
envs/flatland/utils/gym_env_wrappers.py
|
netceteragroup/Flatland-Challenge
|
4292e8aa778d264d025ad6d32926840864b22a21
|
[
"MIT"
] | null | null | null |
envs/flatland/utils/gym_env_wrappers.py
|
netceteragroup/Flatland-Challenge
|
4292e8aa778d264d025ad6d32926840864b22a21
|
[
"MIT"
] | null | null | null |
from typing import Dict, Any, Optional, List
import gym
import numpy as np
from collections import defaultdict
from flatland.core.grid.grid4_utils import get_new_position
from flatland.envs.agent_utils import EnvAgent, RailAgentStatus
from flatland.envs.rail_env import RailEnv, RailEnvActions
from envs.flatland.observations.segment_graph import Graph
from envs.flatland.utils.gym_env import StepOutput
def available_actions(env: RailEnv, agent: EnvAgent, allow_noop=False) -> List[int]:
if agent.position is None:
return [0, 1, 0, 1]
else:
possible_transitions = env.rail.get_transitions(*agent.position, agent.direction)
# some actions are always available:
available_acts = [0] * len(RailEnvActions)
available_acts[RailEnvActions.MOVE_FORWARD] = 1
available_acts[RailEnvActions.STOP_MOVING] = 1
if allow_noop:
available_acts[RailEnvActions.DO_NOTHING] = 1
# check if turn left/right are available:
for movement in range(4):
if possible_transitions[movement]:
if movement == (agent.direction + 1) % 4:
available_acts[RailEnvActions.MOVE_RIGHT] = 1
elif movement == (agent.direction - 1) % 4:
available_acts[RailEnvActions.MOVE_LEFT] = 1
return available_acts[1:]
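# Annotation: the mask returned above covers RailEnvActions 1..4, i.e.
# [MOVE_LEFT, MOVE_FORWARD, MOVE_RIGHT, STOP_MOVING]; on plain straight track
# it evaluates to [0, 1, 0, 1], matching the early return for agents that have
# not been placed on the grid yet.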
def potential_deadlock_action_masking(env: RailEnv, potential_deadlock: List) -> List[int]:
    available_actions = [0, 0, 0, 1]
    available_actions[0] = 0 if potential_deadlock[0] != 1 and potential_deadlock[0] != -1 else 1
    available_actions[1] = 0 if potential_deadlock[1] != 1 and potential_deadlock[1] != -1 else 1
    available_actions[2] = 0 if potential_deadlock[2] != 1 and potential_deadlock[2] != -1 else 1
    return available_actions
def priority_dist_action_masking(dist_ind, priority) -> List[int]:
available_actions = [0, 0, 0, 0]
if priority == 0:
return [0, 0, 0, 1]
else:
available_actions[dist_ind] = 1
return available_actions
class AvailableActionsWrapper(gym.Wrapper):
def __init__(self, env, allow_noop=False, potential_deadlock_masking=False) -> None:
super().__init__(env)
self._allow_noop = allow_noop
self._potential_deadlock_masking = potential_deadlock_masking
self.observation_space = gym.spaces.Dict({
'obs': self.env.observation_space,
'available_actions': gym.spaces.Box(low=0, high=1, shape=(self.action_space.n,), dtype=np.int32)
})
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
obs, reward, done, info = self.env.step(action_dict)
return StepOutput(self._transform_obs(obs), reward, done, info)
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
return self._transform_obs(self.env.reset(random_seed))
def _transform_obs(self, obs):
rail_env = self.unwrapped.rail_env
if not self._potential_deadlock_masking:
return {
agent_id: {
'obs': agent_obs,
'available_actions': np.asarray(
available_actions(rail_env, rail_env.agents[agent_id], self._allow_noop))
} for agent_id, agent_obs in obs.items()
}
else:
return {
agent_id: {
'obs': agent_obs,
'available_actions': np.asarray(
priority_dist_action_masking(agent_obs[0], agent_obs[1]))
} for agent_id, agent_obs in obs.items()
}
def find_all_cells_where_agent_can_choose(rail_env: RailEnv):
switches = []
switches_neighbors = []
directions = list(range(4))
for h in range(rail_env.height):
for w in range(rail_env.width):
pos = (w, h)
is_switch = False
# Check for switch: if there is more than one outgoing transition
for orientation in directions:
possible_transitions = rail_env.rail.get_transitions(*pos, orientation)
num_transitions = np.count_nonzero(possible_transitions)
if num_transitions > 1:
switches.append(pos)
is_switch = True
break
if is_switch:
# Add all neighbouring rails, if pos is a switch
for orientation in directions:
possible_transitions = rail_env.rail.get_transitions(*pos, orientation)
for movement in directions:
if possible_transitions[movement]:
switches_neighbors.append(get_new_position(pos, movement))
decision_cells = switches + switches_neighbors
return tuple(map(set, (switches, switches_neighbors, decision_cells)))
class SkipNoChoiceCellsWrapper(gym.Wrapper):
def __init__(self, env, accumulate_skipped_rewards: bool, discounting: float) -> None:
super().__init__(env)
self._switches = None
self._switches_neighbors = None
self._decision_cells = None
self._accumulate_skipped_rewards = accumulate_skipped_rewards
self._discounting = discounting
self._skipped_rewards = defaultdict(list)
def _on_decision_cell(self, agent: EnvAgent):
return agent.position is None \
or agent.position == agent.initial_position \
or agent.position in self._decision_cells
def _on_switch(self, agent: EnvAgent):
return agent.position in self._switches
def _next_to_switch(self, agent: EnvAgent):
return agent.position in self._switches_neighbors
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
o, r, d, i = {}, {}, {}, {}
while len(o) == 0:
obs, reward, done, info = self.env.step(action_dict)
for agent_id, agent_obs in obs.items():
if done[agent_id] or self._on_decision_cell(self.unwrapped.rail_env.agents[agent_id]):
o[agent_id] = agent_obs
r[agent_id] = reward[agent_id]
d[agent_id] = done[agent_id]
i[agent_id] = info[agent_id]
if self._accumulate_skipped_rewards:
discounted_skipped_reward = r[agent_id]
for skipped_reward in reversed(self._skipped_rewards[agent_id]):
discounted_skipped_reward = self._discounting * discounted_skipped_reward + skipped_reward
r[agent_id] = discounted_skipped_reward
self._skipped_rewards[agent_id] = []
elif self._accumulate_skipped_rewards:
self._skipped_rewards[agent_id].append(reward[agent_id])
d['__all__'] = done['__all__']
action_dict = {}
return StepOutput(o, r, d, i)
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
obs = self.env.reset(random_seed)
self._switches, self._switches_neighbors, self._decision_cells = \
find_all_cells_where_agent_can_choose(self.unwrapped.rail_env)
return obs
class RewardWrapperShortestPathObs(gym.Wrapper):
def __init__(self, env, rewards) -> None:
super().__init__(env)
self._finished_reward = rewards['finished_reward']
self._invalid_action_reward = rewards['invalid_action_reward']
self._not_finished_reward = rewards['not_finished_reward']
self._step_reward = rewards['step_reward']
self._step_shortest_path = rewards['step_shortest_path']
self._step_second_shortest_path = rewards['step_second_shortest_path']
self._deadlock_reward = rewards['deadlock_reward']
self._dont_move_reward = rewards['dont_move_reward']
self._deadlock_avoidance_reward = rewards['deadlock_avoidance_reward']
self._stop_on_switch_reward = rewards['stop_on_switch_reward']
self._stop_potential_deadlock_reward = rewards['stop_potential_deadlock_reward']
self._deadlock_unusable_switch_avoidance_reward = rewards['deadlock_unusable_switch_avoidance']
self._priority_reward = rewards['priority_reward']
self._priority_reward_shortest_path = rewards['priority_reward_shortest_path']
self._priority_reward_alternative_path = rewards['priority_reward_alternative_path']
self._priority_penalty = rewards['priority_penalty']
self._priority_no_path_penalty = rewards['priority_no_path_penalty']
rail_env: RailEnv = self.unwrapped.rail_env
self._prev_dist = {agent.handle: [-1, -1] for agent in rail_env.agents}
self._prev_action_mask = {agent.handle: available_actions(rail_env, agent, False) for agent in rail_env.agents}
self._prev_pos = {agent.handle: Graph.get_virtual_position(agent.handle) for agent in rail_env.agents}
self._prev_potential_deadlock = {agent.handle: (0, 0, 0) for agent in rail_env.agents}
self._prev_on_switch = {agent.handle: 0 for agent in rail_env.agents}
@staticmethod
def reward_function(handle, agent_obs, agent_action, agent_done, agent_status, agent_virtual_pos,
_prev_potential_deadlock, _prev_dist, _prev_action_mask, _prev_pos, _prev_on_switch,
_finished_reward, _invalid_action_reward, _not_finished_reward, _step_reward,
_step_shortest_path, _step_second_shortest_path, _deadlock_reward, _dont_move_reward,
_deadlock_avoidance_reward, _stop_on_switch_reward, _stop_potential_deadlock_reward,
_deadlock_unusable_switch_avoidance_reward, _priority_reward, _priority_reward_shortest_path,
_priority_reward_alternative_path, _priority_penalty, _priority_no_path_penalty):
if agent_done: # done
if agent_status in [RailAgentStatus.DONE, RailAgentStatus.DONE_REMOVED]:
reward = _finished_reward
elif agent_obs[7] == 1:
reward = _deadlock_reward
else:
reward = _not_finished_reward
elif agent_obs[7] == 1: # deadlock
reward = _deadlock_reward
else:
potential_deadlock = [agent_obs[19], agent_obs[20], agent_obs[21]]
available_dirs = sum(1 for d in potential_deadlock if d != -1)
deadlock_dirs = sum(1 for d in potential_deadlock if d == 1)
if agent_action == RailEnvActions.STOP_MOVING:
if agent_obs[30] == 1:
reward = _stop_on_switch_reward
elif agent_obs[36] == 1:
#TODO think about this
reward = _deadlock_unusable_switch_avoidance_reward * 1 / agent_obs[35] if agent_obs[35] >= 1 else _stop_on_switch_reward
# elif (deadlock_dirs / available_dirs) == 1. and agent_action == RailEnvActions.STOP_MOVING:
# reward = _stop_potential_deadlock_reward * 1/agent_obs[35] if agent_obs[35] >= 1 else _stop_on_switch_reward
elif agent_obs[39] == 0:
reward = _priority_reward
else:
reward = _dont_move_reward
elif agent_action in [RailEnvActions.MOVE_LEFT, RailEnvActions.MOVE_RIGHT, RailEnvActions.MOVE_FORWARD]:
deadlock_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == 1]
                unavailable_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == -1]
                if _prev_on_switch == 1 and (agent_action not in deadlock_actions and len(deadlock_actions) > 0) and (
                        agent_action not in unavailable_actions):
reward = _deadlock_avoidance_reward
elif agent_obs[39] == 1:
if agent_obs[9] < _prev_dist[0] and agent_obs[9] < 5000:
reward = _priority_reward_shortest_path
elif agent_obs[9] < _prev_dist[1] < 5000:
reward = _priority_reward_alternative_path
else:
reward = _priority_no_path_penalty
elif agent_obs[39] == 0:
reward = _priority_penalty
else:
reward = _step_reward
else:
reward = -1
return reward
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
rail_env: RailEnv = self.unwrapped.rail_env
for handle in action_dict:
action_dict[handle] += 1
obs, reward, done, info = self.env.step(action_dict)
o, r, d, i = {}, {}, {}, {}
for agent_id, agent_obs in obs.items():
o[agent_id] = obs[agent_id]
d[agent_id] = done[agent_id]
i[agent_id] = info[agent_id]
r[agent_id] = self.reward_function(handle=agent_id,
agent_obs=agent_obs,
agent_action=action_dict[agent_id],
agent_done=done[agent_id],
agent_status=rail_env.agents[agent_id].status,
agent_virtual_pos=Graph.get_virtual_position(agent_id),
_prev_potential_deadlock=self._prev_potential_deadlock[agent_id],
_prev_dist=self._prev_dist[agent_id],
_prev_pos=self._prev_pos[agent_id],
_prev_action_mask=self._prev_action_mask[agent_id],
_prev_on_switch=self._prev_on_switch[agent_id],
_finished_reward=self._finished_reward,
_invalid_action_reward=self._invalid_action_reward,
_not_finished_reward=self._not_finished_reward,
_step_reward=self._step_reward,
_step_shortest_path=self._step_shortest_path,
_step_second_shortest_path=self._step_second_shortest_path,
_deadlock_reward=self._deadlock_reward,
_dont_move_reward=self._dont_move_reward,
_deadlock_avoidance_reward=self._deadlock_avoidance_reward,
_stop_on_switch_reward=self._stop_on_switch_reward,
_stop_potential_deadlock_reward=self._stop_potential_deadlock_reward,
_deadlock_unusable_switch_avoidance_reward=self._deadlock_unusable_switch_avoidance_reward,
_priority_penalty=self._priority_penalty,
_priority_reward=self._priority_reward,
_priority_reward_alternative_path=self._priority_reward_alternative_path,
_priority_reward_shortest_path=self._priority_reward_shortest_path,
_priority_no_path_penalty=self._priority_no_path_penalty
)
# set prev_states to the length of shortest path if you go L, then F, then R (L,F,R). That corresponds to
# features 9, 10, 11 in the feature vector
# print(f"obs: {o}, reward: {r}, prev_dist: {self._prev_dist}")
self._prev_dist[agent_id] = (agent_obs[9], agent_obs[15])
self._prev_action_mask[agent_id] = available_actions(rail_env, rail_env.agents[agent_id], False)
# update potential_deadlock attribute
self._prev_potential_deadlock[agent_id] = (agent_obs[19], agent_obs[20], agent_obs[21])
# update prev_pos
self._prev_pos[agent_id] = Graph.get_virtual_position(agent_id)
self._prev_on_switch[agent_id] = agent_obs[30]
d['__all__'] = done['__all__'] or all(d.values())
return StepOutput(o, r, d, info={agent: {
'max_episode_steps': int(4 * 2 * (
self.rail_env.width + self.rail_env.height + self.rail_env.get_num_agents() / self.num_cities)),
'num_agents': self.rail_env.get_num_agents(),
'agent_done': d[agent] and agent not in self.rail_env.active_agents,
} for agent in o.keys()})
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
obs = self.env.reset(random_seed=random_seed)
self._prev_dist = {k: (o[9], o[15]) for k, o in obs.items()}
return obs
class RewardWrapper(gym.Wrapper):
def __init__(self, env, rewards) -> None:
super().__init__(env)
# self._finished_reward = rewards['finished_reward']
# self._invalid_action_reward = rewards['invalid_action_reward']
# self._not_finished_reward = rewards['not_finished_reward']
# self._step_reward = rewards['step_reward']
# self._step_shortest_path = rewards['step_shortest_path']
# self._step_second_shortest_path = rewards['step_second_shortest_path']
# self._deadlock_reward = rewards['deadlock_reward']
# self._dont_move_reward = rewards['dont_move_reward']
# self._deadlock_avoidance_reward = rewards['deadlock_avoidance_reward']
# self._stop_on_switch_reward = rewards['stop_on_switch_reward']
# self._stop_potential_deadlock_reward = rewards['stop_potential_deadlock_reward']
# self._deadlock_unusable_switch_avoidance_reward = rewards['deadlock_unusable_switch_avoidance']
# self._priority_reward = rewards['priority_reward']
# self._priority_reward_shortest_path = rewards['priority_reward_shortest_path']
# self._priority_reward_alternative_path = rewards['priority_reward_alternative_path']
# self._priority_penalty = rewards['priority_penalty']
# self._priority_no_path_penalty = rewards['priority_no_path_penalty']
self._finished_reward = rewards['finished_reward']
self._deadlock_reward = rewards['deadlock_reward']
self._step_reward = rewards['step_reward']
self._deadlock_unusable_switch_avoidance_reward = rewards['deadlock_unusable_switch_avoidance']
self._stop_priority_depart = rewards['stop_priority_depart']
self._stop_no_deadlocks_reward = rewards['stop_no_deadlocks_reward']
rail_env: RailEnv = self.unwrapped.rail_env
# self._prev_dist = {}
# self._prev_action_mask = {agent.handle: available_actions(rail_env, agent, False) for agent in rail_env.agents}
# self._prev_pos = {agent.handle: Graph.get_virtual_position(agent.handle) for agent in rail_env.agents}
#
# self._prev_potential_deadlock = {}
# self._prev_on_switch = {}
# self._prev_deadlock_unusable = {}
self._prev_shortest_action = {}
self._prev_priority = {}
def reward_function(self, handle, agent_obs, agent_action, agent_done, agent_status):
if agent_done:
if agent_status in [RailAgentStatus.DONE, RailAgentStatus.DONE_REMOVED]:
reward = self._finished_reward
else:
reward = self._step_reward
elif agent_obs[5] == 1 and agent_action == RailEnvActions.STOP_MOVING:
reward = 0
elif agent_obs[5] == 1 and agent_action != RailEnvActions.STOP_MOVING:
reward = -10
else:
if self._prev_priority[handle] == 0:
if agent_action == RailEnvActions.STOP_MOVING:
reward = 0
else:
reward = -10
else:
#if (agent_action - 1) == np.argmax(self._prev_shortest_action[handle]):
if agent_action != RailEnvActions.STOP_MOVING:
reward = 0
else:
reward = -10
# reward = (1 / agent_obs[4] + self._step_reward) * 0.70
# if agent_action == RailEnvActions.STOP_MOVING:
# if self._prev_priority[handle] == 0 and agent_status == RailAgentStatus.READY_TO_DEPART:
# reward = self._stop_priority_depart
# elif self._prev_deadlock_unusable[handle] == 1:
# reward = self._deadlock_unusable_switch_avoidance_reward
#
# elif 1 not in self._prev_potential_deadlock[handle] and self._prev_deadlock_unusable[handle] == 0 and self._prev_priority[handle] == 1:
# reward = self._stop_no_deadlocks_reward
# if agent_done: # done
# if agent_status in [RailAgentStatus.DONE, RailAgentStatus.DONE_REMOVED]:
# reward = _finished_reward
# elif agent_obs[7] == 1:
# reward = _deadlock_reward
# else:
# reward = _not_finished_reward
#
# elif agent_obs[7] == 1: # deadlock
# reward = _deadlock_reward
#
# else:
# potential_deadlock = [agent_obs[19], agent_obs[20], agent_obs[21]]
# available_dirs = sum(1 for d in potential_deadlock if d != -1)
# deadlock_dirs = sum(1 for d in potential_deadlock if d == 1)
#
# if agent_action == RailEnvActions.STOP_MOVING:
# if agent_obs[30] == 1:
# reward = _stop_on_switch_reward
# elif agent_obs[36] == 1:
# # TODO think about this
# if agent_obs[35] == 1:
# reward = _deadlock_unusable_switch_avoidance_reward
# else:
# r = -_deadlock_unusable_switch_avoidance_reward
# reward = -(r**(1/agent_obs[35])*0.5)
# # elif (deadlock_dirs / available_dirs) == 1. and agent_action == RailEnvActions.STOP_MOVING:
# # reward = _stop_potential_deadlock_reward * 1/agent_obs[35] if agent_obs[35] >= 1 else _stop_on_switch_reward
# elif agent_obs[39] == 0:
# reward = _priority_reward
# else:
# reward = _dont_move_reward
#
# elif agent_action in [RailEnvActions.MOVE_LEFT, RailEnvActions.MOVE_RIGHT, RailEnvActions.MOVE_FORWARD]:
# deadlock_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == 1]
# unavaliable_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == -1]
# if _prev_on_switch == 1 and (agent_action not in deadlock_actions and len(deadlock_actions) > 0) and (
# agent_action not in unavaliable_actions):
# reward = _deadlock_avoidance_reward
#
# elif agent_obs[39] == 1:
# if agent_obs[9] < _prev_dist[0] and agent_obs[9] < 5000:
# reward = _priority_reward_shortest_path
# elif agent_obs[9] < _prev_dist[1] < 5000:
# reward = _priority_reward_alternative_path
# else:
# reward = _priority_no_path_penalty
# elif agent_obs[39] == 0:
# reward = _priority_penalty
#
# else:
# reward = _step_reward
#
# else:
# reward = -1
#
# return reward
#
# if agent_done:
# if agent_status in [RailAgentStatus.DONE, RailAgentStatus.DONE_REMOVED]:
# # agent is done and really done -> give finished reward
# reward = _finished_reward
# else:
# # agent is done but not really done -> give not_finished reward
# if agent_obs[7] == 1:
# reward = _deadlock_reward
# else:
# reward = _not_finished_reward
#
# elif agent_obs[7] == 1:
# reward = _deadlock_reward
#
# else:
# if agent_obs[9] < _prev_dist[0] and agent_obs[9] != -1:
# reward = _step_shortest_path
#
# elif agent_obs[15] < _prev_dist[1] and agent_obs[15] != -1:
# reward = _step_second_shortest_path
#
# else:
# reward = _step_reward
#
#
#
# # invalid action reward
# if _prev_action_mask[agent_action-1] == 0:
# reward += _invalid_action_reward
#
# # if agent not moving
# if tuple(_prev_pos) == tuple(agent_virtual_pos):
# reward += _dont_move_reward
#
# # stop on switch
# if agent_obs[30] == 1 and agent_action == RailEnvActions.STOP_MOVING:
# reward += _stop_on_switch_reward
#
# potential_deadlock = [agent_obs[19], agent_obs[20], agent_obs[21]]
# available_dirs = sum(1 for d in potential_deadlock if d != -1)
# deadlock_dirs = sum(1 for d in potential_deadlock if d == 1)
# if (deadlock_dirs / available_dirs) == 1. and agent_action == RailEnvActions.STOP_MOVING:
# reward += _stop_potential_deadlock_reward * 1/agent_obs[35] if agent_obs[35] >= 1 else 0
#
#
# if agent_obs[36] == 1 and agent_action == RailEnvActions.STOP_MOVING:
# reward += _deadlock_unusable_switch_avoidance_reward * 1 / agent_obs[35] if agent_obs[35] >= 1 else 0
#
# # reward if agent avoided deadlock
# if _prev_on_switch == 1:
# deadlock_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == 1]
# unavaliable_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == -1]
# if (agent_action not in deadlock_actions and len(deadlock_actions) > 0) and (
# agent_action not in unavaliable_actions) and (agent_action != RailEnvActions.DO_NOTHING) \
# and (agent_action != RailEnvActions.STOP_MOVING):
# reward = _deadlock_avoidance_reward
return reward
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
rail_env: RailEnv = self.unwrapped.rail_env
for handle in action_dict:
action_dict[handle] += 1
if action_dict[handle] < 4:
action_dict[handle] = possible_actions_sorted_by_distance(rail_env, handle)[0][0]
obs, reward, done, info = self.env.step(action_dict)
o, r, d, i = {}, {}, {}, {}
for agent_id, agent_obs in obs.items():
o[agent_id] = obs[agent_id]
d[agent_id] = done[agent_id]
i[agent_id] = info[agent_id]
r[agent_id] = self.reward_function(handle=agent_id,
agent_obs=agent_obs,
agent_action=action_dict[agent_id],
agent_done=done[agent_id],
agent_status=rail_env.agents[agent_id].status,
)
# print(f"Agent {agent_id}, obs: {agent_obs}, prev_priority: {self._prev_priority[agent_id]}, prev_dist_action: {self._prev_shortest_action[agent_id]}, reward: {r[agent_id]}, action: {action_dict[agent_id] - 1}")
# set prev_states to the length of shortest path if you go L, then F, then R (L,F,R). That corresponds to
# features 9, 10, 11 in the feature vector
# print(f"obs: {o}, reward: {r}, prev_dist: {self._prev_dist}")
# self._prev_dist[agent_id] = (agent_obs[9], agent_obs[15])
# self._prev_action_mask[agent_id] = available_actions(rail_env, rail_env.agents[agent_id], False)
# update potential_deadlock attribute
# self._prev_potential_deadlock[agent_id] = (agent_obs[10], agent_obs[11], agent_obs[12])
# update prev_pos
# self._prev_pos[agent_id] = Graph.get_virtual_position(agent_id)
# self._prev_on_switch[agent_id] = agent_obs[13]
self._prev_shortest_action[agent_id] = [agent_obs[0], agent_obs[1], agent_obs[2]]
self._prev_priority[agent_id] = agent_obs[3]
# self._prev_deadlock_unusable[agent_id] = agent_obs[19]
d['__all__'] = done['__all__'] or all(d.values())
return StepOutput(o, r, d, info={agent: {
'max_episode_steps': int(4 * 2 * (
self.rail_env.width + self.rail_env.height + self.rail_env.get_num_agents() / self.num_cities)),
'num_agents': self.rail_env.get_num_agents(),
'agent_done': d[agent] and agent not in self.rail_env.active_agents,
} for agent in o.keys()})
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
obs = self.env.reset(random_seed=random_seed)
# self._prev_dist = {k: (o[9], o[15]) for k, o in obs.items()}
# self._prev_potential_deadlock = {k: (o[10], o[11], o[12]) for k, o in obs.items()}
# self._prev_on_switch = {k: o[13] for k, o in obs.items()}
self._prev_shortest_action = {k: [o[0], o[1], o[2]] for k, o in obs.items()}
self._prev_priority = {k: o[3] for k, o in obs.items()}
# self._prev_deadlock_unusable = {k: o[19] for k, o in obs.items()}
return obs
class SparseRewardWrapper(gym.Wrapper):
def __init__(self, env, finished_reward=1, not_finished_reward=-1) -> None:
super().__init__(env)
self._finished_reward = finished_reward
self._not_finished_reward = not_finished_reward
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
rail_env: RailEnv = self.unwrapped.rail_env
for handle in action_dict:
action_dict[handle] += 1
obs, reward, done, info = self.env.step(action_dict)
o, r, d, i = {}, {}, {}, {}
for agent_id, agent_obs in obs.items():
o[agent_id] = obs[agent_id]
d[agent_id] = done[agent_id]
i[agent_id] = info[agent_id]
if done[agent_id]:
if rail_env.agents[agent_id].status in [RailAgentStatus.DONE, RailAgentStatus.DONE_REMOVED]:
# agent is done and really done -> give finished reward
r[agent_id] = self._finished_reward
else:
# agent is done but not really done -> give not_finished reward
r[agent_id] = self._not_finished_reward
else:
r[agent_id] = 0
d['__all__'] = done['__all__'] or all(d.values())
return StepOutput(o, r, d, i)
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
return self.env.reset(random_seed)
class DeadlockWrapper(gym.Wrapper):
def __init__(self, env, deadlock_reward=-1) -> None:
super().__init__(env)
self._deadlock_reward = deadlock_reward
self._deadlocked_agents = []
    def check_deadlock(self):  # -> List[int]: handles of newly deadlocked agents
rail_env: RailEnv = self.unwrapped.rail_env
new_deadlocked_agents = []
for agent in rail_env.agents:
if agent.status == RailAgentStatus.ACTIVE and agent.handle not in self._deadlocked_agents:
position = agent.position
direction = agent.direction
while position is not None:
possible_transitions = rail_env.rail.get_transitions(*position, direction)
num_transitions = np.count_nonzero(possible_transitions)
if num_transitions == 1:
new_direction_me = np.argmax(possible_transitions)
new_cell_me = get_new_position(position, new_direction_me)
opp_agent = rail_env.agent_positions[new_cell_me]
if opp_agent != -1:
opp_position = rail_env.agents[opp_agent].position
opp_direction = rail_env.agents[opp_agent].direction
opp_possible_transitions = rail_env.rail.get_transitions(*opp_position, opp_direction)
opp_num_transitions = np.count_nonzero(opp_possible_transitions)
if opp_num_transitions == 1:
if opp_direction != direction:
self._deadlocked_agents.append(agent.handle)
new_deadlocked_agents.append(agent.handle)
position = None
else:
position = new_cell_me
direction = new_direction_me
else:
position = new_cell_me
direction = new_direction_me
else:
position = None
else:
position = None
return new_deadlocked_agents
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
obs, reward, done, info = self.env.step(action_dict)
if self._deadlock_reward != 0:
new_deadlocked_agents = self.check_deadlock()
else:
new_deadlocked_agents = []
o, r, d, i = {}, {}, {}, {}
for agent_id, agent_obs in obs.items():
if agent_id not in self._deadlocked_agents or agent_id in new_deadlocked_agents:
o[agent_id] = obs[agent_id]
d[agent_id] = done[agent_id]
i[agent_id] = info[agent_id]
r[agent_id] = reward[agent_id]
if agent_id in new_deadlocked_agents:
# agent is in deadlocked (and was not before) -> give deadlock reward and set to done
r[agent_id] += self._deadlock_reward
d[agent_id] = True
d['__all__'] = done['__all__'] or all(d.values())
return StepOutput(o, r, d, i)
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
self._deadlocked_agents = []
return self.env.reset(random_seed)
def possible_actions_sorted_by_distance(env: RailEnv, handle: int):
agent = env.agents[handle]
if agent.status == RailAgentStatus.READY_TO_DEPART:
agent_virtual_position = agent.initial_position
elif agent.status == RailAgentStatus.ACTIVE:
agent_virtual_position = agent.position
elif agent.status == RailAgentStatus.DONE:
agent_virtual_position = agent.target
else:
return None
possible_transitions = env.rail.get_transitions(*agent_virtual_position, agent.direction)
distance_map = env.distance_map.get()[handle]
possible_steps = []
for movement in list(range(4)):
if possible_transitions[movement]:
if movement == agent.direction:
action = RailEnvActions.MOVE_FORWARD
elif movement == (agent.direction + 1) % 4:
action = RailEnvActions.MOVE_RIGHT
elif movement == (agent.direction - 1) % 4:
action = RailEnvActions.MOVE_LEFT
            else:
                raise ValueError("Unexpected movement direction relative to agent orientation")
distance = distance_map[get_new_position(agent_virtual_position, movement) + (movement,)]
possible_steps.append((action, distance))
    possible_steps = sorted(possible_steps, key=lambda step: step[1])
    if len(possible_steps) == 1:
        # duplicate the single step so callers selecting the "other direction"
        # (index 1) still receive a valid action
        return possible_steps * 2
else:
return possible_steps
class ShortestPathActionWrapper(gym.Wrapper):
def __init__(self, env) -> None:
super().__init__(env)
print("Apply ShortestPathActionWrapper")
self.action_space = gym.spaces.Discrete(n=3) # stop, shortest path, other direction
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
rail_env: RailEnv = self.env.unwrapped.rail_env
transformed_action_dict = {}
for agent_id, action in action_dict.items():
if action == 0:
transformed_action_dict[agent_id] = action
else:
assert action in [1, 2]
transformed_action_dict[agent_id] = possible_actions_sorted_by_distance(rail_env, agent_id)[action - 1][
0]
step_output = self.env.step(transformed_action_dict)
return step_output
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
return self.env.reset(random_seed)
class DeadlockResolutionWrapper(gym.Wrapper):
def __init__(self, env, deadlock_reward=0) -> None:
super().__init__(env)
self._deadlock_reward = deadlock_reward
self._num_swaps = defaultdict(int)
    def get_deadlocks(self, agent: EnvAgent, seen: List[int]) -> List[Optional[EnvAgent]]:
# abort if agent already checked
if agent.handle in seen:
# handle circular deadlock
seen.append(agent.handle)
# return
return []
# add agent to seen agents
seen.append(agent.handle)
# get rail environment
rail_env: RailEnv = self.unwrapped.rail_env
# get transitions for agent's position and direction
transitions = rail_env.rail.get_transitions(*agent.position, agent.direction)
num_possible_transitions = np.count_nonzero(transitions)
# initialize list to assign deadlocked agents to directions
deadlocked_agents = [None] * len(transitions)
# check if all possible transitions are blocked
for direction, transition in enumerate(transitions):
# only check transitions > 0 but iterate through all to get direction
if transition > 0:
                # get opposite agent in direction of travel if cell is occupied
new_position = get_new_position(agent.position, direction)
i_opp_agent = rail_env.agent_positions[new_position]
if i_opp_agent != -1:
opp_agent = rail_env.agents[i_opp_agent]
# get blocking agents of opposite agent
blocking_agents = self.get_deadlocks(opp_agent, seen)
# add opposite agent to deadlocked agents if blocked by
# checking agent. also add opposite agent if it is part
# of a circular blocking structure.
if agent in blocking_agents or seen[0] == seen[-1]:
deadlocked_agents[direction] = opp_agent
# return deadlocked agents if applicable
num_deadlocked_agents = np.count_nonzero(deadlocked_agents)
if num_deadlocked_agents > 0:
# deadlock has to be resolved only if no transition is possible
if num_deadlocked_agents == num_possible_transitions:
return deadlocked_agents
        # workaround for already committed agent inside cell that is blocked by at least one agent
if agent.speed_data['position_fraction'] > 1:
return deadlocked_agents
return []
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
obs, reward, done, info = self.env.step(action_dict)
# get rail environment
rail_env: RailEnv = self.unwrapped.rail_env
# check agents that have status ACTIVE for deadlocks, env.active_agents contains also other agents
active_agents = [agent for agent in rail_env.agents if agent.status == RailAgentStatus.ACTIVE]
for agent in active_agents:
deadlocked_agents = self.get_deadlocks(agent, [])
if len(deadlocked_agents) > 0:
# favor transition in front as most natural
d_agent = deadlocked_agents[agent.direction]
# get most likely transition if straight forward is no valid transition
if d_agent is None:
transitions = rail_env.rail.get_transitions(*agent.position, agent.direction)
agent.direction = np.argmax(transitions)
d_agent = deadlocked_agents[agent.direction]
                # already committed agent can have only one transition blocked
if d_agent is None:
d_agent = [a for a in deadlocked_agents if a is not None][0]
# swap the deadlocked pair
agent.position, d_agent.position = d_agent.position, agent.position
rail_env.agent_positions[agent.position] = agent.handle
rail_env.agent_positions[d_agent.position] = d_agent.handle
# set direction of blocking agent because of corners
d_agent.direction = (agent.direction + 2) % 4
# position is exact after swap
agent.speed_data['position_fraction'] = 0.0
d_agent.speed_data['position_fraction'] = 0.0
# punish agents for deadlock
reward[agent.handle] += self._deadlock_reward
reward[d_agent.handle] += self._deadlock_reward
# increase swap counter in info dict
self._num_swaps[agent.handle] += 1
self._num_swaps[d_agent.handle] += 1
for i_agent in info:
info[i_agent]['num_swaps'] = self._num_swaps[i_agent]
return obs, reward, done, info
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
self._num_swaps = defaultdict(int)
return self.env.reset(random_seed)
class FlatlandRenderWrapper(RailEnv, gym.Env):
# reward_range = (-float('inf'), float('inf'))
# spec = None
# # Set these in ALL subclasses
# observation_space = None
def __init__(self, use_renderer=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.use_renderer = use_renderer
self.renderer = None
self.metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 10,
'semantics.autoreset': True
}
if self.use_renderer:
self.initialize_renderer()
def reset(self, *args, **kwargs):
if self.use_renderer:
if self.renderer: # TODO: Errors with RLLib with renderer as None.
self.renderer.reset()
return super().reset(*args, **kwargs)
def render(self, mode='human'):
"""
        This method provides the option to render the
environment's behavior to a window which should be
readable to the human eye if mode is set to 'human'.
"""
if not self.use_renderer:
return
if not self.renderer:
self.initialize_renderer(mode=mode)
return self.update_renderer(mode=mode)
def initialize_renderer(self, mode="human"):
# Initiate the renderer
from flatland.utils.rendertools import RenderTool, AgentRenderVariant
self.renderer = RenderTool(self, gl="PGL", # gl="TKPILSVG",
agent_render_variant=AgentRenderVariant.ONE_STEP_BEHIND,
show_debug=False,
screen_height=600, # Adjust these parameters to fit your resolution
screen_width=800) # Adjust these parameters to fit your resolution
def update_renderer(self, mode='human'):
image = self.renderer.render_env(show=True, show_observations=False, show_predictions=False,
return_image=True)
return image[:, :, :3]
def set_renderer(self, renderer):
self.use_renderer = renderer
if self.use_renderer:
self.initialize_renderer(mode=self.use_renderer)
def close(self):
super().close()
if self.renderer:
try:
self.renderer.close_window()
self.renderer = None
            except Exception as e:
                # The last step (triggered by a stopping criterion) is skipped by RLlib,
                # so done never becomes True and the env is not closed during the run.
                # RLlib only closes the env on exit, by which time there is no window
                # left, hence the error is reported here instead of raised.
                print("Could not close window due to:", e)
| 48.659849
| 224
| 0.596284
| 40,530
| 0.896582
| 0
| 0
| 3,379
| 0.074748
| 0
| 0
| 11,916
| 0.263599
|
677f502efc17cc81872e696789bcab5852c8b1a5
| 1,226
|
py
|
Python
|
acceptability/models/cbow_classifier.py
|
nyu-mll/CoLA-baselines
|
dd095d3646ed05a315280aaa8ed4ec84ba435b3e
|
[
"MIT"
] | 54
|
2018-05-31T22:57:28.000Z
|
2022-03-17T13:25:49.000Z
|
acceptability/models/cbow_classifier.py
|
nyu-mll/CoLA-baselines
|
dd095d3646ed05a315280aaa8ed4ec84ba435b3e
|
[
"MIT"
] | 4
|
2018-06-06T14:15:10.000Z
|
2020-08-07T16:35:50.000Z
|
acceptability/models/cbow_classifier.py
|
nyu-mll/CoLA-baselines
|
dd095d3646ed05a315280aaa8ed4ec84ba435b3e
|
[
"MIT"
] | 18
|
2018-07-10T12:18:17.000Z
|
2022-03-02T22:19:22.000Z
|
import torch
from torch import nn
class CBOWClassifier(nn.Module):
"""
Continuous bag of words classifier.
"""
def __init__(self, hidden_size, input_size, max_pool, dropout=0.5):
"""
:param hidden_size:
:param input_size:
:param max_pool: if true then max pool over word embeddings,
else sum word embeddings
"""
super(CBOWClassifier, self).__init__()
self.hidden_size = hidden_size
self.input_size = input_size
self.max_pool = max_pool
self.dropout = nn.Dropout(p=dropout)
self.i2h = nn.Linear(self.input_size, self.hidden_size)
self.h2o = nn.Linear(self.hidden_size, 1)
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
def forward(self, x):
        if self.max_pool:
            # max pool over the token dimension: (batch, tokens, emb) -> (batch, emb)
            encoding = nn.functional.max_pool1d(x.transpose(1, 2),
                                                x.shape[1])
            # squeeze(1) instead of squeeze() so a batch of size 1 keeps its batch dim
            encoding = encoding.transpose(1, 2).squeeze(1)
else:
encoding = x.sum(1)
encoding = self.dropout(encoding)
hidden = self.tanh(self.dropout(self.i2h(encoding)))
out = self.sigmoid(self.h2o(hidden))
return out
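if __name__ == "__main__":
    # Smoke-test sketch (illustrative; the shapes and sizes here are assumptions):
    # a batch of 4 sentences, 7 tokens each, with 32-dimensional embeddings.
    model = CBOWClassifier(hidden_size=16, input_size=32, max_pool=True)
    x = torch.randn(4, 7, 32)
    out = model(x)
    print(out.shape)  # expected: torch.Size([4, 1])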
| 34.055556
| 71
| 0.577488
| 1,190
| 0.970636
| 0
| 0
| 0
| 0
| 0
| 0
| 240
| 0.195759
|
677f53508c3acb6aa3c5210a9a7139a828c94921
| 14,637
|
py
|
Python
|
tests/test_validators.py
|
yaaminu/yaval
|
32f04ecfa092c978fc026f6b7f58d6cf2defd8c9
|
[
"MIT"
] | 14
|
2021-02-12T19:04:21.000Z
|
2021-03-12T18:18:09.000Z
|
tests/test_validators.py
|
yaaminu/yaval
|
32f04ecfa092c978fc026f6b7f58d6cf2defd8c9
|
[
"MIT"
] | 5
|
2021-02-12T16:04:37.000Z
|
2021-04-14T12:05:02.000Z
|
tests/test_validators.py
|
yaaminu/yaval
|
32f04ecfa092c978fc026f6b7f58d6cf2defd8c9
|
[
"MIT"
] | null | null | null |
import datetime
from mock import Mock, call
import pytest
from finicky import ValidationException, is_int, is_float, is_str, is_date, is_dict, is_list
# noinspection PyShadowingBuiltins
class TestIntValidator:
def test_must_raise_validation_exception_when_input_is_none_and_required_is_true(self):
with pytest.raises(ValidationException) as exc_info:
is_int(required=True)(None)
assert exc_info.value.args[0] == "required but was missing"
@pytest.mark.parametrize("input", ["3a", "", "3.5", 3.5, "20/12/2020"])
def test_must_raise_validation_exception_when_input_is_not_a_valid_int(self, input):
with pytest.raises(ValidationException) as exc_info:
is_int()(input)
assert exc_info.value.args[0] == "'{}' is not a valid integer".format(input)
@pytest.mark.parametrize("input,min", [(-1, 0), (0, 1), (8, 9), (11, 120)])
def test_must_raise_validation_exception_when_input_is_less_than_minimum_allowed(self, input, min):
with pytest.raises(ValidationException) as exc_info:
is_int(min=min)(input)
assert exc_info.value.args[0] == "'{}' is less than minimum allowed ({})".format(input, min)
@pytest.mark.parametrize("input,max", [(1, 0), (0, -1), (10, 9), (100, 99)])
def test_must_raise_validation_exception_when_input_is_greater_than_maximum_allowed(self, input, max):
with pytest.raises(ValidationException) as exc_info:
is_int(max=max)(input)
assert exc_info.value.args[0] == "'{}' is greater than maximum allowed ({})".format(input, max)
@pytest.mark.parametrize("input, min, max", [(8, 2, 10), (0, -1, 1), ("8", 1, 12)])
def test_must_return_input_upon_validation(self, input, min, max):
assert is_int(min=min, max=max)(input) == int(input)
def test_must_return_default_provided_when_input_is_missing(self):
assert is_int(default=8)(None) == 8
def test_must_return_none_when_input_is_none_and_required_is_false(self):
assert is_int(required=False)(None) is None
# noinspection PyShadowingBuiltins
class TestFloatValidator:
def test_must_raise_validation_exception_when_input_is_none_and_required_is_true(self):
with pytest.raises(ValidationException) as exc_info:
is_float(required=True)(None)
assert exc_info.value.args[0] == "required but was missing"
@pytest.mark.parametrize("input", ["3a", "", "20/12/2020"])
    def test_must_raise_validation_exception_when_input_is_not_a_valid_float(self, input):
with pytest.raises(ValidationException) as exc_info:
is_float()(input)
assert exc_info.value.args[0] == "'{}' is not a valid floating number".format(input)
@pytest.mark.parametrize("input,min", [(-0.99, 0), (0.1, 0.12), (8.9, 9), (13, 120)])
def test_must_raise_validation_exception_when_input_is_less_than_minimum_allowed(self, input, min):
with pytest.raises(ValidationException) as exc_info:
is_float(min=min)(input)
assert exc_info.value.args[0] == "'{}' is less than minimum allowed ({})".format(float(input), min)
@pytest.mark.parametrize("input,max", [(0.2, 0), (-0.1, -0.2), (9.9, 9), (99.1, 99)])
def test_must_raise_validation_exception_when_input_is_greater_than_maximum_allowed(self, input, max):
print(input, max)
with pytest.raises(ValidationException) as exc_info:
is_float(max=max)(input)
assert exc_info.value.args[0] == "'{}' is greater than maximum allowed ({})".format(float(input), max)
@pytest.mark.parametrize("input, min, max", [(8.2, 0.1, 8.3), (0.1, -0.1, 0.2), ("0.2", 0.1, 12)])
def test_must_return_input_upon_validation(self, input, min, max):
assert is_float(min=min, max=max)(input) == float(input)
def test_must_return_default_provided_when_input_is_missing(self):
assert is_float(default=0.5)(None) == 0.5
@pytest.mark.parametrize("input, expected", [(8.589, 8.59), (0.182, 0.18), ("-0.799", -0.80)])
def test_must_round_returned_value_to_2_decimal_places_by_default(self, input, expected):
assert is_float()(input) == expected
@pytest.mark.parametrize("input, expected, round_to",
[(8.589, 9, 0), ("-0.799", -0.8, 1), (0.3333, 0.33, 2), (0.182, 0.182, 3), ])
def test_must_round_returned_value_to_provided_decimal_places(self, input, expected, round_to):
assert is_float(round_to=round_to)(input) == expected
def test_must_return_none_when_input_is_none_and_required_is_false(self):
assert is_float(required=False)(None) is None
# noinspection PyShadowingBuiltins
class TestStrValidator:
def test_must_raise_exception_when_input_is_none_and_required_is_true(self):
with pytest.raises(ValidationException) as exc_info:
is_str(required=True)(None)
assert exc_info.value.args[0] == 'required but was missing'
@pytest.mark.parametrize("input, expected",
[(" GH-A323 ", "GH-A323"), ("GH-A3 ", "GH-A3"), (33, "33"), ("GH-A3", "GH-A3")])
def test_must_automatically_strip_trailing_or_leading_whitespaces_on_inputs(self, input, expected):
assert is_str()(input) == expected
@pytest.mark.parametrize("input,min_len", [("GH ", 3), (" G ", 2), ("Python", 7), (" ", 1)])
def test_must_raise_validation_exception_when_input_is_shorter_than_minimum_required_length(self, input, min_len):
with pytest.raises(ValidationException) as exc_info:
is_str(min_len=min_len)(input)
assert exc_info.value.args[0] == "'{}' is shorter than minimum required length({})".format(input.strip(), min_len)
@pytest.mark.parametrize("input,max_len", [("GHAN ", 3), (" GH ", 1), ("Python GH", 7)])
    def test_must_raise_validation_exception_when_input_is_longer_than_maximum_required_length(self, input, max_len):
with pytest.raises(ValidationException) as exc_info:
is_str(max_len=max_len)(input)
assert exc_info.value.args[0] == "'{}' is longer than maximum required length({})".format(input.strip(), max_len)
@pytest.mark.parametrize("input, pattern", [("GH", r"\bGHA$"), ("GH-1A", r"\bGH-\d?$")])
def test_must_raise_validation_error_when_input_does_not_match_expected_pattern(self, input, pattern):
with pytest.raises(ValidationException) as exc_info:
is_str(pattern=pattern)(input)
assert exc_info.value.args[0] == "'{}' does not match expected pattern({})".format(input, pattern)
def test_must_return_default_when_input_is_none(self):
assert is_str(default="Text")(None) == "Text"
def test_must_return_none_when_input_is_none_and_required_is_false_and_default(self):
assert is_str(required=False)(None) is None
# noinspection PyShadowingBuiltins
class TestIsDateValidator:
def test_must_raise_validation_exception_when_input_is_missing_and_required_is_true(self):
with pytest.raises(ValidationException) as exc_info:
is_date(required=True)(None)
assert exc_info.value.args[0] == "required but was missing"
@pytest.mark.parametrize("format,input",
[("%d-%m-%Y", "20/12/2020"), ("%d-%m-%Y", "38-01-2020"), ("%d/%m/%Y", "31/06/2020")])
def test_must_raise_validation_exception_when_input_str_does_not_match_format(self, format, input):
with pytest.raises(ValidationException) as exc_info:
is_date(format=format)(input)
assert exc_info.value.args[0] == "'{}' does not match expected format({})".format(input, format)
@pytest.mark.parametrize("input", ["2020-12-20", "2021-01-31 ", " 1999-08-12 "])
def test_must_use_iso_8601_format_when_format_is_not_supplied(self, input):
date = is_date()(input)
assert date == datetime.datetime.strptime(input.strip(), "%Y-%m-%d")
@pytest.mark.parametrize("input,min", [("2020-12-19", "2020-12-20"), ("2020-12-31", "2021-01-31")])
    def test_must_raise_validation_exception_when_date_is_before_minimum_date_if_defined(self, input, min):
with pytest.raises(ValidationException) as exc_info:
is_date(min=datetime.datetime.strptime(min, "%Y-%m-%d"))(input)
assert exc_info.value.args[0] == "'{}' occurs before minimum date({})".format(input, min)
@pytest.mark.parametrize("max,input", [("2020-12-19", "2020-12-20"), ("2020-12-31", "2021-01-31",)])
    def test_must_raise_validation_exception_when_date_is_after_maximum_date_if_defined(self, max, input):
with pytest.raises(ValidationException) as exc_info:
is_date(max=datetime.datetime.strptime(max, "%Y-%m-%d"))(input)
assert exc_info.value.args[0] == "'{}' occurs after maximum date({})".format(input, max)
def test_must_support_datetime_objects_as_input_dates(self):
today = datetime.datetime.today()
assert today == is_date()(today)
def test_when_input_date_is_none_must_return_default_date_if_available(self):
today = datetime.datetime.today()
assert today == is_date(default=today)(None)
def test_must_return_none_when_input_is_none_and_required_is_false_and_default_is_not_provided(self):
assert is_date(required=False)(None) is None
@pytest.mark.parametrize("input", ["2020-12-20", "2021-01-31", "1999-08-12"])
def test_must_return_newly_validated_date_as_datetime_object(self, input):
assert is_date()(input) == datetime.datetime.strptime(input, "%Y-%m-%d")
class TestDictValidator:
def test_must_raise_validation_exception_when_input_is_none_but_was_required(self):
with pytest.raises(ValidationException) as exc:
is_dict(required=True, schema={})(None)
assert exc.value.args[0] == "required but was missing"
def test_must_return_default_value_when_input_is_none(self):
address = {"phone": "+233-282123233"}
assert is_dict(required=False, default=address, schema={})(None) == address
@pytest.mark.parametrize("input", ["input", ["entry1", "entry2"], 2, 2.3, object()])
def test_must_raise_validation_error_when_input_is_not_dict(self, input):
with pytest.raises(ValidationException) as exc_info:
is_dict(schema={"phone": is_str(required=True)})(input)
assert exc_info.value.errors == "expected a dictionary but got {}".format(type(input))
@pytest.mark.parametrize(
("schema", "input_dict", "expected_errors"),
[({"phone": is_str(required=True)}, {"phone": None}, {"phone": "required but was missing"}),
({"id": is_int(required=True, min=1)}, {"id": -2}, {"id": "'-2' is less than minimum allowed (1)"}),
({"user_name": is_str(required=True, max_len=5)}, {"user_name": "yaaminu"},
{"user_name": "'yaaminu' is longer than maximum required length(5)"})
])
def test_must_validate_input_against_schema(self, schema, input_dict, expected_errors):
with pytest.raises(ValidationException) as exc:
is_dict(schema=schema)(input_dict)
assert expected_errors == exc.value.errors
def test_must_return_newly_validated_input(self):
validated_input = is_dict(schema={"phone": is_str(required=True)})({"phone": "+233-23-23283234"})
assert validated_input == {"phone": "+233-23-23283234"}
def test_must_clean_validated_input_before_returning(self):
validated_input = is_dict(schema={"phone": is_str(required=True)})({"phone": " +233-23-23283234"})
assert validated_input == {"phone": "+233-23-23283234"}
class TestListValidator:
"""
1. must reject none input whend field is required
2. must return default value when field isnot required and default is provided
4. must validate all entries against the validator.
5. must require all entries to pass validation by default
6. when all is set to false, must require that at least one entry pass valdiation
7. must return only validated entries
6. on error, must return all errors encountered
"""
def test_must_raise_validation_error_when_input_is_none_but_required_is_true(self):
with pytest.raises(ValidationException) as exc_info:
is_list(required=True, validator=is_int())(None)
assert exc_info.value.errors == "required but was missing"
def test_must_return_default_value_when_input_is_none(self):
default = [1, 2]
assert default == is_list(required=False, default=[1, 2], validator=is_int())(None)
@pytest.mark.parametrize("input", ["value", {"id": 23}, object, 2.8])
def test_must_raise_validation_exception_for_non_list_input(self, input):
with pytest.raises(ValidationException) as exc:
is_list(validator=Mock())(input)
assert exc.value.errors == "expected a list but got {}".format(type(input))
def test_must_validate_all_input_against_validator(self):
validator = Mock()
is_list(validator=validator)([-1, 8])
validator.assert_has_calls([call(-1), call(8)])
@pytest.mark.parametrize(
("validator", "input", "errors"),
[(is_int(min=1), [-1, 2, 8], ["'-1' is less than minimum allowed (1)"]),
(is_int(max=5), [8, 10],
["'8' is greater than maximum allowed (5)", "'10' is greater than maximum allowed (5)"]),
(is_str(pattern=r"\A\d{3}\Z"), ["2323", "128"], ["'2323' does not match expected pattern(\\A\\d{3}\\Z)"])]
)
def test_must_raise_validation_when_at_least_one_entry_is_invalid_by_default(self, validator, input, errors):
with pytest.raises(ValidationException) as exc:
is_list(validator=validator)(input)
assert exc.value.errors == errors
def test_must_raise_validation_exception_only_when_all_entries_are_invalid_when_all_is_false(self):
input = [-1, 2, 8]
try:
is_list(validator=is_int(min=1), all=False)(input)
except ValidationException:
raise AssertionError("should not throw")
@pytest.mark.parametrize(
("validator", "input", "return_val"),
[(is_int(required=True), [-3, 8, 112], [-3, 8, 112]),
(is_str(required=True), ["one", "three ", " four "], ["one", "three", "four"]),
(is_date(format="%Y-%m-%d"), ["2021-02-07 "], [datetime.datetime(year=2021, month=2, day=7)])])
def test_must_return_newly_validated_input(self, validator, input, return_val):
assert is_list(validator=validator)(input) == return_val
def test_must_return_only_valid_inputs_when_all_is_false(self):
input = [1, -8, 3]
assert is_list(validator=is_int(min=1), all=False)(input) == [1, 3]
| 52.841155
| 122
| 0.683678
| 14,327
| 0.978821
| 0
| 0
| 9,259
| 0.632575
| 0
| 0
| 2,780
| 0.18993
|
677f77f661f042444b5b6e3515ca7ba65cf1bbd5
| 583
|
py
|
Python
|
polygon.py
|
SYED-RAFI-NAQVI/10hourcodingchallenge
|
20c7c3aee52a2eb281381a9db4d57075cbf38446
|
[
"MIT"
] | null | null | null |
polygon.py
|
SYED-RAFI-NAQVI/10hourcodingchallenge
|
20c7c3aee52a2eb281381a9db4d57075cbf38446
|
[
"MIT"
] | null | null | null |
polygon.py
|
SYED-RAFI-NAQVI/10hourcodingchallenge
|
20c7c3aee52a2eb281381a9db4d57075cbf38446
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2 as cv
img = cv.imread('1.jpeg',cv.IMREAD_COLOR)
#for a polygon we need a set of points, so we create a numpy array of vertex coordinates; pts holds them.
pts = np.array([[20,33],[300,120], [67,79], [123,111], [144,134]], np.int32)
#the polylines method actually draws the polygon, taking several parameters: 1. where to draw (img),
#2. which set of points, 3. whether the first and last points should be connected (bool), 4. color, 5. width of the line.
cv.polylines(img, [pts], True,(0,231,123), 1)
cv.imshow('image',img)
cv.waitKey(0)
cv.destroyAllWindows()
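#a filled variant (sketch under the same assumptions, reusing the same points and BGR color):
#cv.fillPoly(img, [pts], (0,231,123)) would paint the polygon's interior instead of only its outline.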
| 32.388889
| 117
| 0.711835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 328
| 0.562607
|
67805442e518a6adbf84390b3eb7ec7d3ff5cd9c
| 3,871
|
py
|
Python
|
lib/fathead/firefox_about_config/parse.py
|
aeisenberg/zeroclickinfo-fathead
|
9be00a038d812ca9ccd0d601220afde777ab2f8e
|
[
"Apache-2.0"
] | 1
|
2021-01-05T16:48:23.000Z
|
2021-01-05T16:48:23.000Z
|
lib/fathead/firefox_about_config/parse.py
|
aeisenberg/zeroclickinfo-fathead
|
9be00a038d812ca9ccd0d601220afde777ab2f8e
|
[
"Apache-2.0"
] | null | null | null |
lib/fathead/firefox_about_config/parse.py
|
aeisenberg/zeroclickinfo-fathead
|
9be00a038d812ca9ccd0d601220afde777ab2f8e
|
[
"Apache-2.0"
] | 1
|
2016-06-12T06:12:02.000Z
|
2016-06-12T06:12:02.000Z
|
#!/usr/bin/env python2
from BeautifulSoup import BeautifulSoup, NavigableString
import urllib
import string
import re
class Entry(object):
def __init__(self, name, value, description, url):
self.name = name
self.value = value
self.description = description
self.url = url
def __str__(self):
fields = [
self.name, # title
'A', # type
'', # redirect
'', # otheruses
'', # categories
'', # references
'', # see_also
'', # further_reading
'', # external_links
'', # disambiguation
'', # images
self.description, # abstract
self.url # source_url
]
return '%s' % ('\t'.join(fields))
class Parser(object):
def __init__(self, input='download/About:config_entries'):
self.soup = BeautifulSoup(open(input))
# Requires trailing / for relative link replacement
self.baseURL = "http://kb.mozillazine.org/"
def findEntries(self):
self.entries = []
headers = map(lambda x: x.string, self.soup.findAll('h1')[2:])
table = self.soup.findAll('div', id="bodyContent")[0]
for table in table.findAll('table'):
header = True
for tr in table.findAll('tr'):
if header:
header = False
continue
i = 0
for th in tr.findAll('td'):
description = ''
if i == 0:
name = ''.join(th.b.findAll(text=True)).replace(' ','')
anchor = string.capitalize(urllib.quote(name.split('.')[0])) + "."
if anchor in headers:
url = self.baseURL + 'About:config_entries#' + anchor
else:
url = self.baseURL + 'About:config_entries'
elif i == 1:
value = th.text
elif i == 2:
if value:
article = 'a'
if value[0] == 'I': article += 'n'
optionType = "it accepts " + article + " " + value.lower() + "."
synopsis = '"' + name + '"' + ' is a configuration option ' \
'for the Firefox web browser; ' + optionType + "<br>"
for tag in th.findAll('br'):
tag.insert(0, NavigableString("\n"))
description = ''.join(th.findAll(text=True))
description = description.rstrip().replace('\n', '<br>').strip()
expandedURL = 'href="' + self.baseURL
description = description.replace('href="/', expandedURL)
description = re.sub('<\s*b\s*>', '<i>', description)
description = re.sub('<\s*/\s*b\s*>', '</i>', description)
description = '<blockquote>' + description + '</blockquote>'
description = synopsis + description
i = -1
self.entries.append(Entry(name, value, description.strip(), url))
i += 1
if __name__ == "__main__":
parser = Parser()
parser.findEntries()
with open('output.txt', 'w') as file:
for entry in parser.entries:
file.write(entry.__str__().encode('UTF-8') + '\n')
| 42.538462
| 92
| 0.422113
| 3,528
| 0.911392
| 0
| 0
| 0
| 0
| 0
| 0
| 644
| 0.166365
|
6781793ae8fc13e5299017f4d13600e84c029c5a
| 547
|
py
|
Python
|
sources/simulators/multiprocessing_simulator/start_client.py
|
M4rukku/impact_of_non_iid_data_in_federated_learning
|
c818db03699c82e42217d56f8ddd4cc2081c8bb1
|
[
"MIT"
] | null | null | null |
sources/simulators/multiprocessing_simulator/start_client.py
|
M4rukku/impact_of_non_iid_data_in_federated_learning
|
c818db03699c82e42217d56f8ddd4cc2081c8bb1
|
[
"MIT"
] | null | null | null |
sources/simulators/multiprocessing_simulator/start_client.py
|
M4rukku/impact_of_non_iid_data_in_federated_learning
|
c818db03699c82e42217d56f8ddd4cc2081c8bb1
|
[
"MIT"
] | null | null | null |
import flwr as fl
import flwr.client
from sources.utils.simulation_parameters import DEFAULT_SERVER_ADDRESS
from sources.simulators.base_client_provider import BaseClientProvider
def start_client(client_provider: BaseClientProvider, client_identifier):
client = client_provider(str(client_identifier))
if isinstance(client, flwr.client.NumPyClient):
fl.client.start_numpy_client(server_address=DEFAULT_SERVER_ADDRESS, client=client)
else:
fl.client.start_client(server_address=DEFAULT_SERVER_ADDRESS, client=client)
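# Illustrative usage (SomeClientProvider is an assumption, standing in for any
# concrete BaseClientProvider implementation):
#   start_client(SomeClientProvider(...), client_identifier=0)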
| 39.071429
| 90
| 0.824497
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6785745e950d85dea8868d37187f8f6ecdfbf12a
| 23,056
|
py
|
Python
|
aea/helpers/pipe.py
|
bryanchriswhite/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 126
|
2019-09-07T09:32:44.000Z
|
2022-03-29T14:28:41.000Z
|
aea/helpers/pipe.py
|
salman6049/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 1,814
|
2019-08-24T10:08:07.000Z
|
2022-03-31T14:28:36.000Z
|
aea/helpers/pipe.py
|
salman6049/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 46
|
2019-09-03T22:13:58.000Z
|
2022-03-22T01:25:16.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Portable pipe implementation for Linux, MacOS, and Windows."""
import asyncio
import errno
import logging
import os
import socket
import struct
import tempfile
from abc import ABC, abstractmethod
from asyncio import AbstractEventLoop
from asyncio.streams import StreamWriter
from shutil import rmtree
from typing import IO, Optional
from aea.exceptions import enforce
_default_logger = logging.getLogger(__name__)
PIPE_CONN_TIMEOUT = 10.0
PIPE_CONN_ATTEMPTS = 10
TCP_SOCKET_PIPE_CLIENT_CONN_ATTEMPTS = 5
class IPCChannelClient(ABC):
"""Multi-platform interprocess communication channel for the client side."""
@abstractmethod
async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
"""
Connect to communication channel
:param timeout: timeout for other end to connect
:return: connection status
"""
@abstractmethod
async def write(self, data: bytes) -> None:
"""
Write `data` bytes to the other end of the channel
        Will first write the size, then the actual data
:param data: bytes to write
"""
@abstractmethod
async def read(self) -> Optional[bytes]:
"""
Read bytes from the other end of the channel
        Will first read the size, then the actual data
:return: read bytes
"""
@abstractmethod
async def close(self) -> None:
"""Close the communication channel."""
class IPCChannel(IPCChannelClient):
"""Multi-platform interprocess communication channel."""
@property
@abstractmethod
def in_path(self) -> str:
"""
Rendezvous point for incoming communication.
:return: path
"""
@property
@abstractmethod
def out_path(self) -> str:
"""
Rendezvous point for outgoing communication.
:return: path
"""
class PosixNamedPipeProtocol:
"""Posix named pipes async wrapper communication protocol."""
def __init__(
self,
in_path: str,
out_path: str,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> None:
"""
Initialize a new posix named pipe.
:param in_path: rendezvous point for incoming data
:param out_path: rendezvous point for outgoing data
:param logger: the logger
:param loop: the event loop
"""
self.logger = logger
self._loop = loop
self._in_path = in_path
self._out_path = out_path
self._in = -1
self._out = -1
self._stream_reader = None # type: Optional[asyncio.StreamReader]
self._reader_protocol = None # type: Optional[asyncio.StreamReaderProtocol]
self._fileobj = None # type: Optional[IO[str]]
self._connection_attempts = PIPE_CONN_ATTEMPTS
self._connection_timeout = PIPE_CONN_TIMEOUT
async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
"""
Connect to the other end of the pipe
:param timeout: timeout before failing
:return: connection success
"""
if self._loop is None:
self._loop = asyncio.get_event_loop()
self._connection_timeout = timeout / PIPE_CONN_ATTEMPTS if timeout > 0 else 0
if self._connection_attempts <= 1: # pragma: no cover
return False
self._connection_attempts -= 1
self.logger.debug(
"Attempt opening pipes {}, {}...".format(self._in_path, self._out_path)
)
self._in = os.open(self._in_path, os.O_RDONLY | os.O_NONBLOCK | os.O_SYNC)
try:
self._out = os.open(self._out_path, os.O_WRONLY | os.O_NONBLOCK)
except OSError as e: # pragma: no cover
if e.errno == errno.ENXIO:
self.logger.debug("Sleeping for {}...".format(self._connection_timeout))
await asyncio.sleep(self._connection_timeout)
return await self.connect(timeout)
raise e
# setup reader
enforce(
self._in != -1 and self._out != -1 and self._loop is not None,
"Incomplete initialization.",
)
self._stream_reader = asyncio.StreamReader(loop=self._loop)
self._reader_protocol = asyncio.StreamReaderProtocol(
self._stream_reader, loop=self._loop
)
self._fileobj = os.fdopen(self._in, "r")
await self._loop.connect_read_pipe(
lambda: self.__reader_protocol, self._fileobj
)
return True
@property
def __reader_protocol(self) -> asyncio.StreamReaderProtocol:
"""Get reader protocol."""
if self._reader_protocol is None:
raise ValueError("reader protocol not set!") # pragma: nocover
return self._reader_protocol
async def write(self, data: bytes) -> None:
"""
Write to pipe.
:param data: bytes to write to pipe
"""
self.logger.debug("writing {}...".format(len(data)))
size = struct.pack("!I", len(data))
os.write(self._out, size + data)
await asyncio.sleep(0.0)
async def read(self) -> Optional[bytes]:
"""
Read from pipe.
:return: read bytes
"""
if self._stream_reader is None: # pragma: nocover
raise ValueError("StreamReader not set, call connect first!")
try:
self.logger.debug("waiting for messages (in={})...".format(self._in_path))
buf = await self._stream_reader.readexactly(4)
if not buf: # pragma: no cover
return None
size = struct.unpack("!I", buf)[0]
if size <= 0: # pragma: no cover
return None
data = await self._stream_reader.readexactly(size)
if not data: # pragma: no cover
return None
return data
except asyncio.IncompleteReadError as e: # pragma: no cover
self.logger.info(
"Connection disconnected while reading from pipe ({}/{})".format(
len(e.partial), e.expected
)
)
return None
except asyncio.CancelledError: # pragma: no cover
return None
async def close(self) -> None:
"""Disconnect pipe."""
self.logger.debug("closing pipe (in={})...".format(self._in_path))
if self._fileobj is None:
raise ValueError("Pipe not connected") # pragma: nocover
try:
# hack for MacOSX
size = struct.pack("!I", 0)
os.write(self._out, size)
os.close(self._out)
self._fileobj.close()
except OSError: # pragma: no cover
pass
await asyncio.sleep(0)
class TCPSocketProtocol:
"""TCP socket communication protocol."""
def __init__(
self,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> None:
"""
Initialize the tcp socket protocol.
:param reader: established asyncio reader
:param writer: established asyncio writer
:param logger: the logger
:param loop: the event loop
"""
self.logger = logger
self.loop = loop if loop is not None else asyncio.get_event_loop()
self._reader = reader
self._writer = writer
@property
def writer(self) -> StreamWriter:
"""Get a writer associated with protocol."""
return self._writer
async def write(self, data: bytes) -> None:
"""
Write to socket.
:param data: bytes to write
"""
if self._writer is None:
raise ValueError("writer not set!") # pragma: nocover
self.logger.debug("writing {}...".format(len(data)))
size = struct.pack("!I", len(data))
self._writer.write(size + data)
await self._writer.drain()
async def read(self) -> Optional[bytes]:
"""
Read from socket.
:return: read bytes
"""
try:
self.logger.debug("waiting for messages...")
buf = await self._reader.readexactly(4)
if not buf: # pragma: no cover
return None
size = struct.unpack("!I", buf)[0]
data = await self._reader.readexactly(size)
if not data: # pragma: no cover
return None
if len(data) != size: # pragma: no cover
raise ValueError(
f"Incomplete Read Error! Expected size={size}, got: {len(data)}"
)
return data
except asyncio.IncompleteReadError as e: # pragma: no cover
self.logger.info(
"Connection disconnected while reading from pipe ({}/{})".format(
len(e.partial), e.expected
)
)
return None
except asyncio.CancelledError: # pragma: no cover
return None
async def close(self) -> None:
"""Disconnect socket."""
if self._writer.can_write_eof():
self._writer.write_eof()
await self._writer.drain()
self._writer.close()
wait_closed = getattr(self._writer, "wait_closed", None)
if wait_closed:
# in py3.6 writer does not have the coroutine
await wait_closed() # pragma: nocover
class TCPSocketChannel(IPCChannel):
"""Interprocess communication channel implementation using tcp sockets."""
def __init__(
self,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> None:
"""Initialize tcp socket interprocess communication channel."""
self.logger = logger
self._loop = loop
self._server = None # type: Optional[asyncio.AbstractServer]
self._connected = None # type: Optional[asyncio.Event]
self._sock = None # type: Optional[TCPSocketProtocol]
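        # Grab a free ephemeral port from the OS and release it immediately;
        # connect() re-binds the same port for the real server. Note this
        # leaves a small race window in which another process could take it.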
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("127.0.0.1", 0))
s.listen(1)
self._port = s.getsockname()[1]
s.close()
async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
"""
Setup communication channel and wait for other end to connect.
:param timeout: timeout for the connection to be established
:return: connection status
"""
if self._loop is None:
self._loop = asyncio.get_event_loop()
self._connected = asyncio.Event()
self._server = await asyncio.start_server(
self._handle_connection, host="127.0.0.1", port=self._port
)
if self._server.sockets is None:
raise ValueError("Server sockets is None!") # pragma: nocover
self._port = self._server.sockets[0].getsockname()[1]
self.logger.debug("socket pipe rdv point: {}".format(self._port))
try:
await asyncio.wait_for(self._connected.wait(), timeout)
except asyncio.TimeoutError: # pragma: no cover
return False
self._server.close()
await self._server.wait_closed()
return True
async def _handle_connection(
self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
) -> None:
"""Handle connection."""
if self._connected is None:
raise ValueError("Connected is None!") # pragma: nocover
self._connected.set()
self._sock = TCPSocketProtocol(
reader, writer, logger=self.logger, loop=self._loop
)
async def write(self, data: bytes) -> None:
"""
Write to channel.
:param data: bytes to write
"""
if self._sock is None:
raise ValueError("Socket pipe not connected.") # pragma: nocover
await self._sock.write(data)
async def read(self) -> Optional[bytes]:
"""
Read from channel.
:return: read bytes
"""
if self._sock is None:
raise ValueError("Socket pipe not connected.") # pragma: nocover
return await self._sock.read()
async def close(self) -> None:
"""Disconnect from channel and clean it up."""
if self._sock is None:
raise ValueError("Socket pipe not connected.") # pragma: nocover
await self._sock.close()
@property
def in_path(self) -> str:
"""Rendezvous point for incoming communication."""
return str(self._port)
@property
def out_path(self) -> str:
"""Rendezvous point for outgoing communication."""
return str(self._port)
class PosixNamedPipeChannel(IPCChannel):
"""Interprocess communication channel implementation using Posix named pipes."""
def __init__(
self,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> None:
"""Initialize posix named pipe interprocess communication channel."""
self.logger = logger
self._loop = loop
self._pipe_dir = tempfile.mkdtemp()
self._in_path = "{}/process_to_aea".format(self._pipe_dir)
self._out_path = "{}/aea_to_process".format(self._pipe_dir)
# setup fifos
self.logger.debug(
"Creating pipes ({}, {})...".format(self._in_path, self._out_path)
)
if os.path.exists(self._in_path):
os.remove(self._in_path) # pragma: no cover
if os.path.exists(self._out_path):
os.remove(self._out_path) # pragma: no cover
os.mkfifo(self._in_path)
os.mkfifo(self._out_path)
self._pipe = PosixNamedPipeProtocol(
self._in_path, self._out_path, logger=logger, loop=loop
)
async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
"""
Setup communication channel and wait for other end to connect.
:param timeout: timeout for connection to be established
:return: bool, indicating success
"""
if self._loop is None:
self._loop = asyncio.get_event_loop()
return await self._pipe.connect(timeout)
async def write(self, data: bytes) -> None:
"""
Write to the channel.
:param data: data to write to channel
"""
await self._pipe.write(data)
async def read(self) -> Optional[bytes]:
"""
Read from the channel.
:return: read bytes
"""
return await self._pipe.read()
async def close(self) -> None:
"""Close the channel and clean it up."""
await self._pipe.close()
rmtree(self._pipe_dir)
@property
def in_path(self) -> str:
"""Rendezvous point for incoming communication."""
return self._in_path
@property
def out_path(self) -> str:
"""Rendezvous point for outgoing communication."""
return self._out_path
class TCPSocketChannelClient(IPCChannelClient):
"""Interprocess communication channel client using tcp sockets."""
def __init__( # pylint: disable=unused-argument
self,
in_path: str,
out_path: str,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> None:
"""
Initialize a tcp socket communication channel client.
:param in_path: rendezvous point for incoming data
:param out_path: rendezvous point for outgoing data
:param logger: the logger
:param loop: the event loop
"""
self.logger = logger
self._loop = loop
parts = in_path.split(":")
if len(parts) == 1:
self._port = int(in_path)
self._host = "127.0.0.1"
else: # pragma: nocover
self._port = int(parts[1])
self._host = parts[0]
self._sock = None # type: Optional[TCPSocketProtocol]
self._attempts = TCP_SOCKET_PIPE_CLIENT_CONN_ATTEMPTS
self._timeout = PIPE_CONN_TIMEOUT / self._attempts
self.last_exception: Optional[Exception] = None
async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
"""
Connect to the other end of the communication channel.
:param timeout: timeout for connection to be established
:return: connection status
"""
if self._loop is None:
self._loop = asyncio.get_event_loop()
self._timeout = timeout / TCP_SOCKET_PIPE_CLIENT_CONN_ATTEMPTS
self.logger.debug(
"Attempting to connect to {}:{}.....".format("127.0.0.1", self._port)
)
connected = False
while self._attempts > 0:
self._attempts -= 1
try:
self._sock = await self._open_connection()
connected = True
break
except ConnectionRefusedError:
await asyncio.sleep(self._timeout)
except Exception as e: # pylint: disable=broad-except # pragma: nocover
self.last_exception = e
return False
return connected
async def _open_connection(self) -> TCPSocketProtocol:
reader, writer = await asyncio.open_connection(
self._host, self._port, loop=self._loop, # pylint: disable=protected-access
)
return TCPSocketProtocol(reader, writer, logger=self.logger, loop=self._loop)
async def write(self, data: bytes) -> None:
"""
Write data to channel.
:param data: bytes to write
"""
if self._sock is None:
raise ValueError("Socket pipe not connected.") # pragma: nocover
await self._sock.write(data)
async def read(self) -> Optional[bytes]:
"""
Read data from channel.
:return: read bytes
"""
if self._sock is None:
raise ValueError("Socket pipe not connected.") # pragma: nocover
return await self._sock.read()
async def close(self) -> None:
"""Disconnect from communication channel."""
if self._sock is None:
raise ValueError("Socket pipe not connected.") # pragma: nocover
await self._sock.close()
class PosixNamedPipeChannelClient(IPCChannelClient):
"""Interprocess communication channel client using Posix named pipes."""
def __init__(
self,
in_path: str,
out_path: str,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> None:
"""
Initialize a posix named pipe communication channel client.
:param in_path: rendezvous point for incoming data
:param out_path: rendezvous point for outgoing data
:param logger: the logger
:param loop: the event loop
"""
self.logger = logger
self._loop = loop
self._in_path = in_path
self._out_path = out_path
self._pipe = None # type: Optional[PosixNamedPipeProtocol]
self.last_exception: Optional[Exception] = None
async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
"""
Connect to the other end of the communication channel.
:param timeout: timeout for connection to be established
:return: connection status
"""
if self._loop is None:
self._loop = asyncio.get_event_loop()
self._pipe = PosixNamedPipeProtocol(
self._in_path, self._out_path, logger=self.logger, loop=self._loop
)
try:
return await self._pipe.connect()
except Exception as e: # pragma: nocover # pylint: disable=broad-except
self.last_exception = e
return False
async def write(self, data: bytes) -> None:
"""
Write data to channel.
:param data: bytes to write
"""
if self._pipe is None:
raise ValueError("Pipe not connected.") # pragma: nocover
await self._pipe.write(data)
async def read(self) -> Optional[bytes]:
"""
Read data from channel.
:return: read bytes
"""
if self._pipe is None:
raise ValueError("Pipe not connected.") # pragma: nocover
return await self._pipe.read()
async def close(self) -> None:
"""Disconnect from communication channel."""
if self._pipe is None:
raise ValueError("Pipe not connected.") # pragma: nocover
return await self._pipe.close()
def make_ipc_channel(
logger: logging.Logger = _default_logger, loop: Optional[AbstractEventLoop] = None
) -> IPCChannel:
"""
Build a portable bidirectional InterProcess Communication channel
:param logger: the logger
:param loop: the loop
:return: IPCChannel
"""
if os.name == "posix":
return PosixNamedPipeChannel(logger=logger, loop=loop)
if os.name == "nt": # pragma: nocover
return TCPSocketChannel(logger=logger, loop=loop)
raise NotImplementedError( # pragma: nocover
"make ipc channel is not supported on platform {}".format(os.name)
)
def make_ipc_channel_client(
in_path: str,
out_path: str,
logger: logging.Logger = _default_logger,
loop: Optional[AbstractEventLoop] = None,
) -> IPCChannelClient:
"""
Build a portable bidirectional InterProcess Communication client channel
:param in_path: rendezvous point for incoming communication
    :param out_path: rendezvous point for outgoing communication
:param logger: the logger
:param loop: the loop
:return: IPCChannel
"""
if os.name == "posix":
return PosixNamedPipeChannelClient(in_path, out_path, logger=logger, loop=loop)
if os.name == "nt": # pragma: nocover
return TCPSocketChannelClient(in_path, out_path, logger=logger, loop=loop)
raise NotImplementedError( # pragma: nocover
"make ip channel client is not supported on platform {}".format(os.name)
)
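# A minimal end-to-end sketch of the two factories above (hypothetical driver
# code, not part of the original module). The server end creates the channel
# first; the client end connects with the rendezvous paths swapped (the
# channel's out_path is the client's in_path and vice versa, which reduces to
# the same port on the TCP path); both connect() calls are awaited
# concurrently, since each side waits for the other to open its end.
if __name__ == "__main__":  # pragma: nocover
    async def _demo() -> None:
        channel = make_ipc_channel()
        client = make_ipc_channel_client(
            in_path=channel.out_path, out_path=channel.in_path
        )
        ok = await asyncio.gather(channel.connect(), client.connect())
        if all(ok):
            await client.write(b"ping")
            print(await channel.read())  # expected: b'ping'
        await asyncio.gather(channel.close(), client.close())
    asyncio.get_event_loop().run_until_complete(_demo())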
| 32.022222
| 88
| 0.600885
| 20,226
| 0.877255
| 0
| 0
| 2,030
| 0.088046
| 12,868
| 0.558119
| 8,300
| 0.359993
|
6785ebdaa0a0f8a5a088b840a1b64f1e5c59a6a9
| 6,046
|
py
|
Python
|
src/config/svc-monitor/svc_monitor/tests/test_port_tuple.py
|
UbuntuEvangelist/contrail-controller
|
4e8a992230f8f8e91e4f753e19b5442d9e1b446d
|
[
"Apache-2.0"
] | null | null | null |
src/config/svc-monitor/svc_monitor/tests/test_port_tuple.py
|
UbuntuEvangelist/contrail-controller
|
4e8a992230f8f8e91e4f753e19b5442d9e1b446d
|
[
"Apache-2.0"
] | null | null | null |
src/config/svc-monitor/svc_monitor/tests/test_port_tuple.py
|
UbuntuEvangelist/contrail-controller
|
4e8a992230f8f8e91e4f753e19b5442d9e1b446d
|
[
"Apache-2.0"
] | 18
|
2017-01-12T09:28:44.000Z
|
2019-04-18T20:47:42.000Z
|
import mock
from mock import patch
import unittest
from vnc_api.vnc_api import *
from svc_monitor.port_tuple import PortTupleAgent
from svc_monitor.config_db import *
import test_common_utils as test_utils
class PortTupleTest(unittest.TestCase):
def setUp(self):
InstanceIpSM._cassandra = mock.MagicMock()
InstanceIpSM._cassandra.object_read = test_utils.iip_db_read
ServiceInstanceSM._cassandra = mock.MagicMock()
ServiceInstanceSM._cassandra.object_read = test_utils.si_db_read
VirtualMachineInterfaceSM._cassandra = mock.MagicMock()
VirtualMachineInterfaceSM._cassandra.object_read = test_utils.vmi_db_read
self.mocked_vnc = mock.MagicMock()
self.mocked_vnc.fq_name_to_id = test_utils.get_vn_id_for_fq_name
self.mocked_vnc.instance_ip_create = test_utils.iip_create
self.pt_agent = PortTupleAgent(
svc_mon=mock.MagicMock(), vnc_lib=self.mocked_vnc,
cassandra=mock.MagicMock(), config_section=mock.MagicMock(),
logger=mock.MagicMock())
def tearDown(self):
ServiceTemplateSM.reset()
ServiceInstanceSM.reset()
InstanceIpSM.reset()
del InstanceIpSM._cassandra
ServiceInstanceSM.reset()
del ServiceInstanceSM._cassandra
VirtualMachineInterfaceSM.reset()
del VirtualMachineInterfaceSM._cassandra
def test_single_vm_port_tuple_create(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_virtual_network('fake-domain:fake-project:public-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:fake-vn-uuid')
st = test_utils.create_test_st(name='fake-st-uuid',
intf_list=[['right', True], ['left', True]], version='2')
si = test_utils.create_test_si(name='fake-si-uuid', count=1,
intf_list=['public-vn', 'fake-vn-uuid'])
si.service_template = 'fake-st-uuid'
pt = test_utils.create_test_port_tuple(
'fake-domain:fake-project:fake-si-uuid:fake-port-tuple',
'fake-si-uuid')
vmi = test_utils.create_test_vmi('fake-domain:fake-project:fake-vmi-uuid-left', pt)
vmi.params = {}
vmi.params['service_interface_type'] = 'left'
vmi = test_utils.create_test_vmi('fake-domain:fake-project:fake-vmi-uuid-right', pt)
vmi.params = {}
vmi.params['service_interface_type'] = 'right'
self.pt_agent.update_port_tuple(pt_id='fake-port-tuple')
self.mocked_vnc.ref_update.assert_any_call('instance-ip',
'fake-iip-uuid', 'virtual-machine-interface', 'fake-vmi-uuid-left',
None, 'ADD')
self.mocked_vnc.ref_update.assert_any_call('instance-ip',
'fake-iip-uuid', 'virtual-machine-interface', 'fake-vmi-uuid-right',
None, 'ADD')
self.mocked_vnc.ref_update.assert_any_call('service-instance',
'fake-si-uuid', 'instance-ip', 'fake-iip-uuid', None, 'ADD',
ServiceInterfaceTag('left'))
self.mocked_vnc.ref_update.assert_any_call('service-instance',
'fake-si-uuid', 'instance-ip', 'fake-iip-uuid', None, 'ADD',
ServiceInterfaceTag('right'))
def test_two_vm_port_tuple_create(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_virtual_network('fake-domain:fake-project:public-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:fake-vn-uuid')
st = test_utils.create_test_st(name='fake-st-uuid',
intf_list=[['right', True], ['left', True]], version='2')
si = test_utils.create_test_si(name='fake-si-uuid', count=1,
intf_list=['public-vn', 'fake-vn-uuid'])
si.service_template = 'fake-st-uuid'
pt = test_utils.create_test_port_tuple(
'fake-domain:fake-project:fake-si-uuid:fake-port-tuple1',
'fake-si-uuid')
vmi = test_utils.create_test_vmi('fake-domain:fake-project:fake-vmi-uuid-left1', pt)
vmi.params = {}
vmi.params['service_interface_type'] = 'left'
vmi = test_utils.create_test_vmi('fake-domain:fake-project:fake-vmi-uuid-right1', pt)
vmi.params = {}
vmi.params['service_interface_type'] = 'right'
self.pt_agent.update_port_tuple(pt_id='fake-port-tuple1')
si.service_template = 'fake-st-uuid'
pt = test_utils.create_test_port_tuple(
'fake-domain:fake-project:fake-si-uuid:fake-port-tuple2',
'fake-si-uuid')
vmi = test_utils.create_test_vmi('fake-domain:fake-project:fake-vmi-uuid-left2', pt)
vmi.params = {}
vmi.params['service_interface_type'] = 'left'
vmi = test_utils.create_test_vmi('fake-domain:fake-project:fake-vmi-uuid-right2', pt)
vmi.params = {}
vmi.params['service_interface_type'] = 'right'
self.pt_agent.update_port_tuple(pt_id='fake-port-tuple2')
self.mocked_vnc.ref_update.assert_any_call('instance-ip',
'fake-iip-uuid', 'virtual-machine-interface', 'fake-vmi-uuid-left1',
None, 'ADD')
self.mocked_vnc.ref_update.assert_any_call('instance-ip',
'fake-iip-uuid', 'virtual-machine-interface', 'fake-vmi-uuid-right1',
None, 'ADD')
self.mocked_vnc.ref_update.assert_any_call('instance-ip',
'fake-iip-uuid', 'virtual-machine-interface', 'fake-vmi-uuid-left2',
None, 'ADD')
self.mocked_vnc.ref_update.assert_any_call('instance-ip',
'fake-iip-uuid', 'virtual-machine-interface', 'fake-vmi-uuid-right2',
None, 'ADD')
self.mocked_vnc.ref_update.assert_any_call('service-instance',
'fake-si-uuid', 'instance-ip', 'fake-iip-uuid', None, 'ADD',
ServiceInterfaceTag('left'))
self.mocked_vnc.ref_update.assert_any_call('service-instance',
'fake-si-uuid', 'instance-ip', 'fake-iip-uuid', None, 'ADD',
ServiceInterfaceTag('right'))
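# A conventional standalone entry point (an illustrative addition; assumes
# the contrail svc_monitor test utilities are importable from this directory):
if __name__ == '__main__':
    unittest.main()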
| 48.368
| 93
| 0.664406
| 5,838
| 0.965597
| 0
| 0
| 0
| 0
| 0
| 0
| 1,877
| 0.310453
|
6786e2d4a6f307e6300a31ab2c4e829094e2410e
| 5,672
|
py
|
Python
|
pearll/agents/ga.py
|
LondonNode/Anvil
|
bc50fd7b16af36051157814e2548a98e787b03de
|
[
"MIT"
] | 13
|
2022-01-17T14:43:05.000Z
|
2022-03-10T04:05:36.000Z
|
pearll/agents/ga.py
|
LondonNode/Anvil
|
bc50fd7b16af36051157814e2548a98e787b03de
|
[
"MIT"
] | 3
|
2022-02-24T18:29:12.000Z
|
2022-03-22T11:09:07.000Z
|
pearll/agents/ga.py
|
LondonNode/Anvil
|
bc50fd7b16af36051157814e2548a98e787b03de
|
[
"MIT"
] | null | null | null |
from functools import partial
from typing import Callable, List, Optional, Type
import numpy as np
from gym.vector.vector_env import VectorEnv
from pearll.agents.base_agents import BaseAgent
from pearll.buffers import RolloutBuffer
from pearll.buffers.base_buffer import BaseBuffer
from pearll.callbacks.base_callback import BaseCallback
from pearll.common.type_aliases import Log
from pearll.common.utils import filter_rewards
from pearll.explorers.base_explorer import BaseExplorer
from pearll.models import ActorCritic, Dummy
from pearll.settings import (
BufferSettings,
ExplorerSettings,
LoggerSettings,
MiscellaneousSettings,
MutationSettings,
PopulationSettings,
Settings,
)
from pearll.signal_processing import (
crossover_operators,
mutation_operators,
selection_operators,
)
from pearll.updaters.evolution import BaseEvolutionUpdater, GeneticUpdater
def default_model(env: VectorEnv):
"""
Returns a default model for the given environment.
"""
actor = Dummy(space=env.single_action_space)
critic = Dummy(space=env.single_action_space)
return ActorCritic(
actor=actor,
critic=critic,
population_settings=PopulationSettings(
actor_population_size=env.num_envs, actor_distribution="uniform"
),
)
class GA(BaseAgent):
"""
Genetic Algorithm
https://www.geeksforgeeks.org/genetic-algorithms/
:param env: the gym-like environment to be used, should be a VectorEnv
:param model: the neural network model
:param updater_class: the updater class to be used
:param selection_operator: the selection operator to be used
:param selection_settings: the selection settings to be used
:param crossover_operator: the crossover operator to be used
:param crossover_settings: the crossover settings to be used
:param mutation_operator: the mutation operator to be used
:param mutation_settings: the mutation settings to be used
:param elitism: the elitism ratio
:param buffer_class: the buffer class for storing and sampling trajectories
:param buffer_settings: settings for the buffer
:param action_explorer_class: the explorer class for random search at beginning of training and
adding noise to actions
    :param explorer_settings: settings for the action explorer
:param callbacks: an optional list of callbacks (e.g. if you want to save the model)
:param callback_settings: settings for callbacks
:param logger_settings: settings for the logger
:param misc_settings: settings for miscellaneous parameters
"""
def __init__(
self,
env: VectorEnv,
model: Optional[ActorCritic] = None,
updater_class: Type[BaseEvolutionUpdater] = GeneticUpdater,
selection_operator: Callable = selection_operators.roulette_selection,
selection_settings: Settings = Settings(),
crossover_operator: Callable = crossover_operators.one_point_crossover,
crossover_settings: Settings = Settings(),
mutation_operator: Callable = mutation_operators.uniform_mutation,
mutation_settings: MutationSettings = MutationSettings(),
elitism: float = 0.1,
buffer_class: Type[BaseBuffer] = RolloutBuffer,
buffer_settings: BufferSettings = BufferSettings(),
action_explorer_class: Type[BaseExplorer] = BaseExplorer,
explorer_settings: ExplorerSettings = ExplorerSettings(start_steps=0),
callbacks: Optional[List[Type[BaseCallback]]] = None,
callback_settings: Optional[List[Settings]] = None,
logger_settings: LoggerSettings = LoggerSettings(),
misc_settings: MiscellaneousSettings = MiscellaneousSettings(),
) -> None:
model = model if model is not None else default_model(env)
super().__init__(
env=env,
model=model,
action_explorer_class=action_explorer_class,
explorer_settings=explorer_settings,
buffer_class=buffer_class,
buffer_settings=buffer_settings,
logger_settings=logger_settings,
callbacks=callbacks,
callback_settings=callback_settings,
misc_settings=misc_settings,
)
self.updater = updater_class(self.model)
self.selection_operator = partial(
selection_operator, **selection_settings.filter_none()
)
self.crossover_operator = partial(
crossover_operator, **crossover_settings.filter_none()
)
self.mutation_operator = partial(
mutation_operator, **mutation_settings.filter_none()
)
self.elitism = elitism
def _fit(
self, batch_size: int, actor_epochs: int = 1, critic_epochs: int = 1
) -> Log:
divergences = np.zeros(actor_epochs)
entropies = np.zeros(actor_epochs)
trajectories = self.buffer.sample(batch_size, dtype="numpy")
rewards = trajectories.rewards.squeeze()
rewards = filter_rewards(rewards, trajectories.dones.squeeze())
        if rewards.ndim > 1:
            # the sampled trajectories are numpy arrays here (dtype="numpy"),
            # so reduce with axis=, not torch's dim=
            rewards = rewards.sum(axis=-1)
for i in range(actor_epochs):
log = self.updater(
rewards=rewards,
selection_operator=self.selection_operator,
crossover_operator=self.crossover_operator,
mutation_operator=self.mutation_operator,
elitism=self.elitism,
)
divergences[i] = log.divergence
entropies[i] = log.entropy
self.buffer.reset()
return Log(divergence=divergences.sum(), entropy=entropies.mean())
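# A minimal construction sketch for the agent above (illustrative only: it
# assumes the classic `gym` package with vectorized environments is installed;
# the public training entry point of BaseAgent is not shown in this file, so
# only construction is demonstrated here).
if __name__ == "__main__":
    import gym
    env = gym.vector.make("CartPole-v1", num_envs=8)
    agent = GA(env=env, elitism=0.1)
    print(type(agent.updater).__name__)  # -> "GeneticUpdater"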
| 38.849315
| 99
| 0.701164
| 4,349
| 0.766749
| 0
| 0
| 0
| 0
| 0
| 0
| 1,352
| 0.238364
|
6787612d23eda8ccb35a41398442232a6c1a614e
| 17,643
|
py
|
Python
|
src/tequila/optimizers/optimizer_scipy.py
|
snc2/tequila
|
6767ced9215408f7d055c22df7a66ccd610b00fb
|
[
"MIT"
] | null | null | null |
src/tequila/optimizers/optimizer_scipy.py
|
snc2/tequila
|
6767ced9215408f7d055c22df7a66ccd610b00fb
|
[
"MIT"
] | null | null | null |
src/tequila/optimizers/optimizer_scipy.py
|
snc2/tequila
|
6767ced9215408f7d055c22df7a66ccd610b00fb
|
[
"MIT"
] | null | null | null |
import scipy, numpy, typing, numbers
from tequila.objective import Objective
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from .optimizer_base import Optimizer
from ._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from collections import namedtuple
from tequila.utils.exceptions import TequilaException
from tequila.circuit.noise import NoiseModel
from tequila.tools.qng import get_qng_combos
class TequilaScipyException(TequilaException):
""" """
pass
SciPyReturnType = namedtuple('SciPyReturnType', 'energy angles history scipy_output')
class OptimizerSciPy(Optimizer):
""" """
gradient_free_methods = ['NELDER-MEAD', 'COBYLA', 'POWELL', 'SLSQP']
gradient_based_methods = ['L-BFGS-B', 'BFGS', 'CG', 'TNC']
hessian_based_methods = ["TRUST-KRYLOV", "NEWTON-CG", "DOGLEG", "TRUST-NCG", "TRUST-EXACT", "TRUST-CONSTR"]
@classmethod
def available_methods(cls):
""":return: All tested available methods"""
return cls.gradient_free_methods + cls.gradient_based_methods + cls.hessian_based_methods
def __init__(self, method: str = "L-BFGS-B",
tol: numbers.Real = None,
method_options=None,
method_bounds=None,
method_constraints=None,
silent: bool = True,
**kwargs):
"""
Optimize a circuit to minimize a given objective using scipy
See the Optimizer class for all other parameters to initialize
:param method: The scipy method passed as string
:param tol: See scipy documentation for the method you picked
:param method_options: See scipy documentation for the method you picked
:param method_bounds: See scipy documentation for the method you picked
:param method_constraints: See scipy documentation for the method you picked
:param silent: if False the optimizer print out all evaluated energies
        Note: whether gradients are used is inferred automatically from the chosen method
"""
super().__init__(**kwargs)
if hasattr(method, "upper"):
self.method = method.upper()
else:
self.method = method
self.tol = tol
self.method_options = method_options
if method_bounds is not None:
method_bounds = {assign_variable(k): v for k, v in method_bounds.items()}
self.method_bounds = method_bounds
self.silent = silent
if method_options is None:
self.method_options = {'maxiter': self.maxiter}
else:
self.method_options = method_options
if 'maxiter' not in method_options:
self.method_options['maxiter'] = self.maxiter
self.method_options['disp'] = not silent
if method_constraints is None:
self.method_constraints = ()
else:
self.method_constraints = method_constraints
def __call__(self, objective: Objective,
variables: typing.List[Variable] = None,
initial_values: typing.Dict[Variable, numbers.Real] = None,
gradient: typing.Dict[Variable, Objective] = None,
hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
reset_history: bool = True,
*args,
**kwargs) -> SciPyReturnType:
"""
Optimizes with scipy and gives back the optimized angles
Get the optimized energies over the history
:param objective: The tequila Objective to minimize
:param initial_values: initial values for the objective
:param reset_history: reset the history before optimization starts (has no effect if self.save_history is False)
:return: tuple of optimized energy ,optimized angles and scipy output
"""
infostring = "{:15} : {}\n".format("Method", self.method)
infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
if gradient is not None:
infostring += "{:15} : {}\n".format("grad instr", gradient)
if hessian is not None:
infostring += "{:15} : {}\n".format("hess_instr", hessian)
if self.save_history and reset_history:
self.reset_history()
active_angles, passive_angles, variables = self.initialize_variables(objective, initial_values, variables)
# Transform the initial value directory into (ordered) arrays
param_keys, param_values = zip(*active_angles.items())
param_values = numpy.array(param_values)
# process and initialize scipy bounds
bounds = None
if self.method_bounds is not None:
bounds = {k: None for k in active_angles}
for k, v in self.method_bounds.items():
if k in bounds:
bounds[k] = v
infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
names, bounds = zip(*bounds.items())
assert (names == param_keys) # make sure the bounds are not shuffled
# do the compilation here to avoid costly recompilation during the optimization
compiled_objective = self.compile_objective(objective=objective)
E = _EvalContainer(objective=compiled_objective,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
backend_options=self.backend_options,
print_level=self.print_level)
compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
compile_hessian = self.method in self.hessian_based_methods
dE = None
ddE = None
# detect if numerical gradients shall be used
# switch off compiling if so
if isinstance(gradient, str):
if gradient.lower() == 'qng':
compile_gradient = False
if compile_hessian:
raise TequilaException('Sorry, QNG and hessian not yet tested together.')
combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
samples=self.samples, noise=self.noise,
backend_options=self.backend_options)
dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
infostring += "{:15} : QNG {}\n".format("gradient", dE)
else:
dE = gradient
compile_gradient = False
if compile_hessian:
compile_hessian = False
if hessian is None:
hessian = gradient
infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
if isinstance(hessian, str):
ddE = hessian
compile_hessian = False
if compile_gradient:
grad_obj, comp_grad_obj = self.compile_gradient(objective=objective, variables=variables, gradient=gradient)
expvals = sum([o.count_expectationvalues() for o in comp_grad_obj.values()])
infostring += "{:15} : {} expectationvalues\n".format("gradient", expvals)
dE = _GradContainer(objective=comp_grad_obj,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
print_level=self.print_level,
backend_options=self.backend_options)
if compile_hessian:
hess_obj, comp_hess_obj = self.compile_hessian(variables=variables,
hessian=hessian,
grad_obj=grad_obj,
comp_grad_obj=comp_grad_obj)
expvals = sum([o.count_expectationvalues() for o in comp_hess_obj.values()])
infostring += "{:15} : {} expectationvalues\n".format("hessian", expvals)
ddE = _HessContainer(objective=comp_hess_obj,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
print_level=self.print_level,
backend_options=self.backend_options)
if self.print_level > 0:
print(self)
print(infostring)
print("{:15} : {}\n".format("active variables", len(active_angles)))
Es = []
class SciPyCallback:
energies = []
gradients = []
hessians = []
angles = []
real_iterations = 0
def __call__(self, *args, **kwargs):
self.energies.append(E.history[-1])
self.angles.append(E.history_angles[-1])
if dE is not None and not isinstance(dE, str):
self.gradients.append(dE.history[-1])
if ddE is not None and not isinstance(ddE, str):
self.hessians.append(ddE.history[-1])
self.real_iterations += 1
callback = SciPyCallback()
res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
args=(Es,),
method=self.method, tol=self.tol,
bounds=bounds,
constraints=self.method_constraints,
options=self.method_options,
callback=callback)
# failsafe since callback is not implemented everywhere
if callback.real_iterations == 0:
real_iterations = range(len(E.history))
if self.save_history:
self.history.energies = callback.energies
self.history.energy_evaluations = E.history
self.history.angles = callback.angles
self.history.angles_evaluations = E.history_angles
self.history.gradients = callback.gradients
self.history.hessians = callback.hessians
if dE is not None and not isinstance(dE, str):
self.history.gradients_evaluations = dE.history
if ddE is not None and not isinstance(ddE, str):
self.history.hessians_evaluations = ddE.history
# some methods like "cobyla" do not support callback functions
if len(self.history.energies) == 0:
self.history.energies = E.history
self.history.angles = E.history_angles
E_final = res.fun
angles_final = dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
angles_final = {**angles_final, **passive_angles}
return SciPyReturnType(energy=E_final, angles=format_variable_dictionary(angles_final), history=self.history,
scipy_output=res)
def available_methods(energy=True, gradient=True, hessian=True) -> typing.List[str]:
"""Convenience
:return: Available methods of the scipy optimizer
Parameters
----------
energy :
(Default value = True)
gradient :
(Default value = True)
hessian :
(Default value = True)
Returns
-------
"""
methods = []
if energy:
methods += OptimizerSciPy.gradient_free_methods
if gradient:
methods += OptimizerSciPy.gradient_based_methods
if hessian:
methods += OptimizerSciPy.hessian_based_methods
return methods
def minimize(objective: Objective,
gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
variables: typing.List[typing.Hashable] = None,
samples: int = None,
maxiter: int = 100,
backend: str = None,
backend_options: dict = None,
noise: NoiseModel = None,
method: str = "BFGS",
tol: float = 1.e-3,
method_options: dict = None,
method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
method_constraints=None,
silent: bool = False,
save_history: bool = True,
*args,
**kwargs) -> SciPyReturnType:
"""
Parameters
----------
objective: Objective :
The tequila objective to optimize
gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None) :
'2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
dictionary of variables and tequila objective to define own gradient,
None for automatic construction (default)
Other options include 'qng' to use the quantum natural gradient.
hessian: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None) :
'2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
None for automatic construction (default)
initial_values: typing.Dict[typing.Hashable, numbers.Real]: (Default value = None):
Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
variables: typing.List[typing.Hashable] :
(Default value = None)
List of Variables to optimize
samples: int :
(Default value = None)
samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
maxiter: int :
(Default value = 100)
backend: str :
(Default value = None)
Simulator backend, will be automatically chosen if set to None
backend_options: dict:
(Default value = None)
Additional options for the backend
Will be unpacked and passed to the compiled objective in every call
noise: NoiseModel:
(Default value =None)
a NoiseModel to apply to all expectation values in the objective.
method: str :
(Default value = "BFGS")
Optimization method (see scipy documentation, or 'available methods')
tol: float :
(Default value = 1.e-3)
Convergence tolerance for optimization (see scipy documentation)
method_options: dict :
(Default value = None)
Dictionary of options
(see scipy documentation)
method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]]:
(Default value = None)
bounds for the variables (see scipy documentation)
method_constraints :
(Default value = None)
        (see scipy documentation)
silent: bool :
(Default value = False)
No printout if True
save_history: bool:
(Default value = True)
Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType
        The optimized energy, the optimized angles, the optimization history and the raw scipy output.
    """
if isinstance(gradient, dict) or hasattr(gradient, "items"):
if all([isinstance(x, Objective) for x in gradient.values()]):
gradient = format_variable_dictionary(gradient)
if isinstance(hessian, dict) or hasattr(hessian, "items"):
if all([isinstance(x, Objective) for x in hessian.values()]):
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
method_bounds = format_variable_dictionary(method_bounds)
# set defaults
optimizer = OptimizerSciPy(save_history=save_history,
maxiter=maxiter,
method=method,
method_options=method_options,
method_bounds=method_bounds,
method_constraints=method_constraints,
silent=silent,
backend=backend,
backend_options=backend_options,
samples=samples,
noise_model=noise,
tol=tol,
*args,
**kwargs)
if initial_values is not None:
initial_values = {assign_variable(k): v for k, v in initial_values.items()}
return optimizer(objective=objective,
gradient=gradient,
hessian=hessian,
initial_values=initial_values,
variables=variables, *args, **kwargs)
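# A minimal usage sketch for `minimize` (illustrative; assumes the tequila
# package is importable as `tequila` with at least one simulator backend
# installed -- none of these names are defined in this file):
if __name__ == "__main__":
    import tequila as tq
    U = tq.gates.Ry(angle="a", target=0)      # one-parameter circuit
    H = tq.paulis.Z(0)                        # measure <Z> on qubit 0
    objective = tq.ExpectationValue(H=H, U=U)
    result = minimize(objective=objective, method="BFGS",
                      initial_values={"a": 0.5}, silent=True)
    print(result.energy, result.angles)       # minimum near <Z> = -1 at a = pi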
| 44.55303
| 141
| 0.590829
| 11,181
| 0.633736
| 0
| 0
| 194
| 0.010996
| 0
| 0
| 5,481
| 0.310661
|
67884e5df8d269868ffffa5bd0b7c492cbdd5945
| 12,051
|
py
|
Python
|
Section_3.3_simul_3/2_Runtime/bsolar.py
|
isaac2math/solar
|
92a2a869cd902e15edce7aa5ed5af10f148763d9
|
[
"Intel"
] | null | null | null |
Section_3.3_simul_3/2_Runtime/bsolar.py
|
isaac2math/solar
|
92a2a869cd902e15edce7aa5ed5af10f148763d9
|
[
"Intel"
] | null | null | null |
Section_3.3_simul_3/2_Runtime/bsolar.py
|
isaac2math/solar
|
92a2a869cd902e15edce7aa5ed5af10f148763d9
|
[
"Intel"
] | null | null | null |
import numpy as np
import time
import warnings
from sklearn.linear_model import LinearRegression
from solar import solar
from sklearn.exceptions import ConvergenceWarning
# For recent versions of scikit-learn: since the class 'Lars' may rely on the Cholesky decomposition and hence may raise a convergence warning on high-dimensional data (p much larger than n), we use the following command to skip the convergence warning.
warnings.filterwarnings("ignore", category=ConvergenceWarning, module="sklearn")
#####################################################
# define the class of bsolar (sequential computing) #
#####################################################
'''
this class is used to demonstrate the performance of bootstrap solar (bsolar) via sequential computation
please note that this file is identical to "bsolar_parallel.py" except for the subsample selection frequency estimation (step 2, line 93 - 120 of this file), where we use a sequential computing scheme instead.
Check this before you run the code:
Plz check if you have 'sci-kit learn', 'numpy', 'joblib', 'matplotlib' and 'tqdm' installed. If not,
1. run 'pip install scikit-learn joblib numpy matplotlib tqdm' if you use pure Python3
2. run 'conda install scikit-learn joblib numpy matplotlib tqdm' if you use Anaconda3
Modules:
1. from scikit-learn, we call 'Lars' to compute solar.
2. we use 'numpy' for matrix computation and random variable generation;
3. for simulator, plz see 'simulator.py' for detail;
4. we use class 'time' to time the computation of solar
Inputs:
1. X and y : the inputs and output of regression;
2. n_repeat_solar : the number of subsamples that solar generates;
3. n_repeat_bsolar : the number of subsamples that bsolar generates;
4. step_size : the step size of grid search for threshold optimization of subsample selection frequency;
Outputs:
1. bsolar_coef_H : the bsolar-H regression coefficients;
2. bsolar_coef_S : the bsolar-S regression coefficients;
4. Qc_list : the detailed subsample selection frequency of bsolar;
5. Q_opt_c_H : the variable that bsolar-H selects;
5. Q_opt_c_S : the variable that bsolar-S selects;
Remarks:
1. fit : the function that trains bsolar;
2. q_list : the plot function that returns the full list of subsample selection frequency for each variable in bsolar;
'''
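# A toy illustration of the thresholding idea described above (an illustrative
# helper that is not used by the class; the example numbers are made up):
def _qc_demo(qhat_value, c):
    """Return the indices of variables whose selection frequency is >= c.
    e.g. _qc_demo(np.array([1.0, 0.95, 0.4, 0.1]), 0.9) -> array([0, 1])
    """
    return np.nonzero(np.asarray(qhat_value) >= c)[0]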
class bsolar:
def __init__(self, X, Y, n_repeat_solar, n_repeat_bsolar, step_size):
        # for convenience, we define the common variables (variables we need to use in each of the following functions) in the class as follows (each common variable is defined as self.xxxx)
# sample size
self.sample_size = X.shape[0]
# the number of subsamples generated in solar
self.n_repeat_solar = n_repeat_solar
# the number of subsamples generated in bsolar
self.n_repeat_bsolar = n_repeat_bsolar
        # (grid search) step size for tuning the threshold of subsample selection frequency for bsolar
        self.step_size = step_size
# the size of each subsample
self.subsample_size = int(self.sample_size * 0.9)
# the number of total variables in X
self.n_dim = X.shape[1]
# the maximum value of c in its grid search (for plotting only)
self.q_start = 1
# the minimum value of c in its grid search (for plotting only)
self.q_end = 0.1
# step size of c in its grid search (for plotting only)
self.q_step = -0.02
# the sample we generate via data-generating process
self.X = X; self.y = Y
def fit(self):
#1. construct a placeholder called 'qhat_k_container', which is the list of all qhat^k (a binary string representing whether each variable is selected by solar on subsample k) of each subsample
qhat_k_container = list()
#2. train a solar on each subsample, find out which variable is selected on a given sample and save the corresponding selection result on subsample k as qhat^k
for j in range(self.n_repeat_bsolar):
# 2a. randomly choose a subset of sample points (whose index is 'index_subsample') and use them to generate a subsample in the given repeat of for-loop
index_subsample = np.random.choice(self.sample_size, self.subsample_size, replace=False)
            # 2b. based on 'index_subsample', take the corresponding observations of X as "X_subsample"
            X_subsample = self.X[index_subsample]
            # 2c. based on 'index_subsample', take the corresponding observations of Y out and save them as the subsample
y_subsample = self.y[index_subsample]
# 2d. change dimension for solar training
y_subsample.shape = (y_subsample.shape[0],1)
# 2e. given a subsample, compute solar on it
# 2e(1). call the class 'solar'
trial2 = solar( X_subsample, y_subsample, self.n_repeat_solar, self.step_size, lasso=False)
# 2e(2). compute solar on the subsample
solar_coef, _, _, _, _, _, _, _,_ = trial2.fit()
# 2e(3). save the active set of solar on this subsample (indices of variables select by solar) as 'active'.
active = np.nonzero(solar_coef)[0]
# 2f. based on the active set of solar, we compute qhat^k as the binary string of whether each variable is selected by solar on subsample K
# 2f(1). we generate 'qhat_k' as a row of zeros;
qhat_k = np.zeros((1, self.n_dim))
# 2f(2). if a variable (the ith column in matrix X) is selected by solar, we change the ith value of qhat_k as 1
for i in active:
qhat_k[0, i] = 1
# 2f(3). we append the result into 'qhat_k_container' and save it as one element of the list
qhat_k_container.append(qhat_k)
# 3. compute the subsample selection frequency for all variables
# 3a. we transform the list of all qhat^k ('qhat_k_container') into a matrix ('qhat_k_container_matrix')
# row of the matrix : the qhat^k on a given subsample for all variables
# column of the matrix : the corresponding values of qhat^k for variable "X_i" on all subsamples
# axis =0 means that we treat each item as a row;
qhat_k_container_matrix = np.concatenate(qhat_k_container, axis=0)
        # 3b. compute the value of qhat for each variable (the subsample selection frequency of each variable)
# e.g., compute the mean of each column
qhat_value = np.mean(qhat_k_container_matrix, axis=0)
# 3c. set 'Qc_list' as the container for the subsample selection frequencies for all variables, ranking in decreasing order.
Qc_list = list()
# 3d. set 'c_seq' as the sequence of c (the threshold of subsample selection frequency in bsolar)
c_seq = np.arange(1, 0.1, -0.02)
# 3e. for each value of c, generate Q(c) --- the set of variables with subsample selection frequency larger or equal to c
for j in c_seq:
# 3e(1). define 'container' as the placeholder of Q(c) when c == j;
container = list()
for i in range(self.X.shape[1]):
# 3e(2). include all variables into 'container' if their corresponding values in q-hat are larger or equal to j;
if (qhat_value[i] >= j):
container.append(i)
# 3e(3). append 'container' (Q(c) when c == j) into 'Qc_list' (the container of Q(c) for all value of c);
Qc_list.append(container)
# 4. pick the variable that are selected most of the time;
# 4a. if it is bsolar-S, choose c = 0.9
Q_opt_c_S = Qc_list[5]
# if it is bsolar-H, choose c = 1
Q_opt_c_H = Qc_list[0]
        # 5. output the bsolar-S result (Q_opt_c_S is the active set of bsolar-S)
# 5a. if Q_opt_c_S is empty, return a zero array and empty active set
if Q_opt_c_S == []:
bsolar_coef_S = np.zeros([self.n_dim, 1])
# 5b. otherwise, regress Y onto the selected variables in X (variables in Q_opt_c_S)
else :
# 5b(1). call the LinearRegression class;
OLS_S = LinearRegression()
# 5b(2). fit OLS of Y to the variables of Q_opt_c_S on X;
OLS_S.fit(self.X[:, Q_opt_c_S], self.y)
            # 5b(3). set 'bsolar_coef_S' (an array of zeros) as the placeholder of bsolar-S regression coefficients
            bsolar_coef_S = np.zeros([self.n_dim, 1])
            # 5b(4). put the estimated regression coefficients into their corresponding places in 'bsolar_coef_S'
bsolar_coef_S[Q_opt_c_S, 0] = OLS_S.coef_
        # 5c. output the bsolar-H result (Q_opt_c_H is the active set of bsolar-H)
# if Q_opt_c_H is empty, return a zero array and empty active set
if Q_opt_c_H == []:
bsolar_coef_H = np.zeros([self.n_dim, 1])
# 5d. otherwise, regress Y onto the selected variables in X (variables in Q_opt_c_H)
else :
# 5d(1). call the LinearRegression class;
OLS_H = LinearRegression()
# 5d(2). fit OLS of Y on the variables of Q(c*) in X;
OLS_H.fit(self.X[:, Q_opt_c_H], self.y)
            # 5d(3). set 'bsolar_coef_H' (an array of zeros) as the placeholder of bsolar-H regression coefficients
            bsolar_coef_H = np.zeros([self.n_dim, 1])
            # 5d(4). put the estimated regression coefficients into their corresponding places in 'bsolar_coef_H'
bsolar_coef_H[Q_opt_c_H, 0] = OLS_H.coef_
return bsolar_coef_H, bsolar_coef_S, Qc_list, Q_opt_c_H, Q_opt_c_S
# return the full list of subsample selection frequency for each variable in bsolar
def q_list(self, Qc_list):
# 1. concatenate Qc_list into a matrix
var_mark_plot = np.concatenate(Qc_list)
# 2. compute the value of c for each Q(c) and the corresponding variables in each Q(c)
var_index, counts = np.unique(var_mark_plot, return_counts=True)
var_index_ordered = [x for _, x in sorted(zip(counts, var_index))]
var_plot = var_index_ordered[::-1]
cou_plot = np.sort(counts)[::-1] / \
((self.q_end - self.q_start) / self.q_step)
var_plot = ['X' + str(i) for i in var_plot]
# 3. print the list of variables with different value of c
var_loc_list = list()
var_q_list = list()
q_value_list = np.unique(cou_plot)[::-1]
i = 1
for j in q_value_list:
ans_ind = np.where([cou_plot == j])[1]
ans_var = [var_plot[i] for i in ans_ind]
var_loc_list.append(ans_ind)
var_q_list.append(ans_var)
print('selection frequency >= ', j)
print(var_q_list[:i])
i += 1
##################################
# test if this module works fine #
##################################
'''
this part is set up to test the functionality of the class above;
you can run all the code in this file to test whether the class works;
when you import the class from this file, the code (even functions or classes) after " if __name__ == '__main__': " will be ignored
'''
if __name__ == '__main__':
from simulator import simul
sample_size = 100
n_dim = 12
n_info = 5
n_repeat_solar = 10
n_repeat_bsolar = 3
step_size = -0.02
np.random.seed(0)
# generate X and Y
trial1 = simul(sample_size, n_dim, n_info)
X, Y = trial1.data_gen()
# start timing
start = time.time()
# train solar
trial2 = bsolar(X, Y, n_repeat_solar, n_repeat_bsolar, step_size)
bsolar_coef_H, bsolar_coef_S, Qc_list, Q_opt_c_H, Q_opt_c_S = trial2.fit()
# end timing
end = time.time()
# print the result
print('variables that bsolar-H selects: ', Q_opt_c_H)
print('variables that bsolar-S selects: ', Q_opt_c_S)
trial2.q_list(Qc_list)
| 46.528958
| 265
| 0.64866
| 8,464
| 0.702348
| 0
| 0
| 0
| 0
| 0
| 0
| 7,367
| 0.611319
|
6788b2d4a5d2258670eff8708364f1ba49cb5189
| 615
|
py
|
Python
|
solutions/nelum_pokuna.py
|
UdeshUK/RxH5-Prextreme
|
6f329b13d552d9c7e9ad927e2fe607c7cc0964f6
|
[
"Apache-2.0"
] | 1
|
2018-10-14T12:47:03.000Z
|
2018-10-14T12:47:03.000Z
|
solutions/nelum_pokuna.py
|
Team-RxH5/Prextreme
|
6f329b13d552d9c7e9ad927e2fe607c7cc0964f6
|
[
"Apache-2.0"
] | null | null | null |
solutions/nelum_pokuna.py
|
Team-RxH5/Prextreme
|
6f329b13d552d9c7e9ad927e2fe607c7cc0964f6
|
[
"Apache-2.0"
] | null | null | null |
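# A card-trick checker (Python 2 source): for each test case, read two chosen
# row numbers and two 4x4 grids, intersect the two chosen rows, and report the
# unique common card, an ambiguous trick ("Bad magician!"), or a cheating
# volunteer (no common card).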
cases=int(raw_input())
for case in range(cases):
answers=[0,0]
grid=[[0 for x in range(4)] for y in range(2)]
common=[]
for i in range(2):
answers[i]=int(raw_input())
for j in range(4):
grid[i][j]=raw_input().split()
grid[i][j] = map(int, grid[i][j])
# Code begins
for i in grid[0][answers[0]-1]:
if i in grid[1][answers[1]-1]:
common.append(i)
if len(common)>1:
print "Bad magician!"
elif len(common)==1:
for i in common:
print i
elif len(common)==0:
print "Volunteer cheated!"
| 23.653846
| 50
| 0.518699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 0.078049
|
6788c25e7a00ed595c0a516765861ce2d8e549e1
| 69,067
|
py
|
Python
|
resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/MolKit/data/allct_dat.py
|
J-E-J-S/aaRS-Pipeline
|
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
|
[
"MIT"
] | 9
|
2021-03-06T04:24:28.000Z
|
2022-01-03T09:53:07.000Z
|
MolKit/data/allct_dat.py
|
e-mayo/autodocktools-prepare-py3k
|
2dd2316837bcb7c19384294443b2855e5ccd3e01
|
[
"BSD-3-Clause"
] | 3
|
2021-03-07T05:37:16.000Z
|
2021-09-19T15:06:54.000Z
|
MolKit/data/allct_dat.py
|
e-mayo/autodocktools-prepare-py3k
|
2dd2316837bcb7c19384294443b2855e5ccd3e01
|
[
"BSD-3-Clause"
] | 4
|
2019-08-28T23:11:39.000Z
|
2021-11-27T08:43:36.000Z
|
allct_dat = {
"TYR": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"INTX,KFORM":['INT', '1'],
"HD2":{'torsion': 180.0, 'tree': 'E', 'NC': 16, 'NB': 19, 'NA': 21, 'I': 22, 'angle': 120.0, 'blen': 1.09, 'charge': 0.064, 'type': 'HC'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"OH":{'torsion': 180.0, 'tree': 'S', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 120.0, 'blen': 1.36, 'charge': -0.528, 'type': 'OH'},
"HD1":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 120.0, 'blen': 1.09, 'charge': 0.064, 'type': 'HC'},
"HE1":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 15, 'angle': 120.0, 'blen': 1.09, 'charge': 0.102, 'type': 'HC'},
"HE2":{'torsion': 180.0, 'tree': 'E', 'NC': 14, 'NB': 16, 'NA': 19, 'I': 20, 'angle': 120.0, 'blen': 1.09, 'charge': 0.102, 'type': 'HC'},
"CD2":{'torsion': 0.0, 'tree': 'S', 'NC': 14, 'NB': 16, 'NA': 19, 'I': 21, 'angle': 120.0, 'blen': 1.4, 'charge': -0.002, 'type': 'CA'},
"NAMRES":'TYROSINE COO- ANION',
"CD1":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 120.0, 'blen': 1.4, 'charge': -0.002, 'type': 'CA'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'CD1', 'HD1', 'CE1', 'HE1', 'CZ', 'OH', 'HH', 'CE2', 'HE2', 'CD2', 'HD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CE1":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 14, 'angle': 120.0, 'blen': 1.4, 'charge': -0.264, 'type': 'CA'},
"CE2":{'torsion': 0.0, 'tree': 'B', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 19, 'angle': 120.0, 'blen': 1.4, 'charge': -0.264, 'type': 'CA'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"HH":{'torsion': 0.0, 'tree': 'E', 'NC': 14, 'NB': 16, 'NA': 17, 'I': 18, 'angle': 113.0, 'blen': 0.96, 'charge': 0.334, 'type': 'HO'},
"CZ":{'torsion': 0.0, 'tree': 'B', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 16, 'angle': 120.0, 'blen': 1.4, 'charge': 0.462, 'type': 'C'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 23, 'I': 24, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.51, 'charge': -0.03, 'type': 'CA'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 23, 'I': 25, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"loopList":[['CG', 'CD2']],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 23, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"ASN": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'OD1', 'ND2', 'HD21', 'HD22', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"ND2":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 116.6, 'blen': 1.335, 'charge': -0.867, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 16, 'I': 17, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.086, 'type': 'CT'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CB', 'ND2', 'CG', 'OD1'], ['CG', 'HD21', 'ND2', 'HD22']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'B', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 111.1, 'blen': 1.522, 'charge': 0.675, 'type': 'C'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HD21":{'torsion': 180.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 13, 'I': 14, 'angle': 119.8, 'blen': 1.01, 'charge': 0.344, 'type': 'H'},
"OD1":{'torsion': 0.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 120.5, 'blen': 1.229, 'charge': -0.47, 'type': 'O'},
"HD22":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 13, 'I': 15, 'angle': 119.8, 'blen': 1.01, 'charge': 0.344, 'type': 'H'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 16, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 16, 'I': 18, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'ASPARAGINE COO- ANION',
},
"CYS": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'SG', 'HSG', 'LP1', 'LP2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"SG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 116.0, 'blen': 1.81, 'charge': 0.827, 'type': 'SH'},
"LP1":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 96.7, 'blen': 0.679, 'charge': -0.481, 'type': 'LP'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 15, 'I': 16, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.06, 'type': 'CT'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"LP2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 14, 'angle': 96.7, 'blen': 0.679, 'charge': -0.481, 'type': 'LP'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HSG":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 96.0, 'blen': 1.33, 'charge': 0.135, 'type': 'HS'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 15, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 15, 'I': 17, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'CYSTEINE COO- ANION',
},
"ARG": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.056, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.056, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['NE', 'NH1', 'CZ', 'NH2'], ['CD', 'CZ', 'NE', 'HE'], ['CZ', 'HH12', 'NH1', 'HH11'], ['CZ', 'HH22', 'NH2', 'HH21']],
"HH11":{'torsion': 0.0, 'tree': 'E', 'NC': 17, 'NB': 19, 'NA': 20, 'I': 21, 'angle': 119.8, 'blen': 1.01, 'charge': 0.361, 'type': 'H3'},
"HH12":{'torsion': 180.0, 'tree': 'E', 'NC': 17, 'NB': 19, 'NA': 20, 'I': 22, 'angle': 119.8, 'blen': 1.01, 'charge': 0.361, 'type': 'H3'},
"HH21":{'torsion': 0.0, 'tree': 'E', 'NC': 17, 'NB': 19, 'NA': 23, 'I': 24, 'angle': 119.8, 'blen': 1.01, 'charge': 0.361, 'type': 'H3'},
"HH22":{'torsion': 180.0, 'tree': 'E', 'NC': 17, 'NB': 19, 'NA': 23, 'I': 25, 'angle': 119.8, 'blen': 1.01, 'charge': 0.361, 'type': 'H3'},
"INTX,KFORM":['INT', '1'],
"NE":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 17, 'angle': 111.0, 'blen': 1.48, 'charge': -0.324, 'type': 'N2'},
"HG2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.074, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HD2":{'torsion': 300.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 15, 'angle': 109.5, 'blen': 1.09, 'charge': 0.133, 'type': 'HC'},
"HD3":{'torsion': 60.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 16, 'angle': 109.5, 'blen': 1.09, 'charge': 0.133, 'type': 'HC'},
"NAMRES":'ARGININE COO- ANION',
"HE":{'torsion': 0.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 18, 'angle': 118.5, 'blen': 1.01, 'charge': 0.269, 'type': 'H3'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'HD2', 'HD3', 'NE', 'HE', 'CZ', 'NH1', 'HH11', 'HH12', 'NH2', 'HH21', 'HH22', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"NH2":{'torsion': 180.0, 'tree': 'B', 'NC': 14, 'NB': 17, 'NA': 19, 'I': 23, 'angle': 118.0, 'blen': 1.33, 'charge': -0.624, 'type': 'N2'},
"HG3":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.074, 'type': 'HC'},
"NH1":{'torsion': 0.0, 'tree': 'B', 'NC': 14, 'NB': 17, 'NA': 19, 'I': 20, 'angle': 122.0, 'blen': 1.33, 'charge': -0.624, 'type': 'N2'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"CZ":{'torsion': 180.0, 'tree': 'B', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 19, 'angle': 123.0, 'blen': 1.33, 'charge': 0.76, 'type': 'CA'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"CD":{'torsion': 180.0, 'tree': '3', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 14, 'angle': 109.47, 'blen': 1.525, 'charge': -0.228, 'type': 'CT'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 26, 'I': 27, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.525, 'charge': -0.103, 'type': 'CT'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.08, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 26, 'I': 28, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 26, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"LEU": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.033, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.033, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"INTX,KFORM":['INT', '1'],
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"NAMRES":'LEUCINE COO- ANION',
"HG":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG', 'CD1', 'HD11', 'HD12', 'HD13', 'CD2', 'HD21', 'HD22', 'HD23', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HD11":{'torsion': 60.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 13, 'I': 14, 'angle': 109.5, 'blen': 1.09, 'charge': 0.034, 'type': 'HC'},
"HD12":{'torsion': 180.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 13, 'I': 15, 'angle': 109.5, 'blen': 1.09, 'charge': 0.034, 'type': 'HC'},
"HD13":{'torsion': 300.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 13, 'I': 16, 'angle': 109.5, 'blen': 1.09, 'charge': 0.034, 'type': 'HC'},
"CD2":{'torsion': 180.0, 'tree': '3', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 17, 'angle': 109.47, 'blen': 1.525, 'charge': -0.107, 'type': 'CT'},
"CD1":{'torsion': 60.0, 'tree': '3', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.47, 'blen': 1.525, 'charge': -0.107, 'type': 'CT'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 21, 'I': 22, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.525, 'charge': -0.01, 'type': 'CT'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.061, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 21, 'I': 23, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"HD21":{'torsion': 60.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 17, 'I': 18, 'angle': 109.5, 'blen': 1.09, 'charge': 0.034, 'type': 'HC'},
"HD23":{'torsion': 300.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 17, 'I': 20, 'angle': 109.5, 'blen': 1.09, 'charge': 0.034, 'type': 'HC'},
"HD22":{'torsion': 180.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 17, 'I': 19, 'angle': 109.5, 'blen': 1.09, 'charge': 0.034, 'type': 'HC'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 21, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"HID": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"NE2":{'torsion': 0.0, 'tree': 'S', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 16, 'angle': 109.0, 'blen': 1.31, 'charge': -0.502, 'type': 'NB'},
"ND1":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 122.0, 'blen': 1.39, 'charge': -0.146, 'type': 'NA'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CG', 'CE1', 'ND1', 'HD1']],
"CE1":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 14, 'angle': 108.0, 'blen': 1.32, 'charge': 0.241, 'type': 'CR'},
"INTX,KFORM":['INT', '1'],
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HD1":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 126.0, 'blen': 1.01, 'charge': 0.228, 'type': 'H'},
"NAMRES":'HISTIDINE DELTAH COO- ANION',
"HE":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 15, 'angle': 120.0, 'blen': 1.09, 'charge': 0.036, 'type': 'HC'},
"HD":{'torsion': 180.0, 'tree': 'E', 'NC': 14, 'NB': 16, 'NA': 17, 'I': 18, 'angle': 120.0, 'blen': 1.09, 'charge': 0.018, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'ND1', 'HD1', 'CE1', 'HE', 'NE2', 'CD2', 'HD', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CD2":{'torsion': 0.0, 'tree': 'S', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 110.0, 'blen': 1.36, 'charge': 0.195, 'type': 'CV'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 20, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 115.0, 'blen': 1.51, 'charge': -0.032, 'type': 'CC'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 21, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"loopList":[['CG', 'CD2']],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 19, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"HIE": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"NE2":{'torsion': 0.0, 'tree': 'B', 'NC': 11, 'NB': 12, 'NA': 13, 'I': 15, 'angle': 109.0, 'blen': 1.31, 'charge': -0.146, 'type': 'NA'},
"ND1":{'torsion': 180.0, 'tree': 'S', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 122.0, 'blen': 1.39, 'charge': -0.502, 'type': 'NB'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CE1', 'CD2', 'NE2', 'HE2']],
"CE1":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 108.0, 'blen': 1.32, 'charge': 0.241, 'type': 'CR'},
"INTX,KFORM":['INT', '1'],
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HE2":{'torsion': 180.0, 'tree': 'E', 'NC': 12, 'NB': 13, 'NA': 15, 'I': 16, 'angle': 125.0, 'blen': 1.01, 'charge': 0.228, 'type': 'H'},
"NAMRES":'HISTIDINE EPSILON-H COO- ANION',
"HE":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 12, 'NA': 13, 'I': 14, 'angle': 120.0, 'blen': 1.09, 'charge': 0.036, 'type': 'HC'},
"HD":{'torsion': 180.0, 'tree': 'E', 'NC': 13, 'NB': 15, 'NA': 17, 'I': 18, 'angle': 120.0, 'blen': 1.09, 'charge': 0.114, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'ND1', 'CE1', 'HE', 'NE2', 'HE2', 'CD2', 'HD', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CD2":{'torsion': 0.0, 'tree': 'S', 'NC': 12, 'NB': 13, 'NA': 15, 'I': 17, 'angle': 110.0, 'blen': 1.36, 'charge': -0.184, 'type': 'CW'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 20, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 115.0, 'blen': 1.51, 'charge': 0.251, 'type': 'CC'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 21, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"loopList":[['CG', 'CD2']],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 19, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"MET": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.027, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.027, 'type': 'HC'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 21, 'I': 22, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"SD":{'torsion': 180.0, 'tree': '3', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 14, 'angle': 110.0, 'blen': 1.81, 'charge': 0.737, 'type': 'S'},
"LP1":{'torsion': 60.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 15, 'angle': 96.7, 'blen': 0.679, 'charge': -0.381, 'type': 'LP'},
"HG3":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0652, 'type': 'HC'},
"HG2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0652, 'type': 'HC'},
"INTX,KFORM":['INT', '1'],
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HE3":{'torsion': 300.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 20, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0652, 'type': 'HC'},
"HE2":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 19, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0652, 'type': 'HC'},
"HE1":{'torsion': 60.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 18, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0652, 'type': 'HC'},
"NAMRES":'METHIONINE COO- ANION',
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'SD', 'LP1', 'LP2', 'CE', 'HE1', 'HE2', 'HE3', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"CE":{'torsion': 180.0, 'tree': '3', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 17, 'angle': 100.0, 'blen': 1.78, 'charge': -0.134, 'type': 'CT'},
"CG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.525, 'charge': -0.054, 'type': 'CT'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.151, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 21, 'I': 23, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"LP2":{'torsion': 300.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 16, 'angle': 96.7, 'blen': 0.679, 'charge': -0.381, 'type': 'LP'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 21, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
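# Note: a few top-level keys ('IDBGEN,IREST,ITYPF' below, and 'NAMDBF'
# further down) are interleaved with the residue entries; they appear to
# carry the database-generation header of the original prep input rather
# than describing any single residue.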
"IDBGEN,IREST,ITYPF":['1', '1', '201'],
"ALA": { "HB2":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB1', 'HB2', 'HB3', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HB1":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 12, 'I': 13, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 12, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 12, 'I': 14, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'ALANINE COO- ANION',
},
"PHE": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"INTX,KFORM":['INT', '1'],
"HD2":{'torsion': 180.0, 'tree': 'E', 'NC': 16, 'NB': 18, 'NA': 20, 'I': 21, 'angle': 120.0, 'blen': 1.09, 'charge': 0.058, 'type': 'HC'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HD1":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 120.0, 'blen': 1.09, 'charge': 0.058, 'type': 'HC'},
"HE1":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 15, 'angle': 120.0, 'blen': 1.09, 'charge': 0.063, 'type': 'HC'},
"HE2":{'torsion': 180.0, 'tree': 'E', 'NC': 14, 'NB': 16, 'NA': 18, 'I': 19, 'angle': 120.0, 'blen': 1.09, 'charge': 0.063, 'type': 'HC'},
"CD2":{'torsion': 0.0, 'tree': 'S', 'NC': 14, 'NB': 16, 'NA': 18, 'I': 20, 'angle': 120.0, 'blen': 1.4, 'charge': -0.069, 'type': 'CA'},
"NAMRES":'PHENYLALANINE COO- ANION',
"CD1":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 120.0, 'blen': 1.4, 'charge': -0.069, 'type': 'CA'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'CD1', 'HD1', 'CE1', 'HE1', 'CZ', 'HZ', 'CE2', 'HE2', 'CD2', 'HD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CE1":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 14, 'angle': 120.0, 'blen': 1.4, 'charge': -0.059, 'type': 'CA'},
"CE2":{'torsion': 0.0, 'tree': 'B', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 18, 'angle': 120.0, 'blen': 1.4, 'charge': -0.059, 'type': 'CA'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"CZ":{'torsion': 0.0, 'tree': 'B', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 16, 'angle': 120.0, 'blen': 1.4, 'charge': -0.065, 'type': 'CA'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 22, 'I': 23, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 115.0, 'blen': 1.51, 'charge': 0.055, 'type': 'CA'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 22, 'I': 24, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"loopList":[['CG', 'CD2']],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 22, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"HZ":{'torsion': 180.0, 'tree': 'E', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 120.0, 'blen': 1.09, 'charge': 0.062, 'type': 'HC'},
},
"CYX": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0495, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0495, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'SG', 'LP1', 'LP2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"SG":{'torsion': 180.0, 'tree': 'B', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 116.0, 'blen': 1.81, 'charge': 0.824, 'type': 'S'},
"LP1":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 96.7, 'blen': 0.679, 'charge': -0.4045, 'type': 'LP'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"LP2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 96.7, 'blen': 0.679, 'charge': -0.4045, 'type': 'LP'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 14, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 16, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'CYSTINE(S-S BRIDGE) COO- ANION',
},
"PRO": { "HB2":{'torsion': 136.3, 'tree': 'E', 'NC': 5, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.061, 'type': 'HC'},
"HB3":{'torsion': 256.3, 'tree': 'E', 'NC': 5, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.061, 'type': 'HC'},
"impropTors":[['CA', 'OXT', 'C', 'O'], ['-M', 'CA', 'N', 'CD']],
"INTX,KFORM":['INT', '1'],
"HG3":{'torsion': 98.0, 'tree': 'E', 'NC': 4, 'NB': 5, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.063, 'type': 'HC'},
"HG2":{'torsion': 218.0, 'tree': 'E', 'NC': 4, 'NB': 5, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.063, 'type': 'HC'},
"CD":{'torsion': 356.1, 'tree': '3', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 126.1, 'blen': 1.458, 'charge': -0.012, 'type': 'CT'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"loopList":[['CA', 'CB']],
"HD2":{'torsion': 80.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 5, 'I': 6, 'angle': 109.5, 'blen': 1.09, 'charge': 0.06, 'type': 'HC'},
"HD3":{'torsion': 320.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 5, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.06, 'type': 'HC'},
"NAMRES":'PROLINE COO- ANION',
"atNameList":['N', 'CD', 'HD2', 'HD3', 'CG', 'HG2', 'HG3', 'CB', 'HB2', 'HB3', 'CA', 'HA', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HA":{'torsion': 81.1, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 14, 'I': 15, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 117.0, 'blen': 1.337, 'charge': -0.229, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 200.1, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 5, 'I': 8, 'angle': 103.2, 'blen': 1.5, 'charge': -0.121, 'type': 'CT'},
"CA":{'torsion': 175.2, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 14, 'angle': 120.6, 'blen': 1.451, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 338.3, 'tree': 'B', 'NC': 4, 'NB': 5, 'NA': 8, 'I': 11, 'angle': 106.0, 'blen': 1.51, 'charge': -0.115, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 14, 'NA': 16, 'I': 18, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 0.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 14, 'I': 16, 'angle': 111.1, 'blen': 1.522, 'charge': 0.438, 'type': 'C'},
},
"LYS": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HZ2":{'torsion': 180.0, 'tree': 'E', 'NC': 14, 'NB': 17, 'NA': 20, 'I': 22, 'angle': 109.47, 'blen': 1.01, 'charge': 0.294, 'type': 'H3'},
"HZ3":{'torsion': 300.0, 'tree': 'E', 'NC': 14, 'NB': 17, 'NA': 20, 'I': 23, 'angle': 109.47, 'blen': 1.01, 'charge': 0.294, 'type': 'H3'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"HZ1":{'torsion': 60.0, 'tree': 'E', 'NC': 14, 'NB': 17, 'NA': 20, 'I': 21, 'angle': 109.47, 'blen': 1.01, 'charge': 0.294, 'type': 'H3'},
"NZ":{'torsion': 180.0, 'tree': '3', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 20, 'angle': 109.47, 'blen': 1.47, 'charge': -0.138, 'type': 'N3'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 24, 'I': 25, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"INTX,KFORM":['INT', '1'],
"HG3":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.116, 'type': 'HC'},
"HG2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.116, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HE3":{'torsion': 60.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 19, 'angle': 109.5, 'blen': 1.09, 'charge': 0.098, 'type': 'HC'},
"HE2":{'torsion': 300.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 18, 'angle': 109.5, 'blen': 1.09, 'charge': 0.098, 'type': 'HC'},
"HD2":{'torsion': 300.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 15, 'angle': 109.5, 'blen': 1.09, 'charge': 0.122, 'type': 'HC'},
"HD3":{'torsion': 60.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 16, 'angle': 109.5, 'blen': 1.09, 'charge': 0.122, 'type': 'HC'},
"NAMRES":'LYSINE COO- ANION',
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"CD":{'torsion': 180.0, 'tree': '3', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 14, 'angle': 109.47, 'blen': 1.525, 'charge': -0.18, 'type': 'CT'},
"CE":{'torsion': 180.0, 'tree': '3', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 17, 'angle': 109.47, 'blen': 1.525, 'charge': -0.038, 'type': 'CT'},
"CG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.525, 'charge': -0.16, 'type': 'CT'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 24, 'I': 26, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 24, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"NAMDBF":'db4.dat',
"SER": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.119, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.119, 'type': 'HC'},
"HG":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.47, 'blen': 0.96, 'charge': 0.31, 'type': 'HO'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'OG', 'HG', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 13, 'I': 14, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': 0.018, 'type': 'CT'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"OG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.43, 'charge': -0.55, 'type': 'OH'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 13, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 13, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'SERINE COO- ANION',
},
"ASP": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.071, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.071, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'OD1', 'OD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.398, 'type': 'CT'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CB', 'OD1', 'CG', 'OD2']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'B', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.527, 'charge': 0.714, 'type': 'C'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"OD2":{'torsion': 270.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 117.2, 'blen': 1.26, 'charge': -0.721, 'type': 'O2'},
"OD1":{'torsion': 90.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 117.2, 'blen': 1.26, 'charge': -0.721, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 14, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 16, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'ASPARTIC ACID COO- ANION',
},
"GLN": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"NE2":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 16, 'angle': 116.6, 'blen': 1.335, 'charge': -0.867, 'type': 'N'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CG', 'NE2', 'CD', 'OE1'], ['CD', 'HE21', 'NE2', 'HE22']],
"INTX,KFORM":['INT', '1'],
"HG3":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.057, 'type': 'HC'},
"HG2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.057, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"NAMRES":'GLUTAMINE COO- ANION',
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'OE1', 'NE2', 'HE21', 'HE22', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HE21":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 119.8, 'blen': 1.01, 'charge': 0.344, 'type': 'H'},
"HE22":{'torsion': 0.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 16, 'I': 18, 'angle': 119.8, 'blen': 1.01, 'charge': 0.344, 'type': 'H'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"CD":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 14, 'angle': 111.1, 'blen': 1.522, 'charge': 0.675, 'type': 'C'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 20, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.525, 'charge': -0.102, 'type': 'CT'},
"OE1":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.47, 'type': 'O'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 21, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 19, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"GLU": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.092, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.092, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CG', 'OE1', 'CD', 'OE2']],
"INTX,KFORM":['INT', '1'],
"HG3":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.071, 'type': 'HC'},
"HG2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.071, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"OE2":{'torsion': 270.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 16, 'angle': 117.2, 'blen': 1.26, 'charge': -0.721, 'type': 'O2'},
"NAMRES":'GLUTAMIC ACID COO- ANION',
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'OE1', 'OE2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"CD":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 14, 'angle': 109.47, 'blen': 1.527, 'charge': 0.714, 'type': 'C'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 17, 'I': 18, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.51, 'charge': -0.398, 'type': 'CT'},
"OE1":{'torsion': 90.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 15, 'angle': 117.2, 'blen': 1.26, 'charge': -0.721, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.184, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 17, 'I': 19, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 17, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"TRP": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 26, 'I': 28, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"HZ2":{'torsion': 0.0, 'tree': 'E', 'NC': 14, 'NB': 16, 'NA': 17, 'I': 18, 'angle': 120.0, 'blen': 1.09, 'charge': 0.084, 'type': 'HC'},
"HZ3":{'torsion': 180.0, 'tree': 'E', 'NC': 17, 'NB': 19, 'NA': 21, 'I': 22, 'angle': 120.0, 'blen': 1.09, 'charge': 0.057, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CD1', 'CE2', 'NE1', 'HE1'], ['CE2', 'CH2', 'CZ2', 'HZ2'], ['CZ2', 'CZ3', 'CH2', 'HH2'], ['CH2', 'CE3', 'CZ3', 'HZ3'], ['CZ3', 'CD2', 'CE3', 'HE3']],
"CH2":{'torsion': 180.0, 'tree': 'B', 'NC': 14, 'NB': 16, 'NA': 17, 'I': 19, 'angle': 116.0, 'blen': 1.39, 'charge': -0.077, 'type': 'CA'},
"CZ3":{'torsion': 0.0, 'tree': 'B', 'NC': 16, 'NB': 17, 'NA': 19, 'I': 21, 'angle': 121.0, 'blen': 1.35, 'charge': -0.066, 'type': 'CA'},
"NE1":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 14, 'angle': 107.0, 'blen': 1.43, 'charge': -0.352, 'type': 'NA'},
"INTX,KFORM":['INT', '1'],
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HE3":{'torsion': 180.0, 'tree': 'E', 'NC': 19, 'NB': 21, 'NA': 23, 'I': 24, 'angle': 120.0, 'blen': 1.09, 'charge': 0.086, 'type': 'HC'},
"HD1":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 120.0, 'blen': 1.09, 'charge': 0.093, 'type': 'HC'},
"HE1":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 15, 'angle': 125.5, 'blen': 1.01, 'charge': 0.271, 'type': 'H'},
"NAMRES":'TRYPTOPHAN COO- ANION',
"CD1":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 127.0, 'blen': 1.34, 'charge': 0.044, 'type': 'CW'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'CD1', 'HD1', 'NE1', 'HE1', 'CE2', 'CZ2', 'HZ2', 'CH2', 'HH2', 'CZ3', 'HZ3', 'CE3', 'HE3', 'CD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CD2":{'torsion': 0.0, 'tree': 'E', 'NC': 19, 'NB': 21, 'NA': 23, 'I': 25, 'angle': 117.0, 'blen': 1.4, 'charge': 0.146, 'type': 'CB'},
"CE2":{'torsion': 0.0, 'tree': 'S', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 16, 'angle': 109.0, 'blen': 1.31, 'charge': 0.154, 'type': 'CN'},
"CE3":{'torsion': 0.0, 'tree': 'B', 'NC': 17, 'NB': 19, 'NA': 21, 'I': 23, 'angle': 122.0, 'blen': 1.41, 'charge': -0.173, 'type': 'CA'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 26, 'I': 27, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 115.0, 'blen': 1.51, 'charge': -0.135, 'type': 'C*'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"CZ2":{'torsion': 180.0, 'tree': 'B', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 128.0, 'blen': 1.4, 'charge': -0.168, 'type': 'CA'},
"loopList":[['CG', 'CD2'], ['CE2', 'CD2']],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 26, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"HH2":{'torsion': 180.0, 'tree': 'E', 'NC': 16, 'NB': 17, 'NA': 19, 'I': 20, 'angle': 120.0, 'blen': 1.09, 'charge': 0.074, 'type': 'HC'},
},
"GLY": { "HA3":{'torsion': 60.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 109.5, 'blen': 1.09, 'charge': 0.032, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA2', 'HA3', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HA2":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.032, 'type': 'HC'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 9, 'I': 10, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 9, 'angle': 110.4, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 9, 'I': 11, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'GLYCINE COO- ANION',
},
"THR": { "atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB', 'CG2', 'HG21', 'HG22', 'HG23', 'OG1', 'HG1', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HG23":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.065, 'type': 'HC'},
"HB":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.082, 'type': 'HC'},
"HG22":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.065, 'type': 'HC'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': 0.17, 'type': 'CT'},
"HG1":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 15, 'angle': 109.47, 'blen': 0.96, 'charge': 0.31, 'type': 'HO'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"HG21":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 11, 'angle': 109.5, 'blen': 1.09, 'charge': 0.065, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"OG1":{'torsion': 60.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 14, 'angle': 109.47, 'blen': 1.43, 'charge': -0.55, 'type': 'OH'},
"CG2":{'torsion': 300.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.47, 'blen': 1.525, 'charge': -0.191, 'type': 'CT'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 16, 'I': 17, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 16, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 16, 'I': 18, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'THREONINE COO- ANION',
},
"HIP": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.086, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.086, 'type': 'HC'},
"NE2":{'torsion': 0.0, 'tree': 'B', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 16, 'angle': 109.0, 'blen': 1.31, 'charge': -0.058, 'type': 'NA'},
"ND1":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 122.0, 'blen': 1.39, 'charge': -0.058, 'type': 'NA'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CG', 'CE1', 'ND1', 'HD1'], ['CE1', 'CD2', 'NE2', 'HE2']],
"CE1":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 14, 'angle': 108.0, 'blen': 1.32, 'charge': 0.114, 'type': 'CR'},
"INTX,KFORM":['INT', '1'],
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HD1":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 126.0, 'blen': 1.01, 'charge': 0.306, 'type': 'H'},
"HE2":{'torsion': 180.0, 'tree': 'E', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 125.0, 'blen': 1.01, 'charge': 0.306, 'type': 'H'},
"NAMRES":'HISTIDINE PLUS COO-',
"HE":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 15, 'angle': 120.0, 'blen': 1.09, 'charge': 0.158, 'type': 'HC'},
"HD":{'torsion': 180.0, 'tree': 'E', 'NC': 14, 'NB': 16, 'NA': 18, 'I': 19, 'angle': 120.0, 'blen': 1.09, 'charge': 0.153, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'ND1', 'HD1', 'CE1', 'HE', 'NE2', 'HE2', 'CD2', 'HD', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CD2":{'torsion': 0.0, 'tree': 'S', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 18, 'angle': 110.0, 'blen': 1.36, 'charge': -0.037, 'type': 'CW'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 20, 'I': 21, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 115.0, 'blen': 1.51, 'charge': 0.058, 'type': 'CC'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 20, 'I': 22, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"loopList":[['CG', 'CD2']],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 20, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"VAL": { "HG22":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 16, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"HG23":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 17, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"HG21":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 15, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"HG13":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"HG12":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"HG11":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 11, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"INTX,KFORM":['INT', '1'],
"CG2":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 14, 'angle': 109.47, 'blen': 1.525, 'charge': -0.091, 'type': 'CT'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CG1":{'torsion': 60.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.47, 'blen': 1.525, 'charge': -0.091, 'type': 'CT'},
"NAMRES":'VALINE COO- ANION',
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB', 'CG1', 'HG11', 'HG12', 'HG13', 'CG2', 'HG21', 'HG22', 'HG23', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HB":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.024, 'type': 'HC'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 18, 'I': 19, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.012, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 18, 'I': 20, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 18, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"ILE": { "HG22":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.029, 'type': 'HC'},
"HG23":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.029, 'type': 'HC'},
"HG21":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 11, 'angle': 109.5, 'blen': 1.09, 'charge': 0.029, 'type': 'HC'},
"HD13":{'torsion': 300.0, 'tree': 'E', 'NC': 8, 'NB': 14, 'NA': 17, 'I': 20, 'angle': 109.5, 'blen': 1.09, 'charge': 0.028, 'type': 'HC'},
"HG13":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 16, 'angle': 109.5, 'blen': 1.09, 'charge': 0.027, 'type': 'HC'},
"HG12":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 15, 'angle': 109.5, 'blen': 1.09, 'charge': 0.027, 'type': 'HC'},
"INTX,KFORM":['INT', '1'],
"CG2":{'torsion': 60.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.47, 'blen': 1.525, 'charge': -0.085, 'type': 'CT'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CG1":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 14, 'angle': 109.47, 'blen': 1.525, 'charge': -0.049, 'type': 'CT'},
"NAMRES":'ISOLEUCINE COO- ANION',
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB', 'CG2', 'HG21', 'HG22', 'HG23', 'CG1', 'HG12', 'HG13', 'CD1', 'HD11', 'HD12', 'HD13', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HD11":{'torsion': 60.0, 'tree': 'E', 'NC': 8, 'NB': 14, 'NA': 17, 'I': 18, 'angle': 109.5, 'blen': 1.09, 'charge': 0.028, 'type': 'HC'},
"HD12":{'torsion': 180.0, 'tree': 'E', 'NC': 8, 'NB': 14, 'NA': 17, 'I': 19, 'angle': 109.5, 'blen': 1.09, 'charge': 0.028, 'type': 'HC'},
"HB":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.022, 'type': 'HC'},
"CD1":{'torsion': 180.0, 'tree': '3', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 17, 'angle': 109.47, 'blen': 1.525, 'charge': -0.085, 'type': 'CT'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 21, 'I': 22, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 109.47, 'blen': 1.525, 'charge': -0.012, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 21, 'I': 23, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 21, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
},
"filename":'allct.in',
}
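# A minimal lookup sketch for the residue-parameter dict above (the
# top-level dict's name is outside this fragment, so `allct_params` below
# is a hypothetical binding):
#
#     allct_params['VAL']['CA']['charge']   # -> 0.035
#     allct_params['HIP']['impropTors']     # improper torsion quadruples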
| 116.274411
| 251
| 0.447768
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 37,382
| 0.541243
|
678c13af2d3d4847271449c6ae5791e470d46e78
| 39,961
|
py
|
Python
|
chi/_mechanistic_models.py
|
DavAug/erlotinib
|
9d113257de52b56359ed6451ba7db455645315d1
|
[
"BSD-3-Clause"
] | null | null | null |
chi/_mechanistic_models.py
|
DavAug/erlotinib
|
9d113257de52b56359ed6451ba7db455645315d1
|
[
"BSD-3-Clause"
] | 221
|
2020-11-06T13:03:32.000Z
|
2021-07-30T08:17:58.000Z
|
chi/_mechanistic_models.py
|
DavAug/erlotinib
|
9d113257de52b56359ed6451ba7db455645315d1
|
[
"BSD-3-Clause"
] | 1
|
2021-02-10T13:03:58.000Z
|
2021-02-10T13:03:58.000Z
|
#
# This file is part of the chi repository
# (https://github.com/DavAug/chi/) which is released under the
# BSD 3-clause license. See accompanying LICENSE.md for copyright notice and
# full license details.
#
import copy
import myokit
import myokit.formats.sbml as sbml
import numpy as np
class MechanisticModel(object):
"""
A base class for models that are specified by sbml files.
Parameters
----------
sbml_file
A path to the SBML model file that specifies the model.
"""
def __init__(self, sbml_file):
super(MechanisticModel, self).__init__()
# Import model
self._model = sbml.SBMLImporter().model(sbml_file)
# Set default number and names of states, parameters and outputs.
self._set_number_and_names()
# Get time unit
self._time_unit = self._get_time_unit()
# Create simulator without sensitivities
# (intentionally public property)
self.simulator = myokit.Simulation(self._model)
self._has_sensitivities = False
def _get_time_unit(self):
"""
Gets the model's time unit.
"""
# Get bound variables
bound_variables = [var for var in self._model.variables(bound=True)]
# Get the variable that is bound to time
# (only one can exist in myokit.Model)
for var in bound_variables:
if var._binding == 'time':
return var.unit()
def _set_const(self, parameters):
"""
Sets values of constant model parameters.
"""
for id_var, var in enumerate(self._const_names):
self.simulator.set_constant(var, float(parameters[id_var]))
def _set_state(self, parameters):
"""
Sets initial values of states.
"""
parameters = np.array(parameters)
parameters = parameters[self._original_order]
self.simulator.set_state(parameters)
def _set_number_and_names(self):
"""
Sets the number of states, parameters and outputs, as well as their
        names, based on ``self._model``.
"""
# Get the number of states and parameters
self._n_states = self._model.count_states()
n_const = self._model.count_variables(const=True)
self._n_parameters = self._n_states + n_const
# Get constant variable names and state names
names = [var.qname() for var in self._model.states()]
self._state_names = sorted(names)
self._const_names = sorted(
[var.qname() for var in self._model.variables(const=True)])
# Remember original order of state names for simulation
order_after_sort = np.argsort(names)
self._original_order = np.argsort(order_after_sort)
# Set default parameter names
self._parameter_names = self._state_names + self._const_names
# Set default outputs
self._output_names = self._state_names
self._n_outputs = self._n_states
        # Map original myokit parameter and output names to displayed names
        # (defaults to identity map)
# (Key: myokit name, value: displayed name)
self._parameter_name_map = dict(
zip(self._parameter_names, self._parameter_names))
self._output_name_map = dict(
zip(self._output_names, self._output_names))
def copy(self):
"""
Returns a deep copy of the mechanistic model.
.. note::
Copying the model resets the sensitivity settings.
"""
# Copy model manually and get protocol
myokit_model = self._model.clone()
protocol = self.simulator._protocol
# Copy the mechanistic model
model = copy.deepcopy(self)
# Replace myokit model by safe copy and create simulator
model._model = myokit_model
model.simulator = myokit.Simulation(myokit_model, protocol)
return model
def enable_sensitivities(self, enabled, parameter_names=None):
"""
Enables the computation of the model output sensitivities to the model
parameters if set to ``True``.
The sensitivities are computed using the forward sensitivities method,
where an ODE for each sensitivity is derived. The sensitivities are
        returned together with the solution to the original system of ODEs
        when simulating the mechanistic model with :meth:`simulate`.
The optional parameter names argument can be used to set which
sensitivities are computed. By default the sensitivities to all
parameters are computed.
:param enabled: A boolean flag which enables (``True``) / disables
(``False``) the computation of sensitivities.
:type enabled: bool
:param parameter_names: A list of parameter names of the model. If
``None`` sensitivities for all parameters are computed.
:type parameter_names: list[str], optional
"""
enabled = bool(enabled)
# Get dosing regimen from existing simulator
protocol = self.simulator._protocol
if not enabled:
if self._has_sensitivities:
# Disable sensitivities
sim = myokit.Simulation(self._model, protocol)
self.simulator = sim
self._has_sensitivities = False
return None
# Sensitivities are already disabled
return None
# Get parameters whose output sensitivities are computed
parameters = []
for param_id, param in enumerate(self._parameter_names):
if param_id < self._n_states:
# Convert initial value parameters to the correct syntax
parameters.append('init(' + param + ')')
continue
# Other parameters can be appended without modification
parameters.append(param)
if parameter_names is not None:
# Get myokit names for input parameter names
container = []
for index, public_name in enumerate(
self._parameter_name_map.values()):
if public_name in parameter_names:
container.append(parameters[index])
parameters = container
if not parameters:
raise ValueError(
'None of the parameters could be identified. The valid '
'parameter names are <' + str(self._parameter_names) + '>.')
# Create simulator
sensitivities = (self._output_names, parameters)
sim = myokit.Simulation(self._model, protocol, sensitivities)
# Update simulator and sensitivity state
self.simulator = sim
self._has_sensitivities = True
def has_sensitivities(self):
"""
Returns a boolean indicating whether sensitivities have been enabled.
"""
return self._has_sensitivities
def n_outputs(self):
"""
Returns the number of output dimensions.
By default this is the number of states.
"""
return self._n_outputs
def n_parameters(self):
"""
Returns the number of parameters in the model.
Parameters of the model are initial state values and structural
parameter values.
"""
return self._n_parameters
def outputs(self):
"""
Returns the output names of the model.
"""
# Get user specified output names
output_names = [
self._output_name_map[name] for name in self._output_names]
return output_names
def parameters(self):
"""
Returns the parameter names of the model.
"""
# Get user specified parameter names
parameter_names = [
self._parameter_name_map[name] for name in self._parameter_names]
return parameter_names
def set_outputs(self, outputs):
"""
Sets outputs of the model.
The outputs can be set to any quantifiable variable name of the
:class:`myokit.Model`, e.g. `compartment.variable`.
.. note::
Setting outputs resets the sensitivity settings (by default
            sensitivities are disabled).
:param outputs:
A list of output names.
:type outputs: list[str]
"""
outputs = list(outputs)
# Translate public names to myokit names, if set previously
for myokit_name, public_name in self._output_name_map.items():
if public_name in outputs:
# Replace public name by myokit name
index = outputs.index(public_name)
outputs[index] = myokit_name
# Check that outputs are valid
for output in outputs:
try:
var = self.simulator._model.get(output)
if not (var.is_state() or var.is_intermediary()):
raise ValueError(
'Outputs have to be state or intermediary variables.')
except KeyError:
raise KeyError(
'The variable <' + str(output) + '> does not exist in the '
'model.')
# Remember outputs
self._output_names = outputs
self._n_outputs = len(outputs)
# Create an updated output name map
output_name_map = {}
for myokit_name in self._output_names:
try:
output_name_map[myokit_name] = self._output_name_map[
myokit_name]
except KeyError:
# The output did not exist before, so create an identity map
output_name_map[myokit_name] = myokit_name
self._output_name_map = output_name_map
# Disable sensitivities
self.enable_sensitivities(False)
def set_output_names(self, names):
"""
Assigns names to the model outputs. By default the
:class:`myokit.Model` names are assigned to the outputs.
:param names: A dictionary that maps the current output names to new
names.
:type names: dict[str, str]
"""
if not isinstance(names, dict):
raise TypeError(
                'Names has to be a dictionary with the current output names '
'as keys and the new output names as values.')
# Check that new output names are unique
new_names = list(names.values())
n_unique_new_names = len(set(names.values()))
if len(new_names) != n_unique_new_names:
raise ValueError(
'The new output names have to be unique.')
# Check that new output names do not exist already
for new_name in new_names:
if new_name in list(self._output_name_map.values()):
raise ValueError(
'The output names cannot coincide with existing '
'output names. One output is already called '
'<' + str(new_name) + '>.')
# Replace currently displayed names by new names
for myokit_name in self._output_names:
old_name = self._output_name_map[myokit_name]
try:
new_name = names[old_name]
self._output_name_map[myokit_name] = str(new_name)
except KeyError:
# KeyError indicates that the current output is not being
# renamed.
pass
def set_parameter_names(self, names):
"""
Assigns names to the parameters. By default the :class:`myokit.Model`
names are assigned to the parameters.
:param names: A dictionary that maps the current parameter names to new
names.
:type names: dict[str, str]
"""
if not isinstance(names, dict):
raise TypeError(
                'Names has to be a dictionary with the current parameter names '
'as keys and the new parameter names as values.')
# Check that new parameter names are unique
new_names = list(names.values())
n_unique_new_names = len(set(names.values()))
if len(new_names) != n_unique_new_names:
raise ValueError(
'The new parameter names have to be unique.')
# Check that new parameter names do not exist already
for new_name in new_names:
if new_name in list(self._parameter_name_map.values()):
raise ValueError(
'The parameter names cannot coincide with existing '
'parameter names. One parameter is already called '
'<' + str(new_name) + '>.')
# Replace currently displayed names by new names
for myokit_name in self._parameter_names:
old_name = self._parameter_name_map[myokit_name]
try:
new_name = names[old_name]
self._parameter_name_map[myokit_name] = str(new_name)
except KeyError:
# KeyError indicates that the current parameter is not being
# renamed.
pass
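    # A hedged usage sketch for the renaming methods above (the
    # myokit-style qualified names below are hypothetical and depend on
    # the SBML file):
    #
    #     model.set_parameter_names({'myokit.lambda': 'Growth rate'})
    #     model.set_output_names({'myokit.tumour_volume': 'Tumour volume'})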
def simulate(self, parameters, times):
"""
Returns the numerical solution of the model outputs (and optionally
        the sensitivities) for the specified parameters and times.
The model outputs are returned as a 2 dimensional NumPy array of shape
(n_outputs, n_times). If sensitivities are enabled, a tuple is returned
with the NumPy array of the model outputs and a NumPy array of the
sensitivities of shape (n_times, n_outputs, n_parameters).
:param parameters: An array-like object with values for the model
parameters.
:type parameters: list, numpy.ndarray
:param times: An array-like object with time points at which the output
values are returned.
:type times: list, numpy.ndarray
"""
# Reset simulation
self.simulator.reset()
# Set initial conditions
self._set_state(parameters[:self._n_states])
# Set constant model parameters
self._set_const(parameters[self._n_states:])
# Simulate
if not self._has_sensitivities:
output = self.simulator.run(
times[-1] + 1, log=self._output_names, log_times=times)
output = np.array([output[name] for name in self._output_names])
return output
output, sensitivities = self.simulator.run(
times[-1] + 1, log=self._output_names, log_times=times)
output = np.array([output[name] for name in self._output_names])
sensitivities = np.array(sensitivities)
return output, sensitivities
def time_unit(self):
"""
Returns the model's unit of time.
"""
return self._time_unit
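# A minimal usage sketch for MechanisticModel (assuming a hypothetical SBML
# file 'model.xml'):
#
#     model = MechanisticModel('model.xml')
#     model.enable_sensitivities(True)
#     output, sens = model.simulate(
#         parameters=[1.0] * model.n_parameters(), times=[0, 1, 2, 3])
#
# Here `output` has shape (n_outputs, n_times) and `sens` has shape
# (n_times, n_outputs, n_parameters), as documented in `simulate`.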
class PharmacodynamicModel(MechanisticModel):
"""
Converts a pharmacodynamic model specified by an SBML file into a forward
model that can be solved numerically.
Extends :class:`MechanisticModel`.
Parameters
----------
sbml_file
A path to the SBML model file that specifies the pharmacodynamic model.
"""
def __init__(self, sbml_file):
super(PharmacodynamicModel, self).__init__(sbml_file)
# Set default pharmacokinetic input variable
# (Typically drug concentration)
self._pk_input = None
if self._model.has_variable('myokit.drug_concentration'):
self._pk_input = 'myokit.drug_concentration'
def pk_input(self):
"""
Returns the pharmacokinetic input variable. In most models this will be
the concentration of the drug.
        Defaults to ``myokit.drug_concentration`` if that variable is among
        the model parameters, and to ``None`` otherwise.
"""
return self._pk_input
def set_pk_input(self, name):
"""
Sets the pharmacokinetic input variable. In most models this will be
the concentration of the drug.
The name has to match a parameter of the model.
"""
if name not in self._parameter_names:
raise ValueError(
'The name does not match a model parameter.')
self._pk_input = name
class PharmacokineticModel(MechanisticModel):
"""
Converts a pharmacokinetic model specified by an SBML file into a forward
model that can be solved numerically.
Extends :class:`MechanisticModel`.
Parameters
----------
sbml_file
A path to the SBML model file that specifies the pharmacokinetic model.
"""
def __init__(self, sbml_file):
super(PharmacokineticModel, self).__init__(sbml_file)
# Set default dose administration
self._administration = None
        # Save a vanilla copy of the model
self._vanilla_model = self._model.clone()
# Set default output variable that interacts with the pharmacodynamic
# model
# (Typically drug concentration in central compartment)
self._pd_output = None
if self._model.has_variable('central.drug_concentration'):
self._pd_output = 'central.drug_concentration'
# Set default output to pd output if not None
if self._pd_output is not None:
self.set_outputs([self._pd_output])
def _add_dose_compartment(self, model, drug_amount):
"""
Adds a dose compartment to the model with a linear absorption rate to
the connected compartment.
"""
# Add a dose compartment to the model
dose_comp = model.add_component_allow_renaming('dose')
# Create a state variable for the drug amount in the dose compartment
dose_drug_amount = dose_comp.add_variable('drug_amount')
dose_drug_amount.set_rhs(0)
dose_drug_amount.set_unit(drug_amount.unit())
dose_drug_amount.promote()
# Create an absorption rate variable
absorption_rate = dose_comp.add_variable('absorption_rate')
absorption_rate.set_rhs(1)
absorption_rate.set_unit(1 / self.time_unit())
# Add outflow expression to dose compartment
dose_drug_amount.set_rhs(
myokit.Multiply(
myokit.PrefixMinus(myokit.Name(absorption_rate)),
myokit.Name(dose_drug_amount)
)
)
# Add inflow expression to connected compartment
rhs = drug_amount.rhs()
drug_amount.set_rhs(
myokit.Plus(
rhs,
myokit.Multiply(
myokit.Name(absorption_rate),
myokit.Name(dose_drug_amount)
)
)
)
# Update number of parameters and states, as well as their names
self._model = model
self._set_number_and_names()
# Set default output to pd_output if it is not None
if self._pd_output is not None:
self.set_outputs([self._pd_output])
return model, dose_drug_amount
def _add_dose_rate(self, compartment, drug_amount):
"""
Adds a dose rate variable to the state variable, which is bound to the
dosing regimen.
"""
# Register a dose rate variable to the compartment and bind it to
# pace, i.e. tell myokit that its value is set by the dosing regimen/
# myokit.Protocol
dose_rate = compartment.add_variable_allow_renaming(
str('dose_rate'))
dose_rate.set_binding('pace')
# Set initial value to 0 and unit to unit of drug amount over unit of
# time
dose_rate.set_rhs(0)
dose_rate.set_unit(drug_amount.unit() / self.time_unit())
# Add the dose rate to the rhs of the drug amount variable
rhs = drug_amount.rhs()
drug_amount.set_rhs(
myokit.Plus(
rhs,
myokit.Name(dose_rate)
)
)
def administration(self):
"""
Returns the mode of administration in form of a dictionary.
The dictionary has the keys 'compartment' and 'direct'. The former
provides information about which compartment is dosed, and the latter
        whether the dose is administered directly or indirectly to the
compartment.
"""
return self._administration
def dosing_regimen(self):
"""
Returns the dosing regimen of the compound in form of a
:class:`myokit.Protocol`. If the protocol has not been set, ``None`` is
returned.
"""
return self.simulator._protocol
def set_administration(
self, compartment, amount_var='drug_amount', direct=True):
r"""
Sets the route of administration of the compound.
The compound is administered to the selected compartment either
directly or indirectly. If it is administered directly, a dose rate
variable is added to the drug amount's rate of change expression
.. math ::
\frac{\text{d}A}{\text{d}t} = \text{RHS} + r_d,
where :math:`A` is the drug amount in the selected compartment, RHS is
the rate of change of :math:`A` prior to adding the dose rate, and
:math:`r_d` is the dose rate.
The dose rate can be set by :meth:`set_dosing_regimen`.
If the route of administration is indirect, a dosing compartment
is added to the model, which is connected to the selected compartment.
The dose rate variable is then added to the rate of change expression
of the dose amount variable in the dosing compartment. The drug amount
in the dosing compartment flows at a linear absorption rate into the
selected compartment
.. math ::
\frac{\text{d}A_d}{\text{d}t} = -k_aA_d + r_d \\
\frac{\text{d}A}{\text{d}t} = \text{RHS} + k_aA_d,
where :math:`A_d` is the amount of drug in the dose compartment and
:math:`k_a` is the absorption rate.
Setting an indirect administration route changes the number of
parameters of the model, and resets the parameter names to their
defaults.
        .. note::
Setting the route of administration will reset the sensitivity
settings.
:param compartment: Compartment to which doses are either directly or
indirectly administered.
:type compartment: str
:param amount_var: Drug amount variable in the compartment. By default
the drug amount variable is assumed to be 'drug_amount'.
:type amount_var: str, optional
:param direct: A boolean flag that indicates whether the dose is
administered directly or indirectly to the compartment.
:type direct: bool, optional
"""
# Check inputs
model = self._vanilla_model.clone()
if not model.has_component(compartment):
raise ValueError(
'The model does not have a compartment named <'
+ str(compartment) + '>.')
comp = model.get(compartment, class_filter=myokit.Component)
if not comp.has_variable(amount_var):
raise ValueError(
'The drug amount variable <' + str(amount_var) + '> could not '
'be found in the compartment.')
drug_amount = comp.get(amount_var)
if not drug_amount.is_state():
raise ValueError(
'The variable <' + str(drug_amount) + '> is not a state '
'variable, and can therefore not be dosed.')
# If administration is indirect, add a dosing compartment and update
# the drug amount variable to the one in the dosing compartment
if not direct:
model, drug_amount = self._add_dose_compartment(model, drug_amount)
comp = model.get(compartment, class_filter=myokit.Component)
# Add dose rate variable to the right hand side of the drug amount
self._add_dose_rate(comp, drug_amount)
# Update model and simulator
# (otherwise simulator won't know about pace bound variable)
self._model = model
self.simulator = myokit.Simulation(model)
self._has_sensitivities = False
# Remember type of administration
self._administration = dict(
{'compartment': compartment, 'direct': direct})
def set_dosing_regimen(
self, dose, start, duration=0.01, period=None, num=None):
"""
Sets the dosing regimen with which the compound is administered.
The route of administration can be set with :meth:`set_administration`.
However, the type of administration, e.g. bolus injection or infusion,
may be controlled with the duration input.
By default the dose is administered as a bolus injection (duration on
a time scale that is 100 fold smaller than the basic time unit). To
model an infusion of the dose over a longer time period, the
``duration`` can be adjusted to the appropriate time scale.
By default the dose is administered once. To apply multiple doses
provide a dose administration period.
Parameters
----------
dose
The amount of the compound that is injected at each administration.
start
Start time of the treatment.
duration
Duration of dose administration. For a bolus injection, a dose
duration of 1% of the time unit should suffice. By default the
duration is set to 0.01 (bolus).
period
Periodicity at which doses are administered. If ``None`` the dose
is administered only once.
num
Number of administered doses. If ``None`` and the periodicity of
the administration is not ``None``, doses are administered
indefinitely.
"""
if self._administration is None:
raise ValueError(
'The route of administration of the dose has not been set.')
if num is None:
            # Myokit's default is zero, i.e. infinitely many doses
num = 0
if period is None:
# If period is not provided, we administer a single dose
            # Myokit's defaults are 0 for both.
period = 0
num = 0
# Translate dose to dose rate
dose_rate = dose / duration
# Set dosing regimen
dosing_regimen = myokit.pacing.blocktrain(
period=period, duration=duration, offset=start, level=dose_rate,
limit=num)
self.simulator.set_protocol(dosing_regimen)
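    # A hedged usage sketch of the two methods above (assumes a model with
    # a 'central' compartment holding a 'drug_amount' state variable):
    #
    #     model.set_administration('central', direct=False)
    #     model.set_dosing_regimen(dose=10, start=0, duration=0.01, period=24)
    #
    # administers a 10-unit bolus every 24 time units; internally the dose
    # is translated to a dose rate of dose / duration = 1000 per time unit.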
def pd_output(self):
"""
Returns the variable which interacts with the pharmacodynamic model.
In most models this will be the concentration of the drug in the
central compartment.
This variable is mapped to the
:meth:`chi.PharmacodynamicModel.pk_input` variable when a
:class:`PKPDModel` is instantiated.
        Defaults to ``central.drug_concentration`` if that variable is among
        the model parameters, and to ``None`` otherwise.
"""
return self._pd_output
def set_pd_output(self, name):
"""
Sets the variable which interacts with the pharmacodynamic model.
In most models this will be the concentration of the drug in the
central compartment.
The name has to match a parameter of the model.
This variable is mapped to the
:meth:`chi.PharmacodynamicModel.pk_input` variable when a
:class:`PKPDModel` is instantiated.
"""
# Get intermediate variable names
inter_names = [
var.qname() for var in self._model.variables(inter=True)]
names = inter_names + self._parameter_names
if name not in names:
raise ValueError(
'The name does not match a model variable.')
self._pd_output = name
class ReducedMechanisticModel(object):
"""
A class that can be used to permanently fix model parameters of a
:class:`MechanisticModel` instance.
This may be useful to explore simplified versions of a model before
defining a new SBML file.
Parameters
----------
mechanistic_model
An instance of a :class:`MechanisticModel`.
"""
def __init__(self, mechanistic_model):
super(ReducedMechanisticModel, self).__init__()
# Check input
if not isinstance(mechanistic_model, MechanisticModel):
raise ValueError(
'The mechanistic model has to be an instance of a '
'chi.MechanisticModel')
self._mechanistic_model = mechanistic_model
self.simulator = mechanistic_model.simulator
# Set defaults
self._fixed_params_mask = None
self._fixed_params_values = None
self._n_parameters = mechanistic_model.n_parameters()
self._parameter_names = mechanistic_model.parameters()
def copy(self):
"""
Returns a deep copy of the reduced model.
.. note::
Copying the model resets the sensitivity settings.
"""
# Get a safe copy of the mechanistic model
mechanistic_model = self._mechanistic_model.copy()
# Copy the reduced model
# (this possibly corrupts the mechanistic model and the
# simulator)
model = copy.deepcopy(self)
# Replace mechanistic model and simulator
model._mechanistic_model = mechanistic_model
model.simulator = mechanistic_model.simulator
return model
def dosing_regimen(self):
"""
Returns the dosing regimen of the compound in form of a
:class:`myokit.Protocol`. If the protocol has not been set, ``None`` is
returned.
If the model does not support dose administration, ``None`` is
returned.
"""
try:
return self._mechanistic_model.dosing_regimen()
except AttributeError:
return None
def enable_sensitivities(self, enabled):
"""
Enables the computation of the output sensitivities with respect to
the free model parameters.
"""
if not enabled:
self._mechanistic_model.enable_sensitivities(enabled)
return None
# Get free parameters
free_parameters = np.array(self._parameter_names)
if self._fixed_params_mask is not None:
free_parameters = free_parameters[~self._fixed_params_mask]
# Set sensitivities
self._mechanistic_model.enable_sensitivities(
enabled, free_parameters)
def fix_parameters(self, name_value_dict):
"""
        Fixes the value of model parameters, and effectively removes them as
        parameters from the model. Fixing the value of a parameter at
        ``None`` sets the parameter free again.
Parameters
----------
name_value_dict
A dictionary with model parameter names as keys, and parameter
values as values.
"""
# Check type
try:
name_value_dict = dict(name_value_dict)
except (TypeError, ValueError):
raise ValueError(
                'The name-value dictionary has to be convertible to a Python '
'dictionary.')
# If no model parameters have been fixed before, instantiate a mask
# and values
if self._fixed_params_mask is None:
self._fixed_params_mask = np.zeros(
shape=self._n_parameters, dtype=bool)
if self._fixed_params_values is None:
self._fixed_params_values = np.empty(shape=self._n_parameters)
# Update the mask and values
for index, name in enumerate(self._parameter_names):
try:
value = name_value_dict[name]
except KeyError:
# KeyError indicates that parameter name is not being fixed
continue
# Fix parameter if value is not None, else unfix it
self._fixed_params_mask[index] = value is not None
self._fixed_params_values[index] = value
# If all parameters are free, set mask and values to None again
        if np.all(~self._fixed_params_mask):
self._fixed_params_mask = None
self._fixed_params_values = None
# Remove sensitivities for fixed parameters
if self.has_sensitivities() is True:
self.enable_sensitivities(True)
def has_sensitivities(self):
"""
Returns a boolean indicating whether sensitivities have been enabled.
"""
return self._mechanistic_model.has_sensitivities()
def mechanistic_model(self):
"""
Returns the original mechanistic model.
"""
return self._mechanistic_model
def n_fixed_parameters(self):
"""
Returns the number of fixed model parameters.
"""
if self._fixed_params_mask is None:
return 0
n_fixed = int(np.sum(self._fixed_params_mask))
return n_fixed
def n_outputs(self):
"""
Returns the number of output dimensions.
By default this is the number of states.
"""
return self._mechanistic_model.n_outputs()
def n_parameters(self):
"""
Returns the number of parameters in the model.
Parameters of the model are initial state values and structural
parameter values.
"""
# Get number of fixed parameters
n_fixed = 0
if self._fixed_params_mask is not None:
n_fixed = int(np.sum(self._fixed_params_mask))
# Subtract fixed parameters from total number
n_parameters = self._n_parameters - n_fixed
return n_parameters
def outputs(self):
"""
Returns the output names of the model.
"""
return self._mechanistic_model.outputs()
def parameters(self):
"""
Returns the parameter names of the model.
"""
# Remove fixed model parameters
names = self._parameter_names
if self._fixed_params_mask is not None:
names = np.array(names)
names = names[~self._fixed_params_mask]
names = list(names)
return copy.copy(names)
def pd_output(self):
"""
Returns the variable which interacts with the pharmacodynamic model.
In most models this will be the concentration of the drug in the
central compartment.
This variable is mapped to the
:meth:`chi.PharmacodynamicModel.pk_input` variable when a
:class:`PKPDModel` is instantiated.
        Defaults to ``central.drug_concentration`` if that variable is among
        the model parameters, and to ``None`` otherwise.
If the model does not support a pd output, ``None`` is returned.
"""
try:
return self._mechanistic_model.pd_output()
except AttributeError:
return None
def pk_input(self):
"""
Returns the pharmacokinetic input variable. In most models this will be
the concentration of the drug.
        Defaults to ``myokit.drug_concentration`` if that variable is among
        the model parameters, and to ``None`` otherwise.
If the model does not support a pk input, ``None`` is returned.
"""
try:
return self._mechanistic_model.pk_input()
except AttributeError:
return None
def set_dosing_regimen(
self, dose, start, duration=0.01, period=None, num=None):
"""
Sets the dosing regimen with which the compound is administered.
The route of administration can be set with :meth:`set_administration`.
However, the type of administration, e.g. bolus injection or infusion,
may be controlled with the duration input.
By default the dose is administered as a bolus injection (duration on
a time scale that is 100 fold smaller than the basic time unit). To
model an infusion of the dose over a longer time period, the
``duration`` can be adjusted to the appropriate time scale.
By default the dose is administered once. To apply multiple doses
provide a dose administration period.
Parameters
----------
dose
The amount of the compound that is injected at each administration.
start
Start time of the treatment.
duration
Duration of dose administration. For a bolus injection, a dose
duration of 1% of the time unit should suffice. By default the
duration is set to 0.01 (bolus).
period
Periodicity at which doses are administered. If ``None`` the dose
is administered only once.
num
Number of administered doses. If ``None`` and the periodicity of
the administration is not ``None``, doses are administered
indefinitely.
"""
try:
self._mechanistic_model.set_dosing_regimen(
dose, start, duration, period, num)
except AttributeError:
raise AttributeError(
'The mechanistic model does not support dosing regimens.')
def set_outputs(self, outputs):
"""
Sets outputs of the model.
Parameters
----------
outputs
A list of quantifiable variable names of the :class:`myokit.Model`,
e.g. `compartment.variable`.
"""
self._mechanistic_model.set_outputs(outputs)
def set_output_names(self, names):
"""
Assigns names to the outputs. By default the :class:`myokit.Model`
names are assigned to the outputs.
Parameters
----------
names
A dictionary that maps the current output names to new names.
"""
self._mechanistic_model.set_output_names(names)
def set_parameter_names(self, names):
"""
Assigns names to the parameters. By default the :class:`myokit.Model`
names are assigned to the parameters.
Parameters
----------
names
A dictionary that maps the current parameter names to new names.
"""
# Set parameter names
self._mechanistic_model.set_parameter_names(names)
self._parameter_names = self._mechanistic_model.parameters()
def simulate(self, parameters, times):
"""
Returns the numerical solution of the model outputs (and optionally
        the sensitivities) for the specified parameters and times.
The model outputs are returned as a 2 dimensional NumPy array of shape
(n_outputs, n_times). If sensitivities are enabled, a tuple is returned
with the NumPy array of the model outputs and a NumPy array of the
sensitivities of shape (n_times, n_outputs, n_parameters).
:param parameters: An array-like object with values for the model
parameters.
:type parameters: list, numpy.ndarray
:param times: An array-like object with time points at which the output
values are returned.
:type times: list, numpy.ndarray
"""
# Insert fixed parameter values
if self._fixed_params_mask is not None:
self._fixed_params_values[
~self._fixed_params_mask] = parameters
parameters = self._fixed_params_values
return self._mechanistic_model.simulate(parameters, times)
def time_unit(self):
"""
Returns the model's unit of time.
"""
return self._mechanistic_model.time_unit()
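# A minimal usage sketch for ReducedMechanisticModel (the parameter name
# below is illustrative; real names come from the wrapped model):
#
#     model = ReducedMechanisticModel(mechanistic_model)
#     model.fix_parameters({'central.drug_amount': 0})
#     model.n_parameters()                 # one fewer than the wrapped model
#     output = model.simulate(parameters, times)   # free parameters only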
| 35.332449
| 79
| 0.620205
| 39,657
| 0.992393
| 0
| 0
| 0
| 0
| 0
| 0
| 21,652
| 0.541828
|
678d54c4f215c915ab40d1921519e8d17b0d89cd
| 4,954
|
py
|
Python
|
smartlicense/settings/__init__.py
|
coblo/smartlicense
|
288b40496646c225716fa3bf5f43b48ff645b96c
|
[
"MIT"
] | 6
|
2018-04-26T05:54:29.000Z
|
2021-04-03T05:08:46.000Z
|
smartlicense/settings/__init__.py
|
coblo/smartlicense
|
288b40496646c225716fa3bf5f43b48ff645b96c
|
[
"MIT"
] | 13
|
2018-03-31T07:58:02.000Z
|
2022-02-10T10:35:28.000Z
|
smartlicense/settings/__init__.py
|
coblo/smartlicense
|
288b40496646c225716fa3bf5f43b48ff645b96c
|
[
"MIT"
] | 2
|
2019-06-13T21:42:21.000Z
|
2021-04-03T05:09:02.000Z
|
# -*- coding: utf-8 -*-
"""
Django settings for smartlicense project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
from os.path import dirname, abspath, join
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = dirname(dirname(dirname(abspath(__file__))))
SCRATCH_DIR = join(BASE_DIR, '.scratch')
SCRATCH_DB = join(SCRATCH_DIR, 'scratch.sqlite3')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3ka4^(c+fm7rw+@ttete34bt6tv3^8=r1!*_*-ovp1vu&qi=a9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ADMINS = [('admin', 'admin@admin.org')]
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'martor',
'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_markup',
'django_object_actions',
'smartlicense.apps.SmartLicenseConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'smartlicense.urls'
MEDIA_ROOT = SCRATCH_DIR
MEDIA_URL = '/media/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [join(BASE_DIR, 'smartlicense', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'smartlicense.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
        'NAME': SCRATCH_DB,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# Martor
MARTOR_ENABLE_CONFIGS = {
'imgur': 'false', # to enable/disable imgur/custom uploader.
'mention': 'false', # to enable/disable mention
'jquery': 'true',
    # to include/exclude jquery (required for the default Django admin)
}
# Custom project settings
NODE_IP = '127.0.0.1'
NODE_PORT = '9718'
NODE_USER = 'testuser'
NODE_PWD = 'testpassword'
STREAM_SMART_LICENSE = 'smart-license'
STREAM_SMART_LICENSE_ATTESTATION = 'smart-license'
STREAM_ISCC = 'iscc'
SUIT_CONFIG = {
'ADMIN_NAME': 'Smart License Demo',
'CONFIRM_UNSAVED_CHANGES': False,
'MENU_OPEN_FIRST_CHILD': True,
'SEARCH_URL': 'admin:smartlicense_mediacontent_changelist',
'LIST_PER_PAGE': 18,
'MENU': (
{'label': 'Smart Licenses', 'models': (
{'model': 'smartlicense.mediacontent'},
{'model': 'smartlicense.smartlicense'},
)},
{'label': 'Transactions', 'models': (
{'model': 'smartlicense.attestation'},
{'model': 'smartlicense.tokentransaction'},
)},
{'label': 'Configuration', 'models': (
{'model': 'smartlicense.template'},
{'model': 'smartlicense.rightsmodule'},
{'model': 'smartlicense.activationmode'},
)}
)
}
# Make sure deployment overrides settings
try:
from smartlicense.settings.config import *
except Exception:
print(
'No custom configuration found. Create a smartlicense/settings/config.py')
import sys
sys.exit(0)
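# A minimal smartlicense/settings/config.py override might look like this
# (illustrative values only):
#
#     DEBUG = False
#     ALLOWED_HOSTS = ['smartlicense.example.com']
#     SECRET_KEY = '<generate-a-fresh-secret-key>'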
| 27.370166
| 91
| 0.677836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,358
| 0.677836
|
678e1041a75c67c39856bfcf8a9561f7bd5138f9
| 2,226
|
py
|
Python
|
firmwire/memory_map.py
|
j4s0n/FirmWire
|
d3a20e2429cb4827f538d1a16163afde8b45826b
|
[
"BSD-3-Clause"
] | null | null | null |
firmwire/memory_map.py
|
j4s0n/FirmWire
|
d3a20e2429cb4827f538d1a16163afde8b45826b
|
[
"BSD-3-Clause"
] | null | null | null |
firmwire/memory_map.py
|
j4s0n/FirmWire
|
d3a20e2429cb4827f538d1a16163afde8b45826b
|
[
"BSD-3-Clause"
] | null | null | null |
## Copyright (c) 2022, Team FirmWire
## SPDX-License-Identifier: BSD-3-Clause
from enum import Enum, auto
from .hw.soc import SOCPeripheral
class MemoryMapEntryType(Enum):
GENERIC = auto()
FILE_BACKED = auto()
PERIPHERAL = auto()
ANNOTATION = auto()
class MemoryMapEntry:
def __init__(self, ty, start, size, **kwargs):
assert type(ty) == MemoryMapEntryType
assert isinstance(start, int), start
assert isinstance(size, int), size
self.ty = ty
self.start = start
self.size = size
self.kwargs = kwargs
def __repr__(self):
return "<MemoryMapEntry %s [%x - %x]>" % (
self.ty,
self.start,
self.start + self.size,
)
class MemoryMap:
def __init__(self):
self.memory_map = []
def add_file_backed_memory(self, start, size, file, **kwargs):
self.memory_map += [
MemoryMapEntry(
MemoryMapEntryType.FILE_BACKED, start, size, file=file, **kwargs
)
]
def add_memory_range(self, start, size, **kwargs):
# backwards compatibility
if "emulate" in kwargs:
peripheral_cls = kwargs["emulate"]
del kwargs["emulate"]
return self.create_peripheral(peripheral_cls, start, size, **kwargs)
self.memory_map += [
MemoryMapEntry(MemoryMapEntryType.GENERIC, start, size, **kwargs)
]
def add_memory_annotation(self, start, size, name):
self.memory_map += [
MemoryMapEntry(MemoryMapEntryType.ANNOTATION, start, size, name=name)
]
def create_peripheral(self, peripheral_cls, start, size, **kwargs):
self.memory_map += [
MemoryMapEntry(
MemoryMapEntryType.PERIPHERAL,
start,
size,
emulate=peripheral_cls,
**kwargs
)
]
def create_soc_peripheral(self, peripheral):
assert isinstance(peripheral, SOCPeripheral)
# The SOCPeripheral class captures the reference
self.create_peripheral(
peripheral, peripheral._address, peripheral._size, **peripheral._attr
)
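# A minimal usage sketch for MemoryMap (addresses, sizes and the file name
# below are illustrative, not taken from a real SoC definition):
#
#     mm = MemoryMap()
#     mm.add_memory_range(0x00000000, 0x10000)               # generic RAM
#     mm.add_file_backed_memory(0x40000000, 0x100000, file='modem.bin')
#     mm.add_memory_annotation(0x50000000, 0x1000, name='boot_params')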
| 28.177215
| 81
| 0.591195
| 2,077
| 0.933064
| 0
| 0
| 0
| 0
| 0
| 0
| 207
| 0.092992
|
678eb98334509fe0bad64239aa78922c47d0b166
| 1,688
|
py
|
Python
|
src/resources/Land.py
|
noancloarec/mapisto-api
|
b2458f6b12b229babb116f906b3e4f7e8b7b8a71
|
[
"MIT"
] | null | null | null |
src/resources/Land.py
|
noancloarec/mapisto-api
|
b2458f6b12b229babb116f906b3e4f7e8b7b8a71
|
[
"MIT"
] | 1
|
2020-07-08T07:12:31.000Z
|
2020-07-08T07:12:31.000Z
|
src/resources/Land.py
|
noancloarec/mapisto-api
|
b2458f6b12b229babb116f906b3e4f7e8b7b8a71
|
[
"MIT"
] | null | null | null |
from .helper import fill_optional_fields
from maps_geometry.feature_extraction import get_bounding_box
from .MapistoShape import MapistoShape
from .BoundingBox import BoundingBox
class Land:
def __init__(self, land_id, representations: list, bounding_box=None):
assert isinstance(bounding_box, BoundingBox) or bounding_box is None
assert isinstance(representations, list)
self.land_id = land_id
self.representations = representations
self.bounding_box = bounding_box
@staticmethod
def from_dict(json_dict):
json_dict = fill_optional_fields(json_dict, ['land_id'])
bounding_box = get_bounding_box(json_dict['d_path'])
representations = [MapistoShape(json_dict['d_path'], 0)]
return Land(json_dict['land_id'], bounding_box=bounding_box, representations=representations)
def to_dict(self):
return {
'land_id': self.land_id,
'd_path': self.representations[0].d_path
}
def __str__(self):
return str({
"land_id": self.land_id,
"representations": str([str(rep) for rep in self.representations])
})
def equals(self, other):
if not isinstance(other, Land):
return False
if other.land_id != self.land_id:
return False
if len(self.representations) != len(other.representations):
return False
        if (self.bounding_box is None) != (other.bounding_box is None):
            return False
        if self.bounding_box is not None and \
                not self.bounding_box.equals(other.bounding_box):
            return False
for i in range(len(self.representations)):
if not self.representations[i].equals(other.representations[i]):
return False
return True
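# A minimal round-trip sketch (the d_path value is an illustrative SVG path,
# not real map data, and assumes MapistoShape exposes the d_path it was
# built with):
#
#     land = Land.from_dict({'land_id': 1, 'd_path': 'M 0 0 L 1 0 L 1 1 Z'})
#     land.to_dict()   # -> {'land_id': 1, 'd_path': 'M 0 0 L 1 0 L 1 1 Z'}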
| 35.166667
| 101
| 0.65936
| 1,506
| 0.89218
| 0
| 0
| 336
| 0.199052
| 0
| 0
| 77
| 0.045616
|
6790c65796ad1cfbe5e6c6ab2a2c1453d34ad7fb
| 298
|
py
|
Python
|
reexercises/two_sum_target.py
|
R0bertWell/interview_questions
|
f8a65a842dfe03ac28c865bb8370422ff2071137
|
[
"MIT"
] | null | null | null |
reexercises/two_sum_target.py
|
R0bertWell/interview_questions
|
f8a65a842dfe03ac28c865bb8370422ff2071137
|
[
"MIT"
] | null | null | null |
reexercises/two_sum_target.py
|
R0bertWell/interview_questions
|
f8a65a842dfe03ac28c865bb8370422ff2071137
|
[
"MIT"
] | null | null | null |
from typing import List
def two_sum(lis: List[int], target: int):
    # Single O(n) pass: map each seen value to its index and, for each new
    # value, check whether its complement (target - value) was seen earlier.
    seen = {}
    for i, value in enumerate(lis):
        complement = target - value
        if complement in seen:
            return [seen[complement], i]
        seen[value] = i
    return []
print(two_sum([1, 2, 3, 4, 5, 6], 7))
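# The call above returns [2, 3]: the indices of 3 and 4, the first pair
# encountered left-to-right whose values sum to the target 7.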
| 19.866667
| 41
| 0.553691
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6792c61e36032efcbcd6f3d46a42dbabd2400582
| 1,032
|
py
|
Python
|
vue/decorators/base.py
|
adamlwgriffiths/vue.py
|
f4256454256ddfe54a8be6dea493d3fc915ef1a2
|
[
"MIT"
] | 274
|
2018-07-07T00:57:17.000Z
|
2022-03-22T23:49:53.000Z
|
vue/decorators/base.py
|
adamlwgriffiths/vue.py
|
f4256454256ddfe54a8be6dea493d3fc915ef1a2
|
[
"MIT"
] | 25
|
2018-11-24T17:19:44.000Z
|
2022-03-23T22:30:18.000Z
|
vue/decorators/base.py
|
adamlwgriffiths/vue.py
|
f4256454256ddfe54a8be6dea493d3fc915ef1a2
|
[
"MIT"
] | 18
|
2019-07-04T07:18:18.000Z
|
2022-03-22T23:49:55.000Z
|
from vue.bridge import Object
import javascript
class VueDecorator:
__key__ = None
__parents__ = ()
__id__ = None
__value__ = None
def update(self, vue_dict):
base = vue_dict
for parent in self.__parents__:
base = vue_dict.setdefault(parent, {})
if self.__id__ is None:
base[self.__key__] = self.__value__
else:
base = base.setdefault(self.__key__, {})
value = self.__value__
if isinstance(base.get(self.__id__), dict):
base[self.__id__].update(value)
else:
base[self.__id__] = value
def pyjs_bridge(fn, inject_vue_instance=False):
def wrapper(*args, **kwargs):
args = (javascript.this(), *args) if inject_vue_instance else args
args = tuple(Object.from_js(arg) for arg in args)
kwargs = {k: Object.from_js(v) for k, v in kwargs.items()}
return Object.to_js(fn(*args, **kwargs))
wrapper.__name__ = fn.__name__
return wrapper
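# A minimal sketch of how `update` merges a decorator into a Vue options
# dict (the decorator below is illustrative):
#
#     class Watch(VueDecorator):
#         __key__ = 'watch'
#         __id__ = 'counter'
#         __value__ = {'handler': 'on_counter_change'}
#
#     options = {}
#     Watch().update(options)
#     # options == {'watch': {'counter': {'handler': 'on_counter_change'}}}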
| 28.666667
| 74
| 0.602713
| 593
| 0.574612
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
679841fb13e9e1b6f465dd6a052897627ff56964
| 40,992
|
py
|
Python
|
datalabeling/google/cloud/datalabeling_v1beta1/proto/data_labeling_service_pb2_grpc.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | 2
|
2021-11-26T07:08:43.000Z
|
2022-03-07T20:20:04.000Z
|
datalabeling/google/cloud/datalabeling_v1beta1/proto/data_labeling_service_pb2_grpc.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | 6
|
2019-05-27T22:05:58.000Z
|
2019-08-05T16:46:16.000Z
|
datalabeling/google/cloud/datalabeling_v1beta1/proto/data_labeling_service_pb2_grpc.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | 1
|
2019-03-29T18:26:16.000Z
|
2019-03-29T18:26:16.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.datalabeling_v1beta1.proto import (
annotation_spec_set_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_annotation__spec__set__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
data_labeling_service_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
dataset_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
evaluation_job_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
evaluation_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
instruction_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_instruction__pb2,
)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class DataLabelingServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateDataset = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/CreateDataset",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateDatasetRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.Dataset.FromString,
)
self.GetDataset = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetDataset",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetDatasetRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.Dataset.FromString,
)
self.ListDatasets = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListDatasets",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDatasetsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDatasetsResponse.FromString,
)
self.DeleteDataset = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/DeleteDataset",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteDatasetRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ImportData = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ImportData",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ImportDataRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.ExportData = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ExportData",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ExportDataRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetDataItem = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetDataItem",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetDataItemRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.DataItem.FromString,
)
self.ListDataItems = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListDataItems",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDataItemsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDataItemsResponse.FromString,
)
self.GetAnnotatedDataset = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetAnnotatedDataset",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetAnnotatedDatasetRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.AnnotatedDataset.FromString,
)
self.ListAnnotatedDatasets = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListAnnotatedDatasets",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotatedDatasetsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotatedDatasetsResponse.FromString,
)
self.DeleteAnnotatedDataset = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/DeleteAnnotatedDataset",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteAnnotatedDatasetRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.LabelImage = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/LabelImage",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.LabelImageRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.LabelVideo = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/LabelVideo",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.LabelVideoRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.LabelText = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/LabelText",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.LabelTextRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetExample = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetExample",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetExampleRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.Example.FromString,
)
self.ListExamples = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListExamples",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListExamplesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListExamplesResponse.FromString,
)
self.CreateAnnotationSpecSet = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/CreateAnnotationSpecSet",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateAnnotationSpecSetRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_annotation__spec__set__pb2.AnnotationSpecSet.FromString,
)
self.GetAnnotationSpecSet = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetAnnotationSpecSet",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetAnnotationSpecSetRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_annotation__spec__set__pb2.AnnotationSpecSet.FromString,
)
self.ListAnnotationSpecSets = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListAnnotationSpecSets",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotationSpecSetsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotationSpecSetsResponse.FromString,
)
self.DeleteAnnotationSpecSet = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/DeleteAnnotationSpecSet",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteAnnotationSpecSetRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.CreateInstruction = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/CreateInstruction",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateInstructionRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetInstruction = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetInstruction",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetInstructionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_instruction__pb2.Instruction.FromString,
)
self.ListInstructions = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListInstructions",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListInstructionsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListInstructionsResponse.FromString,
)
self.DeleteInstruction = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/DeleteInstruction",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteInstructionRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetEvaluation = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetEvaluation",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetEvaluationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__pb2.Evaluation.FromString,
)
self.SearchEvaluations = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/SearchEvaluations",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchEvaluationsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchEvaluationsResponse.FromString,
)
self.SearchExampleComparisons = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/SearchExampleComparisons",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchExampleComparisonsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchExampleComparisonsResponse.FromString,
)
self.CreateEvaluationJob = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/CreateEvaluationJob",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateEvaluationJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2.EvaluationJob.FromString,
)
self.UpdateEvaluationJob = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/UpdateEvaluationJob",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.UpdateEvaluationJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2.EvaluationJob.FromString,
)
self.GetEvaluationJob = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/GetEvaluationJob",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetEvaluationJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2.EvaluationJob.FromString,
)
self.PauseEvaluationJob = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/PauseEvaluationJob",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.PauseEvaluationJobRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ResumeEvaluationJob = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ResumeEvaluationJob",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ResumeEvaluationJobRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.DeleteEvaluationJob = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/DeleteEvaluationJob",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteEvaluationJobRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ListEvaluationJobs = channel.unary_unary(
"/google.cloud.datalabeling.v1beta1.DataLabelingService/ListEvaluationJobs",
request_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListEvaluationJobsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListEvaluationJobsResponse.FromString,
)
class DataLabelingServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def CreateDataset(self, request, context):
"""Creates dataset. If success return a Dataset resource.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetDataset(self, request, context):
"""Gets dataset by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListDatasets(self, request, context):
"""Lists datasets under a project. Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteDataset(self, request, context):
"""Deletes a dataset by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ImportData(self, request, context):
"""Imports data into dataset based on source locations defined in request.
It can be called multiple times for the same dataset. Each dataset can
only have one long running operation running on it. For example, no
labeling task (also long running operation) can be started while
importing is still ongoing. Vice versa.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ExportData(self, request, context):
"""Exports data and annotations from dataset.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetDataItem(self, request, context):
"""Gets a data item in a dataset by resource name. This API can be
called after data are imported into dataset.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListDataItems(self, request, context):
"""Lists data items in a dataset. This API can be called after data
are imported into dataset. Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetAnnotatedDataset(self, request, context):
"""Gets an annotated dataset by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListAnnotatedDatasets(self, request, context):
"""Lists annotated datasets for a dataset. Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteAnnotatedDataset(self, request, context):
"""Deletes an annotated dataset by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def LabelImage(self, request, context):
"""Starts a labeling task for image. The type of image labeling task is
configured by feature in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def LabelVideo(self, request, context):
"""Starts a labeling task for video. The type of video labeling task is
configured by feature in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def LabelText(self, request, context):
"""Starts a labeling task for text. The type of text labeling task is
configured by feature in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetExample(self, request, context):
"""Gets an example by resource name, including both data and annotation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListExamples(self, request, context):
"""Lists examples in an annotated dataset. Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateAnnotationSpecSet(self, request, context):
"""Creates an annotation spec set by providing a set of labels.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetAnnotationSpecSet(self, request, context):
"""Gets an annotation spec set by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListAnnotationSpecSets(self, request, context):
"""Lists annotation spec sets for a project. Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteAnnotationSpecSet(self, request, context):
"""Deletes an annotation spec set by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateInstruction(self, request, context):
"""Creates an instruction for how data should be labeled.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetInstruction(self, request, context):
"""Gets an instruction by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListInstructions(self, request, context):
"""Lists instructions for a project. Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteInstruction(self, request, context):
"""Deletes an instruction object by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetEvaluation(self, request, context):
"""Gets an evaluation by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SearchEvaluations(self, request, context):
"""Searchs evaluations within a project. Supported filter: evaluation_job,
evaluation_time.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SearchExampleComparisons(self, request, context):
"""Searchs example comparisons in evaluation, in format of examples
of both ground truth and prediction(s). It is represented as a search with
evaluation id.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateEvaluationJob(self, request, context):
"""Creates an evaluation job.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateEvaluationJob(self, request, context):
"""Updates an evaluation job.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetEvaluationJob(self, request, context):
"""Gets an evaluation job by resource name.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def PauseEvaluationJob(self, request, context):
"""Pauses an evaluation job. Pausing a evaluation job that is already in
PAUSED state will be a no-op.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ResumeEvaluationJob(self, request, context):
"""Resumes a paused evaluation job. Deleted evaluation job can't be resumed.
Resuming a running evaluation job will be a no-op.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteEvaluationJob(self, request, context):
"""Stops and deletes an evaluation job.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListEvaluationJobs(self, request, context):
"""Lists all evaluation jobs within a project with possible filters.
Pagination is supported.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_DataLabelingServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
"CreateDataset": grpc.unary_unary_rpc_method_handler(
servicer.CreateDataset,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateDatasetRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.Dataset.SerializeToString,
),
"GetDataset": grpc.unary_unary_rpc_method_handler(
servicer.GetDataset,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetDatasetRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.Dataset.SerializeToString,
),
"ListDatasets": grpc.unary_unary_rpc_method_handler(
servicer.ListDatasets,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDatasetsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDatasetsResponse.SerializeToString,
),
"DeleteDataset": grpc.unary_unary_rpc_method_handler(
servicer.DeleteDataset,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteDatasetRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"ImportData": grpc.unary_unary_rpc_method_handler(
servicer.ImportData,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ImportDataRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"ExportData": grpc.unary_unary_rpc_method_handler(
servicer.ExportData,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ExportDataRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"GetDataItem": grpc.unary_unary_rpc_method_handler(
servicer.GetDataItem,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetDataItemRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.DataItem.SerializeToString,
),
"ListDataItems": grpc.unary_unary_rpc_method_handler(
servicer.ListDataItems,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDataItemsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDataItemsResponse.SerializeToString,
),
"GetAnnotatedDataset": grpc.unary_unary_rpc_method_handler(
servicer.GetAnnotatedDataset,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetAnnotatedDatasetRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.AnnotatedDataset.SerializeToString,
),
"ListAnnotatedDatasets": grpc.unary_unary_rpc_method_handler(
servicer.ListAnnotatedDatasets,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotatedDatasetsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotatedDatasetsResponse.SerializeToString,
),
"DeleteAnnotatedDataset": grpc.unary_unary_rpc_method_handler(
servicer.DeleteAnnotatedDataset,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteAnnotatedDatasetRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"LabelImage": grpc.unary_unary_rpc_method_handler(
servicer.LabelImage,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.LabelImageRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"LabelVideo": grpc.unary_unary_rpc_method_handler(
servicer.LabelVideo,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.LabelVideoRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"LabelText": grpc.unary_unary_rpc_method_handler(
servicer.LabelText,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.LabelTextRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"GetExample": grpc.unary_unary_rpc_method_handler(
servicer.GetExample,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetExampleRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.Example.SerializeToString,
),
"ListExamples": grpc.unary_unary_rpc_method_handler(
servicer.ListExamples,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListExamplesRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListExamplesResponse.SerializeToString,
),
"CreateAnnotationSpecSet": grpc.unary_unary_rpc_method_handler(
servicer.CreateAnnotationSpecSet,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateAnnotationSpecSetRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_annotation__spec__set__pb2.AnnotationSpecSet.SerializeToString,
),
"GetAnnotationSpecSet": grpc.unary_unary_rpc_method_handler(
servicer.GetAnnotationSpecSet,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetAnnotationSpecSetRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_annotation__spec__set__pb2.AnnotationSpecSet.SerializeToString,
),
"ListAnnotationSpecSets": grpc.unary_unary_rpc_method_handler(
servicer.ListAnnotationSpecSets,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotationSpecSetsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotationSpecSetsResponse.SerializeToString,
),
"DeleteAnnotationSpecSet": grpc.unary_unary_rpc_method_handler(
servicer.DeleteAnnotationSpecSet,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteAnnotationSpecSetRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"CreateInstruction": grpc.unary_unary_rpc_method_handler(
servicer.CreateInstruction,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateInstructionRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"GetInstruction": grpc.unary_unary_rpc_method_handler(
servicer.GetInstruction,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetInstructionRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_instruction__pb2.Instruction.SerializeToString,
),
"ListInstructions": grpc.unary_unary_rpc_method_handler(
servicer.ListInstructions,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListInstructionsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListInstructionsResponse.SerializeToString,
),
"DeleteInstruction": grpc.unary_unary_rpc_method_handler(
servicer.DeleteInstruction,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteInstructionRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"GetEvaluation": grpc.unary_unary_rpc_method_handler(
servicer.GetEvaluation,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetEvaluationRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__pb2.Evaluation.SerializeToString,
),
"SearchEvaluations": grpc.unary_unary_rpc_method_handler(
servicer.SearchEvaluations,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchEvaluationsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchEvaluationsResponse.SerializeToString,
),
"SearchExampleComparisons": grpc.unary_unary_rpc_method_handler(
servicer.SearchExampleComparisons,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchExampleComparisonsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchExampleComparisonsResponse.SerializeToString,
),
"CreateEvaluationJob": grpc.unary_unary_rpc_method_handler(
servicer.CreateEvaluationJob,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateEvaluationJobRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2.EvaluationJob.SerializeToString,
),
"UpdateEvaluationJob": grpc.unary_unary_rpc_method_handler(
servicer.UpdateEvaluationJob,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.UpdateEvaluationJobRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2.EvaluationJob.SerializeToString,
),
"GetEvaluationJob": grpc.unary_unary_rpc_method_handler(
servicer.GetEvaluationJob,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetEvaluationJobRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2.EvaluationJob.SerializeToString,
),
"PauseEvaluationJob": grpc.unary_unary_rpc_method_handler(
servicer.PauseEvaluationJob,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.PauseEvaluationJobRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"ResumeEvaluationJob": grpc.unary_unary_rpc_method_handler(
servicer.ResumeEvaluationJob,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ResumeEvaluationJobRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"DeleteEvaluationJob": grpc.unary_unary_rpc_method_handler(
servicer.DeleteEvaluationJob,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteEvaluationJobRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"ListEvaluationJobs": grpc.unary_unary_rpc_method_handler(
servicer.ListEvaluationJobs,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListEvaluationJobsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListEvaluationJobsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.cloud.datalabeling.v1beta1.DataLabelingService", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
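# --- Usage sketch (an assumption, not part of the generated file): minimal
# server wiring for the servicer above. Assumes `grpc` is imported at the top
# of this module, as is standard for generated *_pb2_grpc files; a real
# servicer would subclass DataLabelingServiceServicer and override its methods
# instead of answering UNIMPLEMENTED.
if __name__ == "__main__":
    from concurrent import futures

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_DataLabelingServiceServicer_to_server(DataLabelingServiceServicer(), server)
    server.add_insecure_port("[::]:50051")  # illustrative port choice
    server.start()
    server.wait_for_termination()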
| 63.553488
| 169
| 0.782616
| 25,893
| 0.63166
| 0
| 0
| 0
| 0
| 0
| 0
| 8,013
| 0.195477
|
67988b46e3108d80c389257b2f89c3e8f006472d
| 6,777
|
py
|
Python
|
keras_version/utils.py
|
nunu0910/BiO-Net
|
2038eadb16f200c4e9de8346af5e3d23422eb438
|
[
"MIT"
] | 44
|
2020-07-07T06:40:13.000Z
|
2022-03-24T10:15:39.000Z
|
keras_version/utils.py
|
nunu0910/BiO-Net
|
2038eadb16f200c4e9de8346af5e3d23422eb438
|
[
"MIT"
] | 12
|
2020-11-18T01:27:08.000Z
|
2021-09-22T08:19:14.000Z
|
keras_version/utils.py
|
nunu0910/BiO-Net
|
2038eadb16f200c4e9de8346af5e3d23422eb438
|
[
"MIT"
] | 14
|
2020-07-26T14:10:09.000Z
|
2021-11-18T23:20:44.000Z
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import keras
from keras.models import Model, load_model
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) # mute deprecation warnings
from keras.optimizers import Adam, SGD
from tensorflow import ConfigProto
from tensorflow import InteractiveSession
import numpy as np
import sys
from PIL import Image
import argparse
from matplotlib import pyplot as plt
from .dataloader import *
from .model import *
from .metrics import *
def train(args):
# load data
x_val, y_val = load_data(args.valid_data, args.valid_dataset)
x_train, y_train = load_data(args.train_data, 'monuseg')
print('data loading finished.')
K.clear_session()
config = ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
K.set_learning_phase(1)
input_shape = x_train[0].shape
# create model
model = BiONet(
input_shape,
num_classes=args.num_class,
num_layers=4,
iterations=args.iter,
multiplier=args.multiplier,
integrate=args.integrate
).build()
# augmentation
train_gen = get_augmented(
x_train, y_train, batch_size=args.batch_size,
data_gen_args = dict(
rotation_range=15.,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=50,
zoom_range=0.2,
horizontal_flip=True,
vertical_flip=True,
fill_mode='constant'
))
model.compile(
optimizer=Adam(lr=args.lr,decay=args.lr_decay),
loss = 'binary_crossentropy',
metrics=[iou, dice_coef]
)
print('model successfully built and compiled.')
integrate = '_int' if args.integrate else ''
weights = '_weights' if args.save_weight else ''
cpt_name = 'iter_'+str(args.iter)+'_mul_'+str(args.multiplier)+integrate+'_best'+weights+'.h5'
callbacks = [keras.callbacks.ModelCheckpoint("checkpoints/"+args.exp+"/"+cpt_name,monitor='val_iou', mode='max',verbose=0, save_weights_only=args.save_weight, save_best_only=True)]
if not os.path.isdir("checkpoints/"+args.exp):
os.mkdir("checkpoints/"+args.exp)
print('\nStart training...')
history = model.fit_generator(
train_gen,
steps_per_epoch=args.steps,
epochs=args.epochs,
validation_data=(x_val, y_val),
callbacks=callbacks
)
    print('\nTraining finished!')
K.clear_session()
def evaluate(args):
# load data
x_val, y_val = load_data(args.valid_data, args.valid_dataset)
print('data loading finished.')
K.clear_session()
K.set_learning_phase(1)
config = ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
if args.model_path is None:
integrate = '_int' if args.integrate else ''
weights = '_weights' if args.save_weight else ''
cpt_name = 'iter_'+str(args.iter)+'_mul_'+str(args.multiplier)+integrate+'_best'+weights+'.h5'
model_path = "checkpoints/"+args.exp+"/"+cpt_name
else:
model_path = args.model_path
print('Restoring model from path: '+model_path)
    if args.save_weight:
        input_shape = x_val[0].shape  # infer the model input shape from the validation data
        model = BiONet(
            input_shape,
            num_classes=args.num_class,
            num_layers=4,
            iterations=args.iter,
            multiplier=args.multiplier,
            integrate=args.integrate
        ).build()
        model.load_weights(model_path)  # load_weights returns None, so keep the model reference
else:
model = load_model(model_path, compile=False)
model.compile(
optimizer=Adam(lr=args.lr,decay=args.lr_decay),
loss='binary_crossentropy',
metrics=[iou, dice_coef]
)
print('\nStart evaluation...')
result = model.evaluate(x_val,y_val,batch_size=args.batch_size)
print('Validation loss:\t', result[0])
print('Validation iou:\t', result[1])
print('Validation dice:\t', result[2])
print('\nEvaluation finished!')
if args.save_result:
# save metrics
if not os.path.exists("checkpoints/"+args.exp+"/outputs"):
os.mkdir("checkpoints/"+args.exp+"/outputs")
with open("checkpoints/"+args.exp+"/outputs/result.txt", 'w+') as f:
f.write('Validation loss:\t'+str(result[0])+'\n')
f.write('Validation iou:\t'+str(result[1])+'\n')
f.write('Validation dice:\t'+str(result[2])+'\n')
print('Metrics have been saved to:', "checkpoints/"+args.exp+"/outputs/result.txt")
# predict and save segmentations
results = model.predict(x_val,batch_size=args.batch_size,verbose=1)
        results = (results > 0.5).astype(np.float32) # binarize predictions; comment out this line to keep raw probabilities
print('\nPrediction finished!')
print('Saving segmentations...')
if not os.path.exists("checkpoints/"+args.exp+"/outputs/segmentations"):
os.mkdir("checkpoints/"+args.exp+"/outputs/segmentations")
for i in range(results.shape[0]):
plt.imsave("checkpoints/"+args.exp+"/outputs/segmentations/"+str(i)+".png",results[i,:,:,0],cmap='gray') # binary segmenation
print('A total of '+str(results.shape[0])+' segmentation results have been saved to:', "checkpoints/"+args.exp+"/outputs/segmentations/")
K.clear_session()
def get_augmented(
X_train,
Y_train,
X_val=None,
Y_val=None,
batch_size=32,
seed=0,
data_gen_args = dict(
rotation_range=10.,
#width_shift_range=0.02,
height_shift_range=0.02,
shear_range=5,
#zoom_range=0.3,
horizontal_flip=True,
vertical_flip=False,
fill_mode='constant'
)):
# Train data, provide the same seed and keyword arguments to the fit and flow methods
X_datagen = ImageDataGenerator(**data_gen_args)
Y_datagen = ImageDataGenerator(**data_gen_args)
X_datagen.fit(X_train, augment=True, seed=seed)
Y_datagen.fit(Y_train, augment=True, seed=seed)
X_train_augmented = X_datagen.flow(X_train, batch_size=batch_size, shuffle=True, seed=seed)
Y_train_augmented = Y_datagen.flow(Y_train, batch_size=batch_size, shuffle=True, seed=seed)
train_generator = zip(X_train_augmented, Y_train_augmented)
if not (X_val is None) and not (Y_val is None):
        # Validation data: generators are built with the same settings, so the
        # same augmentation is applied to validation batches as well
X_datagen_val = ImageDataGenerator(**data_gen_args)
Y_datagen_val = ImageDataGenerator(**data_gen_args)
X_datagen_val.fit(X_val, augment=True, seed=seed)
Y_datagen_val.fit(Y_val, augment=True, seed=seed)
X_val_augmented = X_datagen_val.flow(X_val, batch_size=batch_size, shuffle=True, seed=seed)
Y_val_augmented = Y_datagen_val.flow(Y_val, batch_size=batch_size, shuffle=True, seed=seed)
# combine generators into one which yields image and masks
val_generator = zip(X_val_augmented, Y_val_augmented)
return train_generator, val_generator
else:
return train_generator
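# --- Usage sketch (an assumption, not part of the original file): driving the
# augmentation generator with random dummy arrays; shapes and the batch size
# are illustrative only.
if __name__ == "__main__":
    X = np.random.rand(8, 64, 64, 3).astype(np.float32)          # dummy images
    Y = (np.random.rand(8, 64, 64, 1) > 0.5).astype(np.float32)  # dummy binary masks
    gen = get_augmented(X, Y, batch_size=4)
    x_batch, y_batch = next(gen)
    print(x_batch.shape, y_batch.shape)  # (4, 64, 64, 3) (4, 64, 64, 1)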
| 32.425837
| 182
| 0.703261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,483
| 0.218828
|
6798bb647c9031d2653050d76cd3f241dd42a5cd
| 2,734
|
py
|
Python
|
sdk/python/pulumi_azure_native/batch/__init__.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/batch/__init__.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/batch/__init__.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .application import *
from .application_package import *
from .batch_account import *
from .certificate import *
from .get_application import *
from .get_application_package import *
from .get_batch_account import *
from .get_certificate import *
from .get_pool import *
from .list_batch_account_keys import *
from .pool import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_azure_native.batch.v20151201 as __v20151201
v20151201 = __v20151201
import pulumi_azure_native.batch.v20170101 as __v20170101
v20170101 = __v20170101
import pulumi_azure_native.batch.v20170501 as __v20170501
v20170501 = __v20170501
import pulumi_azure_native.batch.v20170901 as __v20170901
v20170901 = __v20170901
import pulumi_azure_native.batch.v20181201 as __v20181201
v20181201 = __v20181201
import pulumi_azure_native.batch.v20190401 as __v20190401
v20190401 = __v20190401
import pulumi_azure_native.batch.v20190801 as __v20190801
v20190801 = __v20190801
import pulumi_azure_native.batch.v20200301 as __v20200301
v20200301 = __v20200301
import pulumi_azure_native.batch.v20200501 as __v20200501
v20200501 = __v20200501
import pulumi_azure_native.batch.v20200901 as __v20200901
v20200901 = __v20200901
import pulumi_azure_native.batch.v20210101 as __v20210101
v20210101 = __v20210101
import pulumi_azure_native.batch.v20210601 as __v20210601
v20210601 = __v20210601
else:
v20151201 = _utilities.lazy_import('pulumi_azure_native.batch.v20151201')
v20170101 = _utilities.lazy_import('pulumi_azure_native.batch.v20170101')
v20170501 = _utilities.lazy_import('pulumi_azure_native.batch.v20170501')
v20170901 = _utilities.lazy_import('pulumi_azure_native.batch.v20170901')
v20181201 = _utilities.lazy_import('pulumi_azure_native.batch.v20181201')
v20190401 = _utilities.lazy_import('pulumi_azure_native.batch.v20190401')
v20190801 = _utilities.lazy_import('pulumi_azure_native.batch.v20190801')
v20200301 = _utilities.lazy_import('pulumi_azure_native.batch.v20200301')
v20200501 = _utilities.lazy_import('pulumi_azure_native.batch.v20200501')
v20200901 = _utilities.lazy_import('pulumi_azure_native.batch.v20200901')
v20210101 = _utilities.lazy_import('pulumi_azure_native.batch.v20210101')
v20210601 = _utilities.lazy_import('pulumi_azure_native.batch.v20210601')
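# --- Sketch (an assumption, not the actual _utilities implementation): one way
# a lazy_import helper like the one used above can be built on importlib's
# LazyLoader, so a submodule is only executed on first attribute access.
def _lazy_import_sketch(fullname):
    import importlib.util
    import sys
    if fullname in sys.modules:
        return sys.modules[fullname]
    spec = importlib.util.find_spec(fullname)
    loader = importlib.util.LazyLoader(spec.loader)
    spec.loader = loader
    module = importlib.util.module_from_spec(spec)
    sys.modules[fullname] = module
    loader.exec_module(module)  # defers actual execution until first attribute access
    return module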
| 43.396825
| 80
| 0.793343
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 681
| 0.249086
|
6799287dad3bb8281070f0e2070fafa75ab7324c
| 1,853
|
py
|
Python
|
setup.py
|
tgolsson/appJar
|
5e2f8bff44e927e7c2bae17fccddc6dbf79952f0
|
[
"Apache-2.0"
] | 666
|
2016-11-14T18:17:40.000Z
|
2022-03-29T03:53:22.000Z
|
setup.py
|
tgolsson/appJar
|
5e2f8bff44e927e7c2bae17fccddc6dbf79952f0
|
[
"Apache-2.0"
] | 598
|
2016-10-20T21:04:09.000Z
|
2022-03-15T22:44:49.000Z
|
setup.py
|
tgolsson/appJar
|
5e2f8bff44e927e7c2bae17fccddc6dbf79952f0
|
[
"Apache-2.0"
] | 95
|
2017-01-19T12:23:58.000Z
|
2022-03-06T18:16:21.000Z
|
from setuptools import setup, find_packages
__name__ = "appJar"
__version__ = "0.94.0"
__author__ = "Richard Jarvis"
__desc__ = "An easy-to-use, feature-rich GUI wrapper for tKinter. Designed specifically for use in the classroom, but powerful enough to be used anywhere."
__author_email__ = "info@appjar.info"
__license__ = "Apache 2.0"
__url__ = "http://appJar.info"
__keywords__ = ["python", "gui", "tkinter", "appJar", "interface"]
__packages__= ["appJar"]
__classifiers__ = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Education',
'Topic :: Software Development',
'Topic :: Software Development :: User Interfaces',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: Apache Software License',
]
__long_description__ = """# appJar
Simple tKinter GUIs in Python.
"""
setup(
name=__name__,
packages=__packages__,
version=__version__,
description=__desc__,
long_description=__long_description__,
long_description_content_type="text/markdown",
author=__author__,
author_email=__author_email__,
url=__url__,
keywords=__keywords__,
license=__license__,
classifiers=__classifiers__,
package_data = {
"appJar": ["lib/*.py", "lib/*.txt", "lib/tkdnd2.8/*.tcl", "lib/tkdnd2.8/tcl_files/*.tcl", "lib/tkdnd2.8/tcl_libs/*", "resources/icons/*", "examples/showcase.py", "PYPI.md"]
}
)
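# --- Usage sketch (an assumption, not part of the original setup.py):
#   python -m pip install .    # install locally
#   python -c "from importlib.metadata import version; print(version('appJar'))"  # expected: 0.94.0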
| 37.06
| 180
| 0.658392
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,071
| 0.577982
|
67995960cafd98e838927288e205c58078f19735
| 141,788
|
py
|
Python
|
pertama/andir.py
|
alitkurniawan48/BelajarGIS
|
c52556bc6fa74b849b9c3461410805807b742967
|
[
"MIT"
] | 2
|
2020-02-09T14:47:07.000Z
|
2020-02-09T14:47:12.000Z
|
pertama/andir.py
|
alitkurniawan48/BelajarGIS
|
c52556bc6fa74b849b9c3461410805807b742967
|
[
"MIT"
] | 12
|
2019-12-11T06:45:59.000Z
|
2020-01-06T09:35:35.000Z
|
pertama/andir.py
|
alitkurniawan48/BelajarGIS
|
c52556bc6fa74b849b9c3461410805807b742967
|
[
"MIT"
] | 71
|
2019-12-09T13:52:54.000Z
|
2021-05-28T16:19:09.000Z
|
import shapefile
class Andir:
    def __init__(self):
        # Polygon writer for kelurahan (urban-village) boundaries in Andir
        self.kelurahan = shapefile.Writer(
            'kelurahan_andir', shapeType=shapefile.POLYGON)
        self.kelurahan.field('kelurahan_di_andir', 'C')
        # Point writer for kelurahan office locations
        self.kantor = shapefile.Writer(
            'kantor_kelurahan_andir', shapeType=shapefile.POINT)
        self.kantor.field('kantor_kelurahan_di_andir', 'C')
        # Polyline writer for roads
        self.jalan = shapefile.Writer(
            'jalan_andir', shapeType=shapefile.POLYLINE)
        self.jalan.field('jalan_di_andir', 'C')
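    # Sketch (an assumption, not in the original class): pyshp 2.x writers
    # buffer output, so closing them flushes the .shp/.shx/.dbf files to disk.
    def close(self):
        self.kelurahan.close()
        self.kantor.close()
        self.jalan.close()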
# Kelurahan
def kelurahanCampaka(self, nama):
self.kelurahan.record(nama)
self.kelurahan.poly([[
[107.5688412, -6.9100128],
[107.5691201, -6.9097865],
[107.5691094, -6.9097226],
[107.569163, -6.9097412],
[107.5691523, -6.9096667],
[107.5692167, -6.9096507],
[107.5693562, -6.9094057],
[107.5693213, -6.9093125],
[107.5693401, -6.9092593],
[107.5693937, -6.909254],
[107.5693588, -6.9092193],
[107.5694259, -6.9090809],
[107.5694232, -6.9089158],
[107.5694956, -6.9086948],
[107.5695412, -6.9086335],
[107.5695734, -6.9085164],
[107.5696029, -6.9084791],
[107.5696002, -6.9084285],
[107.5696888, -6.908298],
[107.5697504, -6.9080557],
[107.5697183, -6.9079758],
[107.5699838, -6.9070998],
[107.570016, -6.9070679],
[107.5701353, -6.9069387],
[107.5701655, -6.906761],
[107.5701471, -6.9066948],
[107.5701499, -6.906643],
[107.570264, -6.9064906],
[107.5702372, -6.9063532],
[107.5701916, -6.9062912],
[107.5701796, -6.9062082],
[107.5702621, -6.9061069],
[107.5702705, -6.9060224],
[107.5702735, -6.9059272],
[107.5702582, -6.9058752],
[107.5702812, -6.9058353],
[107.570331, -6.9053586],
[107.5703968, -6.9051163],
[107.5703898, -6.9050277],
[107.5704728, -6.9047611],
[107.570537, -6.9044838],
[107.5705486, -6.9044575],
[107.5705852, -6.904431],
[107.5706371, -6.9043805],
[107.5706415, -6.9043086],
[107.5706277, -6.9042101],
[107.570663, -6.9041775],
[107.5706565, -6.9041212],
[107.5707491, -6.9039007],
[107.5707861, -6.9038517],
[107.5708515, -6.903726],
[107.571029, -6.9033889],
[107.5711822, -6.9031591],
[107.5712829, -6.9031001],
[107.5713198, -6.9030174],
[107.5713183, -6.9029548],
[107.5712337, -6.9028629],
[107.5709224, -6.9026604],
[107.5709029, -6.9026318],
[107.5708951, -6.9026101],
[107.570902, -6.9025687],
[107.5709255, -6.902512],
[107.5699489, -6.9018673],
[107.5693062, -6.9014289],
[107.5681693, -6.9006596],
[107.567794, -6.9004107],
[107.5674402, -6.9001352],
[107.5671834, -6.8999973],
[107.5668738, -6.8997586],
[107.5673784, -6.8990922],
[107.5674804, -6.898867],
[107.5674938, -6.8987944],
[107.5676575, -6.8983809],
[107.5678829, -6.8980119],
[107.5679768, -6.8978061],
[107.5680828, -6.897634],
[107.5682431, -6.8973935],
[107.5683634, -6.897188],
[107.5684758, -6.8969693],
[107.5685892, -6.8969163],
[107.5686339, -6.8968247],
[107.5686241, -6.8967202],
[107.5686192, -6.8965988],
[107.5685952, -6.8964875],
[107.5685484, -6.8964145],
[107.5685499, -6.8963149],
[107.5685818, -6.896162],
[107.5685851, -6.8960259],
[107.5684635, -6.8959323],
[107.5684081, -6.8959108],
[107.5683958, -6.8958547],
[107.5683944, -6.8957056],
[107.568337, -6.8955877],
[107.5682957, -6.895382],
[107.5681746, -6.8949567],
[107.5682117, -6.894851],
[107.5680265, -6.8945171],
[107.5680009, -6.8944163],
[107.5678567, -6.8942487],
[107.56782, -6.8941755],
[107.5678101, -6.894097],
[107.5678994, -6.8939173],
[107.5679365, -6.8938042],
[107.5679396, -6.8937164],
[107.5678969, -6.8936179],
[107.5677161, -6.8934741],
[107.5671385, -6.8930081],
[107.5671064, -6.8928797],
[107.5669697, -6.8927356],
[107.5669268, -6.8925901],
[107.5670463, -6.8923704],
[107.5668794, -6.8920902],
[107.5668064, -6.892011],
[107.56655, -6.8918584],
[107.5662883, -6.891759],
[107.5660598, -6.8918053],
[107.5648658, -6.8913936],
[107.5648508, -6.8916644],
[107.5646481, -6.8921482],
[107.564771, -6.8922401],
[107.5649719, -6.892278],
[107.5651165, -6.8924518],
[107.5647567, -6.8931561],
[107.5645768, -6.8931904],
[107.5645606, -6.8932806],
[107.5642931, -6.8933683],
[107.5639611, -6.8939379],
[107.5639608, -6.8940531],
[107.5650172, -6.894951],
[107.5649834, -6.8950079],
[107.5650354, -6.895118],
[107.5650229, -6.8952121],
[107.5648317, -6.8955468],
[107.5646603, -6.8960059],
[107.5645372, -6.8961801],
[107.5644152, -6.8963125],
[107.5643791, -6.896405],
[107.5642962, -6.8964301],
[107.5642355, -6.8965784],
[107.563983, -6.8967268],
[107.5639292, -6.8968649],
[107.563634, -6.8969206],
[107.5630975, -6.896794],
[107.5630412, -6.8968213],
[107.5628963, -6.8970962],
[107.5617891, -6.8964114],
[107.5614757, -6.8962167],
[107.5611903, -6.8960248],
[107.5608727, -6.8958382],
[107.5602911, -6.8954171],
[107.5601647, -6.8953644],
[107.5599015, -6.8953304],
[107.5621585, -6.8990243],
[107.5648286, -6.9034798],
[107.5675416, -6.9078873],
[107.568771, -6.909903],
[107.5688412, -6.9100128],
]])
def kelurahanCiroyom(self, nama):
self.kelurahan.record(nama)
self.kelurahan.poly([[
[107.5835421, -6.9108951],
[107.5834349, -6.9110762],
[107.5831184, -6.9118271],
[107.5827777, -6.912759],
[107.5825149, -6.913691],
[107.5830862, -6.9138388],
[107.5833719, -6.9138914],
[107.5835388, -6.9138911],
[107.5836441, -6.9139414],
[107.5833732, -6.9150303],
[107.5835301, -6.9150849],
[107.5847384, -6.9154724],
[107.5852265, -6.9156322],
[107.5851635, -6.9173336],
[107.5851589, -6.9176585],
[107.5850925, -6.9185052],
[107.592997, -6.9194399],
[107.5933806, -6.9166693],
[107.5933698, -6.9164523],
[107.593343, -6.9163811],
[107.593351, -6.9162309],
[107.5932464, -6.9160569],
[107.5931579, -6.9159776],
[107.5928159, -6.9157064],
[107.5928199, -6.9154431],
[107.5925933, -6.91527],
[107.5926596, -6.9150317],
[107.5926606, -6.9149552],
[107.5928542, -6.9143204],
[107.5929564, -6.9143133],
[107.5929807, -6.9142711],
[107.592966, -6.9140969],
[107.5929728, -6.9140132],
[107.5916209, -6.9139234],
[107.5901283, -6.9139075],
[107.5891399, -6.9137692],
[107.588118, -6.9135059],
[107.5868466, -6.9130123],
[107.5852239, -6.9120075],
[107.5847424, -6.9116616],
[107.5835421, -6.9108951],
]])
def kelurahanDungusCariang(self, nama):
self.kelurahan.record(nama)
self.kelurahan.poly([[
[107.5791752, -6.9079447],
[107.57897, -6.9082596],
[107.5787527, -6.9087209],
[107.5769161, -6.9168328],
[107.5768158, -6.9174711],
[107.57737, -6.9175158],
[107.578077, -6.9176377],
[107.5808391, -6.9179676],
[107.581166, -6.918042],
[107.5850925, -6.9185052],
[107.5851589, -6.9176585],
[107.5851635, -6.9173336],
[107.5851729, -6.9170687],
[107.5852265, -6.9156322],
[107.5847384, -6.9154724],
[107.5835301, -6.9150849],
[107.5833732, -6.9150303],
[107.5836441, -6.9139414],
[107.5835388, -6.9138911],
[107.5833719, -6.9138914],
[107.5830862, -6.9138388],
[107.5825149, -6.913691],
[107.5827777, -6.912759],
[107.5831184, -6.9118271],
[107.5834349, -6.9110762],
[107.5835421, -6.9108951],
[107.5791752, -6.9079447],
]])
def kelurahanGaruda(self, nama):
self.kelurahan.record(nama)
self.kelurahan.poly([[
[107.5768158, -6.9174711],
[107.5769161, -6.9168328],
[107.5772109, -6.91553],
[107.5773243, -6.9150268],
[107.5774343, -6.9145455],
[107.5775151, -6.9141873],
[107.5775811, -6.9138958],
[107.5777239, -6.9132647],
[107.5779033, -6.9124703],
[107.57808, -6.9116905],
[107.578135, -6.9114494],
[107.5781879, -6.9112091],
[107.5785809, -6.9094757],
[107.5786521, -6.9091642],
[107.5787527, -6.9087209],
[107.57897, -6.9082596],
[107.5791752, -6.9079447],
[107.5752133, -6.9053072],
[107.5749994, -6.9056383],
[107.5748938, -6.905937],
[107.5746716, -6.9063303],
[107.5749331, -6.9064624],
[107.574801, -6.9066536],
[107.5747065, -6.9068635],
[107.5744047, -6.9067304],
[107.5742339, -6.9071245],
[107.5739906, -6.9076517],
[107.5739769, -6.9079446],
[107.5740325, -6.9080411],
[107.5737868, -6.9084887],
[107.5734708, -6.9094448],
[107.5733059, -6.909819],
[107.5737331, -6.9100447],
[107.5749462, -6.910534],
[107.5760434, -6.9109361],
[107.5760824, -6.9109827],
[107.5761964, -6.9110346],
[107.5760496, -6.9114401],
[107.5763302, -6.9115669],
[107.5760199, -6.9123399],
[107.5758189, -6.9122928],
[107.5755187, -6.9124428],
[107.5753446, -6.912919],
[107.5753835, -6.9129355],
[107.5750134, -6.9137162],
[107.5746031, -6.9135943],
[107.5745069, -6.913683],
[107.5743743, -6.9139497],
[107.5739091, -6.9150764],
[107.5736437, -6.9150009],
[107.5734682, -6.9149867],
[107.5730218, -6.9153425],
[107.5729636, -6.9155205],
[107.572743, -6.9155911],
[107.5725292, -6.9158665],
[107.5724994, -6.9158819],
[107.573832, -6.9170058],
[107.5753239, -6.9173423],
[107.5768158, -6.9174711],
]])
def kelurahanKebonJeruk(self, nama):
self.kelurahan.record(nama)
self.kelurahan.poly([[
[107.592997, -6.9194399],
[107.6041614, -6.9207853],
[107.6045322, -6.9159498],
[107.6045972, -6.9154333],
[107.6046274, -6.9148208],
[107.6046281, -6.9145413],
[107.6022538, -6.9144167],
[107.5983239, -6.9143134],
[107.5952277, -6.914224],
[107.5934297, -6.9139855],
[107.5929728, -6.9140132],
[107.592966, -6.9140969],
[107.5929807, -6.9142711],
[107.5929564, -6.9143133],
[107.5928542, -6.9143204],
[107.5926606, -6.9149552],
[107.5926596, -6.9150317],
[107.5925933, -6.91527],
[107.5928199, -6.9154431],
[107.5928159, -6.9157064],
[107.5931579, -6.9159776],
[107.5932464, -6.9160569],
[107.593351, -6.9162309],
[107.593343, -6.9163811],
[107.5933698, -6.9164523],
[107.5933806, -6.9166693],
[107.592997, -6.9194399],
]])
def kelurahanMaleber(self, nama):
self.kelurahan.record(nama)
self.kelurahan.poly([[
[107.5709255, -6.902512],
[107.570902, -6.9025687],
[107.5708951, -6.9026101],
[107.5709029, -6.9026318],
[107.5709224, -6.9026604],
[107.5712337, -6.9028629],
[107.5713183, -6.9029548],
[107.5713198, -6.9030174],
[107.5712829, -6.9031001],
[107.5711822, -6.9031591],
[107.571029, -6.9033889],
[107.5707861, -6.9038517],
[107.5707491, -6.9039007],
[107.5706565, -6.9041212],
[107.570663, -6.9041775],
[107.5706277, -6.9042101],
[107.5706415, -6.9043086],
[107.5706371, -6.9043805],
[107.5705852, -6.904431],
[107.5705486, -6.9044575],
[107.570537, -6.9044838],
[107.5704728, -6.9047611],
[107.5703898, -6.9050277],
[107.5703968, -6.9051163],
[107.570331, -6.9053586],
[107.5702812, -6.9058353],
[107.5702582, -6.9058752],
[107.5702735, -6.9059272],
[107.5702705, -6.9060224],
[107.5702621, -6.9061069],
[107.5701796, -6.9062082],
[107.5701916, -6.9062912],
[107.5702372, -6.9063532],
[107.570264, -6.9064906],
[107.5701499, -6.906643],
[107.5701471, -6.9066948],
[107.5701655, -6.906761],
[107.5701353, -6.9069387],
[107.570016, -6.9070679],
[107.5699838, -6.9070998],
[107.5697183, -6.9079758],
[107.5697504, -6.9080557],
[107.5696888, -6.908298],
[107.5696002, -6.9084285],
[107.5696029, -6.9084791],
[107.5695734, -6.9085164],
[107.5695412, -6.9086335],
[107.5694956, -6.9086948],
[107.5694232, -6.9089158],
[107.5694259, -6.9090809],
[107.5693588, -6.9092193],
[107.5693937, -6.909254],
[107.5693401, -6.9092593],
[107.5693213, -6.9093125],
[107.5693403, -6.9093623],
[107.5693562, -6.9094057],
[107.5692167, -6.9096507],
[107.5691523, -6.9096667],
[107.569163, -6.9097412],
[107.5691094, -6.9097226],
[107.5691201, -6.9097865],
[107.5688412, -6.9100128],
[107.5700534, -6.9120367],
[107.5714501, -6.9143263],
[107.5715541, -6.9146136],
[107.5716975, -6.914762],
[107.5720703, -6.9154531],
[107.5724994, -6.9158819],
[107.5725292, -6.9158665],
[107.572743, -6.9155911],
[107.5729636, -6.9155205],
[107.5730218, -6.9153425],
[107.5734682, -6.9149867],
[107.5736437, -6.9150009],
[107.5739091, -6.9150764],
[107.5742, -6.9143704],
[107.5743743, -6.9139497],
[107.5745069, -6.913683],
[107.5746031, -6.9135943],
[107.5750134, -6.9137162],
[107.5753835, -6.9129355],
[107.5753446, -6.912919],
[107.5755187, -6.9124428],
[107.5758189, -6.9122928],
[107.5760199, -6.9123399],
[107.5763302, -6.9115669],
[107.5760496, -6.9114401],
[107.5761964, -6.9110346],
[107.5760824, -6.9109827],
[107.5760434, -6.9109361],
[107.5749462, -6.910534],
[107.5737331, -6.9100447],
[107.5733059, -6.909819],
[107.5734708, -6.9094448],
[107.5737868, -6.9084887],
[107.5740325, -6.9080411],
[107.5739769, -6.9079446],
[107.5739906, -6.9076517],
[107.5742339, -6.9071245],
[107.5744047, -6.9067304],
[107.5747065, -6.9068635],
[107.574801, -6.9066536],
[107.5749331, -6.9064624],
[107.5746716, -6.9063303],
[107.5748938, -6.905937],
[107.5749994, -6.9056383],
[107.5752133, -6.9053072],
[107.5709255, -6.902512],
]])
# Kantor Kelurahan
def kantorKelurahanCampaka(self, nama):
self.kantor.record(nama)
self.kantor.point(107.5631291, -6.8977897)
def kantorKelurahanCiroyom(self, nama):
self.kantor.record(nama)
self.kantor.point(107.5875214, -6.9144205)
def kantorKelurahanDungusCariang(self, nama):
self.kantor.record(nama)
self.kantor.point(107.5806731, -6.9125569)
def kantorKelurahanGaruda(self, nama):
self.kantor.record(nama)
self.kantor.point(107.5764865, -6.9160994)
def kantorKelurahanKebonJeruk(self, nama):
self.kantor.record(nama)
self.kantor.point(107.6011905, -6.918966)
def kantorKelurahanMaleber(self, nama):
self.kantor.record(nama)
self.kantor.point(107.5734513, -6.9073058)
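    # A minimal, hypothetical sketch of what backs these calls. The
    # self.kelurahan / self.kantor / self.jalan attributes are assumed here
    # (not confirmed by this file) to be pyshp `shapefile.Writer` instances,
    # which matches the record()/poly()/point()/line() call pattern above:
    #
    #     import shapefile  # pyshp
    #
    #     # Point writer for office locations, with one name attribute.
    #     kantor = shapefile.Writer('kantor_kelurahan',
    #                               shapeType=shapefile.POINT)
    #     kantor.field('nama', 'C')
    #     kantor.record('Kelurahan Campaka')     # attribute row...
    #     kantor.point(107.5631291, -6.8977897)  # ...paired with its geometry
    #     kantor.close()                         # write .shp/.shx/.dbf to disk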
    # Jalan (roads): each method below records road centrelines for one
    # kelurahan as polyline features. Runs of repeated vertices appear to be
    # out-and-back traces of side streets that share the same path.
def jalanKelurahanCampaka(self, nama):
self.jalan.record(nama)
self.jalan.line([[
[107.5696727,-6.903724],
[107.5697268,-6.9035733],
[107.5696674,-6.9035376],
[107.5697241,-6.9035733],
[107.5699168,-6.9036068],
[107.5701264,-6.9035813],
[107.5702467,-6.9034683],
[107.5703674,-6.9031728],
[107.5704988,-6.9029597],
[107.5707939,-6.9027334],
[107.5708904,-6.902808],
[107.5709445,-6.902873],
[107.5710889,-6.9029757],
[107.5709418,-6.9028703],
[107.5708904,-6.902808],
[107.5707939,-6.9027334],
[107.5707916,-6.9025375],
[107.5709002,-6.902592],
[107.5707862,-6.9025348],
[107.5705998,-6.9023923],
[107.5704737,-6.902576],
[107.5705998,-6.9023923],
[107.5704469,-6.9022991],
[107.570117,-6.9028236],
[107.5699051,-6.9027145],
[107.5702619,-6.9021713],
[107.5704469,-6.9022991],
[107.5702619,-6.9021713],
[107.5697415,-6.9018784],
[107.5696905,-6.9020248],
[107.5697415,-6.9018784],
[107.5694612,-6.9017094],
[107.5693271,-6.9019916],
[107.5694639,-6.901704],
[107.5692372,-6.9016014],
[107.569192,-6.9015038],
[107.5679511,-6.9007108],
[107.5667267,-6.8999008],
[107.5679511,-6.9007108],
[107.569192,-6.9015038],
[107.5692372,-6.9016014],
[107.5691916,-6.9016361],
[107.5690106,-6.9015496],
[107.5677473,-6.9007854],
[107.5676952,-6.9007974],
[107.5672929,-6.9015004],
[107.5673036,-6.9015696],
[107.5678079,-6.9018359],
[107.5678814,-6.9018904],
[107.5678814,-6.9019703],
[107.567817,-6.9020156],
[107.567722,-6.9019957],
[107.567773,-6.9018186],
[107.567722,-6.9019957],
[107.5676416,-6.9022779],
[107.5676843,-6.902328],
[107.5675397,-6.9027626],
[107.5677621,-6.9028579],
[107.5675397,-6.9027626],
[107.5676816,-6.9023226],
[107.5684194,-6.9025176],
[107.5684326,-6.9024771],
[107.5688217,-6.9014312],
[107.5690106,-6.9015496],
[107.5691916,-6.9016361],
[107.5689154,-6.9023657],
[107.5699051,-6.9027145],
[107.5689154,-6.9023657],
[107.5688493,-6.902523],
[107.5703674,-6.9031728],
[107.5702467,-6.9034683],
[107.569681,-6.9032161],
[107.5697827,-6.9029278],
[107.5688493,-6.902523],
[107.5687598,-6.9027677],
[107.569426,-6.9030878],
[107.5687598,-6.9027677],
[107.5686428,-6.9029574],
[107.5689566,-6.9030772],
[107.5692597,-6.9032343],
[107.5689566,-6.9030772],
[107.5686428,-6.9029574],
[107.5684084,-6.9033349],
[107.5680066,-6.9031487],
[107.5684084,-6.9033349],
[107.5681912,-6.9035905],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5688412,-6.9100128],
[107.5684508,-6.9093516],
[107.5687715,-6.9091011],
[107.5688948,-6.9088615],
[107.5689996,-6.9083494],
[107.5688948,-6.9088615],
[107.5687715,-6.9091011],
[107.5684508,-6.9093516],
[107.5682389,-6.9089975],
[107.568412,-6.908723],
[107.5682967,-6.9089014],
[107.5681156,-6.9087791],
[107.5683933,-6.908313],
[107.5683718,-6.9082517],
[107.5683933,-6.908313],
[107.5685354,-6.9083875],
[107.5687366,-6.9081665],
[107.5688761,-6.9082224],
[107.5687366,-6.9081665],
[107.5685354,-6.9083875],
[107.5683933,-6.908313],
[107.5681867,-6.9086591],
[107.5680821,-6.9085792],
[107.5681867,-6.9086591],
[107.5681156,-6.9087791],
[107.5682389,-6.9089975],
[107.5681156,-6.9087791],
[107.5675416,-6.9078873],
[107.5665894,-6.9062815],
[107.5675416,-6.9078873],
[107.5680661,-6.9075027],
[107.5683826,-6.9072897],
[107.5689941,-6.9070767],
[107.5685623,-6.9072264],
[107.568726,-6.9067624],
[107.5687947,-6.9066466],
[107.5689887,-6.9064269],
[107.5689083,-6.9063497],
[107.5686454,-6.9062326],
[107.5689083,-6.9063497],
[107.5691032,-6.9059463],
[107.5693821,-6.9055415],
[107.5693419,-6.905435],
[107.569248,-6.9052673],
[107.5691914,-6.905117],
[107.5691941,-6.9050238],
[107.5692531,-6.9048854],
[107.5691941,-6.9050238],
[107.5691914,-6.905117],
[107.569248,-6.9052726],
[107.5693419,-6.9054377],
[107.5693658,-6.905322],
[107.5698378,-6.9049999],
[107.5699505,-6.90488],
[107.5700098,-6.9047161],
[107.5699505,-6.90488],
[107.5698378,-6.9049999],
[107.5693658,-6.905322],
[107.5693419,-6.9054404],
[107.5693821,-6.9055442],
[107.5695592,-6.9057013],
[107.5696459,-6.9058065],
[107.5689887,-6.9064269],
[107.5696459,-6.9058065],
[107.5700026,-6.9054763],
[107.5701421,-6.9055136],
[107.570185,-6.9054923],
[107.5702655,-6.9051222],
[107.5702145,-6.9050184],
[107.5701957,-6.9049278],
[107.5702923,-6.904784],
[107.5703826,-6.9046682],
[107.5704148,-6.9045137],
[107.5703746,-6.9044711],
[107.569957,-6.9042754],
[107.5698873,-6.9043047],
[107.5697764,-6.9042687],
[107.5696459,-6.9041396],
[107.5696539,-6.9040651],
[107.5696459,-6.9041396],
[107.5697737,-6.9042687],
[107.5698873,-6.9043047],
[107.569957,-6.9042754],
[107.5699454,-6.9042022],
[107.569983,-6.9039732],
[107.5700151,-6.9038534],
[107.5700876,-6.9036909],
[107.5701492,-6.903659],
[107.5702512,-6.9036084],
[107.5703692,-6.903433],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5654548,-6.9044202],
[107.565461,-6.9044548],
[107.5665894,-6.9062815],
[107.5669547,-6.9055636],
[107.5676279,-6.9058565],
[107.5669547,-6.9055636],
[107.5669681,-6.9054997],
[107.5666784,-6.9053719],
[107.5669681,-6.9054997],
[107.5670673,-6.9052734],
[107.5668984,-6.9052042],
[107.5670673,-6.9052734],
[107.5670888,-6.9052121],
[107.5673731,-6.9053639],
[107.5678103,-6.905537],
[107.5677298,-6.9057314],
[107.5676467,-6.9057048],
[107.5677298,-6.9057314],
[107.5678103,-6.905537],
[107.5679069,-6.9053666],
[107.5680946,-6.9054278],
[107.5679069,-6.9053666],
[107.5678103,-6.905537],
[107.5673731,-6.9053639],
[107.5670888,-6.9052121],
[107.5671639,-6.9050471],
[107.5673976,-6.9051418],
[107.5671639,-6.9050471],
[107.5673141,-6.9046796],
[107.5669654,-6.9045518],
[107.5673141,-6.9046796],
[107.5673838,-6.9045358],
[107.5676574,-6.9046423],
[107.5676789,-6.9047302],
[107.5678589,-6.9049181],
[107.5681107,-6.9050271],
[107.5684379,-6.9051376],
[107.5681107,-6.9050271],
[107.567982,-6.9049718],
[107.5678961,-6.9049349],
[107.5679954,-6.9046343],
[107.5682126,-6.9046876],
[107.5682475,-6.9045491],
[107.568336,-6.9045624],
[107.5684084,-6.9043068],
[107.5684648,-6.9042882],
[107.5686207,-6.9043856],
[107.5688376,-6.9044905],
[107.5690575,-6.9039127],
[107.568787,-6.9037945],
[107.5687437,-6.9038062],
[107.5686874,-6.9039207],
[107.5686096,-6.9039154],
[107.5684648,-6.9042882],
[107.5686096,-6.9039154],
[107.5686874,-6.9039207],
[107.5687437,-6.9038062],
[107.568556,-6.9037343],
[107.568784,-6.9032177],
[107.568556,-6.9037343],
[107.5680624,-6.9035426],
[107.5681322,-6.9034095],
[107.5680624,-6.9035426],
[107.567864,-6.9035053],
[107.5676682,-6.9039234],
[107.5679954,-6.9040672],
[107.5676682,-6.9039234],
[107.5674589,-6.9043707],
[107.5679954,-6.9046343],
[107.5674589,-6.9043707],
[107.5673838,-6.9045358],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5652,-6.9039968],
[107.5654548,-6.9044202],
[107.5658421,-6.9040966],
[107.5657401,-6.9039155],
[107.5658421,-6.9040966],
[107.5659385,-6.9041193],
[107.5660191,-6.9041552],
[107.5663946,-6.9039155],
[107.5665502,-6.9037025],
[107.5666735,-6.9036812],
[107.5667326,-6.9035641],
[107.5667326,-6.9034575],
[107.5665931,-6.9034096],
[107.5667326,-6.9034575],
[107.5667326,-6.9035641],
[107.5672553,-6.9037584],
[107.5674243,-6.9037904],
[107.5672553,-6.9037584],
[107.5675265,-6.9032179],
[107.5675855,-6.9031939],
[107.5676123,-6.9030981],
[107.5675855,-6.9031939],
[107.5676821,-6.9032179],
[107.567733,-6.9031966],
[107.5678108,-6.902933],
[107.567733,-6.9031966],
[107.5676821,-6.9032179],
[107.5675855,-6.9031939],
[107.5675265,-6.9032179],
[107.5670491,-6.9041925],
[107.5663946,-6.9039155],
[107.5667755,-6.9040806],
[107.5665984,-6.9044587],
[107.5664697,-6.9046984],
[107.5665984,-6.9044587],
[107.5660191,-6.9041552],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5671064,-6.8928797],
[107.5670844,-6.8929687],
[107.5671826,-6.8930938],
[107.567197,-6.8931524],
[107.5671853,-6.8930912],
[107.5672721,-6.8931284],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5655931,-6.8933281],
[107.5659042,-6.8934826],
[107.5658211,-6.8936264],
[107.5659042,-6.8934826],
[107.5663951,-6.8937382],
[107.5665453,-6.8933974],
[107.5663951,-6.8937382],
[107.5663887,-6.8937888],
[107.5661386,-6.8941398],
[107.5661306,-6.8942223],
[107.5659696,-6.8944886],
[107.5661306,-6.8942223],
[107.5661386,-6.8941398],
[107.5663914,-6.8937888],
[107.5663951,-6.8937382],
[107.5666767,-6.8939246],
[107.5665345,-6.8941536],
[107.5666767,-6.8939246],
[107.5667062,-6.8939379],
[107.5670378,-6.8933335],
[107.5671129,-6.893171],
[107.567197,-6.8931524],
[107.5675913,-6.8934347],
[107.5675618,-6.8934853],
[107.5673945,-6.8934293],
[107.5673473,-6.8934426],
[107.567197,-6.8937036],
[107.5674331,-6.8938474],
[107.567197,-6.8937036],
[107.5670468,-6.8939539],
[107.567197,-6.8937036],
[107.5673473,-6.8934426],
[107.5673918,-6.8934267],
[107.5675618,-6.8934853],
[107.5677549,-6.8935971],
[107.5677818,-6.893677],
[107.5676852,-6.8938634],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5648649,-6.8914296],
[107.5654767,-6.8916706],
[107.5654982,-6.8916147],
[107.5654767,-6.8916706],
[107.5658814,-6.891821],
[107.5658496,-6.8919396],
[107.5658814,-6.891821],
[107.5660856,-6.891857],
[107.5661017,-6.8918038],
[107.5660933,-6.8918396],
[107.566096,-6.8919515],
[107.5658559,-6.8926917],
[107.5657691,-6.8926799],
[107.5655824,-6.8926145],
[107.5657691,-6.8926825],
[107.5658559,-6.8926917],
[107.5655931,-6.8933281],
[107.5654161,-6.8932989],
[107.5655555,-6.8928994],
[107.5654161,-6.8932989],
[107.5652122,-6.8935172],
[107.5654161,-6.8932989],
[107.5655931,-6.8933281],
[107.5657084,-6.8930539],
[107.5661698,-6.893227],
[107.5663522,-6.8928116],
[107.5664836,-6.8929127],
[107.5663522,-6.8928116],
[107.5664809,-6.8924894],
[107.5660035,-6.8922524],
[107.5664809,-6.8924894],
[107.5665332,-6.8923775],
[107.5669328,-6.8925346],
[107.5665332,-6.8923775],
[107.5665842,-6.8922337],
[107.5665577,-6.8921633],
[107.566096,-6.8919515],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5645095,-6.8932962],
[107.564906,-6.8936092],
[107.5650572,-6.8937553],
[107.5652261,-6.893517],
[107.5650853,-6.8934371],
[107.5650424,-6.8933612],
[107.5650531,-6.8932494],
[107.5651076,-6.8930699],
[107.5648662,-6.8929607],
[107.5651076,-6.8930699],
[107.5652712,-6.892713],
[107.5652364,-6.8927836],
[107.5650003,-6.8926851],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5650859,-6.894871],
[107.5649973,-6.8950414],
[107.5650724,-6.8950787],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5647506,-6.8957444],
[107.5650142,-6.8958335],
[107.565051,-6.8958136],
[107.565338,-6.8952704],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5599402,-6.8953401],
[107.5601682,-6.8956996],
[107.5602335,-6.8957136],
[107.5606604,-6.8959371],
[107.5607914,-6.8960384],
[107.5608343,-6.8961183],
[107.5608396,-6.8962195],
[107.5606697,-6.8965144],
[107.5601682,-6.8956996],
[107.5606697,-6.8965144],
[107.561225,-6.8974171],
[107.5615853,-6.8971515],
[107.5618964,-6.8967574],
[107.5615853,-6.8971515],
[107.561225,-6.8974171],
[107.5613001,-6.8975396],
[107.5615048,-6.8974124],
[107.5613001,-6.8975396],
[107.5615254,-6.897915],
[107.5622451,-6.8974817],
[107.562449,-6.8975935],
[107.5624651,-6.8976841],
[107.562288,-6.8978598],
[107.5624651,-6.8976841],
[107.562449,-6.8975935],
[107.5622451,-6.8974817],
[107.5615254,-6.897915],
[107.5619116,-6.8985328],
[107.5621378,-6.8983551],
[107.5619116,-6.8985328],
[107.5620162,-6.8987299],
[107.5621088,-6.8986744],
[107.5620162,-6.8987299],
[107.5625634,-6.8996299],
[107.5629886,-6.8992762],
[107.5632032,-6.8989993],
[107.5634816,-6.8983231],
[107.56331,-6.8982086],
[107.5631624,-6.898198],
[107.5630498,-6.8980702],
[107.5628835,-6.8981447],
[107.5627816,-6.8983311],
[107.5628835,-6.8981447],
[107.5630498,-6.8980702],
[107.5631281,-6.8979182],
[107.5631651,-6.8978904],
[107.5633797,-6.8980076],
[107.56331,-6.8982086],
[107.5633797,-6.8980076],
[107.5631651,-6.8978904],
[107.5632751,-6.8976588],
[107.564702,-6.8986493],
[107.5641661,-6.8996427],
[107.5642117,-6.8997129],
[107.564378,-6.8998451],
[107.5646194,-6.8994931],
[107.5643512,-6.8993173],
[107.5646194,-6.8994931],
[107.5647642,-6.8992588],
[107.5644906,-6.899083],
[107.5647642,-6.8992588],
[107.5649922,-6.8988972],
[107.564702,-6.8986493],
[107.5649922,-6.8988972],
[107.5652202,-6.8990516],
[107.5649834,-6.8994079],
[107.5647642,-6.8992588],
[107.5649861,-6.8994079],
[107.5648574,-6.8996475],
[107.5646194,-6.8994931],
[107.5648574,-6.8996502],
[107.5646033,-6.9000368],
[107.5643699,-6.8998744],
[107.564378,-6.8998451],
[107.5643699,-6.8998744],
[107.5644263,-6.8999223],
[107.56404,-6.9004975],
[107.5645309,-6.9007957],
[107.5647079,-6.9005507],
[107.5646489,-6.9004629],
[107.5642385,-6.9001993],
[107.5646489,-6.9004629],
[107.5647079,-6.9005507],
[107.5649171,-6.9002392],
[107.5646033,-6.9000368],
[107.5649171,-6.9002392],
[107.5655255,-6.8992671],
[107.5652202,-6.8990516],
[107.5655255,-6.8992671],
[107.5651183,-6.8999223],
[107.5655742,-6.9002392],
[107.5652999,-6.9000443],
[107.5656735,-6.8995149],
[107.5654796,-6.8993493],
[107.5656735,-6.8995149],
[107.5658688,-6.8996505],
[107.5658237,-6.899688],
[107.5654991,-6.9001913],
[107.5658237,-6.899688],
[107.5658688,-6.8996505],
[107.5659544,-6.8996715],
[107.5664589,-6.9000073],
[107.5660753,-6.899757],
[107.5662121,-6.8996239],
[107.5667267,-6.8999008],
[107.566869,-6.8996741],
[107.5667993,-6.8995809],
[107.5664962,-6.8993839],
[107.5665605,-6.8993014],
[107.5664935,-6.8993839],
[107.566287,-6.8992534],
[107.5663781,-6.8992028],
[107.5662789,-6.8992561],
[107.5659168,-6.8990324],
[107.5659544,-6.8988939],
[107.5663325,-6.8980099],
[107.5676199,-6.8984603],
[107.5663325,-6.8980099],
[107.5651229,-6.8975945],
[107.5649592,-6.8979407],
[107.5649405,-6.8980845],
[107.5653052,-6.8982549],
[107.5649405,-6.8980845],
[107.5648198,-6.8984067],
[107.5659168,-6.8990324],
[107.5648036,-6.8984017],
[107.563999,-6.8978315],
[107.5648036,-6.8984017],
[107.5649405,-6.8980845],
[107.5649592,-6.8979407],
[107.5651229,-6.8975945],
[107.5636133,-6.8970598],
[107.5649061,-6.8975232],
[107.5654991,-6.8962457],
[107.5655847,-6.896221],
[107.5683769,-6.897161],
[107.5669875,-6.896703],
[107.5671217,-6.8963249],
[107.5669849,-6.8962583],
[107.5669527,-6.8962264],
[107.5666764,-6.8960719],
[107.5667461,-6.8958616],
[107.5665664,-6.895779],
[107.5663358,-6.8961145],
[107.5662124,-6.896253],
[107.5663358,-6.8961145],
[107.5665664,-6.895779],
[107.566612,-6.8956272],
[107.5667087,-6.8956643],
[107.5668133,-6.8956696],
[107.5668749,-6.8956991],
[107.566808,-6.8956696],
[107.5667087,-6.8956669],
[107.566612,-6.8956272],
[107.5665745,-6.8957338],
[107.5662687,-6.895582],
[107.5665745,-6.8957338],
[107.5665986,-6.8956619],
[107.5666469,-6.8955607],
[107.5664591,-6.8954329],
[107.5664053,-6.8953507],
[107.5661614,-6.8952065],
[107.5661105,-6.8952518],
[107.5660675,-6.8953796],
[107.5665986,-6.8956619],
[107.5660675,-6.8953796],
[107.5658852,-6.8960134],
[107.5659442,-6.8960293],
[107.5660622,-6.896048],
[107.5659442,-6.8960293],
[107.5658852,-6.8960134],
[107.5657618,-6.8962716],
[107.5658852,-6.8960134],
[107.56603,-6.8955234],
[107.5655177,-6.8952598],
[107.5656006,-6.895143],
[107.5656275,-6.8950498],
[107.5656572,-6.8949509],
[107.5656275,-6.8950524],
[107.5656033,-6.8951456],
[107.5655177,-6.8952598],
[107.5653834,-6.8955823],
[107.5652627,-6.8957927],
[107.5653807,-6.8955823],
[107.5655177,-6.8952598],
[107.56603,-6.8955234],
[107.5660675,-6.8953796],
[107.5661105,-6.8952518],
[107.5661614,-6.8952065],
[107.566384,-6.8948391],
[107.5662164,-6.8947043],
[107.5658945,-6.8951064],
[107.5662164,-6.8947043],
[107.5659696,-6.8944886],
[107.5659015,-6.8944306],
[107.5660249,-6.8942122],
[107.5659015,-6.8944306],
[107.5658372,-6.8943746],
[107.5659364,-6.8941909],
[107.5658372,-6.8943746],
[107.565408,-6.8940311],
[107.5652632,-6.8942175],
[107.565408,-6.8940311],
[107.5652685,-6.8939273],
[107.5650996,-6.8941802],
[107.5652685,-6.8939273],
[107.5651934,-6.8939033],
[107.5652873,-6.8937995],
[107.5651934,-6.8939033],
[107.565003,-6.8937116],
[107.5649279,-6.8937995],
[107.565003,-6.8937116],
[107.564906,-6.8936092],
[107.5647509,-6.8938474],
[107.5646677,-6.8940684],
[107.5644421,-6.8944663],
[107.5646677,-6.8940684],
[107.5643137,-6.8938607],
[107.5644075,-6.8936983],
[107.5643137,-6.8938607],
[107.5641259,-6.8941163],
[107.5643137,-6.8938607],
[107.5641179,-6.8937276],
[107.5639596,-6.8940045],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5662883,-6.891759],
[107.5668064,-6.892011],
[107.5668571,-6.8920704],
[107.5668544,-6.8921343],
[107.566892,-6.8922196],
[107.5668893,-6.8922835],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5667062,-6.8939379],
[107.566732,-6.8939459],
[107.5671595,-6.8942308],
[107.5672909,-6.8939912],
[107.5671595,-6.8942308],
[107.5674331,-6.8944066],
[107.5675672,-6.8942016],
[107.5676959,-6.8940631],
[107.5675672,-6.8942016],
[107.5674331,-6.8944066],
[107.5680124,-6.8948007],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.566384,-6.8948391],
[107.5667287,-6.8951037],
[107.5668977,-6.894896],
[107.5670774,-6.8946936],
[107.5668548,-6.8945498],
[107.5670774,-6.8946936],
[107.5672866,-6.8948241],
[107.5671659,-6.895101],
[107.5672165,-6.8951513],
[107.5670533,-6.8953726],
[107.5667287,-6.8951037],
[107.5674365,-6.8956839],
[107.5671217,-6.8963249],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5651612,-6.9012271],
[107.565019,-6.9014638],
[107.5653543,-6.9016398],
[107.5656493,-6.9017889],
[107.5656433,-6.9018904],
[107.5653463,-6.9023827],
[107.5650861,-6.9022496],
[107.5650378,-6.9021324],
[107.5651337,-6.9020022],
[107.5653543,-6.9016398],
[107.5651987,-6.9019114],
[107.5655126,-6.9020739],
[107.5651987,-6.9019114],
[107.565078,-6.9020632],
[107.564834,-6.9017274],
[107.565019,-6.9014638],
[107.564834,-6.9017274],
[107.5646569,-6.9018845],
[107.5647749,-6.9020579],
[107.5646569,-6.9018845],
[107.5641137,-6.9021941],
[107.5637918,-6.9016483],
[107.5641137,-6.9021941],
[107.5644034,-6.9026734],
[107.5647165,-6.9024614],
[107.5648849,-6.9026914],
[107.5648795,-6.9027712],
[107.5645911,-6.9029797],
[107.5644034,-6.9026734],
[107.5645911,-6.9029797],
[107.5652,-6.9039968],
[107.5652759,-6.9039315],
[107.5655119,-6.9035215],
[107.5656982,-6.9031164],
[107.565843,-6.9027277],
[107.5659732,-6.9023392],
[107.5669281,-6.9003421],
[107.5668208,-6.9005605],
[107.5675236,-6.9008747],
[107.567309,-6.9007815],
[107.5669227,-6.9015643],
[107.5664721,-6.901322],
[107.5669227,-6.9015643],
[107.5664453,-6.9025416],
[107.5659732,-6.9023392],
[107.5664453,-6.9025416],
[107.5668798,-6.9027439],
[107.5670339,-6.9027383],
[107.5670622,-6.9025629],
[107.5674485,-6.9017427],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5672263,-6.8961199],
[107.5676715,-6.8964101],
[107.5676956,-6.8964767],
[107.5677788,-6.8965273],
[107.5678603,-6.8965123],
[107.5680296,-6.8964031],
[107.5676993,-6.8959798],
[107.5674529,-6.8956975],
[107.567702,-6.8959771],
[107.5680296,-6.8964031],
[107.5683001,-6.8968212],
[107.56844,-6.8969251],
[107.5685553,-6.8968878],
[107.5685925,-6.8968132],
[107.5686032,-6.8967493],
[107.5686036,-6.8967892],
[107.5684423,-6.8965895],
[107.5682733,-6.8963392],
[107.5682254,-6.896254],
[107.56798,-6.8959758],
[107.5681017,-6.8958999],
[107.56798,-6.8959758],
[107.5676742,-6.8956216],
[107.5673184,-6.8953753],
[107.5673067,-6.8952994],
[107.567642,-6.8947802],
[107.5673067,-6.8952994],
[107.5673184,-6.895378],
[107.5676742,-6.8956216],
[107.5680292,-6.8950265],
[107.5679219,-6.8952075],
[107.5681017,-6.8958999],
[107.5682254,-6.8961502],
[107.5682254,-6.896254],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5637852,-6.8990676],
[107.5635867,-6.898977],
[107.5634794,-6.8989717],
[107.5632751,-6.8988477],
[107.5634794,-6.8989717],
[107.5629859,-6.899664],
[107.5630771,-6.899925],
[107.5628772,-6.9001332],
[107.5625634,-6.8996299],
[107.5628772,-6.9001332],
[107.5637918,-6.9016483],
[107.5639374,-6.9015708],
[107.5642378,-6.9011448],
[107.5645309,-6.9007957],
[107.5651612,-6.9012271],
[107.5655152,-6.900657],
[107.5655126,-6.9006064],
[107.5649171,-6.9002392],
[107.5655126,-6.9006064],
[107.5656708,-6.9004946],
[107.5657566,-6.9004946],
[107.5662609,-6.9007475],
[107.5661911,-6.9007103],
[107.5664406,-6.9001564],
[107.5665184,-6.9002097],
[107.5664406,-6.9001564],
[107.5661214,-6.8999221],
[107.566159,-6.8999514],
[107.5657566,-6.9004946],
[107.5659578,-6.900223],
[107.5660544,-6.9002709],
[107.5661885,-6.8999753],
[107.5660544,-6.9002709],
[107.5659149,-6.9005745],
]])
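    # Road polylines for Kelurahan Ciroyom.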
def jalanKelurahanCiroyom(self, nama):
self.jalan.record(nama)
self.jalan.line([[
[107.5840308,-6.915249],
[107.5842615,-6.9142664],
[107.5845351,-6.9142771],
[107.5845431,-6.9142478],
[107.5845807,-6.9137019],
[107.5848891,-6.9136514],
[107.5848918,-6.9135901],
[107.584983,-6.9135795],
[107.5850367,-6.913204],
[107.584924,-6.9132067],
[107.5850367,-6.913204],
[107.5851118,-6.9129031],
[107.5851359,-6.9128153],
[107.5852083,-6.9126022],
[107.58516,-6.9125836],
[107.5852405,-6.9123386],
[107.5844621,-6.9118109],
[107.5852405,-6.9123386],
[107.5857367,-6.9126875],
[107.5855785,-6.9130576],
[107.585439,-6.9135209],
[107.5849991,-6.9134596],
[107.585439,-6.9135235],
[107.5854014,-6.9137259],
[107.5854014,-6.9139815],
[107.5852915,-6.9139815],
[107.5854309,-6.9147564],
[107.5852888,-6.9148203],
[107.585321,-6.9151558],
[107.5853103,-6.9154833],
[107.5851842,-6.9154966],
[107.5851815,-6.9154327],
[107.5851842,-6.9154966],
[107.5851815,-6.9156191],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5850876,-6.9184706],
[107.586536,-6.9186623],
[107.5878127,-6.9187848],
[107.5878556,-6.918199],
[107.5879307,-6.918199],
[107.5878556,-6.918199],
[107.5878127,-6.9187848],
[107.5884028,-6.9188594],
[107.5889982,-6.9189233],
[107.5901462,-6.9190724],
[107.5917019,-6.9192747],
[107.5923242,-6.919344],
[107.5929357,-6.9194292],
[107.5930215,-6.9188221],
[107.5917829,-6.918673],
[107.5915737,-6.9185186],
[107.5914717,-6.9183109],
[107.5917185,-6.9181937],
[107.591461,-6.9183162],
[107.5914825,-6.9181511],
[107.5914771,-6.9183109],
[107.5909299,-6.9183748],
[107.5907529,-6.9181937],
[107.5901896,-6.9185186],
[107.590136,-6.9186624],
[107.5901462,-6.9190724],
[107.590136,-6.9186624],
[107.5901896,-6.9185186],
[107.590256,-6.91848],
[107.5901574,-6.9182363],
[107.5893079,-6.9181977],
[107.5893474,-6.9178156],
[107.5892938,-6.9183269],
[107.5895995,-6.9183641],
[107.5896371,-6.918215],
[107.5895995,-6.9183641],
[107.5895942,-6.9185399],
[107.5895995,-6.9183641],
[107.5892938,-6.9183269],
[107.5890631,-6.9183002],
[107.5889982,-6.9189233],
[107.5890631,-6.9183002],
[107.5885267,-6.9182363],
[107.5884028,-6.9188594],
[107.5885267,-6.9182363],
[107.5881994,-6.9181724],
[107.58806,-6.9181937],
[107.5881994,-6.9181724],
[107.5882316,-6.9177357],
[107.5879098,-6.9176931],
[107.5882316,-6.9177357],
[107.5881994,-6.9181724],
[107.5885267,-6.9182363],
[107.5887124,-6.9182576],
[107.5888163,-6.9172991],
[107.5889773,-6.9172298],
[107.5889987,-6.9171074],
[107.5889773,-6.9172298],
[107.5888163,-6.9172991],
[107.5887626,-6.9172586],
[107.5884622,-6.9172347],
[107.5878239,-6.9171415],
[107.5872043,-6.9170935],
[107.5870353,-6.9171415],
[107.5867617,-6.9171441],
[107.5866705,-6.9171388],
[107.5866437,-6.9173145],
[107.5865364,-6.9173465],
[107.5864774,-6.9174743],
[107.5865042,-6.9175968],
[107.5865525,-6.9177672],
[107.5865632,-6.918108],
[107.586536,-6.9186623],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5851589,-6.9176585],
[107.5852342,-6.9176629],
[107.5852879,-6.9171677],
[107.5852342,-6.9176629],
[107.5861998,-6.9177375],
[107.586232,-6.9173647],
[107.5854917,-6.9172955],
[107.5853844,-6.9180304],
[107.5861569,-6.9180996],
[107.5861998,-6.9177375],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5930215,-6.9188221],
[107.5931774,-6.9177051],
[107.5931077,-6.9177158],
[107.5931774,-6.9177051],
[107.5932954,-6.9170661],
[107.5933169,-6.9165122],
[107.5927697,-6.9164803],
[107.5928233,-6.9160383],
[107.592877,-6.915969],
[107.5928877,-6.915772],
[107.592877,-6.915969],
[107.5929521,-6.915985],
[107.5929574,-6.9158226],
[107.5929521,-6.915985],
[107.5932149,-6.9160289],
[107.5929521,-6.915985],
[107.592877,-6.915969],
[107.5928233,-6.9160383],
[107.5927697,-6.9164803],
[107.5913213,-6.9164962],
[107.5910531,-6.9168104],
[107.5909136,-6.917966],
[107.5907529,-6.9181937],
[107.5909136,-6.917966],
[107.590974,-6.9174628],
[107.5910531,-6.9168104],
[107.5910531,-6.9164962],
[107.5913213,-6.9164962],
[107.5910531,-6.9164962],
[107.5904737,-6.9164536],
[107.5904415,-6.9166986],
[107.5904737,-6.9164536],
[107.590066,-6.9164004],
[107.5899534,-6.9171459],
[107.5898944,-6.9177424],
[107.5898622,-6.9182163],
[107.589889,-6.9177371],
[107.589948,-6.9171353],
[107.5900017,-6.9168477],
[107.5898085,-6.9168477],
[107.5898085,-6.9167359],
[107.589712,-6.9165921],
[107.5897066,-6.9163738],
[107.590066,-6.9164004],
[107.5897066,-6.9163738],
[107.5894223,-6.9163205],
[107.5894062,-6.9164749],
[107.5893794,-6.9166187],
[107.5894545,-6.9166773],
[107.5894598,-6.9167146],
[107.5894116,-6.9167998],
[107.5894759,-6.9168158],
[107.5894545,-6.9169223],
[107.5896261,-6.916885],
[107.5898085,-6.9168477],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5890883,-6.9137554],
[107.589024,-6.9145329],
[107.5891151,-6.9147299],
[107.5900646,-6.9159441],
[107.5902953,-6.9157844],
[107.5908371,-6.915827],
[107.5902953,-6.9157844],
[107.5904884,-6.9155501],
[107.5903972,-6.9156619],
[107.5899627,-6.91514],
[107.5903972,-6.9156619],
[107.5902953,-6.9157844],
[107.5900646,-6.9159441],
[107.5905314,-6.9164554],
[107.5910531,-6.9164962],
[107.5913213,-6.9164962],
[107.5918456,-6.9164926],
[107.5916793,-6.9158696],
[107.5913467,-6.9155394],
[107.5915238,-6.9153264],
[107.5915881,-6.915124],
[107.5917705,-6.9149057],
[107.5920924,-6.9152039],
[107.5917652,-6.914911],
[107.5913414,-6.9146021],
[107.5906923,-6.9142666],
[107.590923,-6.914224],
[107.5916203,-6.9142453],
[107.5909283,-6.9142187],
[107.5906869,-6.914272],
[107.5905367,-6.9142933],
[107.5904026,-6.9146075],
[107.5904455,-6.9145169],
[107.5911965,-6.9149589],
[107.5910249,-6.9152838],
[107.5913414,-6.9155501],
[107.5910356,-6.9152891],
[107.5905099,-6.914895],
[107.590585,-6.9149589],
[107.5907459,-6.9147033],
[107.5904509,-6.9145169],
[107.5901183,-6.9143465],
[107.5894048,-6.9140909],
[107.5890669,-6.9139844],
[107.589024,-6.9145436],
[107.5885358,-6.9141388],
[107.5880315,-6.9145649],
[107.5880047,-6.914927],
[107.5886163,-6.9150761],
[107.5891205,-6.9147299],
[107.5886216,-6.9150708],
[107.5881335,-6.9154702],
[107.5884178,-6.916237],
[107.5894223,-6.9163205],
[107.5890642,-6.9162929],
[107.5890964,-6.9164634],
[107.5890695,-6.9166178],
[107.5890481,-6.9168255],
[107.5890374,-6.9168628],
[107.5886833,-6.9169586],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5884622,-6.9172347],
[107.5884771,-6.9170196],
[107.5883752,-6.916945],
[107.5883591,-6.9167347],
[107.5883645,-6.9162341],
[107.5884178,-6.916237],
[107.5883672,-6.9162288],
[107.5879675,-6.9161729],
[107.5879756,-6.916314],
[107.5879407,-6.9166016],
[107.5879622,-6.9168359],
[107.5879434,-6.9166016],
[107.5879783,-6.9163113],
[107.5879648,-6.9161702],
[107.5876081,-6.9161409],
[107.5876403,-6.916543],
[107.587702,-6.9168332],
[107.5877127,-6.9170169],
[107.5878415,-6.9170382],
[107.5878239,-6.9171415],
[107.5878361,-6.9170382],
[107.5877047,-6.9170089],
[107.5877047,-6.9168279],
[107.5876376,-6.916535],
[107.5876081,-6.9161382],
[107.5875732,-6.9161329],
[107.5875893,-6.9158533],
[107.5883001,-6.9159252],
[107.5878924,-6.9158853],
[107.58793,-6.9157388],
[107.5881335,-6.9154702],
[107.58793,-6.9157335],
[107.5878897,-6.915888],
[107.5875866,-6.915856],
[107.587769,-6.9154752],
[107.5880399,-6.9149427],
[107.5880047,-6.914927],
[107.5880315,-6.9145649],
[107.586959,-6.9154167],
[107.5869912,-6.9154353],
[107.5869885,-6.9154726],
[107.5870127,-6.9155152],
[107.5869697,-6.9158693],
[107.5869697,-6.9160797],
[107.5875786,-6.9161356],
[107.5869697,-6.916077],
[107.5867739,-6.9160664],
[107.5867364,-6.91633],
[107.5867364,-6.916559],
[107.5866908,-6.9166415],
[107.5867203,-6.9168572],
[107.5867471,-6.9170888],
[107.5867617,-6.9171441],
[107.5867444,-6.9170888],
[107.5867176,-6.9168519],
[107.5863153,-6.9168998],
[107.5862643,-6.9167879],
[107.5863153,-6.9169078],
[107.5863421,-6.9169956],
[107.5859881,-6.9169983],
[107.5853631,-6.9169424],
[107.58517,-6.9168998],
[107.5853631,-6.9169424],
[107.5853792,-6.9166974],
[107.5855213,-6.9167081],
[107.5853846,-6.9166974],
[107.5851861,-6.9167001],
[107.5853792,-6.9166974],
[107.5853872,-6.916543],
[107.5857359,-6.9165989],
[107.5853872,-6.9165377],
[107.5851941,-6.916527],
[107.5853899,-6.916543],
[107.5854087,-6.9163859],
[107.5851995,-6.9163513],
[107.5854141,-6.9163832],
[107.5854275,-6.9162208],
[107.5855991,-6.9158294],
[107.5852236,-6.9157149],
[107.5856018,-6.9158294],
[107.5861222,-6.9159865],
[107.586149,-6.9163273],
[107.5862268,-6.916322],
[107.5862965,-6.9163646],
[107.5864521,-6.9163486],
[107.5865674,-6.9163752],
[107.5867418,-6.916338],
[107.5865647,-6.9163779],
[107.5864494,-6.9163486],
[107.5862938,-6.9163646],
[107.5862187,-6.916322],
[107.5861436,-6.9163246],
[107.5861302,-6.9164737],
[107.5859344,-6.9164498],
[107.5857735,-6.9164072],
[107.5854087,-6.9163859],
[107.5854302,-6.9162155],
[107.5856233,-6.9162794],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5834349,-6.9110762],
[107.5836306,-6.9112092],
[107.5834267,-6.9116352],
[107.583239,-6.9115606],
[107.5834267,-6.9116352],
[107.5836306,-6.9112092],
[107.5838023,-6.9113423],
[107.583636,-6.9116299],
[107.5838023,-6.9113423],
[107.5844621,-6.9118109],
[107.5844192,-6.9119707],
[107.584505,-6.9122316],
[107.5845694,-6.9123754],
[107.5845425,-6.9126044],
[107.5840544,-6.9125512],
[107.5839471,-6.9128334],
[107.5845479,-6.9128387],
[107.5839471,-6.9128334],
[107.5837969,-6.9131689],
[107.5847035,-6.9132062],
[107.5837969,-6.9131689],
[107.5836441,-6.9139414],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5830862,-6.9138388],
[107.5831746,-6.913342],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5853103,-6.9154833],
[107.5856881,-6.9154143],
[107.5855915,-6.91514],
[107.5856881,-6.9154143],
[107.5856881,-6.9155794],
[107.5856291,-6.9158377],
[107.5857927,-6.9158829],
[107.5859697,-6.9153903],
[107.5856881,-6.9154143],
[107.5859697,-6.9153903],
[107.5859992,-6.9152945],
[107.586077,-6.9152732],
[107.5866376,-6.9153664],
[107.586959,-6.9154167],
[107.5866376,-6.9153664],
[107.5868307,-6.9147646],
[107.5871445,-6.913798],
[107.5879277,-6.9140909],
[107.5883381,-6.9142986],
[107.5885358,-6.9141388],
[107.5882496,-6.9139285],
[107.5878311,-6.9137395],
[107.5876434,-6.9136915],
[107.5872169,-6.9135105],
[107.5871445,-6.913798],
[107.5872169,-6.9135105],
[107.5868736,-6.9133587],
[107.5867851,-6.9136543],
[107.5868092,-6.9137315],
[107.5866376,-6.9145409],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5867739,-6.9160664],
[107.5861222,-6.9159865],
[107.5861651,-6.9159929],
[107.5861503,-6.9157705],
[107.5861288,-6.9152753],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5857367,-6.9126875],
[107.5863523,-6.9130875],
[107.5867492,-6.9133005],
[107.5868736,-6.9133587],
[107.5867546,-6.9133005],
[107.5866473,-6.9135562],
[107.5867546,-6.9132952],
[107.5863523,-6.9130822],
[107.5861162,-6.9132846],
[107.5861377,-6.9135189],
[107.5860411,-6.9135402],
[107.5859285,-6.9148236],
[107.5857783,-6.9148449],
[107.585789,-6.9149887],
]])
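    # Road polylines for Kelurahan Dungus Cariang.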
def jalanKelurahanDungusCariang(self, nama):
self.jalan.record(nama)
self.jalan.line([[
[107.5767156,-6.9174426],
[107.5770075,-6.9174589],
[107.5770182,-6.9173577],
[107.5770075,-6.9174589],
[107.5780053,-6.917576],
[107.5781233,-6.9171447],
[107.5780053,-6.917576],
[107.5781286,-6.9175973],
[107.5787509,-6.9176825],
[107.5788689,-6.9176879],
[107.5789923,-6.9177305],
[107.580607,-6.9179169],
[107.5812024,-6.9180074],
[107.5820017,-6.9180926],
[107.5820178,-6.9178796],
[107.5820822,-6.9177092],
[107.5820178,-6.9178796],
[107.5820017,-6.9180926],
[107.5826938,-6.9181831],
[107.5830907,-6.9182417],
[107.5843996,-6.9183802],
[107.5850876,-6.9184706],
[107.5849951,-6.9184494],
[107.5850648,-6.9176399],
[107.5851589,-6.9176585],
[107.5850648,-6.9176399],
[107.5850809,-6.9172938],
[107.5844104,-6.9173257],
[107.5839222,-6.9172885],
[107.5844104,-6.9173257],
[107.5844265,-6.9170808],
[107.5844104,-6.9173257],
[107.5843996,-6.9183802],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5850809,-6.9172938],
[107.5851171,-6.9168794],
[107.58517,-6.9168998],
[107.5851171,-6.9168794],
[107.5851117,-6.9166984],
[107.5851861,-6.9167001],
[107.5851117,-6.9166984],
[107.5851225,-6.9165173],
[107.5851941,-6.916527],
[107.5851225,-6.9165173],
[107.5851332,-6.9163362],
[107.5851995,-6.9163513],
[107.5851332,-6.9163362],
[107.5851868,-6.9157025],
[107.5852236,-6.9157149],
[107.5843232,-6.9154256],
[107.5842749,-6.9155747],
[107.5842856,-6.9157025],
[107.5842749,-6.9155747],
[107.5843232,-6.9154256],
[107.5838725,-6.9152552],
[107.5838243,-6.9156812],
[107.5838296,-6.915596],
[107.5836687,-6.9155747],
[107.5836633,-6.9154416],
[107.5836204,-6.915399],
[107.5836687,-6.9151966],
[107.5838725,-6.9152552],
[107.5836687,-6.9151966],
[107.5835399,-6.9151806],
[107.583497,-6.915367],
[107.5834058,-6.915383],
[107.5833576,-6.9156013],
[107.5834058,-6.915383],
[107.583497,-6.915367],
[107.5835399,-6.9151806],
[107.5832878,-6.9150741],
[107.5834756,-6.9144138],
[107.5835882,-6.9139931],
[107.5836366,-6.9139975],
[107.5835882,-6.9139931],
[107.5830303,-6.9138653],
[107.5824724,-6.9137268],
[107.5824724,-6.9136576],
[107.5825368,-6.91337],
[107.5826173,-6.9131463],
[107.5827138,-6.9127842],
[107.5828587,-6.9123102],
[107.5830089,-6.9118842],
[107.5831913,-6.9115221],
[107.583239,-6.9115606],
[107.5831913,-6.9115221],
[107.5833844,-6.9110481],
[107.5834349,-6.9110762],
[107.5833844,-6.9110481],
[107.5826226,-6.9105635],
[107.5824402,-6.9109043],
[107.5826226,-6.9105635],
[107.5821613,-6.9102386],
[107.5821291,-6.9103398],
[107.5821184,-6.910654],
[107.5819789,-6.9108883],
[107.5819789,-6.9110747],
[107.5819789,-6.9108883],
[107.5821184,-6.910654],
[107.5821291,-6.9103398],
[107.5821613,-6.9102386],
[107.581657,-6.9099351],
[107.5815229,-6.9098446],
[107.5813566,-6.9101801],
[107.581244,-6.9105315],
[107.5813674,-6.9105688],
[107.581244,-6.9105315],
[107.5813566,-6.9101801],
[107.5815229,-6.9098446],
[107.5813888,-6.9096901],
[107.5811528,-6.9100097],
[107.5809811,-6.9103185],
[107.5810348,-6.9104037],
[107.5810026,-6.9104676],
[107.5806217,-6.9102173],
[107.5810079,-6.9096262],
[107.5810723,-6.9096262],
[107.5811313,-6.909525],
[107.5813888,-6.9096901],
[107.5811313,-6.909525],
[107.5807236,-6.9092907],
[107.5804983,-6.9091096],
[107.5801443,-6.9088807],
[107.5798814,-6.9087369],
[107.5790499,-6.9081351],
[107.5798814,-6.9087369],
[107.5798653,-6.9088487],
[107.5797419,-6.9088221],
[107.579581,-6.9087102],
[107.5793289,-6.9085451],
[107.5791519,-6.9084493],
[107.578889,-6.908428],
[107.5788085,-6.908412],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5788757,-6.9112659],
[107.5781462,-6.9110636],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5782803,-6.9117612],
[107.5780013,-6.9117026],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.576847,-6.9168861],
[107.5773318,-6.9169331],
[107.5774338,-6.9168479],
[107.5775947,-6.9164166],
[107.5776752,-6.9162408],
[107.5777663,-6.9157562],
[107.57782,-6.9154633],
[107.5772031,-6.9153408],
[107.57782,-6.9154633],
[107.5778736,-6.9154101],
[107.5782062,-6.915458],
[107.5785549,-6.9156124],
[107.5784798,-6.9159053],
[107.5781633,-6.9158468],
[107.5780507,-6.9158042],
[107.5777663,-6.9157562],
[107.5780507,-6.9158042],
[107.5781687,-6.9158468],
[107.5784798,-6.9159053],
[107.5784369,-6.9160544],
[107.5782277,-6.9160065],
[107.5784369,-6.9160544],
[107.578394,-6.9163154],
[107.5790324,-6.9164805],
[107.5791128,-6.9164059],
[107.5791718,-6.9162621],
[107.5791128,-6.9164059],
[107.5790324,-6.9164805],
[107.578394,-6.9163154],
[107.5782491,-6.9169065],
[107.5781286,-6.9175973],
[107.5787509,-6.9176825],
[107.5788689,-6.9176879],
[107.5789465,-6.9173006],
[107.5789519,-6.9170556],
[107.5782491,-6.9169065],
[107.5789519,-6.9170556],
[107.5789465,-6.9173006],
[107.5790377,-6.9171941],
[107.5793274,-6.9163527],
[107.5796546,-6.9164539],
[107.5796707,-6.9165391],
[107.5798907,-6.9166083],
[107.5799336,-6.9165178],
[107.5801213,-6.9165444],
[107.5806578,-6.9167254],
[107.5807597,-6.9165444],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5775873,-6.9136243],
[107.5787955,-6.9139343],
[107.5786346,-6.9142752],
[107.5784951,-6.914813],
[107.5779962,-6.9147385],
[107.5778736,-6.9154101],
[107.5782062,-6.915458],
[107.5784951,-6.914813],
[107.5784522,-6.9149089],
[107.5786453,-6.9149834],
[107.5785702,-6.9153136],
[107.5785112,-6.9155905],
[107.5785549,-6.9156124],
[107.5791362,-6.9156811],
[107.5793561,-6.9157317],
[107.579289,-6.9159607],
[107.5793561,-6.9157317],
[107.5795412,-6.915753],
[107.5793274,-6.9163527],
[107.5794634,-6.9159793],
[107.5802734,-6.9162056],
[107.5802224,-6.9163654],
[107.5801313,-6.9164133],
[107.5800991,-6.9165464],
[107.5801313,-6.9164133],
[107.5802224,-6.9163654],
[107.5802734,-6.9162056],
[107.580724,-6.9163521],
[107.5807616,-6.9160938],
[107.5808179,-6.9158435],
[107.5803941,-6.9157184],
[107.5803244,-6.9159394],
[107.5807616,-6.9160938],
[107.5803244,-6.9159394],
[107.5795412,-6.915753],
[107.579686,-6.9151405],
[107.5793936,-6.915058],
[107.5793695,-6.9151166],
[107.5791013,-6.9150553],
[107.5790423,-6.9152311],
[107.5791013,-6.9150553],
[107.5793695,-6.9151166],
[107.5792971,-6.9153722],
[107.5792408,-6.9154161],
[107.5791388,-6.9156811],
[107.5792408,-6.9154161],
[107.5785729,-6.915311],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5805838,-6.9179197],
[107.5806643,-6.917696],
[107.5807501,-6.917704],
[107.5808225,-6.9175176],
[107.5808252,-6.9174084],
[107.5808494,-6.9173445],
[107.5807635,-6.9173099],
[107.5810344,-6.9166682],
[107.5808157,-6.9164871],
[107.580821,-6.9163966],
[107.580724,-6.9163521],
[107.580821,-6.9163966],
[107.5810249,-6.9160664],
[107.5814219,-6.9162155],
[107.5812234,-6.9166469],
[107.5811697,-6.9167427],
[107.5810344,-6.9166682],
[107.5811697,-6.9167427],
[107.5812234,-6.9166469],
[107.5817705,-6.9169078],
[107.5818564,-6.9169078],
[107.5819958,-6.9169558],
[107.5820441,-6.9168386],
[107.5819958,-6.9169558],
[107.5822426,-6.9170303],
[107.5821729,-6.9172593],
[107.5822426,-6.9170303],
[107.5825055,-6.9171102],
[107.5828166,-6.9171475],
[107.5827361,-6.9177066],
[107.5826938,-6.9181831],
[107.5830907,-6.9182417],
[107.5831224,-6.9178877],
[107.5829829,-6.9178717],
[107.5829346,-6.9177705],
[107.5830634,-6.9168812],
[107.5831331,-6.9168066],
[107.5834496,-6.9168546],
[107.5837768,-6.9168919],
[107.5838144,-6.9169717],
[107.5837071,-6.9180634],
[107.5833745,-6.9180315],
[107.5833289,-6.9179503],
[107.5833316,-6.9179023],
[107.5831224,-6.9178877],
[107.5833316,-6.9179023],
[107.583455,-6.9168546],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5851107,-6.9168255],
[107.5844186,-6.9166924],
[107.5836247,-6.9165273],
[107.5833779,-6.9164634],
[107.5827449,-6.9164101],
[107.5827289,-6.9165273],
[107.582863,-6.9166125],
[107.5833833,-6.9166125],
[107.5835872,-6.9166604],
[107.5836247,-6.9165273],
[107.5844186,-6.9166924],
[107.5844669,-6.9163302],
[107.5846171,-6.9161012],
[107.5844669,-6.9163302],
[107.5837159,-6.9161598],
[107.5836408,-6.9161066],
[107.5834316,-6.9160746],
[107.5835764,-6.9154249],
[107.5836204,-6.915399],
[107.5835764,-6.9154249],
[107.5834316,-6.9160746],
[107.5833779,-6.9164634],
[107.5834316,-6.9160746],
[107.5830078,-6.9160001],
[107.5829059,-6.9160001],
[107.582525,-6.9158882],
[107.5824499,-6.9163462],
[107.5825481,-6.916367],
[107.5824499,-6.9163462],
[107.5822426,-6.9170303],
[107.5824499,-6.9163462],
[107.5819054,-6.9162051],
[107.5820422,-6.9157311],
[107.5820207,-6.9156992],
[107.5817042,-6.9156459],
[107.5816882,-6.915606],
[107.5816935,-6.9155421],
[107.5816399,-6.9154888],
[107.5816506,-6.9154356],
[107.5816962,-6.9152811],
[107.5816372,-6.9152172],
[107.5817203,-6.9147885],
[107.5820181,-6.914887],
[107.5820663,-6.9148657],
[107.5822541,-6.9149084],
[107.582187,-6.9152944],
[107.5822541,-6.9149084],
[107.5822863,-6.9147726],
[107.5825277,-6.9148444],
[107.5824901,-6.9150681],
[107.5825277,-6.9148444],
[107.5825786,-6.9148365],
[107.5825974,-6.9146075],
[107.5825786,-6.9148365],
[107.5830588,-6.9150015],
[107.583107,-6.9150042],
[107.5832116,-6.914674],
[107.583107,-6.9150042],
[107.5832878,-6.9150741],
[107.583107,-6.9150042],
[107.5830185,-6.9151187],
[107.58293,-6.9154888],
[107.5826511,-6.9154249],
[107.5825921,-6.9155927],
[107.582525,-6.9158882],
[107.5825062,-6.9159921],
[107.582187,-6.9159175],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5824724,-6.9137268],
[107.5823858,-6.9138174],
[107.5823192,-6.9143792],
[107.5822863,-6.9147726],
[107.5811283,-6.9144964],
[107.5811604,-6.9141396],
[107.5812302,-6.9137615],
[107.5811604,-6.9141396],
[107.5811283,-6.9144964],
[107.5810263,-6.9150023],
[107.5812785,-6.9150236],
[107.5812892,-6.9150822],
[107.5813965,-6.9151035],
[107.5812892,-6.9150822],
[107.5811229,-6.9157478],
[107.5815735,-6.9158544],
[107.5811229,-6.9157478],
[107.5810249,-6.9160664],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5798653,-6.9088487],
[107.5798571,-6.9090062],
[107.5792724,-6.9088518],
[107.5794226,-6.9088891],
[107.5793904,-6.9090968],
[107.5793904,-6.9092778],
[107.5792885,-6.9092778],
[107.5793904,-6.9092778],
[107.5796748,-6.9092778],
[107.579723,-6.9093258],
[107.5798411,-6.9093311],
[107.5798571,-6.9090062],
[107.5798411,-6.9093311],
[107.5798303,-6.9094536],
[107.5796157,-6.9094163],
[107.5795567,-6.9093577],
[107.5795567,-6.9092778],
[107.5795567,-6.9093577],
[107.5796157,-6.9094163],
[107.5798303,-6.9094536],
[107.5798089,-6.9097891],
[107.5796587,-6.9097731],
[107.5796372,-6.909837],
[107.5795192,-6.9098051],
[107.5796372,-6.909837],
[107.5796587,-6.9097731],
[107.5798089,-6.9097891],
[107.5798035,-6.9100766],
[107.5792992,-6.9099808],
[107.5793743,-6.9097252],
[107.5792992,-6.9099808],
[107.5798035,-6.9100766],
[107.5797713,-6.9104494],
[107.578913,-6.9101885],
[107.5797713,-6.9104494],
[107.5798357,-6.9105027],
[107.5801844,-6.9106305],
[107.5802595,-6.9107157],
[107.5803453,-6.910737],
[107.5805545,-6.9102098],
[107.5806217,-6.9102173],
[107.5805545,-6.9102098],
[107.5802434,-6.9099968],
[107.5798089,-6.9097891],
[107.5798035,-6.9100766],
[107.5797713,-6.9104494],
[107.579723,-6.9109926],
[107.5796587,-6.9112482],
[107.5799001,-6.9113494],
[107.5796587,-6.9112482],
[107.5795246,-6.9116423],
[107.5796587,-6.9117062],
[107.5795246,-6.9116423],
[107.5794655,-6.9117595],
[107.5790847,-6.9115678],
[107.5794655,-6.9117595],
[107.5793851,-6.9120098],
[107.5789291,-6.9118127],
[107.5793851,-6.9120098],
[107.5793314,-6.9121163],
[107.5788379,-6.9119352],
[107.5793314,-6.9121163],
[107.5792456,-6.9123187],
[107.5789935,-6.9122122],
[107.5792456,-6.9123187],
[107.5792027,-6.9124465],
[107.5794226,-6.912521],
[107.5792027,-6.9124465],
[107.5790471,-6.9127074],
[107.5792134,-6.912782],
[107.5790471,-6.9127074],
[107.5789774,-6.9129897],
[107.5793422,-6.9130908],
[107.5789774,-6.9129897],
[107.5789345,-6.9132932],
[107.5793958,-6.9134263],
[107.5789345,-6.9132932],
[107.5788594,-6.9135755],
[107.5787955,-6.9139343],
[107.5797797,-6.9141558],
[107.5796748,-6.9141346],
[107.5800261,-6.9130243],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5810263,-6.9150023],
[107.5804959,-6.914912],
[107.5804422,-6.9149653],
[107.5803618,-6.9149413],
[107.5804047,-6.9147789],
[107.5803886,-6.9148375],
[107.5803618,-6.9149413],
[107.5803537,-6.9149893],
[107.580182,-6.9149307],
[107.5803537,-6.9149893],
[107.5803081,-6.915149],
[107.5797475,-6.9149919],
[107.579686,-6.9151405],
[107.5793936,-6.915058],
[107.5795088,-6.9147097],
[107.5795893,-6.9147283],
[107.5796161,-6.9146964],
[107.5797797,-6.9141558],
[107.5803027,-6.914289],
[107.5811283,-6.9144964],
[107.5807155,-6.9143921],
[107.5807963,-6.9141612],
[107.5807104,-6.9141079],
[107.5806407,-6.9141052],
[107.5807104,-6.9141079],
[107.5807963,-6.9141612],
[107.5808472,-6.9141159],
[107.5808633,-6.9139987],
[107.5805549,-6.9138922],
[107.5808633,-6.9139987],
[107.5808472,-6.9141159],
[107.5807963,-6.9141612],
[107.5807155,-6.9143921],
[107.5808855,-6.9144348],
[107.5809331,-6.9142304],
[107.5808855,-6.9144348],
[107.5807641,-6.9149546],
[107.5808855,-6.9144348],
[107.5803027,-6.914289],
[107.580233,-6.9144461],
[107.5801445,-6.9147762],
[107.5803886,-6.9148375],
[107.5801445,-6.9147762],
[107.5800426,-6.9148748],
[107.5797663,-6.9148295],
[107.5797207,-6.9148535],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.581657,-6.9099351],
[107.5813715,-6.9107838],
[107.5811623,-6.9114814],
[107.580878,-6.9114441],
[107.5807599,-6.911721],
[107.5804542,-6.9116358],
[107.5804864,-6.9115453],
[107.5804542,-6.9116358],
[107.5802986,-6.9121684],
[107.5803898,-6.912195],
[107.5802986,-6.9121684],
[107.5802611,-6.9122163],
[107.5794081,-6.9119607],
[107.5797568,-6.9120672],
[107.5796871,-6.9122323],
[107.5797568,-6.9120672],
[107.5802611,-6.9122163],
[107.5801377,-6.9126743],
[107.579907,-6.9126184],
[107.5801377,-6.9126743],
[107.5800261,-6.9130243],
[107.5797595,-6.9129512],
[107.5800261,-6.9130243],
[107.5801377,-6.9126743],
[107.5801752,-6.9125358],
[107.5804274,-6.9126876],
[107.5807358,-6.9127648],
[107.5809772,-6.911998],
[107.5805856,-6.9118542],
[107.5809826,-6.911998],
[107.5811703,-6.9119287],
[107.5813018,-6.9117983],
[107.5813339,-6.9117503],
[107.5813822,-6.9116598],
[107.5813339,-6.9117503],
[107.5810308,-6.9116598],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5799214,-6.9141892],
[107.5804274,-6.9126876],
[107.5807358,-6.9127648],
[107.580879,-6.9128259],
[107.5808548,-6.9129057],
[107.5807878,-6.912959],
[107.5808548,-6.9129057],
[107.580879,-6.9128232],
[107.5809192,-6.912714],
[107.5812572,-6.9127832],
[107.5811874,-6.9130415],
[107.5812572,-6.9127832],
[107.5826173,-6.9131463],
[107.5825368,-6.91337],
[107.5822335,-6.9133105],
[107.5821611,-6.9135661],
[107.5822335,-6.9133105],
[107.5813323,-6.9130522],
[107.5812089,-6.9130761],
[107.5811338,-6.9133318],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5827138,-6.9127842],
[107.5821254,-6.9126154],
[107.5810471,-6.9123038],
[107.5821227,-6.912618],
[107.5822756,-6.9122452],
[107.5815406,-6.9120322],
[107.5822756,-6.9122452],
[107.5824285,-6.9118911],
[107.5816989,-6.9116595],
]])
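    # Road polylines for Kelurahan Garuda.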
def jalanKelurahanGaruda(self, nama):
self.jalan.record(nama)
self.jalan.line([[
[107.5729636,-6.9155205],
[107.5731415,-6.9155822],
[107.5732327,-6.9156515],
[107.5733587,-6.9156834],
[107.5735492,-6.9157713],
[107.573348,-6.9159337],
[107.5730303,-6.9163057],
[107.5733507,-6.9159337],
[107.5735492,-6.9157713],
[107.5737476,-6.9158778],
[107.5734472,-6.9162719],
[107.5736323,-6.9164529],
[107.5737262,-6.9164503],
[107.5739113,-6.916184],
[107.5739971,-6.9162266],
[107.5739113,-6.916184],
[107.5737262,-6.9164503],
[107.5736323,-6.9164529],
[107.573446,-6.9166438],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5725292,-6.9158665],
[107.5729203,-6.9162178],
[107.5729766,-6.9161486],
[107.5729203,-6.9162178],
[107.5730303,-6.9163057],
[107.573446,-6.9166438],
[107.5738617,-6.9169766],
[107.5742453,-6.9170805],
[107.5743445,-6.9170166],
[107.5744813,-6.9170459],
[107.5745457,-6.9171391],
[107.5752914,-6.9173095],
[107.5758546,-6.9173521],
[107.5759914,-6.9169021],
[107.5758546,-6.9173521],
[107.5767156,-6.9174426],
[107.576847,-6.9168861],
[107.5765171,-6.9168409],
[107.5764528,-6.9167743],
[107.5764367,-6.9166438],
[107.5764528,-6.9167743],
[107.5765171,-6.9168409],
[107.576847,-6.9168861],
[107.5770321,-6.916074],
[107.5774452,-6.9142767],
[107.5775873,-6.9136243],
[107.577987,-6.9117897],
[107.5784376,-6.9098406],
[107.5783115,-6.909782],
[107.5783384,-6.9097048],
[107.5784751,-6.9097154],
[107.5784376,-6.9098406],
[107.5784751,-6.9097154],
[107.5786978,-6.9086637],
[107.5788963,-6.908227],
[107.5787271,-6.908121],
[107.5788963,-6.908227],
[107.5791012,-6.9079],
[107.5790556,-6.9079719],
[107.5781562,-6.9074106],
[107.5780409,-6.9076289],
[107.5780087,-6.9077701],
[107.5780409,-6.9076289],
[107.5778719,-6.9075544],
[107.5780409,-6.9076289],
[107.5781562,-6.9074106],
[107.5777807,-6.9071816],
[107.5776788,-6.9073813],
[107.5777807,-6.9071816],
[107.5775983,-6.9070431],
[107.5773999,-6.907344],
[107.5775983,-6.9070431],
[107.5771665,-6.9067955],
[107.5765281,-6.9063428],
[107.5761902,-6.9069686],
[107.5758039,-6.9068088],
[107.5761902,-6.9069686],
[107.5765281,-6.9063428],
[107.5761285,-6.9060872],
[107.5758549,-6.9065905],
[107.5761285,-6.9060872],
[107.5754177,-6.9056052],
[107.5751361,-6.9054375],
[107.5754177,-6.9056052],
[107.5751253,-6.9061591],
[107.5748947,-6.9064467],
[107.5750127,-6.9063002],
[107.5753909,-6.9065292],
[107.5754713,-6.9064307],
[107.5756403,-6.9064973],
[107.5759381,-6.9059607],
[107.5756832,-6.9057836],
[107.5755491,-6.9059967],
[107.5753936,-6.9062816],
[107.5751253,-6.9061591],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5737476,-6.9158778],
[107.5739852,-6.915599],
[107.5741636,-6.9152395],
[107.5735333,-6.9149932],
[107.5741636,-6.9152395],
[107.5743219,-6.9152648],
[107.5749656,-6.9137098],
[107.5743219,-6.9152648],
[107.5749334,-6.915595],
[107.5747617,-6.9160316],
[107.5741287,-6.9157707],
[107.5747617,-6.9160316],
[107.5743445,-6.9170166],
[107.5747617,-6.9160316],
[107.5744813,-6.9170459],
[107.5747617,-6.9160316],
[107.5749334,-6.915595],
[107.576017,-6.9158559],
[107.5765374,-6.9159731],
[107.5770321,-6.916074],
[107.5765374,-6.9159731],
[107.577106,-6.9135074],
[107.5775873,-6.9136243],
[107.577106,-6.9135074],
[107.5774118,-6.9121707],
[107.5773688,-6.9123465],
[107.5767197,-6.9121121],
[107.5770202,-6.9114784],
[107.5774547,-6.9116435],
[107.577987,-6.9117897],
[107.5774547,-6.9116435],
[107.577519,-6.9113133],
[107.5774547,-6.9116435],
[107.5770202,-6.9114784],
[107.5775459,-6.9105518],
[107.5775888,-6.91044],
[107.577562,-6.9103494],
[107.577283,-6.9101897],
[107.577562,-6.909721],
[107.5774064,-6.909982],
[107.577224,-6.9098861],
[107.5774815,-6.9094974],
[107.5776639,-6.9091938],
[107.5779643,-6.9087039],
[107.5781467,-6.9086133],
[107.5780179,-6.9085015],
[107.5779267,-6.9084589],
[107.5778034,-6.9083311],
[107.5779267,-6.9084589],
[107.5780233,-6.9085015],
[107.5781413,-6.9086133],
[107.5782915,-6.908608],
[107.5786978,-6.9086637],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.576017,-6.9158559],
[107.5761265,-6.9153624],
[107.5750911,-6.9150828],
[107.5761265,-6.9153624],
[107.5764025,-6.9140311],
[107.5758017,-6.9137967],
[107.5759612,-6.9138593],
[107.5762308,-6.9133175],
[107.577106,-6.9135074],
[107.5762308,-6.9133175],
[107.5764883,-6.9127583],
[107.577234,-6.91295],
[107.5764883,-6.9127583],
[107.5767197,-6.9121121],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5749334,-6.915595],
[107.5758022,-6.9131228],
[107.5753516,-6.9129897],
[107.575483,-6.9130269],
[107.5756734,-6.9125423],
[107.575483,-6.9130269],
[107.5758022,-6.9131228],
[107.5762308,-6.9133175],
[107.5758022,-6.9131228],
[107.5758558,-6.9130323],
[107.5766337,-6.9112536],
[107.5761723,-6.9110832],
[107.5766337,-6.9112536],
[107.5770202,-6.9114784],
[107.5766337,-6.9112536],
[107.5768107,-6.9109553],
[107.5767678,-6.9108701],
[107.5770306,-6.9104175],
[107.5767678,-6.9108701],
[107.5768107,-6.9109553],
[107.577283,-6.9101897],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5737868,-6.9084887],
[107.5746753,-6.9087599],
[107.5752063,-6.908909],
[107.574627,-6.9099368],
[107.5742139,-6.9097664],
[107.5746753,-6.9087599],
[107.5742139,-6.9097664],
[107.5734708,-6.9094448],
[107.5742139,-6.9097664],
[107.574627,-6.9099368],
[107.5750132,-6.9100859],
[107.5755121,-6.9092498],
[107.5756999,-6.9092924],
[107.5757857,-6.9090634],
[107.5754316,-6.9089196],
[107.5757857,-6.9090634],
[107.5756999,-6.9092924],
[107.5755121,-6.9092498],
[107.5750132,-6.9100859],
[107.5754263,-6.9101924],
[107.5760164,-6.9092764],
[107.5754263,-6.9101924],
[107.5752653,-6.9106504],
[107.5754263,-6.9101924],
[107.5766225,-6.9107037],
[107.5759024,-6.9103908],
[107.5762685,-6.9097983],
[107.576365,-6.9094788],
[107.5761934,-6.9093936],
[107.576306,-6.9090528],
[107.5764026,-6.9086054],
[107.5767567,-6.9086693],
[107.5768586,-6.9084457],
[107.5767567,-6.9086693],
[107.5770302,-6.9087119],
[107.5773253,-6.9082699],
[107.5770302,-6.9087119],
[107.5772448,-6.9087758],
[107.577336,-6.9087652],
[107.577395,-6.908648],
[107.577336,-6.9087652],
[107.5775238,-6.9087545],
[107.5779643,-6.9087039],
[107.5775238,-6.9087545],
[107.5770839,-6.9090208],
[107.5770517,-6.9092818],
[107.5770034,-6.9098516],
[107.577224,-6.9098861],
[107.5770034,-6.9098516],
[107.5770517,-6.9092764],
[107.5770839,-6.9090208],
[107.5769873,-6.9090155],
[107.5768532,-6.9089516],
[107.5767674,-6.9089835],
[107.5767298,-6.9090847],
[107.5766011,-6.9096492],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5771665,-6.9067955],
[107.5770051,-6.907072],
[107.5768174,-6.9069868],
[107.5770051,-6.907072],
[107.5766135,-6.9077324],
[107.5762756,-6.907514],
[107.5766135,-6.9077324],
[107.5765009,-6.9078868],
[107.5763936,-6.9080732],
[107.5764901,-6.9081477],
[107.5764026,-6.9086054],
[107.5759966,-6.9084939],
[107.5760932,-6.9082117],
[107.5759966,-6.9084939],
[107.575723,-6.9084619],
[107.5756586,-6.9086483],
[107.575723,-6.9084619],
[107.5756426,-6.9084513],
[107.5757713,-6.9080892],
[107.5757713,-6.9079134],
[107.576002,-6.9074182],
[107.5751919,-6.9070241],
[107.5752992,-6.9067045],
[107.575428,-6.9067844],
[107.5752992,-6.9067045],
[107.5753909,-6.9065292],
[107.5752992,-6.9067045],
[107.5751919,-6.9070241],
[107.57509,-6.9071359],
[107.5748701,-6.90704],
[107.5746931,-6.9069229],
[107.5743926,-6.9067631],
[107.5744879,-6.9068124],
[107.574339,-6.9071199],
[107.5742478,-6.907072],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5739905,-6.9081123],
[107.5742721,-6.9081522],
[107.5742453,-6.9079925],
[107.5742158,-6.9079073],
[107.5742426,-6.9077182],
[107.5742158,-6.9079073],
[107.5742453,-6.9079925],
[107.5742721,-6.9081522],
[107.5749856,-6.9082801],
[107.5751519,-6.9079579],
[107.5749856,-6.9082801],
[107.5754228,-6.9083946],
[107.5755006,-6.9082082],
[107.5754228,-6.9083946],
[107.5755301,-6.9084265],
[107.5754282,-6.9086901],
[107.5755301,-6.9084265],
[107.5756426,-6.9084513],
]])
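    # Road polylines for Kelurahan Kebon Jeruk.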
def jalanKelurahanKebonJeruk(self, nama):
self.jalan.record(nama)
self.jalan.line([[
[107.592997,-6.9194399],
[107.5938698,-6.9195477],
[107.5951519,-6.9197021],
[107.5954952,-6.9197421],
[107.5956937,-6.9197687],
[107.5972386,-6.9199498],
[107.5976544,-6.9200004],
[107.5983705,-6.9200829],
[107.5997385,-6.9202507],
[107.6004761,-6.9203385],
[107.6005619,-6.9200589],
[107.6005673,-6.9198033],
[107.6005619,-6.9200589],
[107.6004761,-6.9203385],
[107.6018038,-6.9204983],
[107.6021498,-6.9205462],
[107.6031502,-6.9206607],
[107.6041051,-6.9207779],
[107.6041775,-6.9198513],
[107.6041909,-6.9194519],
[107.6042151,-6.9191164],
[107.6042419,-6.9186477],
[107.6042875,-6.9183176],
[107.6043438,-6.9183309],
[107.6042875,-6.9183202],
[107.6043948,-6.91713],
[107.6044752,-6.9158253],
[107.604545,-6.9158066],
[107.6044752,-6.9158253],
[107.6045745,-6.9148294],
[107.6046228,-6.9147629],
[107.6045745,-6.9148268],
[107.6030376,-6.9147602],
[107.6027881,-6.9147842],
[107.6027881,-6.9151463],
[107.6027881,-6.9147842],
[107.6025628,-6.9148241],
[107.6015168,-6.9147708],
[107.6004385,-6.9146777],
[107.5994568,-6.9146537],
[107.5992235,-6.9147309],
[107.5990786,-6.9146191],
[107.5989258,-6.9147069],
[107.5982847,-6.9146723],
[107.5981855,-6.9147149],
[107.5982847,-6.9146723],
[107.5989258,-6.9147069],
[107.5990786,-6.9146191],
[107.5992262,-6.9147336],
[107.5990089,-6.9148081],
[107.5980702,-6.9148107],
[107.5981185,-6.9142994],
[107.5980702,-6.9148107],
[107.5973567,-6.9148426],
[107.5973031,-6.9146829],
[107.5979307,-6.9147521],
[107.5973031,-6.9146829],
[107.595506,-6.9146349],
[107.5953773,-6.9145764],
[107.5936124,-6.9143421],
[107.5929579,-6.9143474],
[107.5928399,-6.9143634],
[107.5927862,-6.914406],
[107.5927916,-6.9145071],
[107.592856,-6.9145604],
[107.5929365,-6.914603],
[107.5929472,-6.9148107],
[107.5928877,-6.915772],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5929574,-6.9158226],
[107.5930423,-6.9146035],
[107.5931067,-6.914513],
[107.5932301,-6.9144757],
[107.5935922,-6.9144837],
[107.5954965,-6.9147686],
[107.595506,-6.9146349],
[107.5954992,-6.9147686],
[107.5954617,-6.9151574],
[107.5972724,-6.915159],
[107.5971812,-6.9152921],
[107.5978732,-6.9152921],
[107.5971812,-6.9152921],
[107.5956201,-6.9153134],
[107.5954592,-6.9152602],
[107.5954592,-6.915159],
[107.5954592,-6.9152602],
[107.5954914,-6.9160536],
[107.5932149,-6.9160289],
[107.5954914,-6.9160536],
[107.5954806,-6.9163838],
[107.5933169,-6.9165122],
[107.5942522,-6.916453],
[107.5942951,-6.9171347],
[107.5942522,-6.916453],
[107.5954806,-6.9163838],
[107.5955987,-6.9163838],
[107.5955504,-6.9172945],
[107.5955987,-6.9163838],
[107.5958562,-6.9163838],
[107.5958722,-6.9166501],
[107.5960546,-6.9169803],
[107.5959634,-6.9174329],
[107.5960546,-6.9169803],
[107.5958722,-6.9166554],
[107.5958562,-6.9163838],
[107.5959259,-6.9163785],
[107.5958883,-6.9161495],
[107.5959259,-6.9163785],
[107.5966984,-6.9163732],
[107.5966876,-6.9166767],
[107.5967252,-6.9167353],
[107.5967145,-6.9169962],
[107.5967252,-6.9167353],
[107.5966876,-6.9166767],
[107.5966984,-6.9163732],
[107.5974011,-6.9163465],
[107.5973904,-6.9164956],
[107.5972831,-6.9170069],
[107.5973904,-6.9164956],
[107.5974011,-6.9163465],
[107.598195,-6.9163252],
[107.5982165,-6.9170921],
[107.598195,-6.9163199],
[107.598136,-6.9155318],
[107.5972187,-6.915585],
[107.598136,-6.9155318],
[107.5980702,-6.9148107],
[107.5973567,-6.9148426],
[107.5972724,-6.915159],
[107.5973567,-6.9148426],
[107.5980702,-6.9148107],
[107.598136,-6.9155318],
[107.598195,-6.9163252],
[107.5995703,-6.9162367],
[107.5992753,-6.9147988],
[107.5995489,-6.9147829],
[107.5992753,-6.9147988],
[107.5992235,-6.9147309],
[107.5992753,-6.9147988],
[107.5995703,-6.9162367],
[107.5999405,-6.9162047],
[107.5998976,-6.9160343],
[107.5998815,-6.915816],
[107.5996937,-6.9148787],
[107.5998815,-6.915816],
[107.5998976,-6.916029],
[107.5999405,-6.9162047],
[107.6013406,-6.9161994],
[107.6012923,-6.9156722],
[107.6014533,-6.9156562],
[107.6014586,-6.9155604],
[107.6018019,-6.9155497],
[107.6018127,-6.9152994],
[107.601582,-6.9153473],
[107.6015168,-6.9147708],
[107.601582,-6.9153473],
[107.6018127,-6.9152994],
[107.6018019,-6.9155497],
[107.6014586,-6.9155604],
[107.6014533,-6.9156562],
[107.6012923,-6.9156722],
[107.6013406,-6.9161994],
[107.6016142,-6.9162047],
[107.6016356,-6.9156349],
[107.6016142,-6.9162047],
[107.6023008,-6.9161195],
[107.6023116,-6.9153686],
[107.6024779,-6.915374],
[107.6024832,-6.9158266],
[107.6024779,-6.915374],
[107.6027514,-6.9153846],
[107.6027193,-6.9161089],
[107.6023008,-6.9161195],
[107.6027193,-6.9161089],
[107.6031055,-6.9160982],
[107.6044752,-6.9158253],
[107.6031055,-6.9160982],
[107.6031484,-6.9168012],
[107.6027407,-6.9168065],
[107.60273,-6.916929],
[107.6026388,-6.9169343],
[107.60273,-6.916929],
[107.6027407,-6.9168065],
[107.6028748,-6.9168065],
[107.6028802,-6.9169503],
[107.6028748,-6.9168065],
[107.6031484,-6.9168012],
[107.6031699,-6.9170089],
[107.6043948,-6.91713],
[107.6042875,-6.9183176],
[107.6034971,-6.9180633],
[107.6030948,-6.9179302],
[107.6031699,-6.9170089],
[107.6030948,-6.9179302],
[107.603025,-6.9182284],
[107.6030948,-6.9179302],
[107.6034971,-6.9180633],
[107.6033684,-6.9185692],
[107.6033684,-6.9186437],
[107.6037921,-6.9186704],
[107.6038619,-6.9185905],
[107.6042419,-6.9186477],
[107.6042151,-6.9191164],
[107.6036419,-6.9191017],
[107.6032986,-6.9190751],
[107.6033684,-6.9186437],
[107.6033684,-6.9185692],
[107.6027032,-6.9185266],
[107.6027032,-6.9189526],
[107.6029982,-6.9189633],
[107.6033093,-6.9190005],
[107.6029982,-6.9189633],
[107.6029928,-6.9191017],
[107.602907,-6.9191337],
[107.6029124,-6.9193627],
[107.6029928,-6.9194053],
[107.6030197,-6.9196609],
[107.6032342,-6.9196662],
[107.6032986,-6.9190751],
[107.6032705,-6.9193414],
[107.6041909,-6.9194519],
[107.6041775,-6.9198513],
[107.6032503,-6.919794],
[107.6032342,-6.9196662],
[107.6032503,-6.919794],
[107.6032369,-6.9198792],
[107.6021948,-6.9198239],
[107.6032369,-6.9198792],
[107.6032141,-6.9201819],
[107.6026696,-6.9200807],
[107.6032168,-6.9201819],
[107.6031658,-6.9204348],
[107.6031502,-6.9206607],
[107.6021498,-6.9205462],
[107.6021895,-6.9198677],
[107.6018515,-6.9198464],
[107.6021895,-6.9198677],
[107.6021948,-6.9198239],
[107.6022351,-6.9193085],
[107.6018837,-6.9193085],
[107.6022378,-6.9193112],
[107.6022538,-6.9184511],
[107.6027032,-6.9185266],
[107.6022512,-6.9184485],
[107.6022485,-6.9179799],
[107.6025757,-6.9179692],
[107.6022485,-6.9179799],
[107.6022458,-6.917884],
[107.6016182,-6.917892],
[107.6022458,-6.9178867],
[107.6022726,-6.9173115],
[107.6026776,-6.9173594],
[107.6026723,-6.9175805],
[107.6026803,-6.9173594],
[107.6031363,-6.9173861],
[107.6026776,-6.9173648],
[107.6022753,-6.9173142],
[107.6022699,-6.9169467],
[107.6022109,-6.9161266],
[107.6022699,-6.9169441],
[107.602278,-6.9173168],
[107.6015162,-6.9172955],
[107.6015484,-6.9168296],
[107.6015189,-6.9172955],
[107.6012882,-6.9172876],
[107.6008323,-6.9172663],
[107.6007894,-6.9177242],
[107.6008296,-6.9172609],
[107.6007974,-6.9172583],
[107.6008135,-6.9171837],
[107.6008189,-6.917],
[107.6007062,-6.9168855],
[107.600505,-6.9166432],
[107.6004004,-6.9163982],
[107.6003924,-6.9162038],
[107.5999405,-6.9162047],
[107.5995703,-6.9162367],
[107.5991908,-6.9162571],
[107.5992095,-6.9165447],
[107.5994456,-6.9166325],
[107.5998211,-6.9168935],
[107.5998586,-6.9166911],
[107.5998211,-6.9168935],
[107.5998104,-6.9171864],
[107.5982165,-6.9170921],
[107.5972831,-6.9170069],
[107.5967145,-6.9169962],
[107.5964254,-6.9169893],
[107.5963315,-6.9170639],
[107.5962886,-6.9169893],
[107.5963154,-6.9167337],
[107.5962886,-6.9169973],
[107.5963369,-6.9170612],
[107.5960043,-6.9174526],
[107.5959634,-6.9174329],
[107.5955504,-6.9172945],
[107.5952291,-6.9171864],
[107.5942951,-6.9171347],
[107.5941616,-6.9171411],
[107.5932954,-6.9170661],
[107.5941616,-6.9171411],
[107.5941643,-6.9181529],
[107.5941482,-6.918334],
[107.5931129,-6.9181849],
[107.5941536,-6.9183313],
[107.5940248,-6.9187041],
[107.5933757,-6.9186242],
[107.5940248,-6.9187041],
[107.593939,-6.9190076],
[107.5938698,-6.9195477],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5941643,-6.9181529],
[107.5944015,-6.9181847],
[107.5944444,-6.9178652],
[107.5950291,-6.9178652],
[107.5950559,-6.9177427],
[107.5952008,-6.9177054],
[107.5955226,-6.9177534],
[107.5955504,-6.9172945],
[107.5955226,-6.917748],
[107.5954904,-6.9183179],
[107.5943961,-6.91819],
[107.5954958,-6.9183179],
[107.5961503,-6.9183072],
[107.5954958,-6.9183179],
[107.5954422,-6.9187812],
[107.5953992,-6.9189409],
[107.5951632,-6.9193882],
[107.5951519,-6.9197021],
[107.5954952,-6.9197421],
[107.5956085,-6.9194095],
[107.5956889,-6.9190314],
[107.5957908,-6.9186054],
[107.5956889,-6.9190314],
[107.5956085,-6.9194149],
[107.5954952,-6.9197421],
[107.5956937,-6.9197687],
[107.5958713,-6.9192711],
[107.5959947,-6.9191113],
[107.596279,-6.9192019],
[107.5964453,-6.9192338],
[107.5966867,-6.9192871],
[107.5967564,-6.91909],
[107.5965794,-6.9190527],
[107.5965526,-6.9191539],
[107.5965794,-6.9190527],
[107.596456,-6.9190368],
[107.5964507,-6.9192338],
[107.596456,-6.9190368],
[107.5963273,-6.9190527],
[107.596279,-6.9192019],
[107.5959947,-6.9191113],
[107.5961503,-6.9187332],
[107.5964829,-6.9188557],
[107.5967779,-6.9188983],
[107.5967564,-6.91909],
[107.5967779,-6.9188983],
[107.5968289,-6.9185868],
[107.5965875,-6.9185149],
[107.5962119,-6.9184856],
[107.5961503,-6.9187306],
[107.5962093,-6.9184909],
[107.5962441,-6.9183152],
[107.5961476,-6.9183072],
[107.5962441,-6.9183179],
[107.5965821,-6.918443],
[107.5965875,-6.9185149],
[107.5965821,-6.9184403],
[107.596743,-6.9176256],
[107.5963407,-6.9175137],
[107.5962978,-6.9175723],
[107.5960043,-6.9174526],
[107.5959705,-6.9174924],
[107.5961503,-6.9183072],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5972831,-6.9170069],
[107.597159,-6.9175675],
[107.5974702,-6.91769],
[107.5973951,-6.9183557],
[107.5969123,-6.9182439],
[107.597159,-6.9175675],
[107.5969123,-6.9182439],
[107.5968289,-6.9185868],
[107.597379,-6.9186699],
[107.5973951,-6.9183557],
[107.597379,-6.9186699],
[107.5973575,-6.9190118],
[107.5967779,-6.9188983],
[107.5973575,-6.9190118],
[107.5972386,-6.9199498],
[107.5976544,-6.9200004],
[107.5977706,-6.9193739],
[107.5977116,-6.9193153],
[107.5977867,-6.9186656],
[107.597379,-6.9186699],
[107.5977867,-6.9186656],
[107.5982802,-6.9187082],
[107.5983705,-6.9200829],
[107.5982802,-6.9187082],
[107.5982641,-6.9177869],
[107.5981193,-6.9177923],
[107.5979154,-6.9177656],
[107.597733,-6.9177443],
[107.5979154,-6.9177656],
[107.5981193,-6.9177923],
[107.5982641,-6.9177869],
[107.5982165,-6.9170921],
[107.5982641,-6.9177869],
[107.5982748,-6.9180532],
[107.5989722,-6.9181224],
[107.5990795,-6.9180745],
[107.5990956,-6.9178562],
[107.5989668,-6.9178455],
[107.5989132,-6.9177656],
[107.5987254,-6.9177071],
[107.5989132,-6.9177656],
[107.5989668,-6.9178455],
[107.5990956,-6.9178562],
[107.5991868,-6.9174994],
[107.5994765,-6.9175473],
[107.5997822,-6.9175739],
[107.5998104,-6.9171864],
[107.6007974,-6.9172583],
[107.5998104,-6.9171864],
[107.5997822,-6.9175739],
[107.6000612,-6.9176378],
[107.5997822,-6.9175739],
[107.599734,-6.9177443],
[107.6003831,-6.9178455],
[107.6004206,-6.9174994],
[107.600544,-6.9174727],
[107.6005547,-6.9172384],
[107.600544,-6.9174727],
[107.6004206,-6.9174994],
[107.6003831,-6.9178455],
[107.6004004,-6.9182369],
[107.6005667,-6.9182795],
[107.6005399,-6.9185458],
[107.6004433,-6.9186097],
[107.6002395,-6.9186097],
[107.6002234,-6.9188973],
[107.5996387,-6.9188281],
[107.599628,-6.9183168],
[107.5999713,-6.9183168],
[107.599628,-6.9183168],
[107.5996119,-6.9180293],
[107.5999874,-6.9180825],
[107.6004004,-6.9182369],
[107.6003831,-6.9178455],
[107.599734,-6.9177443],
[107.5996119,-6.9180293],
[107.5996199,-6.9181631],
[107.5993812,-6.9181304],
[107.5994765,-6.9175473],
[107.5993812,-6.9181304],
[107.5990795,-6.9180745],
[107.5989722,-6.9181224],
[107.5989198,-6.9187535],
[107.5993812,-6.9187855],
[107.5993812,-6.9181304],
[107.5993812,-6.9187855],
[107.5996387,-6.9188281],
[107.5998157,-6.9190783],
[107.5995421,-6.919105],
[107.5998157,-6.9190783],
[107.5997621,-6.9197706],
[107.5997385,-6.9202507],
[107.5997621,-6.9197706],
[107.5993436,-6.919744],
[107.5992793,-6.9198026],
[107.5992846,-6.9199304],
[107.5992793,-6.9198026],
[107.5993436,-6.919744],
[107.5997621,-6.9197706],
[107.5997889,-6.9194085],
[107.5991291,-6.9193499],
[107.5991291,-6.9190996],
[107.5987804,-6.919073],
[107.5987643,-6.9187482],
[107.5989198,-6.9187535],
[107.5987643,-6.9187482],
[107.5982802,-6.9187082],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.6018038,-6.9204983],
[107.6018515,-6.9198464],
[107.6018837,-6.9193085],
[107.6015508,-6.9193359],
[107.6015616,-6.9196714],
[107.6011592,-6.9196608],
[107.6011217,-6.919762],
[107.6011592,-6.9198951],
[107.6011217,-6.919762],
[107.6011592,-6.9196608],
[107.6011807,-6.919304],
[107.6015508,-6.9193359],
[107.6015562,-6.9189099],
[107.6017761,-6.9189099],
[107.6015562,-6.9189099],
[107.6015776,-6.9185318],
[107.60117,-6.9185265],
[107.6015776,-6.9185318],
[107.6016182,-6.917892],
[107.6012451,-6.9178981],
[107.6012882,-6.9172876],
[107.6012451,-6.9178981],
[107.60117,-6.9185265],
[107.6010788,-6.9192667],
[107.6011807,-6.919304],
[107.6010788,-6.9192667],
[107.6006872,-6.9190111],
[107.6002234,-6.9188973],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.6004385,-6.9146777],
[107.6004271,-6.9149108],
[107.5999765,-6.9150493],
[107.6001857,-6.915912],
[107.6002501,-6.915928],
[107.6002554,-6.9162049],
[107.600588,-6.9161996],
[107.600529,-6.9158641],
[107.6002501,-6.9159227],
[107.600529,-6.9158641],
[107.6006739,-6.9158321],
[107.6007382,-6.9157842],
[107.6007114,-6.9156457],
[107.6009528,-6.9156085],
[107.6010011,-6.9155339],
[107.6015053,-6.9154274],
[107.6014732,-6.9152357],
[107.6013551,-6.9152517],
[107.6013716,-6.9154557],
[107.6010011,-6.9155339],
[107.6008777,-6.9153315],
[107.6006041,-6.9154061],
[107.6004271,-6.9149055],
]])
def jalanKelurahanMaleber(self, nama):
self.jalan.record(nama)
self.jalan.line([[
[107.5688412,-6.9100128],
[107.569164,-6.9105174],
[107.569341,-6.9106346],
[107.5692847,-6.9107491],
[107.5696974,-6.9114267],
[107.5699509,-6.9108835],
[107.5696974,-6.9114267],
[107.5700515,-6.9119965],
[107.5701373,-6.9119139],
[107.5700515,-6.9119965],
[107.5704029,-6.9125876],
[107.5705826,-6.9123799],
[107.5706255,-6.9122947],
[107.5706952,-6.9120551],
[107.5706523,-6.9120471],
[107.5706952,-6.9120551],
[107.5708293,-6.912063],
[107.5709715,-6.9120737],
[107.5708293,-6.912063],
[107.5710466,-6.9113122],
[107.5708293,-6.912063],
[107.5706952,-6.9120551],
[107.5706255,-6.9122947],
[107.5705826,-6.9123799],
[107.5704029,-6.9125876],
[107.5706335,-6.9129604],
[107.5707784,-6.9128432],
[107.571009,-6.9125743],
[107.5710546,-6.9125743],
[107.5711968,-6.9126062],
[107.5712799,-6.9123719],
[107.5711968,-6.9126062],
[107.5710546,-6.9125743],
[107.571009,-6.9125743],
[107.5707784,-6.9128432],
[107.5706335,-6.9129604],
[107.5709473,-6.9134849],
[107.571229,-6.9133278],
[107.5714865,-6.9133997],
[107.571229,-6.9133278],
[107.5709473,-6.9134849],
[107.5716769,-6.9147284],
[107.5717761,-6.9146565],
[107.5720175,-6.9138817],
[107.5721946,-6.9135195],
[107.5720256,-6.9134477],
[107.5721946,-6.9135195],
[107.5722482,-6.913421],
[107.5723825,-6.9130857],
[107.5721198,-6.9129207],
[107.5722113,-6.9127933],
[107.5721959,-6.9127141],
[107.5718432,-6.9126089],
[107.5721959,-6.9127141],
[107.5722113,-6.9127933],
[107.5721198,-6.9129207],
[107.5723825,-6.9130857],
[107.5733039,-6.9134641],
[107.5723825,-6.9130857],
[107.5722482,-6.913421],
[107.5721946,-6.9135195],
[107.5720175,-6.9138817],
[107.5717761,-6.9146565],
[107.5716769,-6.9147284],
[107.5718647,-6.9150186],
[107.572157,-6.9148296],
[107.5725674,-6.9148882],
[107.5725754,-6.9148296],
[107.5725674,-6.9148882],
[107.572157,-6.9148296],
[107.5718647,-6.9150186],
[107.5721892,-6.9154953],
[107.572318,-6.9153142],
[107.5721892,-6.9154953],
[107.5724226,-6.9157908],
[107.572554,-6.9155805],
[107.572503,-6.9154846],
[107.5723662,-6.915426],
[107.572503,-6.9154846],
[107.572554,-6.9155805],
[107.572664,-6.9156071],
[107.5728008,-6.9155565],
[107.5729214,-6.9155139],
[107.5729636,-6.9155205],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5735333,-6.9149932],
[107.5731403,-6.9148526],
[107.5733817,-6.9149379],
[107.5735078,-6.9142189],
[107.5735641,-6.9139686],
[107.5742373,-6.9141444],
[107.5738457,-6.9151136],
[107.5742373,-6.9141417],
[107.574468,-6.9136757],
[107.57424,-6.914147],
[107.5735587,-6.9139713],
[107.5731537,-6.9138568],
[107.5722482,-6.913421],
[107.5731537,-6.9138568],
[107.5733039,-6.9134641],
[107.5739476,-6.9137037],
[107.5733039,-6.9134641],
[107.5734675,-6.9129448],
[107.5740469,-6.9131525],
[107.5734675,-6.9129448],
[107.5737223,-6.9123404],
[107.5745994,-6.9126759],
[107.5745833,-6.9127717],
[107.5745941,-6.9128383],
[107.574645,-6.9128783],
[107.5747738,-6.912849],
[107.5748086,-6.9127904],
[107.5748355,-6.9127584],
[107.5753516,-6.9129897],
[107.5748355,-6.9127611],
[107.5747604,-6.9127345],
[107.5749052,-6.9124735],
[107.574645,-6.9123244],
[107.5749079,-6.9124735],
[107.5747604,-6.9127345],
[107.5745967,-6.9126759],
[107.5740871,-6.9124762],
[107.574181,-6.9122259],
[107.5740871,-6.9124762],
[107.5737223,-6.9123404],
[107.5732932,-6.9121726],
[107.5736097,-6.9113685],
[107.5732932,-6.9121726],
[107.5729794,-6.9120448],
[107.5727299,-6.9126386],
[107.5729794,-6.9120422],
[107.5729016,-6.9120129],
[107.5731576,-6.9113992],
[107.5733064,-6.9109505],
[107.5731576,-6.9113992],
[107.5729016,-6.9120129],
[107.5719977,-6.9116561],
[107.5721237,-6.911451],
[107.5719977,-6.9116561],
[107.5718662,-6.9119729],
[107.5719977,-6.9116561],
[107.571771,-6.9115695],
[107.5717187,-6.9116827],
[107.571771,-6.9115695],
[107.5717974,-6.9115781],
[107.5719453,-6.911215],
[107.5717981,-6.9112033],
[107.5719453,-6.911215],
[107.5717974,-6.9115781],
[107.5715459,-6.9114949],
[107.5713824,-6.9119103],
[107.5715459,-6.9114949],
[107.57131,-6.9114123],
[107.5714226,-6.9110928],
[107.57131,-6.9114123],
[107.5710466,-6.9113122],
[107.5711517,-6.9109676],
[107.5710466,-6.9113122],
[107.5706314,-6.9111354],
[107.5705402,-6.9113484],
[107.5705617,-6.9114496],
[107.5705053,-6.9115987],
[107.5704436,-6.9115774],
[107.5704597,-6.9115108],
[107.5703363,-6.9114443],
[107.5704517,-6.9110848],
[107.5706314,-6.9111354],
[107.5705821,-6.9111211],
[107.5709613,-6.9101795],
[107.5708916,-6.9101555],
[107.5709613,-6.9101795],
[107.5705821,-6.9111211],
[107.5704463,-6.9110821],
[107.5699509,-6.9108835],
[107.5696658,-6.9107653],
[107.5703524,-6.909125],
[107.5705804,-6.909133],
[107.570854,-6.9085925],
[107.5705804,-6.909133],
[107.5703524,-6.909125],
[107.5696658,-6.9107653],
[107.569341,-6.9106346],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5696075,-6.9094825],
[107.5696906,-6.9092908],
[107.5693588,-6.9092193],
[107.5696906,-6.9092908],
[107.5697684,-6.9090911],
[107.5695056,-6.9090351],
[107.5697684,-6.9090911],
[107.5698569,-6.9088914],
[107.5695834,-6.9088354],
[107.5698569,-6.9088914],
[107.5699481,-6.9087023],
[107.5697282,-6.9086331],
[107.5699481,-6.9087023],
[107.5700366,-6.9084973],
[107.5696569,-6.9084019],
[107.5700366,-6.9084973],
[107.5701129,-6.9083221],
[107.5698607,-6.9082528],
[107.5701129,-6.9083221],
[107.570854,-6.9085925],
[107.5701129,-6.9083221],
[107.5701665,-6.9082049],
[107.5698661,-6.9081197],
[107.5701665,-6.9082049],
[107.5702041,-6.908125],
[107.5703113,-6.908157],
[107.5702041,-6.908125],
[107.5702631,-6.9079652],
[107.5700002,-6.9078694],
[107.5702631,-6.9079652],
[107.5702952,-6.9079173],
[107.5705125,-6.9079985],
[107.570719,-6.9080877],
[107.5705125,-6.9079985],
[107.5702952,-6.9079173],
[107.5703435,-6.9077203],
[107.569968,-6.9076297],
[107.5703435,-6.9077203],
[107.5704133,-6.9075499],
[107.5700753,-6.9074806],
[107.5704133,-6.9075499],
[107.5706332,-6.9076138],
[107.5705125,-6.9079985],
[107.5706332,-6.9076138],
[107.5704133,-6.9075499],
[107.570483,-6.907257],
[107.5700807,-6.9071558],
[107.570483,-6.907257],
[107.5709604,-6.9073794],
[107.570483,-6.907257],
[107.5705098,-6.9070013],
[107.5702309,-6.9069641],
[107.5705098,-6.9070013],
[107.5705366,-6.9069215],
[107.5707995,-6.9069854],
[107.5705366,-6.9069215],
[107.570542,-6.9068256],
[107.5702255,-6.9067777],
[107.570542,-6.9068256],
[107.5705742,-6.9067138],
[107.5708585,-6.9067777],
[107.5705742,-6.9067138],
[107.570593,-6.9065859],
[107.5702094,-6.9065487],
[107.570593,-6.9065859],
[107.5705957,-6.906522],
[107.5707727,-6.9065487],
[107.5705957,-6.906522],
[107.5706225,-6.9063889],
[107.5702952,-6.9063463],
[107.5706225,-6.9063889],
[107.5706654,-6.9061972],
[107.5702952,-6.9061493],
[107.5706654,-6.9061972],
[107.570719,-6.9059629],
[107.5702952,-6.9058936],
[107.570719,-6.9059629],
[107.5708961,-6.9059788],
[107.570719,-6.9059629],
[107.5707459,-6.9057392],
[107.570365,-6.9056487],
[107.5707459,-6.9057392],
[107.5707673,-6.9055368],
[107.5703274,-6.9054569],
[107.5707673,-6.9055368],
[107.5708371,-6.9053664],
[107.5706654,-6.9053025],
[107.5708371,-6.9053664],
[107.5708961,-6.9052066],
[107.570542,-6.9051108],
[107.5708961,-6.9052066],
[107.5709765,-6.9049776],
[107.5709578,-6.9050296],
[107.5705313,-6.9048871],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5758411,-6.9122926],
[107.5737543,-6.9114911],
[107.5736578,-6.9113633],
[107.5736097,-6.9113685],
[107.5736604,-6.9113659],
[107.5736953,-6.9112754],
[107.5737892,-6.9112434],
[107.5739179,-6.9109692],
[107.5742505,-6.9110704],
[107.5760771,-6.9117866],
[107.5742532,-6.9110704],
[107.5739179,-6.9109665],
[107.5742183,-6.9102769],
[107.5761723,-6.9110832],
[107.574221,-6.9102769],
[107.5737194,-6.9100665],
[107.5733064,-6.9109505],
[107.5737221,-6.9100665],
[107.5732742,-6.9098349],
[107.5730677,-6.909739],
[107.5732715,-6.9098322],
[107.5734512,-6.9094301],
[107.5737248,-6.908602],
[107.5737868,-6.9084887],
[107.5739448,-6.9081067],
[107.5739905,-6.9081123],
[107.5739448,-6.9081067],
[107.5736658,-6.9080375],
[107.5737838,-6.907694],
[107.5739367,-6.907694],
[107.5742478,-6.907072],
[107.5739367,-6.907694],
[107.5737838,-6.907694],
[107.5736658,-6.9080375],
[107.5731535,-6.9079523],
[107.5737731,-6.9064718],
[107.5741084,-6.906597],
[107.5743926,-6.9067631],
[107.5741084,-6.906597],
[107.5737731,-6.9064718],
[107.5731535,-6.9079523],
[107.5728906,-6.9078964],
[107.5729577,-6.9078218],
[107.5730382,-6.907505],
[107.5729577,-6.9078218],
[107.5728906,-6.9078964],
[107.5726573,-6.9085807],
[107.5728906,-6.9078964],
[107.5726788,-6.9078511],
[107.5729657,-6.9069831],
[107.5732527,-6.9070576],
[107.5729657,-6.9069831],
[107.5729604,-6.9068979],
[107.5730623,-6.9064692],
[107.5730221,-6.9064239],
[107.5730757,-6.9062135],
[107.5733627,-6.90632],
[107.5734325,-6.9062934],
[107.5734566,-6.9061763],
[107.5734995,-6.9060724],
[107.5737007,-6.905476],
[107.5737999,-6.9055079],
[107.5743256,-6.9057928],
[107.5742291,-6.9060538],
[107.5741888,-6.9061283],
[107.5743095,-6.9061922],
[107.5744436,-6.9062082],
[107.5745643,-6.9058993],
[107.5745107,-6.9058461],
[107.5745831,-6.9055905],
[107.5747682,-6.9052044],
[107.575074,-6.9053961],
[107.574685,-6.9060218],
[107.575074,-6.9053961],
[107.5751361,-6.9054375],
[107.575074,-6.9053961],
[107.5747682,-6.9052044],
[107.5741271,-6.904781],
[107.5746166,-6.9051038],
[107.5745375,-6.9052363],
[107.5745053,-6.9052549],
[107.5744705,-6.9053455],
[107.5745322,-6.9054067],
[107.5744061,-6.9057023],
[107.5743551,-6.9057209],
[107.5743256,-6.9057928],
[107.5737999,-6.9055079],
[107.5741271,-6.904781],
[107.573816,-6.9045014],
[107.573706,-6.9044401],
[107.5735478,-6.9046958],
[107.573706,-6.9044401],
[107.5734485,-6.9042697],
[107.5733439,-6.9044481],
[107.5734485,-6.9042697],
[107.5731079,-6.9040407],
[107.5729684,-6.904291],
[107.5731079,-6.9040407],
[107.5728772,-6.9038836],
[107.572778,-6.9040194],
[107.5728772,-6.9038836],
[107.572609,-6.9037798],
[107.5722684,-6.9042751],
[107.5723542,-6.9041526],
[107.5725098,-6.9042617],
[107.5723542,-6.9041526],
[107.572609,-6.9037798],
[107.5723327,-6.9035428],
[107.572204,-6.9037958],
[107.5720645,-6.9041233],
[107.572204,-6.9037958],
[107.5723327,-6.9035428],
[107.5719251,-6.9032739],
[107.5717802,-6.9035295],
[107.5719251,-6.9032739],
[107.571799,-6.903194],
[107.5715388,-6.9037239],
[107.571799,-6.903194],
[107.5709002,-6.902592],
[107.5715469,-6.9030262],
[107.5711499,-6.9036679],
[107.5709434,-6.9041313],
[107.5713511,-6.9043043],
[107.5714369,-6.9040194],
[107.5715388,-6.9037239],
[107.571681,-6.9037904],
[107.571858,-6.9039129],
[107.5718741,-6.9040061],
[107.5714369,-6.9040194],
[107.5718741,-6.9040061],
[107.5720645,-6.9041233],
[107.5722684,-6.9042751],
[107.5721557,-6.9044721],
[107.5722281,-6.9045067],
[107.5721557,-6.9044721],
[107.5720404,-6.9046825],
[107.5713913,-6.9043629],
[107.5712947,-6.904544],
[107.5713913,-6.9043629],
[107.5713511,-6.9043043],
[107.5713913,-6.9043629],
[107.5720404,-6.9046825],
[107.5717829,-6.9051271],
[107.57163,-6.9050339],
[107.5717829,-6.9051271],
[107.5717078,-6.9052842],
[107.5715576,-6.9056331],
[107.5713242,-6.9055718],
[107.5711472,-6.9054893],
[107.5712813,-6.9051618],
[107.5711472,-6.9054893],
[107.570997,-6.9054307],
[107.5708371,-6.9053664],
[107.570997,-6.9054307],
[107.5708961,-6.9059788],
[107.5707727,-6.9065487],
[107.5708897,-6.9066209],
[107.5708585,-6.9067777],
[107.5707995,-6.9069854],
[107.5708585,-6.9067777],
[107.5708897,-6.9066209],
[107.5709353,-6.9066183],
[107.5710989,-6.9066289],
[107.5713752,-6.9066795],
[107.5712733,-6.9073905],
[107.5712384,-6.9074491],
[107.5709604,-6.9073794],
[107.570719,-6.9080877],
[107.5706564,-6.9082532],
[107.570719,-6.9080877],
[107.5709604,-6.9073794],
[107.5712384,-6.9074491],
[107.5710158,-6.908176],
[107.570938,-6.9083783],
[107.5708736,-6.9083517],
[107.570938,-6.9083783],
[107.570854,-6.9085925],
[107.570938,-6.9083783],
[107.5710158,-6.908176],
[107.5713913,-6.9083411],
[107.5710936,-6.9093289],
[107.571343,-6.9094088],
[107.5714101,-6.9094275],
[107.5713135,-6.9098349],
[107.5715388,-6.9099227],
[107.5713913,-6.9104047],
[107.5714825,-6.910426],
[107.5713913,-6.9104047],
[107.5709434,-6.9102263],
[107.5709613,-6.9101795],
[107.5710936,-6.909771],
[107.5710667,-6.9098615],
[107.5712169,-6.9099147],
[107.5710667,-6.9098615],
[107.5709434,-6.9102263],
[107.5713913,-6.9104047],
[107.5712786,-6.9107242],
[107.571233,-6.9107242],
[107.5711517,-6.9109676],
[107.570989,-6.9109079],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5731576,-6.9113992],
[107.5722645,-6.9110155],
[107.5721814,-6.9112551],
[107.5719453,-6.911215],
[107.5722404,-6.9104856],
[107.5728948,-6.9107652],
[107.5730987,-6.9103897],
[107.5730424,-6.9104909],
[107.5728439,-6.9103977],
[107.5730424,-6.9104909],
[107.5728948,-6.9107652],
[107.5733064,-6.9109505],
[107.5728948,-6.9107652],
[107.5722404,-6.9104856],
[107.5723369,-6.9102166],
[107.5715388,-6.9099227],
[107.5723369,-6.9102166],
[107.5725703,-6.9097054],
[107.5727473,-6.9097959],
[107.5725703,-6.9097054],
[107.5717737,-6.9094924],
[107.5714101,-6.9094275],
[107.571343,-6.9094088],
[107.5719588,-6.907687],
[107.5716503,-6.9076018],
[107.5716074,-6.9075033],
[107.5712733,-6.9073905],
[107.5716074,-6.9075033],
[107.5716503,-6.9076018],
[107.5713913,-6.9083411],
[107.5716503,-6.9076018],
[107.5719588,-6.907687],
[107.5724201,-6.9077829],
[107.5726788,-6.9078511],
[107.5724201,-6.9077829],
[107.5724201,-6.9076311],
[107.5721626,-6.9075699],
[107.5723209,-6.9067471],
[107.5721626,-6.9075699],
[107.5716583,-6.9074527],
[107.5721626,-6.9075699],
[107.5724201,-6.9076311],
[107.5724201,-6.9077829],
[107.5719614,-6.9089492],
[107.5717737,-6.9094924],
[107.5719614,-6.9089492],
[107.5729351,-6.9093033],
[107.5729914,-6.9092847],
[107.5732918,-6.9084992],
[107.5737248,-6.908602],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5708961,-6.9059788],
[107.5710438,-6.9060206],
[107.5711472,-6.9054893],
[107.5710438,-6.9060206],
[107.5709353,-6.9066183],
[107.5710438,-6.9060206],
[107.5712101,-6.9060499],
[107.5713242,-6.9055718],
[107.5712101,-6.9060499],
[107.5710989,-6.9066289],
[107.5712101,-6.9060499],
[107.5714622,-6.9060952],
[107.5714729,-6.9060605],
[107.5715576,-6.9056331],
[107.5714729,-6.9060605],
[107.5714622,-6.9060952],
[107.5714434,-6.9062922],
[107.5713752,-6.9066795],
[107.5714434,-6.9062922],
[107.5718323,-6.906428],
[107.5717572,-6.9067475],
[107.5718323,-6.906428],
[107.5720308,-6.9064839],
[107.5723876,-6.9066969],
[107.5729604,-6.9068979],
[107.5730623,-6.9064692],
[107.5730221,-6.9064239],
[107.5722615,-6.9060632],
[107.5721113,-6.9060179],
[107.5720711,-6.9062603],
[107.5723151,-6.9063455],
[107.5726102,-6.9064946],
[107.5723125,-6.9063428],
[107.5720711,-6.9062603],
[107.5714729,-6.9060605],
[107.5720711,-6.9062603],
[107.5720308,-6.9064839],
[107.5720711,-6.9062603],
[107.5721113,-6.9060179],
[107.5721515,-6.9058129],
[107.57224,-6.9055706],
[107.5717078,-6.9052842],
[107.5717829,-6.9051271],
[107.5720094,-6.9047425],
[107.5725297,-6.9050354],
[107.5724332,-6.9052378],
[107.57224,-6.9055706],
[107.5724332,-6.9052378],
[107.572527,-6.9053496],
[107.5723285,-6.905757],
[107.5725834,-6.9058848],
[107.5726531,-6.9058555],
[107.5727604,-6.9055733],
[107.5729642,-6.9051099],
[107.5733439,-6.9044481],
[107.5731386,-6.9043377],
[107.5730688,-6.9043298],
[107.5727094,-6.9049688],
[107.572527,-6.9053496],
[107.5727094,-6.9049688],
[107.5729642,-6.9051099],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.573816,-6.9045014],
[107.5735225,-6.9049559],
[107.5732355,-6.9056402],
[107.5736057,-6.9057613],
[107.5734995,-6.9060724],
[107.5730934,-6.9059863],
[107.5732355,-6.9056402],
[107.5730934,-6.9059863],
[107.5730558,-6.9060902],
[107.5734566,-6.9061763],
[107.5731222,-6.9061048],
[107.5730757,-6.9062135],
[107.5728386,-6.9061328],
[107.5726642,-6.9060822],
[107.5721515,-6.9058129],
]])
def close(self):
self.kelurahan.close()
self.kantor.close()
self.jalan.close()
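# Added commentary (not in the original file): each jalanKelurahan* method
# above records a named road group ("nama", Indonesian for "name") and traces
# its geometry as polylines of [longitude, latitude] pairs; the repeated
# coordinate runs appear to be out-and-back traces along side branches
# rather than duplication errors.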
avg_line_length: 36.383885
max_line_length: 64
alphanum_fraction: 0.471471
count_classes: 141769
score_classes: 0.999866
count_generators: 0
score_generators: 0
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 162
score_documentation: 0.001143
hexsha: 679d339786e1a3d3431ad8eb7251f79813420fa0
size: 8226
ext: py
lang: Python
max_stars_repo_path: sparseconvnet/utils.py
max_stars_repo_name: THU-luvision/Occuseg
max_stars_repo_head_hexsha: 163e1fba6f5d9afd4ee2a4202118bc81d8f7c5e4
max_stars_repo_licenses: ["BSD-3-Clause"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2022-03-29T18:26:11.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-29T18:26:11.000Z
max_issues_repo_path: sparseconvnet/utils.py
max_issues_repo_name: THU-luvision/Occuseg
max_issues_repo_head_hexsha: 163e1fba6f5d9afd4ee2a4202118bc81d8f7c5e4
max_issues_repo_licenses: ["BSD-3-Clause"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: sparseconvnet/utils.py
max_forks_repo_name: THU-luvision/Occuseg
max_forks_repo_head_hexsha: 163e1fba6f5d9afd4ee2a4202118bc81d8f7c5e4
max_forks_repo_licenses: ["BSD-3-Clause"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch, glob, os
from .sparseConvNetTensor import SparseConvNetTensor
from .metadata import Metadata
import sparseconvnet as scn
def toLongTensor(dimension, x):
if hasattr(x, 'type') and x.type() == 'torch.LongTensor':
return x
elif isinstance(x, (list, tuple)):
assert len(x) == dimension
return torch.LongTensor(x)
else:
return torch.LongTensor(dimension).fill_(x)
def optionalTensor(a, b):
return getattr(a, b) if hasattr(a, b) else torch.Tensor()
def optionalTensorReturn(a):
return a if a.numel() else None
def threadDatasetIterator(d):
try:
import queue
except BaseException:
import Queue as queue
import threading
def iterator():
def worker(i):
for k in range(i, len(d), 8):
q.put(d[k])
q = queue.Queue(16)
for i in range(8):
t = threading.Thread(target=worker, args=(i,))
t.start()
for _ in range(len(d)):
item = q.get()
yield item
q.task_done()
q.join()
return iterator
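# A minimal usage sketch for the factory above (added commentary), assuming
# `d` is any object supporting len() and integer indexing; `my_dataset` and
# `consume` are hypothetical names:
#
#     it = threadDatasetIterator(my_dataset)
#     for item in it():
#         consume(item)
#
# Eight worker threads prefetch items through a bounded queue, so items may
# arrive out of index order.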
def concatenate_feature_planes(input):
output = SparseConvNetTensor()
output.metadata = input[0].metadata
output.spatial_size = input[0].spatial_size
output.features = torch.cat([i.features for i in input], 1)
return output
def extract_featrue(input, start, end):
output = SparseConvNetTensor()
output.metadata = input.metadata
output.spatial_size = input.spatial_size
output.features = input.features[:,start:end]
return output
def upsample_feature(lr, hr, stride, bilinear = False):
loc_lr = lr.get_spatial_locations().cuda().int()
loc_hr = hr.get_spatial_locations().cuda().int()
batch_size = torch.max(loc_hr[:,3]).item() + 1
output = SparseConvNetTensor()
output.metadata = hr.metadata
output.spatial_size = hr.spatial_size
lr_start_index = 0
hr_start_index = 0
for k in range(batch_size):
if not bilinear:
correspondence = scn.SCN.ResolutionBasedScattering(lr.metadata,loc_lr[loc_lr[:,3] == k,0:3], loc_hr[loc_hr[:,3] == k,0:3], stride)
correspondence += lr_start_index
correspondence[correspondence < 0] = 0
output_feature = torch.index_select(lr.features,0,correspondence.long())
else:
location_lr = loc_lr[loc_lr[:,3] == k,0:3]
location_hr = loc_hr[loc_hr[:,3] == k,0:3]
candidate = location_hr.clone().float()
candidate = (candidate - (stride - 1) / 2) / stride
ceil_candidate = torch.ceil(candidate)
floor_candidate = torch.floor(candidate)
diff = [ceil_candidate - candidate, candidate - floor_candidate]
anchors = [ceil_candidate, floor_candidate]
# ceil, up
# floor, bottom
for x in [0,1]:
for y in [0,1]:
for z in [0,1]:
w = (1-diff[x][:,0])*(1-diff[y][:,1])*(1-diff[z][:,2])
query = location_hr.clone()
query[:,0] = anchors[x][:,0]
query[:,1] = anchors[y][:,1]
query[:,2] = anchors[z][:,2]
if x==0 and y ==0 and z==0:
weight = w
lr_candidates = query
else:
weight = torch.cat([weight,w],0)
lr_candidates = torch.cat([lr_candidates ,query],0)
neighbor_correspondence = scn.SCN.ResolutionBasedScattering(lr.metadata,location_lr, lr_candidates, 1).long()
weight[neighbor_correspondence < 0] = 0
neighbor_correspondence.requires_grad = False
weight.requires_grad = False
neighbor_correspondence[neighbor_correspondence < 0] = 0
hr_feature = torch.index_select(lr.features,0,neighbor_correspondence + lr_start_index) * weight.view([-1,1]).expand(-1,lr.features.shape[1])
output_feature = sum([hr_feature[i * location_hr.shape[0]: i * location_hr.shape[0] + location_hr.shape[0]] for i in range(8)])
total_weight = sum([weight[i * location_hr.shape[0]: i * location_hr.shape[0] + location_hr.shape[0]] for i in range(8)])
output_feature /= total_weight.view([-1,1]).expand(-1,lr.features.shape[1])
if k == 0:
output.features = output_feature
else:
output.features = torch.cat([output.features, output_feature ], 0)
lr_start_index = lr_start_index + torch.sum(loc_lr[:,3] == k)
hr_start_index = hr_start_index + torch.sum(loc_hr[:,3] == k)
return output
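# Added commentary: upsample_feature scatters low-resolution features onto
# the high-resolution sites batch by batch; with bilinear=True it instead
# blends the 8 surrounding low-resolution anchor sites with trilinear
# weights, zeroing anchors that have no matching site and renormalising by
# the total weight actually found.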
def add_feature_planes(input):
output = SparseConvNetTensor()
output.metadata = input[0].metadata
output.spatial_size = input[0].spatial_size
output.features = sum([i.features for i in input])
return output
def append_tensors(tensors):
spatial_size=tensors[0].spatial_size
dimension=len(spatial_size)
x=SparseConvNetTensor(
features=torch.cat([t.features for t in tensors],0),
metadata=Metadata(dimension),
spatial_size=spatial_size)
for t in tensors:
x.metadata.appendMetadata(t.metadata,spatial_size)
return x
class AddCoords(torch.nn.Module):
def forward(self, input):
output = SparseConvNetTensor()
if input.features.numel():
with torch.no_grad():
coords = input.get_spatial_locations()
d = (input.spatial_size.type_as(input.features)-1)/2
coords=coords[:,:-1].type_as(input.features)/ d[None,:] - 1
output.features = torch.cat([input.features,coords],1)
else:
output.features = input.features
output.metadata = input.metadata
output.spatial_size = input.spatial_size
return output
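# Added commentary: AddCoords appends each active site's spatial location,
# rescaled to [-1, 1] per dimension, as extra feature channels (a sparse
# analogue of the CoordConv trick).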
def compare_sparse(x, y):
cL,cR,L,R = x.metadata.compareSparseHelper(y.metadata, x.spatial_size)
if x.features.is_cuda:
cL=cL.cuda()
cR=cR.cuda()
L=L.cuda()
R=R.cuda()
e = 0
if cL.numel():
e += (x.features[cL]-y.features[cR]).pow(2).sum()
if L.numel():
e += x.features[L].pow(2).sum()
if R.numel():
e += y.features[R].pow(2).sum()
return e / (cL.numel() + L.numel() + R.numel())
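# Added commentary: compare_sparse returns the mean squared feature
# difference over the union of active sites; a site active in only one of
# the two tensors is compared against implicit zeros.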
def spectral_norm_svd(module):
w=module.weight
if w.ndimension()==3:
w=w.view(-1,w.size(2))
_,s,_=torch.svd(w)
return s[0]
def pad_with_batch_idx(x,idx): #add a batch index to the list of coordinates
return torch.cat([x,torch.LongTensor(x.size(0),1).fill_(idx)],1)
def batch_location_tensors(location_tensors):
a=[]
for batch_idx, lt in enumerate(location_tensors):
if lt.numel():
a.append(pad_with_batch_idx(lt,batch_idx))
return torch.cat(a,0)
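# Added commentary: batch_location_tensors stacks per-sample coordinate
# tensors into a single tensor whose appended last column holds the batch
# index, the (x, ..., batch) layout used elsewhere in this module.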
def checkpoint_restore(model,exp_name,name2,use_cuda=True,epoch=0):
if use_cuda:
model.cpu()
if epoch>0:
f=exp_name+'-%04d-'%epoch+name2+'.pth' # '%04d' matches the zero padding used by checkpoint_save below
assert os.path.isfile(f)
print('Restore from ' + f)
model.load_state_dict(torch.load(f))
else:
f=sorted(glob.glob(exp_name+'-*-'+name2+'.pth'))
if len(f)>0:
f=f[-1]
print('Restore from ' + f)
model.load_state_dict(torch.load(f))
epoch=int(f[len(exp_name)+1:-len(name2)-5])
if use_cuda:
model.cuda()
return epoch+1
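# Added commentary: with epoch=0 the function above resumes from the newest
# matching checkpoint (the lexicographically last glob hit), recovering the
# epoch number from the filename; it returns saved_epoch + 1 as the epoch
# to continue from.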
def is_power2(num):
return num != 0 and ((num & (num - 1)) == 0)
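# Added commentary: `num & (num - 1)` clears the lowest set bit, so the test
# above is True exactly for numbers with a single set bit, e.g.
# is_power2(8) is True (0b1000 & 0b0111 == 0) while is_power2(6) is False.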
def checkpoint_save(model,exp_name,name2,epoch, use_cuda=True):
f=exp_name+'-%04d-'%epoch+name2+'.pth'
model.cpu()
torch.save(model.state_dict(),f)
if use_cuda:
model.cuda()
#remove previous checkpoints unless they are a power of 2 to save disk space
epoch=epoch-1
f=exp_name+'-%04d-'%epoch+name2+'.pth'
if os.path.isfile(f):
if not is_power2(epoch):
os.remove(f)
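# Added commentary: the retention rule above deletes the checkpoint for the
# previous epoch after each save unless that epoch is a power of two, so a
# long run keeps epochs 1, 2, 4, 8, ... plus the most recent save.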
avg_line_length: 35.153846
max_line_length: 155
alphanum_fraction: 0.600049
count_classes: 609
score_classes: 0.074034
count_generators: 531
score_generators: 0.064551
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 484
score_documentation: 0.058838
hexsha: 679e2250d3e4704bdc0cc067419d5a8f3eb454fa
size: 12983
ext: py
lang: Python
max_stars_repo_path: python-numpy-lists/numpylists.py
max_stars_repo_name: tosinayanda/python-starter-kit
max_stars_repo_head_hexsha: 9faee168ff82e46b6ef8102ae72ea936fd099961
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: python-numpy-lists/numpylists.py
max_issues_repo_name: tosinayanda/python-starter-kit
max_issues_repo_head_hexsha: 9faee168ff82e46b6ef8102ae72ea936fd099961
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: python-numpy-lists/numpylists.py
max_forks_repo_name: tosinayanda/python-starter-kit
max_forks_repo_head_hexsha: 9faee168ff82e46b6ef8102ae72ea936fd099961
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
#
import numpy as np
#create numpy arrays
#
#Generate array
height=np.round(np.random.normal(1.75,0.20,5000),2)
weight=np.round(np.random.normal(60.32,15,5000),2)
np_city=np.column_stack((height,weight))
print(np_city.shape)
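# Added commentary: np.column_stack pairs the two 5000-element vectors into
# a (5000, 2) array, one row of (height, weight) per simulated person.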
cars=["Toyota","Chevrolet","Ford","Honda","Brabus"]
cars_np=np.array(cars)
weight=[20.12,20.12,20.12,20.12,20.12,20.12,20.12,20.12,20.12,20.12,20.12,23,23,23,23,23,23,23,23,23,23,23,23,23,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23]
baseball=[[74, 180], [74, 215], [72, 210], [72, 210], [73, 188], [69, 176], [69, 209], [71, 200], [76, 231], [71, 180], [73, 188], [73, 180], [74, 185], [74, 160], [69, 180], [70, 185], [73, 189], [75, 185], [78, 219], [79, 230], [76, 205], [74, 230], [76, 195], [72, 180], [71, 192], [75, 225], [77, 203], [74, 195], [73, 182], [74, 188], [78, 200], [73, 180], [75, 200], [73, 200], [75, 245], [75, 240], [74, 215], [69, 185], [71, 175], [74, 199], [73, 200], [73, 215], [76, 200], [74, 205], [74, 206], [70, 186], [72, 188], [77, 220], [74, 210], [70, 195], [73, 200], [75, 200], [76, 212], [76, 224], [78, 210], [74, 205], [74, 220], [76, 195], [77, 200], [81, 260], [78, 228], [75, 270], [77, 200], [75, 210], [76, 190], [74, 220], [72, 180], [72, 205], [75, 210], [73, 220], [73, 211], [73, 200], [70, 180], [70, 190], [70, 170], [76, 230], [68, 155], [71, 185], [72, 185], [75, 200], [75, 225], [75, 225], [75, 220], [68, 160], [74, 205], [78, 235], [71, 250], [73, 210], [76, 190], [74, 160], [74, 200], [79, 205], [75, 222], [73, 195], [76, 205], [74, 220], [74, 220], [73, 170], [72, 185], [74, 195], [73, 220], [74, 230], [72, 180], [73, 220], [69, 180], [72, 180], [73, 170], [75, 210], [75, 215], [73, 200], [72, 213], [72, 180], [76, 192], [74, 235], [72, 185], [77, 235], [74, 210], [77, 222], [75, 210], [76, 230], [80, 220], [74, 180], [74, 190], [75, 200], [78, 210], [73, 194], [73, 180], [74, 190], [75, 240], [76, 200], [71, 198], [73, 200], [74, 195], [76, 210], [76, 220], [74, 190], [73, 210], [74, 225], [70, 180], [72, 185], [73, 170], [73, 185], [73, 185], [73, 180], [71, 178], [74, 175], [74, 200], [72, 204], [74, 211], [71, 190], [74, 210], [73, 190], [75, 190], [75, 185], [79, 290], [73, 175], [75, 185], [76, 200], [74, 220], [76, 170], [78, 220], [74, 190], [76, 220], [72, 205], [74, 200], [76, 250],
[74, 225], [75, 215], [78, 210], [75, 215], [72, 195], [74, 200], [72, 194], [74, 220], [70, 180], [71, 180], [70, 170], [75, 195], [71, 180], [71, 170], [73, 206], [72, 205], [71, 200], [73, 225], [72, 201], [75, 225], [74, 233], [74, 180], [75, 225], [73, 180], [77, 220], [73, 180], [76, 237], [75, 215], [74, 190], [76, 235], [75, 190], [73, 180], [71, 165], [76, 195], [75, 200], [72, 190], [71, 190], [77, 185], [73, 185], [74, 205], [71, 190], [72, 205], [74, 206], [75, 220], [73, 208], [72, 170], [75, 195], [75, 210], [74, 190], [72, 211], [74, 230], [71, 170], [70, 185], [74, 185], [77, 241], [77, 225], [75, 210], [75, 175], [78, 230], [75, 200], [76, 215], [73, 198], [75, 226], [75, 278], [79, 215], [77, 230], [76, 240], [71, 184], [75, 219], [74, 170], [69, 218], [71, 190], [76, 225], [72, 220], [72, 176], [70, 190], [72, 197], [73, 204], [71, 167], [72, 180], [71, 195], [73, 220], [72, 215], [73, 185], [74, 190], [74, 205], [72, 205], [75, 200], [74, 210], [74, 215], [77, 200], [75, 205], [73, 211], [72, 190], [71, 208], [74, 200], [77, 210], [75, 232], [75, 230], [75, 210], [78, 220], [78, 210], [74, 202], [76, 212], [78, 225], [76, 170], [70, 190], [72, 200], [80, 237], [74, 220], [74, 170], [71, 193], [70, 190], [72, 150], [71, 220], [74, 200], [71, 190], [72, 185], [71, 185], [74, 200], [69, 172], [76, 220], [75, 225], [75, 190], [76, 195], [73, 219], [76, 190], [73, 197], [77, 200], [73, 195], [72, 210], [72, 177], [77, 220], [77, 235], [71, 180], [74, 195], [74, 195], [73, 190], [78, 230], [75, 190], [73, 200], [70, 190], [74, 190], [72, 200], [73, 200], [73, 184], [75, 200], [75, 180], [74, 219], [76, 187], [73, 200], [74, 220], [75, 205], [75, 190], [72, 170], [73, 160], [73, 215], [72, 175], [74, 205], [78, 200], [76, 214], [73, 200], [74, 190], [75, 180], [70, 205], [75, 220], [71, 190], [72, 215], [78, 235], [75, 191], [73, 200], [73, 181], [71, 200], [75, 210], [77, 240], [72, 185], [69, 165], [73, 190], [74, 185], [72, 175], [70, 155], [75, 210], [70, 170], [72, 175], [72, 220], [74, 210], [73, 205], [74, 200], [76, 205], [75, 195], [80, 240], [72, 150], [75, 200], [73, 215], [74, 202], [74, 200], [73, 190], [75, 205], [75, 190], [71, 160], [73, 215], [75, 185], [74, 200], [74, 190], [72, 210], [74, 185], [74, 220], [74, 190], [73, 202], [76, 205], [75, 220], [72, 175], [73, 160], [73, 190], [73, 200], [72, 229], [72, 206], [72, 220], [72, 180], [71, 195], [75, 175], [75, 188], [74, 230], [73, 190], [75, 200], [79, 190], [74, 219], [76, 235], [73, 180], [74, 180], [74, 180], [72, 200], [74, 234], [74, 185], [75, 220], [78, 223], [74, 200], [74, 210], [74, 200], [77, 210], [70, 190], [73, 177], [74, 227], [73, 180], [71, 195], [75, 199], [71, 175], [72, 185], [77, 240], [74, 210], [70, 180], [77, 194], [73, 225], [72, 180], [76, 205], [71, 193], [76, 230], [78, 230], [75, 220], [73, 200], [78, 249], [74, 190], [79, 208], [75, 245], [76, 250],
[72, 160], [75, 192], [75, 220], [70, 170], [72, 197], [70, 155], [74, 190], [71, 200], [76, 220], [73, 210], [76, 228], [71, 190], [69, 160], [72, 184], [72, 180], [69, 180], [73, 200], [69, 176], [73, 160], [74, 222], [74, 211], [72, 195], [71, 200], [72, 175], [72, 206], [76, 240], [76, 185], [76, 260], [74, 185], [76, 221], [75, 205], [71, 200], [72, 170], [71, 201], [73, 205], [75, 185], [76, 205], [75, 245], [71, 220], [75, 210], [74, 220], [72, 185], [73, 175], [73, 170], [73, 180], [73, 200], [76, 210], [72, 175], [76, 220], [73, 206], [73, 180], [73, 210], [75, 195], [75, 200], [77, 200], [73, 164], [72, 180], [75, 220], [70, 195], [74, 205], [72, 170], [80, 240], [71, 210], [71, 195], [74, 200], [74, 205], [73, 192], [75, 190], [76, 170], [73, 240], [77, 200], [72, 205], [73, 175], [77, 250], [76, 220], [71, 224], [75, 210], [73, 195], [74, 180], [77, 245], [71, 175], [72, 180], [73, 215], [69, 175], [73, 180], [70, 195], [74, 230], [76, 230], [73, 205], [73, 215], [75, 195], [73, 180], [79, 205], [74, 180], [73, 190], [74, 180], [77, 190], [75, 190], [74, 220], [73, 210], [77, 255], [73, 190], [77, 230], [74, 200], [74, 205], [73, 210], [77, 225], [74, 215], [77, 220], [75, 205], [77, 200], [75, 220], [71, 197], [74, 225], [70, 187], [79, 245], [72, 185], [72, 185], [70, 175], [74, 200], [74, 180], [72, 188], [73, 225], [72, 200], [74, 210], [74, 245], [76, 213], [82, 231], [74, 165], [74, 228], [70, 210], [73, 250], [73, 191], [74, 190], [77, 200], [72, 215], [76, 254], [73, 232], [73, 180], [72, 215], [74, 220], [74, 180], [71, 200], [72, 170], [75, 195], [74, 210], [74, 200], [77, 220], [70, 165], [71, 180], [73, 200], [76, 200], [71, 170], [75, 224], [74, 220], [72, 180], [76, 198], [79, 240], [76, 239], [73, 185], [76, 210], [78, 220], [75, 200], [76, 195], [72, 220], [72, 230], [73, 170], [73, 220], [75, 230], [71, 165], [76, 205], [70, 192], [75, 210], [74, 205], [75, 200], [73, 210], [71, 185], [71, 195], [72, 202], [73, 205], [73, 195], [72, 180], [69, 200], [73, 185], [78, 240], [71, 185], [73, 220], [75, 205], [76, 205], [70, 180], [74, 201], [77, 190], [75, 208], [79, 240], [72, 180], [77, 230], [73, 195], [75, 215], [75, 190], [75, 195], [73, 215], [73, 215], [76, 220], [77, 220], [75, 230], [70, 195], [71, 190], [71, 195], [75, 209], [74, 204], [69, 170], [70, 185], [75, 205], [72, 175], [75, 210], [73, 190], [72, 180], [72, 180], [72, 160], [76, 235], [75, 200], [74, 210], [69, 180], [73, 190], [72, 197], [72, 203], [75, 205], [77, 170], [76, 200], [80, 250], [77, 200], [76, 220], [79, 200], [71, 190], [75, 170], [73, 190], [76, 220], [77, 215], [73, 206], [76, 215], [70, 185], [75, 235], [73, 188], [75, 230], [70, 195], [69, 168], [71, 190], [72, 160], [72, 200], [73, 200], [70, 189], [70, 180], [73, 190], [76, 200], [75, 220], [72, 187], [73, 240], [79, 190], [71, 180], [72, 185], [74, 210], [74, 220], [74, 219], [72, 190], [76, 193], [76, 175], [72, 180], [72, 215], [71, 210], [72, 200], [72, 190], [70, 185], [77, 220], [74, 170], [72, 195], [76, 205], [71, 195], [76, 210], [71, 190], [73, 190], [70, 180], [73, 220], [73, 190], [72, 186], [71, 185], [71, 190], [71, 180], [72, 190], [72, 170], [74, 210], [74, 240], [74, 220], [71, 180], [72, 210], [75, 210], [72, 195], [71, 160], [72, 180], [72, 205], [72, 200], [72, 185], [74, 245], [74, 190], [77, 210], [75, 200], [73, 200], [75, 222], [73, 215], [76, 240], [72, 170], [77, 220], [75, 156], [72, 190], [71, 202], [71, 221], [75, 200], [72, 190], [73, 210], [73, 190], [71, 200], [70, 165], [75, 190], [71, 185], 
[76, 230], [73, 208], [68, 209], [71, 175], [72, 180], [74, 200], [77, 205], [72, 200], [76, 250], [78, 210], [81, 230], [72, 244], [73, 202], [76, 240], [72, 200], [72, 215], [74, 177], [76, 210], [73, 170], [76, 215], [75, 217], [70, 198], [71, 200], [74, 220], [72, 170], [73, 200], [76, 230], [76, 231], [73, 183], [71, 192], [68, 167], [71, 190], [71, 180], [74, 180], [77, 215], [69, 160], [72, 205], [76, 223], [75, 175], [76, 170], [75, 190], [76, 240], [72, 175], [74, 230], [76, 223], [74, 196], [72, 167], [75, 195], [78, 190], [77, 250], [70, 190], [72, 190], [79, 190], [74, 170], [71, 160], [68, 150], [77, 225], [75, 220], [71, 209], [72, 210], [70, 176], [72, 260], [72, 195], [73, 190], [72, 184], [74, 180], [72, 195], [72, 195], [75, 219], [72, 225], [73, 212], [74, 202], [72, 185], [78, 200], [75, 209], [72, 200], [74, 195], [75, 228], [75, 210], [76, 190], [74, 212], [74, 190], [73, 218], [74, 220], [71, 190], [74, 235], [75, 210], [76, 200], [74, 188], [76, 210], [76, 235], [73, 188], [75, 215], [75, 216], [74, 220], [68, 180], [72, 185], [75, 200], [71, 210], [70, 220], [72, 185], [73, 231], [72, 210], [75, 195], [74, 200], [70, 205], [76, 200], [71, 190], [82, 250], [72, 185], [73, 180], [74, 170], [71, 180], [75, 208], [77, 235], [72, 215], [74, 244], [72, 220], [73, 185], [78, 230], [77, 190], [73, 200], [73, 180], [73, 190], [73, 196],
[73, 180], [76, 230], [75, 224], [70, 160], [73, 178], [72, 205], [73, 185], [75, 210], [74, 180], [73, 190], [73, 200], [76, 257], [73, 190], [75, 220], [70, 165], [77, 205], [72, 200], [77, 208], [74, 185], [75, 215], [75, 170], [75, 235], [75, 210], [72, 170],
[74, 180], [71, 170], [76, 190], [71, 150], [75, 230], [76, 203], [83, 260], [75, 246], [74, 186], [76, 210],
[72, 198], [72, 210], [75, 215], [75, 180], [72, 200], [77, 245], [73, 200], [72, 192], [70, 192], [74, 200], [72, 192],
[74, 205], [72, 190], [71, 186], [70, 170], [71, 197], [76, 219], [74, 200], [76, 220], [74, 207], [74, 225], [74, 207],
[75, 212], [75, 225], [71, 170], [71, 190], [74, 210], [77, 230], [71, 210], [74, 200], [75, 238], [77, 234], [76, 222],
[74, 200], [76, 190], [72, 170], [71, 220], [72, 223], [75, 210], [73, 215], [68, 196], [72, 175], [69, 175], [73, 189],
[73, 205], [75, 210], [70, 180], [70, 180], [74, 197], [75, 220], [74, 228], [74, 190], [73, 204], [74, 165], [75, 216],
[77, 220], [73, 208], [74, 210], [76, 215], [74, 195], [75, 200], [73, 215], [76, 229], [78, 240], [75, 207], [73, 205],
[77, 208], [74, 185], [72, 190], [74, 170], [72, 208], [71, 225], [73, 190], [75, 225], [73, 185], [67, 180], [67, 165],
[76, 240], [74, 220], [73, 212], [70, 163], [75, 215], [70, 175], [72, 205], [77, 210], [79, 205], [78, 208], [74, 215],
[75, 180], [75, 200], [78, 230], [76, 211], [75, 230], [69, 190], [75, 220], [72, 180], [75, 205], [73, 190], [74, 180],
[75, 205], [75, 190], [73, 195]]
weight_np=np.array(weight)
#print(type(weight_np))
#print(weight_np)
light=weight_np < 21
lowweight=weight_np[light]
print(lowweight)
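# Added commentary: `weight_np < 21` builds a boolean mask with the same
# shape as weight_np; indexing with the mask keeps only matching elements:
#     mask = np.array([20.12, 23.0]) < 21   # array([ True, False])
#     np.array([20.12, 23.0])[mask]         # array([20.12])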
np_baseball=np.array(baseball)
print(np_baseball.shape)
#Basic Operations on numpy arrays
#
#Statistical Operations on numpy arrays
#
# np_baseball is available
# Print mean height (first column)
avg = np.mean(np_baseball[:,0])
print("Average: " + str(avg))
# Print median height (first column)
med = np.median(np_baseball[:,0])
print("Median: " + str(med))
# Print the standard deviation of height
stddev = np.std(np_baseball[:,0])
print("Standard Deviation: " + str(stddev))
# Print the correlation between the first and second columns (height and weight)
corr = np.corrcoef(np_baseball[:,0],np_baseball[:,1])
print("Correlation: " + str(corr))
avg_line_length: 177.849315
max_line_length: 4931
alphanum_fraction: 0.484942
count_classes: 0
score_classes: 0
count_generators: 0
score_generators: 0
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 477
score_documentation: 0.03674
hexsha: 679f8e5b12103c54dd655de826901d7a4752b208
size: 11818
ext: py
lang: Python
max_stars_repo_path: sysinv/sysinv/sysinv/sysinv/puppet/nfv.py
max_stars_repo_name: MarioCarrilloA/config
max_stars_repo_head_hexsha: 06a6f142d154970ce658e979822cd84ce447f612
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: sysinv/sysinv/sysinv/sysinv/puppet/nfv.py
max_issues_repo_name: MarioCarrilloA/config
max_issues_repo_head_hexsha: 06a6f142d154970ce658e979822cd84ce447f612
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: sysinv/sysinv/sysinv/sysinv/puppet/nfv.py
max_forks_repo_name: MarioCarrilloA/config
max_forks_repo_head_hexsha: 06a6f142d154970ce658e979822cd84ce447f612
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
#
# Copyright (c) 2017-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.common import constants
from sysinv.common import utils
from sysinv.helm import helm
from sysinv.puppet import openstack
class NfvPuppet(openstack.OpenstackBasePuppet):
"""Class to encapsulate puppet operations for vim configuration"""
SERVICE_NAME = 'vim'
SERVICE_PORT = 4545
PLATFORM_KEYRING_SERVICE = 'CGCS'
def get_secure_static_config(self):
kspass = self._get_service_password(self.SERVICE_NAME)
return {
'nfv::keystone::auth::password': kspass,
}
def get_system_config(self):
system = self._get_system()
if system.system_mode == constants.SYSTEM_MODE_SIMPLEX:
single_hypervisor = True
single_controller = True
else:
single_hypervisor = False
single_controller = False
config = {
'nfv::keystone::auth::public_url': self.get_public_url(),
'nfv::keystone::auth::internal_url': self.get_internal_url(),
'nfv::keystone::auth::admin_url': self.get_admin_url(),
'nfv::keystone::auth::auth_name':
self._get_service_user_name(self.SERVICE_NAME),
'nfv::keystone::auth::region':
self._get_service_region_name(self.SERVICE_NAME),
'nfv::keystone::auth::tenant': self._get_service_tenant_name(),
'nfv::nfvi::host_listener_host':
self._get_management_address(),
'nfv::nfvi::platform_username':
self._operator.keystone.get_admin_user_name(),
'nfv::nfvi::platform_tenant':
self._operator.keystone.get_admin_project_name(),
'nfv::nfvi::platform_auth_host':
self._keystone_auth_address(),
'nfv::nfvi::platform_user_domain':
self._operator.keystone.get_admin_user_domain(),
'nfv::nfvi::platform_project_domain':
self._operator.keystone.get_admin_project_domain(),
'nfv::nfvi::platform_keyring_service':
self.PLATFORM_KEYRING_SERVICE,
'nfv::nfvi::keystone_region_name': self._keystone_region_name(),
'nfv::nfvi::keystone_service_name':
self._operator.keystone.get_service_name(),
'nfv::nfvi::keystone_service_type':
self._operator.keystone.get_service_type(),
'nfv::nfvi::sysinv_region_name':
self._operator.sysinv.get_region_name(),
'nfv::nfvi::patching_region_name':
self._operator.patching.get_region_name(),
'nfv::nfvi::fm_region_name':
self._operator.fm.get_region_name(),
'nfv::vim::vim_api_ip': self._get_management_address(),
'nfv::vim::vim_webserver_ip': self._get_oam_address(),
'nfv::vim::instance_single_hypervisor': single_hypervisor,
'nfv::vim::sw_mgmt_single_controller': single_controller,
# This flag is used to disable raising alarm to containerized fm
# and will be removed in future.
'nfv::alarm::fault_management_pod_disabled': True,
'nfv::event_log::fault_management_pod_disabled': True,
'nfv::vim::fault_management_pod_disabled': True,
'platform::nfv::params::service_create':
self._to_create_services(),
}
if utils.is_openstack_applied(self.dbapi):
helm_data = helm.HelmOperatorData(self.dbapi)
# The openstack services are authenticated with pod based
# keystone.
keystone_auth_data = helm_data.get_keystone_auth_data()
openstack_auth_config = {
'nfv::nfvi::openstack_username':
keystone_auth_data['admin_user_name'],
'nfv::nfvi::openstack_tenant':
keystone_auth_data['admin_project_name'],
'nfv::nfvi::openstack_auth_host':
keystone_auth_data['auth_host'],
'nfv::nfvi::openstack_user_domain':
keystone_auth_data['admin_user_domain'],
'nfv::nfvi::openstack_project_domain':
keystone_auth_data['admin_project_domain'],
'nfv::nfvi::openstack_keyring_service':
self.PLATFORM_KEYRING_SERVICE,
'nfv::alarm::openstack_username':
keystone_auth_data['admin_user_name'],
'nfv::alarm::openstack_tenant':
keystone_auth_data['admin_project_name'],
'nfv::alarm::openstack_auth_host':
keystone_auth_data['auth_host'],
'nfv::alarm::openstack_user_domain':
keystone_auth_data['admin_user_domain'],
'nfv::alarm::openstack_project_domain':
keystone_auth_data['admin_project_domain'],
'nfv::alarm::openstack_keyring_service':
self.PLATFORM_KEYRING_SERVICE,
'nfv::event_log::openstack_username':
keystone_auth_data['admin_user_name'],
'nfv::event_log::openstack_tenant':
keystone_auth_data['admin_project_name'],
'nfv::event_log::openstack_auth_host':
keystone_auth_data['auth_host'],
'nfv::event_log::openstack_user_domain':
keystone_auth_data['admin_user_domain'],
'nfv::event_log::openstack_project_domain':
keystone_auth_data['admin_project_domain'],
'nfv::event_log::openstack_keyring_service':
self.PLATFORM_KEYRING_SERVICE,
}
config.update(openstack_auth_config)
# Nova is running in a pod
nova_endpoint_data = helm_data.get_nova_endpoint_data()
nova_config = {
'nfv::nfvi::nova_endpoint_override':
nova_endpoint_data['endpoint_override'],
'nfv::nfvi::nova_region_name':
nova_endpoint_data['region_name'],
}
config.update(nova_config)
# Cinder is running in a pod
cinder_endpoint_data = helm_data.get_cinder_endpoint_data()
cinder_config = {
'nfv::nfvi::cinder_region_name':
cinder_endpoint_data['region_name'],
'nfv::nfvi::cinder_service_name':
cinder_endpoint_data['service_name'],
'nfv::nfvi::cinder_service_type':
cinder_endpoint_data['service_type'],
}
config.update(cinder_config)
# Glance is running in a pod
glance_endpoint_data = helm_data.get_glance_endpoint_data()
glance_config = {
'nfv::nfvi::glance_region_name':
glance_endpoint_data['region_name'],
'nfv::nfvi::glance_service_name':
glance_endpoint_data['service_name'],
'nfv::nfvi::glance_service_type':
glance_endpoint_data['service_type'],
}
config.update(glance_config)
# Neutron is running in a pod
neutron_endpoint_data = helm_data.get_neutron_endpoint_data()
neutron_config = {
'nfv::nfvi::neutron_region_name':
neutron_endpoint_data['region_name'],
}
config.update(neutron_config)
# Heat is running in a pod
heat_endpoint_data = helm_data.get_heat_endpoint_data()
heat_config = {
'nfv::nfvi::heat_region_name':
heat_endpoint_data['region_name'],
}
config.update(heat_config)
# Ceilometer is running in a pod
ceilometer_endpoint_data = \
helm_data.get_ceilometer_endpoint_data()
ceilometer_config = {
'nfv::nfvi::ceilometer_region_name':
ceilometer_endpoint_data['region_name'],
}
config.update(ceilometer_config)
# The openstack rabbitmq is running in a pod
nova_oslo_messaging_data = \
helm_data.get_nova_oslo_messaging_data()
rabbit_config = {
'nfv::nfvi::rabbit_host':
nova_oslo_messaging_data['host'],
'nfv::nfvi::rabbit_port':
nova_oslo_messaging_data['port'],
'nfv::nfvi::rabbit_virtual_host':
nova_oslo_messaging_data['virt_host'],
'nfv::nfvi::rabbit_userid':
nova_oslo_messaging_data['username'],
'nfv::nfvi::rabbit_password':
nova_oslo_messaging_data['password'],
}
config.update(rabbit_config)
# Listen to nova api proxy on management address
nova_api_proxy_config = {
'nfv::nfvi::compute_rest_api_host':
self._get_management_address(),
}
config.update(nova_api_proxy_config)
else:
# The openstack auth info is still required as the VIM will
# audit some keystone entities (e.g. tenants). Point it to
# the platform keystone.
openstack_auth_config = {
'nfv::nfvi::openstack_username':
self._operator.keystone.get_admin_user_name(),
'nfv::nfvi::openstack_tenant':
self._operator.keystone.get_admin_project_name(),
'nfv::nfvi::openstack_auth_host':
self._keystone_auth_address(),
'nfv::nfvi::openstack_user_domain':
self._operator.keystone.get_admin_user_domain(),
'nfv::nfvi::openstack_project_domain':
self._operator.keystone.get_admin_project_domain(),
'nfv::nfvi::openstack_keyring_service':
self.PLATFORM_KEYRING_SERVICE,
}
config.update(openstack_auth_config)
vim_disabled = {
# Disable VIM plugins for resources not yet active.
'nfv::vim::block_storage_plugin_disabled': True,
'nfv::vim::compute_plugin_disabled': True,
'nfv::vim::network_plugin_disabled': True,
'nfv::vim::image_plugin_disabled': True,
'nfv::vim::guest_plugin_disabled': True,
'nfv::vim::fault_mgmt_plugin_disabled': True,
'nfv::nfvi::nova_endpoint_disabled': True,
'nfv::nfvi::neutron_endpoint_disabled': True,
'nfv::nfvi::cinder_endpoint_disabled': True,
'nfv::alarm::fault_mgmt_endpoint_disabled': True,
'nfv::event_log::fault_mgmt_endpoint_disabled': True,
}
config.update(vim_disabled)
return config
def get_host_config(self, host):
if (constants.CONTROLLER not in utils.get_personalities(host)):
return {}
database_dir = "/opt/platform/nfv/vim/%s" % host.software_load
return {
'nfv::vim::database_dir': database_dir,
}
def get_public_url(self):
return self._format_public_endpoint(self.SERVICE_PORT)
def get_internal_url(self):
return self._format_private_endpoint(self.SERVICE_PORT)
def get_admin_url(self):
return self._format_private_endpoint(self.SERVICE_PORT)
def _get_nova_endpoint_url(self):
return self._format_private_endpoint(
self._operator.nova.SERVICE_API_PORT)
| 42.358423
| 76
| 0.582924
| 11,585
| 0.980284
| 0
| 0
| 0
| 0
| 0
| 0
| 4,002
| 0.338636
|
679fc8ee35fed0b83bbf337e8c352e97186a807c
| 1,151
|
py
|
Python
|
qualif16/timeline.py
|
valenca/hashcode16
|
ac47b6f480a9c2ce78446aa3510178cc32f26ea5
|
[
"WTFPL"
] | 1
|
2016-02-08T17:23:18.000Z
|
2016-02-08T17:23:18.000Z
|
qualif16/timeline.py
|
valenca/hashcode16
|
ac47b6f480a9c2ce78446aa3510178cc32f26ea5
|
[
"WTFPL"
] | null | null | null |
qualif16/timeline.py
|
valenca/hashcode16
|
ac47b6f480a9c2ce78446aa3510178cc32f26ea5
|
[
"WTFPL"
] | null | null | null |
from data import *
from heapq import *
class Timeline:
def __init__(self):
self.events=[]
def addEvent(self, event):
heappush(self.events, event)
def nextEvent(self):
assert(self.events != [])
return heappop(self.events)
def nextEvents(self):
if self.events == []:
return []
cur_time = self.events[0].time
res = []
while self.events != [] and self.events[0].time == cur_time:
res.append( heappop(self.events) )
return res
def isEmpty(self):
return self.events == []
class Event:
def __init__(self,d,t,a):
self.time=t
self.drone=d
self.action=a
def __str__(self):
return "[%d] Drone at (%d,%d) - %s" % (self.time,self.drone.x,self.drone.y,self.action)
def __repr__(self):
return self.__str__()
def __cmp__(self, other):
return cmp(self.time, other.time)
if __name__ == '__main__':
q=Timeline()
d = Drone(0,0,100)
q.addEvent(Event(d,0,"load"))
q.addEvent(Event(d,0,"load"))
q.addEvent(Event(d,0,"load"))
q.addEvent(Event(d,1,"load"))
q.addEvent(Event(d,1,"load"))
q.addEvent(Event(d,2,"load"))
q.addEvent(Event(d,2,"load"))
while not q.isEmpty():
print q.nextEvents()
print ""
| 19.508475
| 89
| 0.652476
| 765
| 0.664639
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 0.071242
|
67a10756dbb9e4be6d237dca1eb33c024676daf2
| 5,394
|
py
|
Python
|
cfgov/v1/util/migrations.py
|
hkeeler/cfgov-refresh
|
33977186a8e9cb972e63cc22baa357d381316aec
|
[
"CC0-1.0"
] | null | null | null |
cfgov/v1/util/migrations.py
|
hkeeler/cfgov-refresh
|
33977186a8e9cb972e63cc22baa357d381316aec
|
[
"CC0-1.0"
] | null | null | null |
cfgov/v1/util/migrations.py
|
hkeeler/cfgov-refresh
|
33977186a8e9cb972e63cc22baa357d381316aec
|
[
"CC0-1.0"
] | null | null | null |
import json
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from treebeard.mp_tree import MP_Node
try:
from wagtail.core.blocks import StreamValue
except ImportError: # pragma: no cover; fallback for Wagtail < 2.0
from wagtail.wagtailcore.blocks import StreamValue
def get_page(page_cls, slug):
return page_cls.objects.get(slug=slug)
def get_free_path(apps, parent_page):
offset = 1
base_page_cls = apps.get_model('wagtailcore', 'Page')
while True:
path = MP_Node._get_path(
parent_page.path,
parent_page.depth + 1,
parent_page.numchild + offset
)
try:
base_page_cls.objects.get(path=path)
except base_page_cls.DoesNotExist:
return path
offset += 1
@transaction.atomic
def get_or_create_page(apps, page_cls_app, page_cls_name, title, slug,
parent_page, live=False, **kwargs):
page_cls = apps.get_model(page_cls_app, page_cls_name)
try:
return get_page(page_cls, slug)
except ObjectDoesNotExist:
pass
ContentType = apps.get_model('contenttypes.ContentType')
page_content_type = ContentType.objects.get_for_model(page_cls)
parent_page = get_page(parent_page.specific_class, parent_page.slug)
page = page_cls.objects.create(
title=title,
slug=slug,
depth=parent_page.depth + 1,
path=get_free_path(apps, parent_page),
content_type=page_content_type,
live=live,
**kwargs
)
parent_page.numchild += 1
parent_page.save()
return page
def is_page(page_or_revision):
""" Return True if the page_or_revision is a Page object """
return not hasattr(page_or_revision, 'content_json')
def get_stream_data(page_or_revision, field_name):
""" Get the stream field data for a given field name on a page or a
revision """
if is_page(page_or_revision):
field = getattr(page_or_revision, field_name)
return field.stream_data
else:
revision_content = json.loads(page_or_revision.content_json)
field = revision_content.get(field_name, "[]")
return json.loads(field)
def set_stream_data(page_or_revision, field_name, stream_data, commit=True):
""" Set the stream field data for a given field name on a page or a
revision. If commit is True (default) save() is called on the
page_or_revision object. """
if is_page(page_or_revision):
field = getattr(page_or_revision, field_name)
stream_block = field.stream_block
stream_value = StreamValue(stream_block, stream_data, is_lazy=True)
setattr(page_or_revision, field_name, stream_value)
else:
revision_content = json.loads(page_or_revision.content_json)
revision_content[field_name] = json.dumps(stream_data)
page_or_revision.content_json = json.dumps(revision_content)
if commit:
page_or_revision.save()
def migrate_stream_data(page_or_revision, block_path, stream_data, mapper):
""" Recursively run the mapper on fields of block_type in stream_data """
migrated = False
if isinstance(block_path, str):
block_path = [block_path, ]
if len(block_path) == 0:
return stream_data, False
# Separate out the current block name from its child paths
block_name = block_path[0]
child_block_path = block_path[1:]
for field in stream_data:
if field['type'] == block_name:
if len(child_block_path) == 0:
value = mapper(page_or_revision, field['value'])
field_migrated = True
else:
value, field_migrated = migrate_stream_data(
page_or_revision, child_block_path, field['value'], mapper
)
if field_migrated:
field['value'] = value
migrated = True
return stream_data, migrated
def migrate_stream_field(page_or_revision, field_name, block_path, mapper):
""" Run mapper on blocks within a StreamField on a page or revision. """
stream_data = get_stream_data(page_or_revision, field_name)
stream_data, migrated = migrate_stream_data(
page_or_revision, block_path, stream_data, mapper
)
if migrated:
set_stream_data(page_or_revision, field_name, stream_data)
@transaction.atomic
def migrate_page_types_and_fields(apps, page_types_and_fields, mapper):
""" Migrate Wagtail StreamFields using the given mapper function.
page_types_and_fields should be a list of 4-tuples
providing ('app', 'PageType', 'field_name', ('block_path', )).
'field_name' is the field on the 'PageType' model.
'block path' is a tuple containing block names to access the
StreamBlock type to migrate."""
for app, page_type, field_name, block_path in page_types_and_fields:
page_model = apps.get_model(app, page_type)
revision_model = apps.get_model('wagtailcore.PageRevision')
for page in page_model.objects.all():
migrate_stream_field(page, field_name, block_path, mapper)
revisions = revision_model.objects.filter(
page=page).order_by('-id')
for revision in revisions:
migrate_stream_field(revision, field_name, block_path, mapper)
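
# A minimal usage sketch: how migrate_page_types_and_fields might be called
# from a Django data migration. The app label, page type, field name, and
# block path below are hypothetical, not part of this module.
def _example_forwards(apps, schema_editor):
    def mapper(page_or_revision, value):
        # rename a key inside the matched block's value (illustrative only)
        value['new_key'] = value.pop('old_key')
        return value

    migrate_page_types_and_fields(
        apps,
        [('v1', 'BlogPage', 'content', ('full_width_text',))],
        mapper,
    )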
| 32.299401
| 78
| 0.678532
| 0
| 0
| 0
| 0
| 1,808
| 0.335187
| 0
| 0
| 1,043
| 0.193363
|
67a10b0fb92da7a2ec247253549979648e850cef
| 8,436
|
py
|
Python
|
source/codes.py
|
Very1Fake/monitor
|
bb47352cffebd8b99bafac0a342324b042b3d826
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
source/codes.py
|
Very1Fake/monitor
|
bb47352cffebd8b99bafac0a342324b042b3d826
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
source/codes.py
|
Very1Fake/monitor
|
bb47352cffebd8b99bafac0a342324b042b3d826
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from typing import Dict
_codes: Dict[int, str] = {
# Debug (1xxxx)
# System (100xx)
10000: 'Test debug',
# Pipe (103xx)
10301: 'Reindexing parser',
# Resolver (109xx)
10901: 'Executing catalog',
10902: 'Executing target',
10903: 'Catalog executed',
10904: 'Target executed',
# SubProvider (113xx)
11301: 'Common exception while sending request',
# Information (2xxxx)
# System (200xx)
20000: 'Test information',
20001: 'Thread started',
20002: 'Thread paused',
20003: 'Thread resumed',
20004: 'Thread closing',
20005: 'Thread closed',
# Core (201xx)
20101: 'Production mode enabled',
20102: 'Signal Interrupt',
20103: 'Turning off',
20104: 'Saving success hashes started',
20105: 'Saving success hashes complete',
20106: 'Offline',
# ThreadManager (202xx)
20201: 'Pipe initialized',
20202: 'Pipe started',
20203: 'Worker initialized',
20204: 'Worker started',
20205: 'CatalogWorker initialized',
20206: 'CatalogWorker started',
# Pipe (203xx)
20301: 'Reindexing parsers started',
20302: 'Reindexing parsers complete',
20303: 'Parser reindexing complete',
# ScriptManager (205xx)
20501: 'Script loaded',
20502: 'Script unloaded',
20503: 'Script reloaded',
20504: 'Loading all indexed scripts',
20505: 'Loading all indexed scripts complete',
20506: 'Unloading all scripts',
20507: 'Unloading all scripts complete',
20508: 'Reloading all scripts',
20509: 'Reloading all scripts complete',
# ScriptIndex (206xx)
20601: 'Config loaded',
20602: 'Config dumped',
20603: 'Config does not loaded (must be dict)',
20604: 'Skipping script (config not detected)',
20605: 'Skipping script (bad config)',
20606: 'Skipping script (script incompatible with core)',
20607: 'Skipping script (script in blacklist)',
20608: 'Skipping script (script with this name is already indexed)',
20609: 'N script(s) indexed',
20610: 'Skipping config (script not in whitelist)',
# EventHandler (207xx)
20701: 'Starting loop',
20702: 'Loop started',
20703: 'Stopping loop',
20704: 'Loop stopped',
# Logger (208xx)
20801: 'Log level changed',
20802: 'Log mode changed',
20803: 'Time changed to UTC',
20804: 'Time changed to local',
# Resolver (209xx)
20901: 'Successful target execution',
20902: 'Catalog updated',
# Commands (211xx)
21101: 'Command executing',
21102: 'Command executed',
21103: 'Command execute',
# Provider (212xx)
21201: 'Proxies dumped',
21202: 'Checking proxy',
21203: 'Checking proxy (OK)',
# Keywords (215xx)
21501: 'Dumping keywords(started)',
21502: 'Dumping keywords(complete)',
21503: 'Clearing keywords(started)',
21504: 'Clearing keywords(complete)',
21505: 'Syncing keywords(started)',
21506: 'Syncing keywords(complete)',
21507: 'Loading keywords(started)',
21508: 'Loading keywords(complete)',
# Warning (3xxxx)
# System (300xx)
30000: 'Test warning',
# ThreadManager (302xx)
30201: 'Pipe was stopped',
30202: 'Worker was stopped',
30203: 'CatalogWorker was stopped',
30204: 'Lock forced released',
# Pipe (303xx)
30301: 'Parser reindexing failed',
30302: 'Catalog lost while sending (queue full)',
30303: 'Target lost while sending (queue full)',
# ScriptManager (305xx)
30501: 'Module not loaded',
30502: 'Nothing to import in script',
30503: 'Script cannot be unloaded (_unload)',
30504: 'Script cannot be unloaded (_reload)',
30505: 'Script not indexed but still loaded',
30506: 'Script already loaded',
30507: 'Max errors for script reached, unloading',
# EventHandler (307xx)
30701: 'Loop already started',
30702: 'Loop already stopped',
# Logger (308xx)
30801: 'Meaningless level change (changing to the same value)',
30802: 'Meaningless mode change (changing to the same value)',
30803: 'Meaningless time change (changing to the same value)',
# Resolver (309xx)
30901: 'Catalog lost while retrieving (script not loaded)',
30902: 'Catalog lost while retrieving (script has no Parser)',
30903: 'Target lost while retrieving (script not loaded)',
30904: 'Target lost while retrieving (script has no Parser)',
30905: 'Catalog lost while executing (script unloaded)',
30906: 'Catalog lost while executing (script has no parser)',
30907: 'Catalog lost while executing (bad result)',
30908: 'Target lost while executing (script unloaded)',
30909: 'Target lost while executing (script has no parser)',
30910: 'Target lost while executing (bad result)',
30911: 'Smart catalog expired',
30912: 'Smart target expired',
# Provider (312xx)
31201: 'Proxy added',
31202: 'Proxy removed',
31203: 'Proxies list changed',
31204: 'Proxies statistics reset',
31205: 'Proxies list cleared',
# Keywords (315xx)
31501: 'Keywords file not found',
31511: 'Absolute keyword not loaded (TypeError)',
31512: 'Absolute keyword not loaded (UniquenessError)',
31521: 'Positive keyword not loaded (TypeError)',
31522: 'Positive keyword not loaded (UniquenessError)',
31531: 'Negative keyword not loaded (TypeError)',
31532: 'Negative keyword not loaded (UniquenessError)',
# Error (4xxxx)
# System (400xx)
40000: 'Unknown error',
# ThreadManager (402xx)
40201: 'Pipe was unexpectedly stopped',
40202: 'Worker was unexpectedly stopped',
40203: 'CatalogWorker was unexpectedly stopped',
# Pipe (403xx)
40301: 'Wrong catalog received from script',
# Worker (404xx)
40401: 'Unknown status received while executing',
40402: 'Parser execution failed',
40403: 'Target lost in pipeline (script unloaded)',
# ScriptsManager (405xx)
40501: 'Can\'t load script (ImportError)',
40502: 'Can\'t load script (script not indexed)',
40503: 'Can\'t unload script (script isn\'t loaded)',
40504: 'Can\'t reload script (script isn\'t loaded)',
40505: 'Script cannot be reloaded (folder not found)',
40506: 'Script cannot be reloaded (script not in index)',
# EventHandler (407xx)
40701: 'Event execution failed',
# Logger (408xx)
40801: 'Can\'t change level (possible values (0, 1, 2, 3, 4, 5))',
40802: 'Can\'t change mode (possible values (0, 1, 2, 3))',
# Resolver (409xx)
40901: 'Unknown index type (while inserting)',
40902: 'Unknown target type (while inserting)',
40903: 'Catalog execution failed',
40904: 'Target execution failed',
# Provider (412xx)
41201: 'Bad proxy',
41202: 'Checking proxy (FAILED)',
# SubProvider (413xx)
41301: 'Severe exception while sending request',
# Keywords (415xx)
41501: 'Loading keywords (Failed)',
# Fatal (5xxxx)
# System (500xx)
50000: 'Test fatal',
# Core (501xx)
50101: 'ThreadManager unexpectedly has turned off',
# ThreadManager (502xx)
50201: 'Exception raised, emergency stop initiated',
# Pipe (503xx)
50301: 'Unexpectedly has turned off',
# Worker (504xx)
50401: 'Unexpectedly has turned off',
# CatalogWorker (510xx)
51001: 'Unexpectedly has turned off',
# RemoteThread (514xx)
51401: 'Unknown fatal error'
}
class CodeError(Exception):
pass
class Code:
__slots__ = ('code', 'title', 'message')
code: int
title: str
message: str
def __init__(self, code: int, message: str = ''):
if isinstance(code, int) and len(str(code)) == 5:
self.code = code
if code in _codes:
self.title = _codes[code]
else:
raise CodeError('Code does not exist')
else:
            raise CodeError('Code must be a 5-digit int (10000 - 99999)')
self.message = message
def __str__(self) -> str:
return self.format()
def __repr__(self) -> str:
return f'Code({self.code}, {self.title})'
def format(self, mode: int = 1) -> str:
if mode == 1 and self.message:
return f'C{self.code}: {self.message}'
elif mode == 2:
return f'C{self.code} {self.title}' + (f': {self.message}' if self.message else '')
else:
return f'C{self.code}'
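
# A minimal usage sketch; the title comes from the _codes table above and
# the message string is illustrative only:
if __name__ == '__main__':
    code = Code(20001, 'worker-1')
    print(code)            # C20001: worker-1
    print(code.format(2))  # C20001 Thread started: worker-1
    print(repr(code))      # Code(20001, Thread started)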
| 30.345324
| 95
| 0.641062
| 990
| 0.117354
| 0
| 0
| 0
| 0
| 0
| 0
| 5,411
| 0.641418
|
67a1409839afbcce2cc6a08bb9dc1126a5b4df90
| 937
|
py
|
Python
|
Stack.py
|
jdegene/ArcGIS-scripts
|
8821adc32b89525039591db83c762083a4ef750f
|
[
"MIT"
] | null | null | null |
Stack.py
|
jdegene/ArcGIS-scripts
|
8821adc32b89525039591db83c762083a4ef750f
|
[
"MIT"
] | null | null | null |
Stack.py
|
jdegene/ArcGIS-scripts
|
8821adc32b89525039591db83c762083a4ef750f
|
[
"MIT"
] | null | null | null |
# Creates a single stacked raster from many TIFF files using the ArcGIS
# Composite Bands tool
import arcpy
import os
arcpy.env.overwriteOutput = True # Allow ArcGIS to overwrite existing output
arcpy.env.pyramid = "NONE" # Prevent pyramid computation
arcpy.env.rasterStatistics = "NONE" # Prevent statistics computation
inFol = "D:/Test/NDVI_tif/"
outFol = "D:/Test/NDVI_file/"
month = ("jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec")
half = ("a", "b")
datList = ""
for i in range(1981,2013):
iStr = str(i)[2:4]
for ii in month:
for iii in half:
datName = inFol + "geo" + iStr + ii + "15" + iii + ".tif"
datList = datList + ";" + datName
datList = datList[1:]  # otherwise datList would start with a ;
arcpy.CompositeBands_management(datList, outFol + "NDVIstack.tif")
#compRas.save(outFol + "NDVIstack.tif")
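
# For reference, the first path assembled by the loop above is
# "D:/Test/NDVI_tif/geo81jan15a.tif" (year 1981 -> "81", month "jan", half "a").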
| 28.393939
| 92
| 0.640342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 445
| 0.47492
|
67a2a922aab66937ea10eabfea17b426aac61814
| 2,106
|
py
|
Python
|
tests/test_frozenordereddict.py
|
tirkarthi/frozenordereddict
|
8837a7e2b55cf8531793b0ec5ad40d56c500ec0f
|
[
"MIT"
] | 2
|
2016-01-14T18:03:42.000Z
|
2020-11-03T22:13:03.000Z
|
tests/test_frozenordereddict.py
|
tirkarthi/frozenordereddict
|
8837a7e2b55cf8531793b0ec5ad40d56c500ec0f
|
[
"MIT"
] | 4
|
2017-10-24T06:03:24.000Z
|
2020-11-03T22:23:06.000Z
|
tests/test_frozenordereddict.py
|
tirkarthi/frozenordereddict
|
8837a7e2b55cf8531793b0ec5ad40d56c500ec0f
|
[
"MIT"
] | 6
|
2015-12-02T11:34:33.000Z
|
2021-11-04T04:31:11.000Z
|
from collections import OrderedDict
from unittest import TestCase
from frozenordereddict import FrozenOrderedDict
class TestFrozenOrderedDict(TestCase):
ITEMS_1 = (
("b", 2),
("a", 1),
)
ITEMS_2 = (
("d", 4),
("c", 3),
)
ODICT_1 = OrderedDict(ITEMS_1)
ODICT_2 = OrderedDict(ITEMS_2)
def test_init_from_items(self):
fod = FrozenOrderedDict(self.ITEMS_1)
self.assertEqual(list(self.ITEMS_1), list(fod.items()))
def test_init_from_ordereddict(self):
fod = FrozenOrderedDict(self.ODICT_1)
self.assertEqual(list(self.ITEMS_1), list(fod.items()))
def test_setitem(self):
def doit():
fod = FrozenOrderedDict()
fod[1] = "b"
self.assertRaises(TypeError, doit)
def test_delitem(self):
def doit():
fod = FrozenOrderedDict(self.ITEMS_1)
del fod[1]
self.assertRaises(TypeError, doit)
def test_copy_no_items(self):
fod1 = FrozenOrderedDict(self.ITEMS_1)
fod2 = fod1.copy()
self.assertNotEqual(id(fod1), id(fod2))
self.assertEqual(fod1.items(), fod2.items())
self.assertEqual(repr(fod1), repr(fod2))
self.assertEqual(len(fod1), len(fod2))
self.assertEqual(hash(fod1), hash(fod2))
def test_copy_tuple_items(self):
fod1 = FrozenOrderedDict(self.ITEMS_1)
fod2 = fod1.copy(self.ITEMS_2)
self.assertNotEqual(id(fod1), id(fod2))
self.assertEqual(list(fod1.items()) + list(self.ITEMS_2), list(fod2.items()))
def test_copy_ordereddict_items(self):
fod1 = FrozenOrderedDict(self.ITEMS_1)
fod2 = fod1.copy(self.ODICT_2)
self.assertNotEqual(id(fod1), id(fod2))
self.assertEqual(list(fod1.items()) + list(self.ITEMS_2), list(fod2.items()))
def test_copy_kwargs(self):
fod1 = FrozenOrderedDict(self.ITEMS_1)
fod2 = fod1.copy(**self.ODICT_2)
self.assertNotEqual(id(fod1), id(fod2))
self.assertEqual(dict(list(fod1.items()) + list(self.ODICT_2.items())), fod2)
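
# For reference, the behavior under test (a sketch mirroring the cases above):
#
#   fod = FrozenOrderedDict([('b', 2), ('a', 1)])
#   fod['c'] = 3          # raises TypeError: the mapping is immutable
#   fod2 = fod.copy(c=3)  # returns a new, extended FrozenOrderedDict instead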
| 27.350649
| 85
| 0.625831
| 1,983
| 0.941595
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 0.007123
|
67a4b479d6f75f2f17d3b85691a149733addfde8
| 7,560
|
py
|
Python
|
tests/test_data_gateway/test_dummy_serial.py
|
aerosense-ai/data-gateway
|
019b8e4a114e16d363a3167171a457cefdbf004f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_data_gateway/test_dummy_serial.py
|
aerosense-ai/data-gateway
|
019b8e4a114e16d363a3167171a457cefdbf004f
|
[
"Apache-2.0"
] | 34
|
2021-12-20T14:51:57.000Z
|
2022-03-30T16:47:04.000Z
|
tests/test_data_gateway/test_dummy_serial.py
|
aerosense-ai/data-gateway
|
019b8e4a114e16d363a3167171a457cefdbf004f
|
[
"Apache-2.0"
] | null | null | null |
import random
import unittest
from serial.serialutil import SerialException
from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string
from tests.base import BaseTestCase
class DummySerialTest(BaseTestCase):
def setUp(self):
"""Set up the test environment:
1. Create a random serial port name.
2. Create a random baud rate.
"""
self.random_serial_port = random_string()
self.random_baudrate = random_string(5, constants.NUMBERS)
def test_write_closed_port(self):
"""Tests writing-to a closed DummySerial port."""
rand_write_len1 = random.randint(0, 1024)
rand_write_len2 = random.randint(0, 1024)
rand_write_str1 = random_string(rand_write_len1).encode()
rand_write_str2 = random_string(rand_write_len2).encode()
ds_instance = DummySerial(
port=self.random_serial_port, baudrate=self.random_baudrate, responses={rand_write_str1: rand_write_str2}
)
self.assertTrue(ds_instance._isOpen) # pylint: disable=W0212
ds_instance.close()
self.assertFalse(ds_instance._isOpen) # pylint: disable=W0212
with self.assertRaises(SerialException):
ds_instance.write(rand_write_str1)
self.assertFalse(ds_instance._isOpen) # pylint: disable=W0212
def test_write_and_read_to_closed_port(self):
"""Tests writing-to and reading-from a closed DummySerial port."""
rand_write_len1 = random.randint(0, 1024)
rand_write_len2 = random.randint(0, 1024)
rand_write_str1 = random_string(rand_write_len1).encode()
rand_write_str2 = random_string(rand_write_len2).encode()
ds_instance = DummySerial(
port=self.random_serial_port, baudrate=self.random_baudrate, responses={rand_write_str1: rand_write_str2}
)
self.assertTrue(ds_instance._isOpen) # pylint: disable=W0212
ds_instance.write(rand_write_str1)
ds_instance.close()
self.assertFalse(ds_instance._isOpen) # pylint: disable=W0212
with self.assertRaises(SerialException):
ds_instance.read(rand_write_len2)
self.assertFalse(ds_instance._isOpen) # pylint: disable=W0212
def test_repr_port(self):
"""Tests describing a DummySerial port."""
rand_write_len1 = random.randint(0, 1024)
rand_write_len2 = random.randint(0, 1024)
rand_write_str1 = random_string(rand_write_len1).encode()
rand_write_str2 = random_string(rand_write_len2).encode()
ds_instance = DummySerial(
port=self.random_serial_port, baudrate=self.random_baudrate, responses={rand_write_str1: rand_write_str2}
)
self.assertTrue(self.random_serial_port in str(ds_instance))
def test_open_port(self):
"""Tests opening an already-open DummySerial port."""
rand_write_len1 = random.randint(0, 1024)
rand_write_len2 = random.randint(0, 1024)
rand_write_str1 = random_string(rand_write_len1).encode()
rand_write_str2 = random_string(rand_write_len2).encode()
ds_instance = DummySerial(
port=self.random_serial_port, baudrate=self.random_baudrate, responses={rand_write_str1: rand_write_str2}
)
self.assertTrue(ds_instance._isOpen) # pylint: disable=W0212
with self.assertRaises(SerialException):
ds_instance.open()
ds_instance.close()
self.assertFalse(ds_instance._isOpen) # pylint: disable=W0212
ds_instance.open()
self.assertTrue(ds_instance._isOpen) # pylint: disable=W0212
def test_close(self):
"""Tests closing a DummySerial port."""
rand_write_len1 = random.randint(0, 1024)
rand_write_len2 = random.randint(0, 1024)
rand_write_str1 = random_string(rand_write_len1).encode()
rand_write_str2 = random_string(rand_write_len2).encode()
ds_instance = DummySerial(
port=self.random_serial_port, baudrate=self.random_baudrate, responses={rand_write_str1: rand_write_str2}
)
self.assertTrue(ds_instance._isOpen) # pylint: disable=W0212
self.assertFalse(ds_instance.close())
self.assertFalse(ds_instance._isOpen) # pylint: disable=W0212
def test_write_and_read_no_data_present(self): # pylint: disable=C0103
"""Tests writing and reading with an unspecified response."""
rand_write_len1 = random.randint(256, 1024)
rand_read_len2 = random.randint(1, 16) # give it some order of magnitudes less
rand_write_bytes1 = random_bytes(rand_write_len1)
ds_instance = DummySerial(port=self.random_serial_port, baudrate=self.random_baudrate)
ds_instance.write(rand_write_bytes1)
        while True:
ds_instance.read(rand_read_len2) # discard this data
if not ds_instance.in_waiting:
empty_data = ds_instance.read(rand_read_len2)
break
self.assertEqual(constants.NO_DATA_PRESENT, empty_data)
def test_writing_non_bytes_data_raises_type_error(self):
"""Ensures that errors are raised if attempting to write non-bytes data"""
rand_write_len = random.randint(256, 1024)
rand_write_string = random_string(rand_write_len)
ds = DummySerial(port=self.random_serial_port, baudrate=self.random_baudrate)
with self.assertRaises(TypeError):
ds.write(rand_write_string)
def test_negative_read_size(self):
"""Ensures that errors are raised if attempting to access more or less data than in the buffer"""
rand_write_len = random.randint(256, 1024)
rand_write_bytes = random_bytes(rand_write_len)
ds = DummySerial(port=self.random_serial_port, baudrate=self.random_baudrate)
ds.write(rand_write_bytes)
with self.assertRaises(exceptions.DSIOError):
ds.read(-1)
def test_timeout_with_large_read_size(self):
"""Ensures that errors are raised if attempting to access more or less data than in the buffer"""
rand_write_len = random.randint(256, 1024)
rand_write_bytes = random_bytes(rand_write_len)
ds = DummySerial(
port=self.random_serial_port, baudrate=self.random_baudrate, responses={rand_write_bytes: rand_write_bytes}
)
ds.write(rand_write_bytes)
result = ds.read(rand_write_len + 2)
self.assertEqual(len(result), rand_write_len)
def test_partial_read(self):
"""Ensures that errors are raised if attempting to access more or less data than in the buffer"""
rand_write_len = random.randint(256, 1024)
rand_write_bytes = random_bytes(rand_write_len)
ds = DummySerial(
port=self.random_serial_port, baudrate=self.random_baudrate, responses={rand_write_bytes: rand_write_bytes}
)
ds.write(rand_write_bytes)
result = ds.read(rand_write_len - 2)
self.assertEqual(len(result), rand_write_len - 2)
self.assertEqual(ds.in_waiting, 2)
def test_write_bytearray(self):
"""Ensures that errors are raised if attempting to access more or less data than in the buffer"""
rand_write_len = random.randint(256, 1024)
rand_write_bytearray = bytearray(random_bytes(rand_write_len))
ds = DummySerial(
port=self.random_serial_port,
baudrate=self.random_baudrate,
)
ds.write(rand_write_bytearray)
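
# For reference, the behavior these tests exercise (a sketch; the port name
# and response mapping are illustrative): writing a key from `responses`
# makes the mapped value available to subsequent reads.
#
#   ds = DummySerial(port='COM1', baudrate=9600, responses={b'ping': b'pong'})
#   ds.write(b'ping')
#   assert ds.read(4) == b'pong'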
if __name__ == "__main__":
unittest.main()
| 41.538462
| 119
| 0.694577
| 7,292
| 0.96455
| 0
| 0
| 0
| 0
| 0
| 0
| 1,243
| 0.164418
|
67a4dc5dd5440ed57b743f18f84e2d218d7c1ec4
| 5,216
|
py
|
Python
|
site/flask/lib/python2.7/site-packages/speaklater.py
|
theholyhades1/tartanHacks2015
|
a801b473f21cfbd136e2a5a74423e8c72d14f900
|
[
"MIT"
] | 32
|
2015-01-19T12:13:26.000Z
|
2021-11-11T00:11:22.000Z
|
site/flask/lib/python2.7/site-packages/speaklater.py
|
theholyhades1/tartanHacks2015
|
a801b473f21cfbd136e2a5a74423e8c72d14f900
|
[
"MIT"
] | 10
|
2020-06-05T19:42:26.000Z
|
2022-03-11T23:38:35.000Z
|
site/flask/lib/python2.7/site-packages/speaklater.py
|
theholyhades1/tartanHacks2015
|
a801b473f21cfbd136e2a5a74423e8c72d14f900
|
[
"MIT"
] | 9
|
2015-07-18T01:03:56.000Z
|
2019-05-24T09:36:40.000Z
|
# -*- coding: utf-8 -*-
r"""
speaklater
~~~~~~~~~~
A module that provides lazy strings for translations. Basically you
get an object that appears to be a string but changes the value every
time the value is evaluated based on a callable you provide.
For example you can have a global `lazy_gettext` function that returns
a lazy string with the value of the current set language.
Example:
>>> from speaklater import make_lazy_string
>>> sval = u'Hello World'
>>> string = make_lazy_string(lambda: sval)
This lazy string will evaluate to the value of the `sval` variable.
>>> string
lu'Hello World'
>>> unicode(string)
u'Hello World'
>>> string.upper()
u'HELLO WORLD'
If you change the value, the lazy string will change as well:
>>> sval = u'Hallo Welt'
>>> string.upper()
u'HALLO WELT'
This is especially handy when combined with a thread local and gettext
translations or dicts of translatable strings:
>>> from speaklater import make_lazy_gettext
>>> from threading import local
>>> l = local()
>>> l.translations = {u'Yes': 'Ja'}
>>> lazy_gettext = make_lazy_gettext(lambda: l.translations.get)
>>> yes = lazy_gettext(u'Yes')
>>> print yes
Ja
>>> l.translations[u'Yes'] = u'Si'
>>> print yes
Si
Lazy strings are no real strings so if you pass this sort of string to
a function that performs an instance check, it will fail. In that case
you have to explicitly convert it with `unicode` and/or `string` depending
on what string type the lazy string encapsulates.
To check if a string is lazy, you can use the `is_lazy_string` function:
>>> from speaklater import is_lazy_string
>>> is_lazy_string(u'yes')
False
>>> is_lazy_string(yes)
True
New in version 1.2: It's now also possible to pass keyword arguments to
the callback used with `make_lazy_string`.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
def is_lazy_string(obj):
"""Checks if the given object is a lazy string."""
return isinstance(obj, _LazyString)
def make_lazy_string(__func, *args, **kwargs):
"""Creates a lazy string by invoking func with args."""
return _LazyString(__func, args, kwargs)
def make_lazy_gettext(lookup_func):
"""Creates a lazy gettext function dispatches to a gettext
function as returned by `lookup_func`.
Example:
>>> translations = {u'Yes': u'Ja'}
>>> lazy_gettext = make_lazy_gettext(lambda: translations.get)
>>> x = lazy_gettext(u'Yes')
>>> x
lu'Ja'
>>> translations[u'Yes'] = u'Si'
>>> x
lu'Si'
"""
def lazy_gettext(string):
if is_lazy_string(string):
return string
return make_lazy_string(lookup_func(), string)
return lazy_gettext
class _LazyString(object):
"""Class for strings created by a function call.
The proxy implementation attempts to be as complete as possible, so that
the lazy objects should mostly work as expected, for example for sorting.
"""
__slots__ = ('_func', '_args', '_kwargs')
def __init__(self, func, args, kwargs):
self._func = func
self._args = args
self._kwargs = kwargs
value = property(lambda x: x._func(*x._args, **x._kwargs))
def __contains__(self, key):
return key in self.value
def __nonzero__(self):
return bool(self.value)
def __dir__(self):
return dir(unicode)
def __iter__(self):
return iter(self.value)
def __len__(self):
return len(self.value)
def __str__(self):
return str(self.value)
def __unicode__(self):
return unicode(self.value)
def __add__(self, other):
return self.value + other
def __radd__(self, other):
return other + self.value
def __mod__(self, other):
return self.value % other
def __rmod__(self, other):
return other % self.value
def __mul__(self, other):
return self.value * other
def __rmul__(self, other):
return other * self.value
def __lt__(self, other):
return self.value < other
def __le__(self, other):
return self.value <= other
def __eq__(self, other):
return self.value == other
def __ne__(self, other):
return self.value != other
def __gt__(self, other):
return self.value > other
def __ge__(self, other):
return self.value >= other
def __getattr__(self, name):
if name == '__members__':
return self.__dir__()
return getattr(self.value, name)
def __getstate__(self):
return self._func, self._args, self._kwargs
def __setstate__(self, tup):
self._func, self._args, self._kwargs = tup
def __getitem__(self, key):
return self.value[key]
def __copy__(self):
return self
def __repr__(self):
try:
return 'l' + repr(self.value)
except Exception:
return '<%s broken>' % self.__class__.__name__
if __name__ == '__main__':
import doctest
doctest.testmod()
| 25.950249
| 78
| 0.637078
| 2,253
| 0.43194
| 0
| 0
| 0
| 0
| 0
| 0
| 2,778
| 0.532592
|
67a75973cb787f7c7e91d28c32afde2e4db5408b
| 848
|
py
|
Python
|
test/test_graph.py
|
mits58/Python-Graph-Library
|
aa85788ad63e356944d77a4c251ad707562dd9c0
|
[
"MIT"
] | null | null | null |
test/test_graph.py
|
mits58/Python-Graph-Library
|
aa85788ad63e356944d77a4c251ad707562dd9c0
|
[
"MIT"
] | null | null | null |
test/test_graph.py
|
mits58/Python-Graph-Library
|
aa85788ad63e356944d77a4c251ad707562dd9c0
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
from graph import Graph
class TestGraphClass(unittest.TestCase):
"""
Test Class for Graph Class
"""
def test_generating_graph_object(self):
"""
Testing Generation of Graph Object from Adjacent Matrix
"""
# setup
A = np.array([[0, 1, 1, 0, 0],
[1, 0, 0, 1, 0],
[1, 0, 0, 1, 1],
[0, 1, 1, 0, 0],
[0, 0, 1, 0, 0]])
# run
G = Graph(A, is_directed=False)
# validation
self.assertEqual(G.number_of_vertex(), 5) # |V|
self.assertEqual(G.number_of_edge(), 5) # |E|
self.assertTrue(np.array_equal(G.adjacency_matrix(), A)) # Adj Mat.
if __name__ == '__main__':
unittest.main()
| 24.941176
| 78
| 0.483491
| 735
| 0.866745
| 0
| 0
| 0
| 0
| 0
| 0
| 175
| 0.206368
|
67a783ee0f0ec9ab1fa4d600a15705146b7bc899
| 260
|
py
|
Python
|
09_cumledeki_kelime_sayisi.py
|
kabatasmirac/We_WantEd_OrnekCozumler
|
0f022361659fb78cd3f644910f3611d45df64317
|
[
"MIT"
] | 1
|
2020-06-09T13:09:23.000Z
|
2020-06-09T13:09:23.000Z
|
09_cumledeki_kelime_sayisi.py
|
kabatasmirac/We_WantEd_OrnekCozumler
|
0f022361659fb78cd3f644910f3611d45df64317
|
[
"MIT"
] | null | null | null |
09_cumledeki_kelime_sayisi.py
|
kabatasmirac/We_WantEd_OrnekCozumler
|
0f022361659fb78cd3f644910f3611d45df64317
|
[
"MIT"
] | null | null | null |
def kelime_sayisi(string):
counter = 1
for i in range(0,len(string)):
if string[i] == ' ':
counter += 1
return counter
cumle = input("Enter your sentence: ")
print("Number of words in your sentence = {}".format(kelime_sayisi(cumle)))
| 26
| 69
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 58
| 0.223077
|
67a9204ea3bc6abf715d94ea6ccb879d61991881
| 909
|
py
|
Python
|
pdns-mysql-domain-exp/lib/db.py
|
kilgoretrout1985/pdns-mysql-domain-exp
|
9692971da82d625b242c740d9be8e2130a483249
|
[
"MIT"
] | null | null | null |
pdns-mysql-domain-exp/lib/db.py
|
kilgoretrout1985/pdns-mysql-domain-exp
|
9692971da82d625b242c740d9be8e2130a483249
|
[
"MIT"
] | null | null | null |
pdns-mysql-domain-exp/lib/db.py
|
kilgoretrout1985/pdns-mysql-domain-exp
|
9692971da82d625b242c740d9be8e2130a483249
|
[
"MIT"
] | null | null | null |
import MySQLdb
def domains_from_db(connection_data: list, at_a_time: int = 1000) -> list:
domains = []
for connect_params in connection_data:
if 'charset' not in connect_params:
connect_params['charset'] = 'utf8'
db = MySQLdb.connect(**connect_params)
cursor = db.cursor()
cursor.execute("SELECT count(id) FROM domains")
data = cursor.fetchall()
max_i = int(data[0][0])
if max_i > 0:
for i in range(0, max_i, at_a_time):
cursor.execute("SELECT name FROM domains ORDER BY id ASC LIMIT %d, %d" % (i, at_a_time))
data = cursor.fetchall()
for rec in data:
domain = rec[0]
if domain.count('.') == 1: # no subdomains
domains.append(domain.lower().strip())
db.close()
return domains
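
# A minimal usage sketch (hypothetical credentials, not part of this module);
# each entry is a dict of MySQLdb.connect() keyword arguments:
#
#   domains = domains_from_db([
#       {'host': 'localhost', 'user': 'pdns', 'passwd': 'secret', 'db': 'pdns'},
#   ])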
| 34.961538
| 104
| 0.537954
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 128
| 0.140814
|
67a972ea6a872e759ef7065f8c8e54aa921e3f54
| 3,370
|
py
|
Python
|
barni/_result.py
|
Thrameos/barni
|
e5ba76f9bb04a15a272b5159b25e6425733102c4
|
[
"MIT"
] | 8
|
2020-03-16T23:21:59.000Z
|
2021-08-12T12:26:44.000Z
|
barni/_result.py
|
johnromo04/barni
|
3d758f21a9317b8826019261548339c047923b96
|
[
"MIT"
] | 6
|
2020-03-17T16:57:14.000Z
|
2020-08-04T17:51:45.000Z
|
barni/_result.py
|
johnromo04/barni
|
3d758f21a9317b8826019261548339c047923b96
|
[
"MIT"
] | 3
|
2020-03-17T00:47:28.000Z
|
2020-07-29T18:19:10.000Z
|
###############################################################################
# Copyright (c) 2019 Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
#
# Written by M. Monterial, K. Nelson
# monterial1@llnl.gov
#
# LLNL-CODE-805904
#
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED,INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
###############################################################################
'''
Module for handling the results of identification.
@author monterial1
'''
from typing import List
from collections import UserList
import textwrap
from ._architecture import NuclideResult, Serializable
from ._reader import registerReader
__all__ = ["NuclideResultList"]
class NuclideResultList(Serializable, UserList):
""" List of nuclide results.
"""
def addNuclideResult(self, input: NuclideResult):
self.data.append(input)
def toXml(self):
xml = "<NuclideResultList>\n"
for tmp in self.data:
xml += textwrap.indent(tmp.toXml(), " ")
xml += "</NuclideResultList>\n"
return xml
def loadNuclideResult(context, element):
""" Loads in a nuclide result
"""
out = NuclideResult(nuclide=None, score=None, prediction=None)
for node in element.childNodes:
# skip all but elements
if node.nodeType != node.ELEMENT_NODE:
continue
if node.tagName == "nuclide":
out.nuclide = str(node.firstChild.nodeValue)
continue
if node.tagName == "score":
out.score = float(node.firstChild.nodeValue)
continue
if node.tagName == "prediction":
out.prediction = int(node.firstChild.nodeValue)
continue
context.raiseElementError(element, node)
return out
def loadNuclideResultList(context, element):
""" Loads a list of nuclide results
"""
out = NuclideResultList()
for node in element.childNodes:
# skip all but elements
if node.nodeType != node.ELEMENT_NODE:
continue
if node.tagName == "NuclideResult":
            out.addNuclideResult(loadNuclideResult(context, node))
continue
context.raiseElementError(element, node)
return out
registerReader("NuclideResult", loadNuclideResult)
registerReader("NuclideResultList", loadNuclideResultList)
| 34.387755
| 81
| 0.67003
| 379
| 0.112463
| 0
| 0
| 0
| 0
| 0
| 0
| 1,801
| 0.534421
|
67a9af0c056744f8b59776cc12a80777352c44e7
| 2,976
|
py
|
Python
|
work/code/5fold/paddle_model.py
|
kkoren/2021CCFBDCI-QAmatch-rank5
|
379f89ad43ffcfbd2c15ad6ac4f93e8fa5b27dc3
|
[
"Apache-2.0"
] | null | null | null |
work/code/5fold/paddle_model.py
|
kkoren/2021CCFBDCI-QAmatch-rank5
|
379f89ad43ffcfbd2c15ad6ac4f93e8fa5b27dc3
|
[
"Apache-2.0"
] | null | null | null |
work/code/5fold/paddle_model.py
|
kkoren/2021CCFBDCI-QAmatch-rank5
|
379f89ad43ffcfbd2c15ad6ac4f93e8fa5b27dc3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on %(date)s
@author: Christian
"""
"""
# BASE + BN layer
# dropout changed to 0.15
"""
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddlenlp as ppnlp
class QuestionMatching_base(nn.Layer):
'''
    Base model.
    dropout set to 0.15
'''
def __init__(self, pretrained_model, dropout=None, rdrop_coef=0.0):
super().__init__()
self.ptm = pretrained_model
self.dropout = nn.Dropout(dropout if dropout is not None else 0.15)
        # Linear projection layer: Out = X @ W + b
self.classifier = nn.Linear(self.ptm.config["hidden_size"], 2)
def forward(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None,
do_evaluate=False):
_, cls_embedding1 = self.ptm(input_ids, token_type_ids, position_ids,
attention_mask)
cls_embedding1 = self.dropout(cls_embedding1)
logits1 = self.classifier(cls_embedding1)
kl_loss = 0.0
return logits1, kl_loss
class QuestionMatching_BN(nn.Layer):
'''
    Base model + BatchNorm.
    dropout set to 0.15
'''
def __init__(self, pretrained_model, dropout=None, rdrop_coef=0.0):
super().__init__()
self.ptm = pretrained_model
self.dropout = nn.Dropout(dropout if dropout is not None else 0.15)
self.linear=nn.Linear(self.ptm.config["hidden_size"], self.ptm.config["max_position_embeddings"])
self.batchnorm1d=nn.BatchNorm1D(self.ptm.config["max_position_embeddings"])
self.relu=nn.ReLU()
# self.relu=nn.GELU()
        # Linear projection layer: Out = X @ W + b
self.classifier = nn.Linear(self.ptm.config["max_position_embeddings"], 2)
def forward(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None,
do_evaluate=False):
        # pretrained_model returns:
        #   sequence_output [batch_size, sequence_length, hidden_size]
        #   pooled_output [batch_size, hidden_size] -- the output of the first token ([CLS]) in the sequence
        _, cls_embedding1 = self.ptm(input_ids,
                                     token_type_ids,  # distinguishes which sentence each token belongs to
                                     position_ids,  # position of each token, numbered sequentially from 0
                                     attention_mask,  # selects which tokens take part in self-attention (padding positions are excluded)
)
cls_embedding1 = self.dropout(cls_embedding1)
cls_embedding1=self.linear(cls_embedding1)
cls_embedding1=self.batchnorm1d(cls_embedding1)
cls_embedding1=self.relu(cls_embedding1)
cls_embedding1=self.dropout(cls_embedding1)
logits1 = self.classifier(cls_embedding1)
kl_loss = 0.0
return logits1, kl_loss
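
# A minimal usage sketch (the pretrained model name is an assumption; any
# paddlenlp encoder exposing a pooled [CLS] output should work, and the token
# tensors are assumed to come from a matching tokenizer):
#
#   pretrained = ppnlp.transformers.ErnieModel.from_pretrained('ernie-1.0')
#   model = QuestionMatching_BN(pretrained)
#   logits, kl_loss = model(input_ids, token_type_ids)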
| 30.367347
| 108
| 0.584341
| 2,897
| 0.923788
| 0
| 0
| 0
| 0
| 0
| 0
| 791
| 0.252232
|
67aa649ee72d5a267bbc9cdfc568c9bcaf20b9fc
| 21,917
|
py
|
Python
|
q2_mlab/plotting/app.py
|
patrickimran/regression-benchmarking
|
90a9dd1f4196d76145d17d733dffc13830fd95fa
|
[
"BSD-3-Clause"
] | null | null | null |
q2_mlab/plotting/app.py
|
patrickimran/regression-benchmarking
|
90a9dd1f4196d76145d17d733dffc13830fd95fa
|
[
"BSD-3-Clause"
] | 29
|
2020-04-22T16:39:02.000Z
|
2021-08-02T15:43:11.000Z
|
q2_mlab/plotting/app.py
|
patrickimran/regression-benchmarking
|
90a9dd1f4196d76145d17d733dffc13830fd95fa
|
[
"BSD-3-Clause"
] | 4
|
2019-12-30T17:06:04.000Z
|
2020-08-14T17:55:31.000Z
|
from functools import partialmethod
import pandas as pd
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
import sqlite3
import click
import json
import pkg_resources
from itertools import combinations
from q2_mlab.db.schema import RegressionScore
from q2_mlab.plotting.components import (
Mediator,
ComponentMixin,
Plottable,
ButtonComponent,
ScatterComponent,
SegmentComponent,
DataSourceComponent,
SelectComponent,
)
from bokeh.plotting import figure
from bokeh.transform import factor_cmap
from bokeh.models import (
ColumnDataSource,
CheckboxButtonGroup,
TextInput,
Legend,
LegendItem,
)
from bokeh.models.widgets import (
Div,
)
from bokeh.palettes import (
Category20,
Set3,
)
from bokeh.layouts import column, row
from bokeh.server.server import Server
groups = ['parameters_id', 'dataset', 'target', 'level', 'algorithm']
drop_cols = ['artifact_uuid', 'datetime', 'CV_IDX', 'id']
target_map = {
'age_v2': 'age',
'BL_AGE': 'age',
'age': 'age',
'bmi_v2': 'bmi',
'BMI': 'bmi',
'bmi': 'bmi'
}
with pkg_resources.resource_stream(
__name__, "standard_deviations.json"
) as f:
TARGET_SD = json.load(f)
def _get_standardized_mae(df_row, norm_dict):
"""
"""
mae = df_row['MAE']
target = df_row['target']
dataset = df_row['dataset']
cv_fold = df_row['CV_IDX']
level = df_row['level']
key = f"({dataset}, {target}, {level}, {cv_fold})"
sd = norm_dict.get(key, 1)
standardized_mae = mae / sd
return standardized_mae
def process_db_df(df):
# remap values for consistency
df['level'] = df['level'].replace('none', 'MG')
df['target'] = df['target'].map(target_map)
df['standardized_MAE'] = df.apply(_get_standardized_mae, axis=1,
args=(TARGET_SD,))
group_stats = df.drop(
drop_cols, axis=1
).groupby(
groups
).agg(
['var', 'mean']
)
group_stats.columns = agg_columns = ['_'.join(col).strip() for
col in group_stats.columns.values]
group_stats.reset_index(inplace=True)
min_by = ['dataset', 'target']
group_mins = group_stats[agg_columns + min_by].groupby(min_by).min()
indices = group_stats[['dataset', 'target']].to_records(
index=False).tolist()
expanded_group_mins = group_mins.loc[indices]
expanded_group_mins.index = group_stats.index
relative_group_stats = (group_stats / expanded_group_mins)[agg_columns]
relative_group_stats.columns = ['relative_' + col for
col in relative_group_stats]
group_stats = group_stats.join(relative_group_stats)
return group_stats
def find_segments(group_stats, across, groupby):
"""
TODO makes some assumptions about the guarantees on pairs when there are
more than 2 categories
"""
seg_cols = groupby.copy()
seg_cols.remove(across)
group_counts = group_stats[seg_cols + [across]].groupby(seg_cols).count()
max_n_pairs = group_counts[across].max()
category_values = group_stats[across].unique()
where = (group_counts[across] == max_n_pairs)
keep_repeats = group_stats.set_index(seg_cols).loc[where]
keep_repeats_parts = []
for i, sub_group in enumerate(category_values):
where = keep_repeats[across] == sub_group
keep_repeats_parts.append(keep_repeats.loc[where])
keep_repeats_parts[i].columns = [col + '_' + sub_group for
col in keep_repeats_parts[i].columns]
segment_df = pd.concat(keep_repeats_parts,
axis=1
)
return segment_df
class TextInputComponent(ComponentMixin):
def __init__(self, text_input_kwargs):
super().__init__()
self.text_input = TextInput(**text_input_kwargs)
self.layout = self.text_input
self.input_text_callback = None
def set_mediator(self, mediator):
super().set_mediator(mediator)
event_name = 'text-change'
text_change = self.make_attr_old_new_callback(event_name)
self.input_text_callback = text_change
self.text_input.on_change('value', self.input_text_callback)
class CheckboxButtonGroupComponent(ComponentMixin):
def __init__(self, checkbox_kwargs):
super().__init__()
self.checkbox = CheckboxButtonGroup(**checkbox_kwargs)
self.checkbox_change = None
self.layout = self.checkbox
def set_mediator(self, mediator):
super().set_mediator(mediator)
event_name = 'checkbox-change'
self.checkbox_change = self.make_attr_old_new_callback(event_name)
self.checkbox.on_change('active', self.checkbox_change)
class SegmentComponentExt(SegmentComponent):
def redraw(self, x, y, seg_0, seg_1, data):
self.data_source.data = data
self.segment.glyph.x0 = '_'.join([x, seg_0])
self.segment.glyph.x1 = '_'.join([x, seg_1])
self.segment.glyph.y0 = '_'.join([y, seg_0])
self.segment.glyph.y1 = '_'.join([y, seg_1])
palettes = {
'Category20': Category20,
'Set3': Set3,
}
DEFAULTS = {
'segment_variable': 'dataset',
'x': 'MAE_mean',
'y': 'MAE_var',
'x_axis_type': 'log',
'y_axis_type': 'log',
'cmap': 'Category20'
}
class AlgorithmScatter(Mediator, Plottable):
def __init__(self, x, y, engine, cmap=None):
super().__init__()
self.x = x
self.y = y
self.engine = engine
self.data = None
self.scatter = None
if cmap is None:
self.cmap = Category20
else:
self.cmap = cmap
self.line_segment_variable = DEFAULTS['segment_variable']
self.data_raw = None
self.data_static = None
self.data = None
self.seg_0, self.seg_1 = None, None
self.scatter_source = None
self.x_axis_type = DEFAULTS['x_axis_type']
self.y_axis_type = DEFAULTS['y_axis_type']
self.axis_types = ['linear', 'log']
self.line_segment_pairs = {
'dataset': ['finrisk', 'sol'],
'level': ['16S', 'MG'],
'target': ['age', 'bmi'],
}
self.scatter_tools = 'pan,wheel_zoom,box_select,lasso_select,'\
'reset,box_zoom,save'
self.segment = None
self.segment_source = None
self.segment_button = None
self.segment_variable_select = None
self.x_var_select = None
self.y_var_select = None
self.dataset_bars = None
self.dataset_bars_source = None
self.dataset_bars_figure = None
self.level_bars = None
self.level_bars_source = None
self.level_bars_figure = None
self.target_bars = None
self.target_bars_source = None
self.target_bars_figure = None
self.query_button = None
self.query_input = None
self.query_row = None
self.layout = None
def notify(self,
component,
event_name,
*args, **kwargs,
):
if (event_name == 'dropdown-select') and \
(component is self.x_var_select):
self.x = component.select.value
self.scatter.scatter.glyph.x = self.x
self.scatter.layout.xaxis.axis_label = self.x
self.segment.segment.glyph.x0 = '_'.join([self.x, self.seg_0])
self.segment.segment.glyph.x1 = '_'.join([self.x, self.seg_1])
if (event_name == 'dropdown-select') and \
(component is self.y_var_select):
self.y = component.select.value
self.scatter.scatter.glyph.y = self.y
self.scatter.layout.yaxis.axis_label = self.y
self.segment.segment.glyph.y0 = '_'.join([self.y, self.seg_0])
self.segment.segment.glyph.y1 = '_'.join([self.y, self.seg_1])
if (event_name == 'selected-indices') and \
(component is self.scatter_source):
selected_indices = self.scatter_source.data_source.selected.indices
self.dataset_bars_source.data = self.get_dataset_counts(
indices=selected_indices,
)
self.level_bars_source.data = self.get_level_counts(
indices=selected_indices,
)
self.target_bars_source.data = self.get_target_counts(
indices=selected_indices,
)
if (event_name == 'button-click') and \
(component is self.query_button):
df = self.handle_query(self.query_input.text_input.value)
# need to update self.data due to how the hbars are currently
# written
self.data = df
self.scatter_source.data_source.data = df.to_dict(orient='list')
segment_source = find_segments(
df,
across=self.line_segment_variable,
groupby=['parameters_id', 'algorithm', 'level', 'dataset',
'target'],
)
self.segment.segment.data_source.data = segment_source.to_dict(
orient='list',
)
selected_indices = self.scatter_source.data_source.selected.indices
self.dataset_bars_source.data = self.get_dataset_counts(
indices=selected_indices,
)
self.level_bars_source.data = self.get_level_counts(
indices=selected_indices,
)
self.target_bars_source.data = self.get_target_counts(
indices=selected_indices,
)
if (event_name == 'checkbox-change') and \
(component is self.segment_button):
active = self.segment_button.checkbox.active
if 0 in active:
self.segment.segment.visible = True
else:
self.segment.segment.visible = False
if (event_name == 'dropdown-select') and \
(component is self.segment_variable_select):
new_segment_variable = self.segment_variable_select.select.value
self.line_segment_variable = new_segment_variable
new_segment_data = find_segments(
self.data,
across=self.line_segment_variable,
groupby=['parameters_id', 'algorithm', 'level', 'dataset',
'target']
)
line_segment_ends = self.line_segment_pairs[new_segment_variable]
self.segment.redraw(
self.x,
self.y,
*line_segment_ends,
new_segment_data
)
def plot(self):
self.data_raw = pd.read_sql_table(RegressionScore.__tablename__,
con=self.engine,
)
# TODO this is temporary
self.data_raw = self.data_raw.loc[
self.data_raw['algorithm'] != 'MLPRegressor'
]
self.data = df = process_db_df(self.data_raw)
self.data_static = df
self.seg_0, self.seg_1 = self.line_segment_pairs[
self.line_segment_variable
]
# ## Data Setup
scatter_source = ColumnDataSource(df)
self.scatter_source = DataSourceComponent(scatter_source)
self.scatter_source.set_mediator(self)
# ## General Setup
algorithms = sorted(df['algorithm'].unique())
levels = sorted(df['level'].unique())
datasets = sorted(df['dataset'].unique())
targets = sorted(df['target'].unique())
plot_width = 600
self.line_segment_pairs = {
'level': ['16S', 'MG'],
'target': ['age', 'bmi'],
}
dataset_combinations = combinations(["finrisk", "imsms", "sol"], r=2)
for dataset_pair in dataset_combinations:
d1, d2 = dataset_pair
self.line_segment_pairs[f"{d1}-to-{d2}"] = [d1, d2]
categorical_variables = ['parameters_id', 'target', 'algorithm',
'level', 'dataset']
plottable_variables = list(sorted(
df.columns.drop(categorical_variables)
))
color_scheme = self.cmap[len(algorithms)]
algorithm_cmap = factor_cmap('algorithm', palette=color_scheme,
factors=algorithms,
)
figure_kwargs = dict(x_axis_type=self.x_axis_type,
y_axis_type=self.y_axis_type,
plot_height=400,
tools=self.scatter_tools,
output_backend='webgl',
)
# ## Segment Plot
segment_source = ColumnDataSource(
find_segments(self.data, across=self.line_segment_variable,
groupby=['parameters_id', 'algorithm', 'level',
'dataset']
)
)
self.segment_source = DataSourceComponent(scatter_source)
self.segment = SegmentComponentExt(data_source=segment_source)
segment_kwargs = {
'x0': self.x + '_' + self.seg_0,
'x1': self.x + '_' + self.seg_1,
'y0': self.y + '_' + self.seg_0,
'y1': self.y + '_' + self.seg_1,
'line_width': 0.1,
'line_color': '#A9A9A9',
}
self.segment.plot(
figure_kwargs=figure_kwargs,
segment_kwargs=segment_kwargs,
)
# ## Segment Visible button
self.segment_button = CheckboxButtonGroupComponent(
checkbox_kwargs=dict(
labels=['Segments'],
active=[0],
)
)
self.segment_button.set_mediator(self)
self.segment_variable_select = SelectComponent(
select_kwargs=dict(
value=self.line_segment_variable,
title='Segment Variable',
options=list(self.line_segment_pairs.keys()),
)
)
self.segment_variable_select.set_mediator(self)
# ## Scatter plot
self.scatter = ScatterComponent()
scatter_kwargs = dict(x=self.x, y=self.y, source=scatter_source,
# legend_field='algorithm',
fill_color=algorithm_cmap,
name='scatter',
)
self.scatter.plot(
figure=self.segment.layout,
scatter_kwargs=scatter_kwargs,
)
scatter = self.scatter.layout
scatter.toolbar.logo = None
scatter.xaxis.axis_label = self.x
scatter.yaxis.axis_label = self.y
self.scatter.scatter.glyph.line_color = 'white'
self.scatter.scatter.glyph.line_width = 0.1
self.scatter.scatter.nonselection_glyph.line_color = 'white'
transform = algorithm_cmap['transform']
legend_fig = figure(outline_line_alpha=0, toolbar_location=None)
legend_items = []
for i, (alg, color) in enumerate(zip(transform.factors,
transform.palette)):
legend_fig.circle(fill_color=color, name=f'circ{i}',
line_color='white',
)
renderers = legend_fig.select(name=f'circ{i}')
legend_item = LegendItem(
label=alg,
renderers=renderers,
)
legend_items.append(legend_item)
legend = Legend(
items=legend_items,
location='top_left',
)
legend_fig.add_layout(legend)
scatter.plot_width = plot_width
scatter.plot_height = 500
# ## Variable Selection
self.x_var_select = SelectComponent(
select_kwargs=dict(
value=self.x,
title='X variable',
options=plottable_variables
)
)
self.x_var_select.set_mediator(self)
x_select = self.x_var_select.select
self.y_var_select = SelectComponent(
select_kwargs=dict(
value=self.y,
title='Y variable',
options=plottable_variables
)
)
self.y_var_select.set_mediator(self)
y_select = self.y_var_select.select
# ## Dataset Stacked Hbars
data_getter = self.get_dataset_counts
self.dataset_bars_source = ColumnDataSource(data_getter())
self.dataset_bars_figure = figure(y_range=datasets, plot_height=100)
self.dataset_bars = self.dataset_bars_figure.hbar_stack(
algorithms, y='dataset',
height=0.9,
color=color_scheme,
source=self.dataset_bars_source,
)
self.dataset_bars_figure.toolbar_location = None
self.dataset_bars_figure.plot_width = plot_width
# ## Level Stacked Hbars
data_getter = self.get_level_counts
self.level_bars_source = ColumnDataSource(data_getter())
self.level_bars_figure = figure(y_range=levels, plot_height=100)
self.level_bars = self.level_bars_figure.hbar_stack(
algorithms, y='level',
height=0.9,
color=color_scheme,
source=self.level_bars_source,
)
self.level_bars_figure.toolbar_location = None
self.level_bars_figure.plot_width = plot_width
# ## Target Stacked Hbars
data_getter = self.get_target_counts
self.target_bars_source = ColumnDataSource(data_getter())
self.target_bars_figure = figure(y_range=targets, plot_height=100)
self.target_bars = self.target_bars_figure.hbar_stack(
algorithms, y='target',
height=0.9,
color=color_scheme,
source=self.target_bars_source,
)
self.target_bars_figure.toolbar_location = None
self.target_bars_figure.plot_width = plot_width
# ## Text input
button_width = 100
self.query_input = TextInputComponent(
text_input_kwargs=dict(
title='Enter query',
width=plot_width - button_width
)
)
self.query_button = ButtonComponent(
button_kwargs=dict(
label='Execute',
width=button_width,
)
)
self.query_button.set_mediator(self)
self.query_row = row(self.query_input.layout,
column(
Div(text="", height=8),
self.query_button.layout,
))
# ## Layout
variable_selection = row(x_select, y_select,
)
segment_selection = row(
self.segment_variable_select.layout,
column(
Div(text="", height=8),
self.segment_button.layout,
)
)
self.layout = row(
column(
self.query_row,
variable_selection,
segment_selection,
row(
scatter,
column(
self.dataset_bars_figure,
self.level_bars_figure,
self.target_bars_figure,
legend_fig,
),
),
),
)
return self
def handle_query(self, text):
if text != '':
df = self.data_static.query(text).reset_index(drop=True)
else:
df = self.data_static
return df
def get_counts_by(self, category, by, indices=None):
# TODO consider switching orientation of counts and by
data = self.subset_selected(indices)
counts = pd.crosstab(data[by], data[category])
# algorithms = list(counts.index.values)
counts_dict = counts.to_dict(orient='list')
levels = sorted(self.data[by].unique())
counts_dict[by] = list(filter(lambda x: x in counts.index, levels))
return counts_dict
def subset_selected(self, indices):
        # handles both None and an empty indices list
if not indices:
# might want to grab data from the scatter plot instead
data = self.data
else:
data = self.data.reindex(indices)
return data
get_level_counts = partialmethod(get_counts_by, 'algorithm', 'level')
get_dataset_counts = partialmethod(get_counts_by, 'algorithm', 'dataset')
get_target_counts = partialmethod(get_counts_by, 'algorithm', 'target')
def app(self, doc):
doc.add_root(self.layout)
@click.group('mlab-plotting')
def mlab_plotting():
pass
@mlab_plotting.command()
@click.option(
'--db',
help='Path to SQLite database file.',
type=click.Path(exists=False),
)
@click.option(
'--color-scheme',
help='Color scheme to plot with',
type=click.Choice(
list(palettes.keys()),
),
default=DEFAULTS['cmap'],
)
def metric_scatter(db, color_scheme):
run_app(db, color_scheme)
def run_app(db, color_scheme):
# thanks https://github.com/sqlalchemy/sqlalchemy/issues/4863
def connect():
return sqlite3.connect(f"file:{db}?mode=ro", uri=True)
engine = create_engine("sqlite://", creator=connect)
bkapp = AlgorithmScatter(
DEFAULTS['x'], DEFAULTS['y'],
engine=engine,
cmap=palettes.get(color_scheme),
).plot().app
server = Server({'/': bkapp})
server.start()
server.io_loop.add_callback(server.show, "/")
server.io_loop.start()
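# Hedged illustration (standalone, not part of the app): the crosstab-to-dict
# shape that get_counts_by() feeds into hbar_stack(). The toy frame and the
# column names below are assumptions made only for this sketch.
def _demo_counts_by():
    import pandas as pd
    df = pd.DataFrame({
        'algorithm': ['a', 'a', 'b', 'b', 'b'],
        'level': ['L1', 'L2', 'L1', 'L1', 'L2'],
    })
    counts = pd.crosstab(df['level'], df['algorithm'])
    counts_dict = counts.to_dict(orient='list')  # {'a': [1, 1], 'b': [2, 1]}
    counts_dict['level'] = list(counts.index)    # ['L1', 'L2']
    return counts_dict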
| 33.927245
| 79
| 0.578409
| 16,969
| 0.774239
| 0
| 0
| 425
| 0.019391
| 0
| 0
| 2,387
| 0.108911
|
67add2205d4190930f5b032323a1238d7a058e8c
| 6,378
|
py
|
Python
|
gpn/distributions/base.py
|
WodkaRHR/Graph-Posterior-Network
|
139e7c45c37324c9286e0cca60360a4978b3f411
|
[
"MIT"
] | 23
|
2021-11-16T01:31:55.000Z
|
2022-03-04T05:49:03.000Z
|
gpn/distributions/base.py
|
WodkaRHR/Graph-Posterior-Network
|
139e7c45c37324c9286e0cca60360a4978b3f411
|
[
"MIT"
] | 1
|
2021-12-17T01:25:16.000Z
|
2021-12-20T10:38:30.000Z
|
gpn/distributions/base.py
|
WodkaRHR/Graph-Posterior-Network
|
139e7c45c37324c9286e0cca60360a4978b3f411
|
[
"MIT"
] | 7
|
2021-12-03T11:13:44.000Z
|
2022-02-06T03:12:10.000Z
|
import torch
import torch.distributions as D
class ExponentialFamily(D.ExponentialFamily):
"""
Shared base distribution for exponential family distributions.
"""
@property
def is_sparse(self):
"""
Whether the distribution's parameters are sparse. Just returns `False`.
"""
return False
def is_contiguous(self):
"""
Whether this distribution's parameters are contiguous. Just returns `True`.
"""
return True
def to(self, *args, **kwargs):
"""
Moves the probability distribution to the specified device.
"""
raise NotImplementedError
#--------------------------------------------------------------------------------------------------
class Likelihood(ExponentialFamily):
"""
A likelihood represents a target distribution which has a conjugate prior. Examples are the
Normal distribution for regression and the Categorical distribution for classification.
Besides this class's abstract methods, a likelihood distribution must (at least) implement the
methods/properties :code:`mean`, :code:`entropy` and :code:`log_prob`.
"""
@classmethod
def __prior__(cls):
"""
The distribution class that the prior is based on.
"""
raise NotImplementedError
@classmethod
def from_model_params(cls, x):
"""
Returns the distribution as parametrized by some model. Although this is model-dependent,
the model typically returns outputs on the real line and this method ensures that the
parameters are valid (e.g. Softmax function over logits).
Parameters
----------
x: torch.Tensor [N, ...]
The parameters of the distribution.
Returns
-------
evidence.distributions.Likelihood
The likelihood.
"""
raise NotImplementedError
@property
def sufficient_statistic_mean(self):
"""
Returns the mean (expectation) of the sufficient statistic of this distribution. That is,
it returns the average of the sufficient statistic if infinitely many samples were drawn
from this distribution.
"""
raise NotImplementedError
def uncertainty(self):
"""
Returns some measure of uncertainty of the distribution. Usually, this is the entropy but
distributions may choose to implement it differently if the entropy is intractable.
"""
return self.entropy()
#--------------------------------------------------------------------------------------------------
class ConjugatePrior(ExponentialFamily):
"""
A conjugate prior is an exponential family distribution which is conjugate for another
(exponential family) distribution that is the underlying distribution for some likelihood
function. The class of this underlying distribution must be available via the
:code:`__likelihood__` property.
Besides this class's abstract methods, a conjugate prior must (at least) implement the methods/
properties :code:`mean` and :code:`entropy`.
"""
@classmethod
def __likelihood__(cls):
"""
The distribution class that the likelihood function is based on.
"""
raise NotImplementedError
@classmethod
def from_sufficient_statistic(cls, sufficient_statistic, evidence, prior=None):
"""
Initializes this conjugate prior where parameters are computed from the given sufficient
statistic and the evidence.
Parameters
----------
sufficient_statistic: torch.Tensor [N, ...]
The sufficient statistic for arbitrarily many likelihood distributions (number of
distributions N).
evidence: torch.Tensor [N]
The evidence for all likelihood distributions (i.e. the "degree of confidence").
prior: tuple of (torch.Tensor[...], torch.Tensor [1]), default: None
Optional prior to set on the sufficient statistic and the evidence. There always exists
a bijective mapping between these priors and priors on the distribution's parameters.
Returns
-------
Self
An instance of this class.
"""
raise NotImplementedError
def log_likeli_mean(self, data):
"""
Computes the mean (expectation) of the log-probability of observing the given data. The data
is assumed to be distributed according to this prior's likelihood distribution.
Parameters
----------
data: torch.Tensor [N, ...]
The observed values in the support of the likelihood distribution. The number of
observations must be equal to the batch shape of this distribution (number of
observations N).
Returns
-------
torch.Tensor [N]
The expectation of the log-probability for all observed values.
"""
raise NotImplementedError
@property
def predictive_distribution(self):
"""
Returns the posterior predictive distribution.
Returns
-------
evidence.distributions.PosteriorPredictive
The predictive distribution.
"""
raise NotImplementedError
@property
def mean_distribution(self):
"""
Computes the mean of this distribution and returns the likelihood distribution parametrized
with this mean.
Returns
-------
torch.distributions.ExponentialFamily
The distribution that is defined by :meth:`__likelihood__`.
"""
raise NotImplementedError
#--------------------------------------------------------------------------------------------------
class PosteriorPredictive(D.Distribution):
"""
A posterior predictive distribution, typically obtained from a :class:`ConjugatePrior`.
"""
def pvalue(self, x):
"""
Computes the p-value of the given data for use in a two-sided statistical test.
Parameters
----------
x: torch.Tensor [N]
The targets for which to compute the p-values.
Returns
-------
torch.Tensor [N]
The p-values.
"""
cdf = self.cdf(x)
return 2 * torch.min(cdf, 1 - cdf)
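# Hedged illustration (not part of gpn): one concrete instantiation of the
# interfaces above, using torch's built-in Dirichlet/Categorical conjugate
# pair. All names and numbers below are assumptions made for this sketch.
def _demo_dirichlet_categorical():
    counts = torch.tensor([[3.0, 1.0, 1.0]])    # sufficient statistic: class counts
    evidence = counts.sum(-1, keepdim=True)     # the "degree of confidence"
    prior = D.Dirichlet(counts + 1.0)           # conjugate prior (flat prior added)
    mean_lik = D.Categorical(probs=prior.mean)  # analogue of mean_distribution
    return prior, mean_lik, evidence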
| 33.21875
| 100
| 0.607087
| 6,022
| 0.944183
| 0
| 0
| 3,056
| 0.479147
| 0
| 0
| 4,973
| 0.779712
|
67addac624c1ac8a0bc388113f31ef1180a2d2c5
| 557
|
py
|
Python
|
demos/python/3_statements.py
|
denfromufa/mipt-course
|
ad828f9f3777b68727090bcd69feb0dd91f17465
|
[
"BSD-3-Clause"
] | null | null | null |
demos/python/3_statements.py
|
denfromufa/mipt-course
|
ad828f9f3777b68727090bcd69feb0dd91f17465
|
[
"BSD-3-Clause"
] | null | null | null |
demos/python/3_statements.py
|
denfromufa/mipt-course
|
ad828f9f3777b68727090bcd69feb0dd91f17465
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
condition = 42
# IMPORTANT: colons, _indentation_ are significant!
if condition:
print "Condition is true!"
elif True: # not 'true'!
print "I said it's true! :)"
else:
print "Condition is false :("
# of course, elif/else are optional
assert True == (not False)
# Equivalent of `for (int i = 0; i < 13; i++) {`
for i in range(0, 13):
print i, # "," at the end means "no newline"
print # newline
while True:
if condition == 42:
break
elif condition == 17:
continue
else:
print "?"
| 19.892857
| 51
| 0.601436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 276
| 0.495512
|
67ae4667834ab686277782bd3ef57e5f23b602fc
| 6,492
|
py
|
Python
|
dedupe/training.py
|
BrianSipple/dedupe
|
d276da675e319d5cc6e7cafd4963deebde0d485d
|
[
"MIT"
] | 1
|
2015-11-06T01:33:04.000Z
|
2015-11-06T01:33:04.000Z
|
dedupe/training.py
|
BrianSipple/dedupe
|
d276da675e319d5cc6e7cafd4963deebde0d485d
|
[
"MIT"
] | null | null | null |
dedupe/training.py
|
BrianSipple/dedupe
|
d276da675e319d5cc6e7cafd4963deebde0d485d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# provides functions for selecting a sample of training data
from itertools import combinations, islice
import blocking
import core
import numpy
import logging
import random
import sys
def findUncertainPairs(field_distances, data_model, bias=0.5):
"""
Given a set of field distances and a data model return the
indices of the record pairs in order of uncertainty. For example,
the first indices corresponds to the record pair where we have the
least certainty whether the pair are duplicates or distinct.
"""
probability = core.scorePairs(field_distances, data_model)
p_max = (1.0 - bias)
logging.info(p_max)
informativity = numpy.copy(probability)
informativity[probability < p_max] /= p_max
informativity[probability >= p_max] = (1 - probability[probability >= p_max])/(1-p_max)
return numpy.argsort(-informativity)
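# Hedged illustration (standalone, never called by dedupe): the piecewise
# informativity mapping above applied to a toy probability vector, with
# bias=0.5 so that p_max=0.5. Scores nearest 0.5 rank as most uncertain.
def _demo_informativity():
    probability = numpy.array([0.05, 0.45, 0.55, 0.95])
    p_max = 0.5
    informativity = numpy.copy(probability)
    informativity[probability < p_max] /= p_max
    informativity[probability >= p_max] = (
        1 - probability[probability >= p_max]) / (1 - p_max)
    return numpy.argsort(-informativity)  # ranks the 0.45/0.55 entries first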
def activeLearning(candidates,
data_model,
labelPairFunction,
training_data,
training_pairs=None):
"""
Ask the user to label the record pair we are most uncertain of. Train the
data model, and update our uncertainty. Repeat until user tells us she is
finished.
"""
fields = [field for field in data_model['fields']
if data_model['fields'][field]['type'] not in ('Missing Data',
'Interaction',
'Higher Categories')]
duplicates = []
nonduplicates = []
if training_pairs:
nonduplicates.extend(training_pairs[0])
duplicates.extend(training_pairs[1])
if training_data.shape[0] == 0 :
        rand_int = random.randint(0, len(candidates) - 1)  # randint is inclusive at both ends
exact_match = candidates[rand_int]
training_data = addTrainingData({1:[exact_match]*2,
0:[]},
data_model,
training_data)
data_model = core.trainModel(training_data, data_model, .1)
finished = False
import time
t_train = time.time()
field_distances = core.fieldDistances(candidates, data_model)
logging.info('calculated fieldDistances in %s seconds',
str(time.time() - t_train))
seen_indices = set()
    while not finished:
logging.info('finding the next uncertain pair ...')
uncertain_indices = findUncertainPairs(field_distances,
data_model,
(len(duplicates)/
(len(nonduplicates)+1.0)))
for uncertain_index in uncertain_indices:
if uncertain_index not in seen_indices:
seen_indices.add(uncertain_index)
break
uncertain_pairs = [candidates[uncertain_index]]
(labeled_pairs, finished) = labelPairFunction(uncertain_pairs, fields)
nonduplicates.extend(labeled_pairs[0])
duplicates.extend(labeled_pairs[1])
training_data = addTrainingData(labeled_pairs, data_model, training_data)
if len(training_data) > 0:
data_model = core.trainModel(training_data, data_model, .1)
else:
raise ValueError('No training pairs given')
training_pairs = {0: nonduplicates, 1: duplicates}
return (training_data, training_pairs, data_model)
def addTrainingData(labeled_pairs, data_model, training_data=[]):
"""
Appends training data to the training data collection.
"""
fields = data_model['fields']
examples = [record_pair for example in labeled_pairs.values()
for record_pair in example]
new_training_data = numpy.empty(len(examples),
dtype=training_data.dtype)
new_training_data['label'] = [0] * len(labeled_pairs[0]) + [1] * len(labeled_pairs[1])
new_training_data['distances'] = core.fieldDistances(examples, data_model)
training_data = numpy.append(training_data, new_training_data)
return training_data
def consoleLabel(uncertain_pairs, fields):
'''Command line interface for presenting and labeling training pairs by the user'''
duplicates = []
nonduplicates = []
finished = False
for record_pair in uncertain_pairs:
label = ''
for pair in record_pair:
for field in fields:
line = "%s : %s\n" % (field, pair[field])
sys.stderr.write(line)
sys.stderr.write('\n')
sys.stderr.write('Do these records refer to the same thing?\n')
valid_response = False
while not valid_response:
sys.stderr.write('(y)es / (n)o / (u)nsure / (f)inished\n')
label = sys.stdin.readline().strip()
if label in ['y', 'n', 'u', 'f']:
valid_response = True
if label == 'y':
duplicates.append(record_pair)
elif label == 'n':
nonduplicates.append(record_pair)
elif label == 'f':
sys.stderr.write('Finished labeling\n')
finished = True
break
elif label != 'u':
            sys.stderr.write('Invalid response\n')
            raise ValueError('invalid label: %r' % label)
return ({0: nonduplicates, 1: duplicates}, finished)
def semiSupervisedNonDuplicates(data_sample,
data_model,
nonduplicate_confidence_threshold=.7,
sample_size=2000):
confidence = 1 - nonduplicate_confidence_threshold
def distinctPairs() :
data_slice = data_sample[0:sample_size]
pair_distance = core.fieldDistances(data_slice, data_model)
scores = core.scorePairs(pair_distance, data_model)
sample_n = 0
for score, pair in zip(scores, data_sample) :
if score < confidence :
yield pair
sample_n += 1
if sample_n < sample_size and len(data_sample) > sample_size :
for pair in data_sample[sample_size:] :
pair_distance = core.fieldDistances([pair], data_model)
score = core.scorePairs(pair_distance, data_model)
if score < confidence :
yield (pair)
return islice(distinctPairs(), 0, sample_size)
| 31.362319
| 91
| 0.590881
| 0
| 0
| 1,042
| 0.160505
| 0
| 0
| 0
| 0
| 1,079
| 0.166205
|
67aebac6e47b438aae9ad595766760877ca83a55
| 166
|
py
|
Python
|
Chapter04/listcmp1.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | 12
|
2018-07-09T16:20:31.000Z
|
2022-03-21T22:52:15.000Z
|
Chapter04/listcmp1.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | null | null | null |
Chapter04/listcmp1.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | 19
|
2018-01-09T12:49:06.000Z
|
2021-11-23T08:05:55.000Z
|
list1 = [10,9,3,7,2,1,23,1,561,1,1,96,1]
def cmp1(x,y):
if x == 1 or y==1:
c = y-x
else:
c = x-y
return c
list1.sort(cmp = cmp1)
print list1
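# Hedged aside (not in the original demo): Python 3 removed the cmp= argument
# to sort(); the same ordering is written there with functools.cmp_to_key,
# which also works on Python 2.7.
from functools import cmp_to_key

nums = [10, 9, 3, 7, 2, 1, 23, 1, 561, 1, 1, 96, 1]
nums.sort(key=cmp_to_key(cmp1))
print(nums)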
| 12.769231
| 41
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
67aefde1df9dfdcb55a1ab80ea64b075758a46e0
| 520
|
py
|
Python
|
ObitSystem/ObitTalk/test/template.py
|
sarrvesh/Obit
|
e4ce6029e9beb2a8c0316ee81ea710b66b2b7986
|
[
"Linux-OpenIB"
] | 5
|
2019-08-26T06:53:08.000Z
|
2020-10-20T01:08:59.000Z
|
ObitSystem/ObitTalk/test/template.py
|
sarrvesh/Obit
|
e4ce6029e9beb2a8c0316ee81ea710b66b2b7986
|
[
"Linux-OpenIB"
] | null | null | null |
ObitSystem/ObitTalk/test/template.py
|
sarrvesh/Obit
|
e4ce6029e9beb2a8c0316ee81ea710b66b2b7986
|
[
"Linux-OpenIB"
] | 8
|
2017-08-29T15:12:32.000Z
|
2022-03-31T12:16:08.000Z
|
from AIPS import AIPS
from AIPSTask import AIPSTask
from AIPSData import AIPSImage
from ObitTask import ObitTask
AIPS.userno = 103
image = AIPSImage('MANDELBROT', 'MANDL', 1, 1)
mandl = AIPSTask('mandl')
mandl.outdata = image
mandl.imsize[1:] = [ 512, 512 ]
mandl.go()
try:
template = ObitTask('Template')
template.DataType = 'AIPS'
template.inName = image.name
template.inClass = image.klass
template.inDisk = image.disk
template.inSeq = image.seq
template.go()
finally:
image.zap()
| 20.8
| 46
| 0.701923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 42
| 0.080769
|
67afb6f388c98096e84a0f8aa3dc9e79c6d38f5b
| 5,186
|
py
|
Python
|
src/voxelize.py
|
Beskamir/BlenderDepthMaps
|
ba1201effde617078fb35f23d534372de3dd39c3
|
[
"MIT"
] | null | null | null |
src/voxelize.py
|
Beskamir/BlenderDepthMaps
|
ba1201effde617078fb35f23d534372de3dd39c3
|
[
"MIT"
] | null | null | null |
src/voxelize.py
|
Beskamir/BlenderDepthMaps
|
ba1201effde617078fb35f23d534372de3dd39c3
|
[
"MIT"
] | null | null | null |
import bpy
import bmesh
import numpy
from random import randint
import time
# pointsToVoxels() has been modified from the function generate_blocks() in https://github.com/cagcoach/BlenderPlot/blob/master/blendplot.py
# Some changes to accommodate Blender 2.8's API changes were made,
# and the function has been made much more efficient through creative usage of numpy.
def pointsToVoxels(points, name="VoxelMesh"):
# For now, we'll combine the voxels from each of the six views into one array and then just take the unique values.
# Later on, this could be re-structured to, for example, render the voxels from each face in a separate colour
points = numpy.concatenate(tuple(points.values()))
points = numpy.unique(points, axis=0)
print("Number of points:", len(points))
mesh = bpy.data.meshes.new("mesh") # add a new mesh
obj = bpy.data.objects.new(name, mesh)
bpy.context.collection.objects.link(obj) # put the object into the scene (link)
bpy.context.view_layer.objects.active = obj
obj.select_set(state=True) # select object
mesh = obj.data
bm = bmesh.new()
# 0 1 2 3 4 5 6 7
block=numpy.array([ [-1,-1,-1],[-1,-1,1],[-1,1,-1],[-1,1,1],[1,-1,-1],[1,-1,1],[1,1,-1],[1,1,1] ]).astype(float)
block*=0.5
print("Creating vertices...")
# Function to apply each point to each element of "block" as efficiently as possible
# First, produce 8 copies of each point. numpy.tile() is apparently the most efficient way to do so.
pointsTiled = numpy.tile(points, (1,8))
# This will make each tuple 24 items long. To fix this, we need to reshape pointsTiled, and split each 24-long tuple into 8 3-longs.
pointsDuplicated = numpy.reshape(pointsTiled, (pointsTiled.shape[0], 8, 3))
# Then, a lambda to piecewise add the elements of "block" to a respective set of 8 duplicate points in pointsDuplicated
blockerize = lambda x : x + block
# Apply it
pointsBlockerized = blockerize(pointsDuplicated)
    # pointsBlockerized is now a 2D array of 3-tuples. Convert back to a 1D array.
verts = numpy.reshape(pointsBlockerized, (pointsBlockerized.shape[0]*pointsBlockerized.shape[1], 3) )
#print("points shape:", points.shape)
#print("verts shape:", verts.shape)
#print("verts:", verts)
'''for pt in points:
print((block+pt))
verts=numpy.append(verts, (block+pt),axis=0)'''
printAfterCount = 100000
nextThreshold = 0
pointsDone = 0
#print(verts)
for v in verts:
bm.verts.new(v)
pointsDone += 1
if pointsDone > nextThreshold:
print(pointsDone, "vertices have been added so far.")
nextThreshold += printAfterCount
print("Calling to_mesh().")
bm.to_mesh(mesh)
print("Ensuring lookup table.")
bm.verts.ensure_lookup_table()
nextThreshold = 0
cubesDone = 0
for i in range(0,len(bm.verts),8):
bm.faces.new( [bm.verts[i+0], bm.verts[i+1],bm.verts[i+3], bm.verts[i+2]])
bm.faces.new( [bm.verts[i+4], bm.verts[i+5],bm.verts[i+1], bm.verts[i+0]])
bm.faces.new( [bm.verts[i+6], bm.verts[i+7],bm.verts[i+5], bm.verts[i+4]])
bm.faces.new( [bm.verts[i+2], bm.verts[i+3],bm.verts[i+7], bm.verts[i+6]])
bm.faces.new( [bm.verts[i+5], bm.verts[i+7],bm.verts[i+3], bm.verts[i+1]]) #top
bm.faces.new( [bm.verts[i+0], bm.verts[i+2],bm.verts[i+6], bm.verts[i+4]]) #bottom
cubesDone += 1
if cubesDone > nextThreshold:
print(cubesDone, "cubes have been made so far.")
nextThreshold += printAfterCount
if bpy.context.mode == 'EDIT_MESH':
bmesh.update_edit_mesh(obj.data)
else:
bm.to_mesh(obj.data)
obj.data.update()
    bm.free()  # free the bmesh now that the mesh data has been written
return obj
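# Hedged mini-demo (standalone, no bpy needed): the tile/reshape trick used
# above, expanding each point into the 8 corner offsets of a unit cube
# without a Python loop. The two sample points are assumptions.
def _demo_blockerize():
    block = numpy.array([[-1,-1,-1],[-1,-1,1],[-1,1,-1],[-1,1,1],
                         [1,-1,-1],[1,-1,1],[1,1,-1],[1,1,1]]).astype(float) * 0.5
    points = numpy.array([[0.0, 0.0, 0.0], [10.0, 0.0, 0.0]])
    tiled = numpy.tile(points, (1, 8)).reshape(len(points), 8, 3)
    verts = (tiled + block).reshape(-1, 3)  # 16 cube-corner vertices
    assert verts.shape == (16, 3)
    return verts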
# Given a 3D array of 0s and 1s, place a voxel in every cell that contains a 0
# (the demo in __main__ fills the array with 0s, so every cell gets a voxel)
def imagesToVoxelsInefficient(image3D):
for xValue in range(len(image3D)):
for yValue in range(len(image3D[xValue])):
for zValue in range(len(image3D[xValue][yValue])):
if(image3D[xValue][yValue][zValue]==0):
createVoxel((xValue,yValue,zValue))
# place a voxel at a given position, using mesh.primitive_cube_add is really slow so it might be worth making this faster
def createVoxel(position):
bpy.ops.mesh.primitive_cube_add(location=position,size=1)
# print(position)
if __name__ == "__main__":
# calculate the runtime of this script
startTime = time.time()
# createVoxel((1,2,3))
# Generate a 10*10*10 3D texture
testImageArray = []
for x in range(10):
yArray = []
for y in range(10):
zArray = []
for z in range(10):
zArray.append(0)
# zArray.append(randint(0,1))
yArray.append(zArray)
testImageArray.append(yArray)
# print(testImageArray)
# place voxels based on that 10*10*10 array
imagesToVoxelsInefficient(testImageArray)
# testImage = [[[0,0],[1,1]],[[1,1],[1,0]]]
stopTime = time.time()
print("Script took:",stopTime-startTime)
| 42.508197
| 140
| 0.636521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,072
| 0.399537
|
67b5f86ef31a000c3511435b9060d1043c35b90a
| 2,182
|
py
|
Python
|
storage/lustre_client_iops/lustre_client_iops.py
|
jssfy/toolpedia
|
084d592f7f1de373e6acae5856dfbb8b06b2f7a1
|
[
"Apache-2.0"
] | null | null | null |
storage/lustre_client_iops/lustre_client_iops.py
|
jssfy/toolpedia
|
084d592f7f1de373e6acae5856dfbb8b06b2f7a1
|
[
"Apache-2.0"
] | null | null | null |
storage/lustre_client_iops/lustre_client_iops.py
|
jssfy/toolpedia
|
084d592f7f1de373e6acae5856dfbb8b06b2f7a1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#-*-coding:utf-8-*-
import json
import sys
import time
# TBD: auto discovery
# data_path = "/proc/fs/lustre/llite/nvmefs-ffff883f8a4f2800/stats"
data_path = "/proc/fs/lustre/lmv/shnvme3-clilmv-ffff8859d3e2d000/md_stats"
# dic1/dic2 alternately hold the previous and current samples
def load_data(dic):
# Open file
fileHandler = open(data_path, "r")
# Get list of all lines in file
listOfLines = fileHandler.readlines()
# Close file
fileHandler.close()
# Iterate over the lines
for line in listOfLines:
words = line.split()
        if len(words) < 2:
            print("skipping malformed line")
continue
dic[words[0]] = float(words[1])
# print(dic)
# put "next - prev" into delta
def calc_delta(prev, next, delta):
for key in prev:
delta[key] = next[key] - prev[key]
# print a dictionary in the indented json format
def print_dict(dic):
print(json.dumps(dic, indent=2, sort_keys=True, ensure_ascii=False))
# calculate iops for each category except snapshot_time, all divided by snapshot_time
def calc_iops_from_delta(delta):
# in case of snapshot_time error, skip
if (delta['snapshot_time'] < 0.000001):
print("error: time gap too small")
return
for key in delta:
if ('snapshot_time' != key):
delta[key] = int(delta[key]/delta['snapshot_time'])
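# Hedged example (standalone, not called by the collector): feeding two fake
# samples through the helpers above. The counter names are made up.
def _demo_iops():
    prev_sample = {'open': 100.0, 'close': 90.0, 'snapshot_time': 10.0}
    next_sample = {'open': 160.0, 'close': 130.0, 'snapshot_time': 12.0}
    delta = {}
    calc_delta(prev_sample, next_sample, delta)  # open: 60, close: 40, time: 2.0
    calc_iops_from_delta(delta)                  # open -> 30 IOPS, close -> 20 IOPS
    print_dict(delta)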
if __name__ == '__main__':
# dic1/dic2 are used to load prev/next kernel data interchangably
# calc delta by doing: next - prev
# calc iops by doing: delta/time_consumption
dic1 = {}
dic2 = {}
delta = {}
load_data(dic1)
prev = 1
# load_data(dic2)
# calc_delta(dic1, dic2, delta)
# calc_iops_from_delta(delta)
# print_dict(delta)
# dic1['name'] = 'anhua'
# print_dict(dic1)
# enter loop
while True:
time.sleep(2) # TBD: configurable
if prev == 1:
load_data(dic2)
prev = 2
calc_delta(dic1, dic2, delta)
else:
load_data(dic1)
prev = 1
calc_delta(dic2, dic1, delta)
calc_iops_from_delta(delta)
print_dict(delta)
| 26.289157
| 85
| 0.61824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 944
| 0.432631
|
67b63f883548e6cabc6d6344eb2af1aa23104352
| 5,716
|
py
|
Python
|
test/lsh_test.py
|
titusz/datasketch
|
a483b39fe4e444c372792e5c91c86d9d8d27a4a5
|
[
"MIT"
] | 1
|
2022-03-21T05:36:15.000Z
|
2022-03-21T05:36:15.000Z
|
test/lsh_test.py
|
tomzhang/datasketch
|
a483b39fe4e444c372792e5c91c86d9d8d27a4a5
|
[
"MIT"
] | null | null | null |
test/lsh_test.py
|
tomzhang/datasketch
|
a483b39fe4e444c372792e5c91c86d9d8d27a4a5
|
[
"MIT"
] | 2
|
2018-11-12T18:00:52.000Z
|
2022-03-21T05:36:20.000Z
|
import unittest
from hashlib import sha1
import pickle
import numpy as np
from datasketch.lsh import MinHashLSH
from datasketch.minhash import MinHash
from datasketch.weighted_minhash import WeightedMinHashGenerator
class TestMinHashLSH(unittest.TestCase):
def test_init(self):
lsh = MinHashLSH(threshold=0.8)
self.assertTrue(lsh.is_empty())
b1, r1 = lsh.b, lsh.r
lsh = MinHashLSH(threshold=0.8, weights=(0.2,0.8))
b2, r2 = lsh.b, lsh.r
self.assertTrue(b1 < b2)
self.assertTrue(r1 > r2)
def test_insert(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
for t in lsh.hashtables:
self.assertTrue(len(t) >= 1)
items = []
for H in t:
items.extend(t[H])
self.assertTrue("a" in items)
self.assertTrue("b" in items)
self.assertTrue("a" in lsh)
self.assertTrue("b" in lsh)
for i, H in enumerate(lsh.keys["a"]):
self.assertTrue("a" in lsh.hashtables[i][H])
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.insert, "c", m3)
def test_query(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.query, m3)
def test_remove(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh.remove("a")
self.assertTrue("a" not in lsh.keys)
for table in lsh.hashtables:
for H in table:
self.assertGreater(len(table[H]), 0)
self.assertTrue("a" not in table[H])
self.assertRaises(ValueError, lsh.remove, "c")
def test_pickle(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh2 = pickle.loads(pickle.dumps(lsh))
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
class TestWeightedMinHashLSH(unittest.TestCase):
def test_init(self):
lsh = MinHashLSH(threshold=0.8)
self.assertTrue(lsh.is_empty())
b1, r1 = lsh.b, lsh.r
lsh = MinHashLSH(threshold=0.8, weights=(0.2,0.8))
b2, r2 = lsh.b, lsh.r
self.assertTrue(b1 < b2)
self.assertTrue(r1 > r2)
def test_insert(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
for t in lsh.hashtables:
self.assertTrue(len(t) >= 1)
items = []
for H in t:
items.extend(t[H])
self.assertTrue("a" in items)
self.assertTrue("b" in items)
self.assertTrue("a" in lsh)
self.assertTrue("b" in lsh)
for i, H in enumerate(lsh.keys["a"]):
self.assertTrue("a" in lsh.hashtables[i][H])
mg = WeightedMinHashGenerator(10, 5)
m3 = mg.minhash(np.random.uniform(1, 10, 10))
self.assertRaises(ValueError, lsh.insert, "c", m3)
def test_query(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
mg = WeightedMinHashGenerator(10, 5)
m3 = mg.minhash(np.random.uniform(1, 10, 10))
self.assertRaises(ValueError, lsh.query, m3)
def test_remove(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh.remove("a")
self.assertTrue("a" not in lsh.keys)
for table in lsh.hashtables:
for H in table:
self.assertGreater(len(table[H]), 0)
self.assertTrue("a" not in table[H])
self.assertRaises(ValueError, lsh.remove, "c")
def test_pickle(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh2 = pickle.loads(pickle.dumps(lsh))
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
if __name__ == "__main__":
unittest.main()
| 32.662857
| 64
| 0.561407
| 5,446
| 0.952764
| 0
| 0
| 0
| 0
| 0
| 0
| 220
| 0.038488
|
67b6738fcd0ebe0de56b7b545d7adc583f1c2d45
| 4,134
|
py
|
Python
|
src/datasets/tsn_dataset.py
|
tomstark99/epic-kitchens-100-fyrp
|
cbc9e59569fb6110b900a51def1947b8a3c93699
|
[
"Apache-2.0"
] | 2
|
2021-08-31T10:02:56.000Z
|
2021-11-24T12:44:19.000Z
|
src/datasets/tsn_dataset.py
|
tomstark99/epic-kitchens-100-fyrp
|
cbc9e59569fb6110b900a51def1947b8a3c93699
|
[
"Apache-2.0"
] | null | null | null |
src/datasets/tsn_dataset.py
|
tomstark99/epic-kitchens-100-fyrp
|
cbc9e59569fb6110b900a51def1947b8a3c93699
|
[
"Apache-2.0"
] | null | null | null |
import logging
from typing import Callable
from typing import List
import numpy as np
import torch.utils.data
from .video_dataset import VideoDataset
from .video_dataset import VideoRecord
LOG = logging.getLogger(__name__)
# line_profiler injects a "profile" into __builtins__. When not running under
# line_profiler we need to inject our own passthrough
if type(__builtins__) is not dict or "profile" not in __builtins__:
profile = lambda f: f
class TsnDataset(torch.utils.data.Dataset):
"""
Wraps a :class:`VideoDataset` to implement TSN sampling
"""
def __init__(
self,
dataset: VideoDataset,
num_segments: int = 3,
segment_length: int = 1,
transform: Callable = None,
random_shift: bool = True,
test_mode: bool = False,
):
"""
Args:
dataset: Video dataset to load TSN-sampled segments from.
num_segments: Number of segments per clip.
segment_length: Length of segment in number of frames.
            transform: A transform applied to the list of frames sampled from
                the clip.
            random_shift: Whether to randomly offset each segment's start index
                (temporal jittering during training).
            test_mode: Whether to return center-sampled frames from each segment.
"""
self.dataset = dataset
self.num_segments = num_segments
self.segment_length = segment_length
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
def __getitem__(self, index):
record = self.dataset.video_records[index]
if self.test_mode:
segment_start_idxs = self._get_test_indices(record)
else:
segment_start_idxs = (
self._sample_indices(record)
if self.random_shift
else self._get_val_indices(record)
)
return self._get(record, segment_start_idxs)
def __len__(self):
return len(self.dataset)
@profile
def _get(self, record: VideoRecord, segment_start_idxs: List[int]):
images = self.dataset.load_frames(
record, self._get_frame_idxs(segment_start_idxs, record)
)
if self.transform is not None:
images = self.transform(images)
metadata = record.metadata
return images, metadata
def _sample_indices(self, record: VideoRecord):
average_duration = (
record.num_frames - self.segment_length + 1
) // self.num_segments
if average_duration > 0:
offsets = np.multiply(
list(range(self.num_segments)), average_duration
) + np.random.randint(average_duration, size=self.num_segments)
elif record.num_frames > self.num_segments:
offsets = np.sort(
np.random.randint(
record.num_frames - self.segment_length + 1, size=self.num_segments
)
)
else:
offsets = np.zeros((self.num_segments,))
return offsets
def _get_val_indices(self, record: VideoRecord):
if record.num_frames > self.num_segments + self.segment_length - 1:
tick = (record.num_frames - self.segment_length + 1) / float(
self.num_segments
)
offsets = np.array(
[int(tick / 2.0 + tick * x) for x in range(self.num_segments)]
)
else:
offsets = np.zeros((self.num_segments,))
return offsets
def _get_test_indices(self, record: VideoRecord):
tick = (record.num_frames - self.segment_length + 1) / float(self.num_segments)
offsets = np.array(
[int(tick / 2.0 + tick * x) for x in range(self.num_segments)]
)
return offsets
def _get_frame_idxs(
self, segment_start_idxs: List[int], record: VideoRecord
) -> List[int]:
seg_idxs = []
for seg_ind in segment_start_idxs:
p = int(seg_ind)
for i in range(self.segment_length):
seg_idxs.append(p)
if p < record.num_frames:
p += 1
return seg_idxs
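# Hedged worked example (standalone): the test-time index arithmetic above for
# a toy clip. The numbers (30 frames, 3 segments, length-1 segments) are
# assumptions chosen to make the arithmetic visible.
def _demo_tsn_test_indices(num_frames=30, num_segments=3, segment_length=1):
    tick = (num_frames - segment_length + 1) / float(num_segments)  # 10.0
    return np.array(
        [int(tick / 2.0 + tick * x) for x in range(num_segments)]
    )  # array([ 5, 15, 25]): the centre frame of each segment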
| 33.33871
| 87
| 0.60716
| 3,677
| 0.889453
| 0
| 0
| 352
| 0.085148
| 0
| 0
| 617
| 0.14925
|
67b70692a042775258dace6d02203639346f7fe2
| 5,947
|
py
|
Python
|
ce_cli/function.py
|
maiot-io/cengine
|
3a1946c449e8c5e1d216215df6eeab941eb1640a
|
[
"Apache-2.0"
] | 7
|
2020-10-13T12:47:32.000Z
|
2021-03-12T12:00:14.000Z
|
ce_cli/function.py
|
maiot-io/cengine
|
3a1946c449e8c5e1d216215df6eeab941eb1640a
|
[
"Apache-2.0"
] | null | null | null |
ce_cli/function.py
|
maiot-io/cengine
|
3a1946c449e8c5e1d216215df6eeab941eb1640a
|
[
"Apache-2.0"
] | 1
|
2021-01-23T02:19:42.000Z
|
2021-01-23T02:19:42.000Z
|
import click
import ce_api
import base64
import os
from ce_cli.cli import cli, pass_info
from ce_cli.utils import check_login_status
from ce_cli.utils import api_client, api_call
from ce_api.models import FunctionCreate, FunctionVersionCreate
from ce_cli.utils import declare, notice
from tabulate import tabulate
from ce_cli.utils import format_uuid, find_closest_uuid
@cli.group()
@pass_info
def function(info):
"""Integrate your own custom logic to the Core Engine"""
check_login_status(info)
@function.command('create')
@click.argument('name', type=str)
@click.argument('local_path', type=click.Path(exists=True))
@click.argument('func_type', type=str)
@click.argument('udf_name', type=str)
@click.option('--message', type=str, help='Description of the function',
default='')
@pass_info
def create_function(info, local_path, name, func_type, udf_name, message):
"""Register a custom function to use with the Core Engine"""
click.echo('Registering the function {}.'.format(udf_name))
with open(local_path, 'rb') as file:
data = file.read()
encoded_file = base64.b64encode(data).decode()
api = ce_api.FunctionsApi(api_client(info))
api_call(api.create_function_api_v1_functions_post,
FunctionCreate(name=name,
function_type=func_type,
udf_path=udf_name,
message=message,
file_contents=encoded_file))
declare('Function registered.')
@function.command('update')
@click.argument('function_id', type=str)
@click.argument('local_path', type=click.Path(exists=True))
@click.argument('udf_name', type=str)
@click.option('--message', type=str, help='Description of the function',
default='')
@pass_info
def update_function(info, function_id, local_path, udf_name, message):
"""Add a new version to a function and update it"""
click.echo('Updating the function {}.'.format(
format_uuid(function_id)))
api = ce_api.FunctionsApi(api_client(info))
f_list = api_call(api.get_functions_api_v1_functions_get)
f_uuid = find_closest_uuid(function_id, f_list)
with open(local_path, 'rb') as file:
data = file.read()
encoded_file = base64.b64encode(data).decode()
api_call(
api.create_function_version_api_v1_functions_function_id_versions_post,
FunctionVersionCreate(udf_path=udf_name,
message=message,
file_contents=encoded_file),
f_uuid)
declare('Function updated!')
@function.command('list')
@pass_info
def list_functions(info):
"""List the given custom functions"""
api = ce_api.FunctionsApi(api_client(info))
f_list = api_call(api.get_functions_api_v1_functions_get)
declare('You have declared {count} different '
'function(s) so far. \n'.format(count=len(f_list)))
if f_list:
table = []
for f in f_list:
table.append({'ID': format_uuid(f.id),
'Name': f.name,
'Type': f.function_type,
'Created At': f.created_at})
click.echo(tabulate(table, headers='keys', tablefmt='presto'))
click.echo()
@function.command('versions')
@click.argument('function_id', type=str)
@pass_info
def list_versions(info, function_id):
"""List of versions for a selected custom function"""
api = ce_api.FunctionsApi(api_client(info))
f_list = api_call(api.get_functions_api_v1_functions_get)
f_uuid = find_closest_uuid(function_id, f_list)
v_list = api_call(
api.get_function_versions_api_v1_functions_function_id_versions_get,
f_uuid)
declare('Function with {id} has {count} '
'versions.\n'.format(id=format_uuid(function_id),
count=len(v_list)))
if v_list:
table = []
for v in v_list:
table.append({'ID': format_uuid(v.id),
'Created At': v.created_at,
'Description': v.message})
click.echo(tabulate(table, headers='keys', tablefmt='presto'))
click.echo()
@function.command('pull')
@click.argument('function_id', type=str)
@click.argument('version_id', type=str)
@click.option('--output_path', default=None, type=click.Path(),
help='Path to save the custom function')
@pass_info
def pull_function_version(info, function_id, version_id, output_path):
"""Download a version of a given custom function"""
api = ce_api.FunctionsApi(api_client(info))
# Infer the function uuid and name
f_list = api_call(api.get_functions_api_v1_functions_get)
f_uuid = find_closest_uuid(function_id, f_list)
f_name = [f.name for f in f_list if f.id == f_uuid][0]
# Infer the version uuid
v_list = api_call(
api.get_function_versions_api_v1_functions_function_id_versions_get,
f_uuid)
v_uuid = find_closest_uuid(version_id, v_list)
notice('Downloading the function with the following parameters: \n'
'Name: {f_name}\n'
'function_id: {f_id}\n'
'version_id: {v_id}\n'.format(f_name=f_name,
f_id=format_uuid(f_uuid),
v_id=format_uuid(v_uuid)))
# Get the file and write it to the output path
encoded_file = api_call(
api.get_function_version_api_v1_functions_function_id_versions_version_id_get,
f_uuid,
v_uuid)
# Derive the output path and download
if output_path is None:
output_path = os.path.join(os.getcwd(), '{}@{}.py'.format(f_name,
v_uuid))
with open(output_path, 'wb') as f:
f.write(base64.b64decode(encoded_file.file_contents))
declare('File downloaded to {}'.format(output_path))
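# Hedged aside (not a CLI command): the base64 round trip that 'create' and
# 'pull' perform on function source files, shown in isolation.
def _demo_b64_roundtrip(payload=b'def udf(x):\n    return x\n'):
    encoded = base64.b64encode(payload).decode()  # what gets POSTed
    decoded = base64.b64decode(encoded)           # what 'pull' writes back out
    assert decoded == payload
    return encoded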
| 35.189349
| 86
| 0.648562
| 0
| 0
| 0
| 0
| 5,559
| 0.934757
| 0
| 0
| 1,191
| 0.200269
|
67ba0ceb8217748f29955b3f1f48be862f98b8da
| 1,747
|
py
|
Python
|
office-plugin/windows-office/program/wizards/ui/event/RadioDataAware.py
|
jerrykcode/kkFileView
|
6efc3750665c9c4034798fb9fb3e74cd8144165c
|
[
"Apache-2.0"
] | 6,660
|
2018-01-13T12:16:53.000Z
|
2022-03-31T15:15:28.000Z
|
office-plugin/windows-office/program/wizards/ui/event/RadioDataAware.py
|
jerrykcode/kkFileView
|
6efc3750665c9c4034798fb9fb3e74cd8144165c
|
[
"Apache-2.0"
] | 208
|
2018-01-26T08:55:12.000Z
|
2022-03-29T02:36:34.000Z
|
office-plugin/windows-office/program/wizards/ui/event/RadioDataAware.py
|
jerrykcode/kkFileView
|
6efc3750665c9c4034798fb9fb3e74cd8144165c
|
[
"Apache-2.0"
] | 1,933
|
2018-01-15T13:08:40.000Z
|
2022-03-31T11:28:59.000Z
|
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
from .CommonListener import ItemListenerProcAdapter
from .DataAware import DataAware
class RadioDataAware(DataAware):
def __init__(self, data, value, radioButtons):
super(RadioDataAware,self).__init__(data, value)
self.radioButtons = radioButtons
def setToUI(self, value):
selected = int(value)
if selected == -1:
for i in self.radioButtons:
i.State = False
else:
self.radioButtons[selected].State = True
def getFromUI(self):
for index, workwith in enumerate(self.radioButtons):
if workwith.State:
return index
return -1
@classmethod
    def attachRadioButtons(cls, data, prop, buttons, field):
da = RadioDataAware(data, prop, buttons)
method = getattr(da,"updateData")
for i in da.radioButtons:
i.addItemListener(ItemListenerProcAdapter(method))
return da
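# Hedged sketch (plain Python, no UNO objects): the index <-> State mapping
# implemented above, exercised with stand-in buttons that only carry a .State
# flag. The stand-in class is an assumption made for illustration.
class _FakeButton:
    State = False

def _demo_radio_mapping():
    buttons = [_FakeButton(), _FakeButton(), _FakeButton()]
    buttons[1].State = True  # what setToUI(1) would do
    # what getFromUI() recovers:
    for index, workwith in enumerate(buttons):
        if workwith.State:
            return index
    return -1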
| 35.653061
| 70
| 0.68403
| 861
| 0.492845
| 0
| 0
| 280
| 0.160275
| 0
| 0
| 794
| 0.454493
|
67ba1058171fe27c8c016baa860730f05f7fd4ed
| 5,416
|
py
|
Python
|
Allura/allura/lib/patches.py
|
shalithasuranga/allura
|
4f7fba13415954d07f602a051ec697329dd3706b
|
[
"Apache-2.0"
] | 1
|
2019-03-17T04:16:15.000Z
|
2019-03-17T04:16:15.000Z
|
Allura/allura/lib/patches.py
|
DalavanCloud/allura
|
a25329caed9e6d136a1004c33372e0632a16e352
|
[
"Apache-2.0"
] | null | null | null |
Allura/allura/lib/patches.py
|
DalavanCloud/allura
|
a25329caed9e6d136a1004c33372e0632a16e352
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import webob
import tg.decorators
from decorator import decorator
from pylons import request
import mock
import simplejson
from allura.lib import helpers as h
_patched = False
def apply():
global _patched
if _patched:
return
_patched = True
old_lookup_template_engine = tg.decorators.Decoration.lookup_template_engine
@h.monkeypatch(tg.decorators.Decoration)
def lookup_template_engine(self, request):
'''Wrapper to handle totally borked-up HTTP-ACCEPT headers'''
try:
return old_lookup_template_engine(self, request)
        except Exception:
pass
environ = dict(request.environ, HTTP_ACCEPT='*/*')
request = webob.Request(environ)
return old_lookup_template_engine(self, request)
@h.monkeypatch(tg, tg.decorators)
def override_template(controller, template):
'''Copy-pasted patch to allow multiple colons in a template spec'''
if hasattr(controller, 'decoration'):
decoration = controller.decoration
else:
return
if hasattr(decoration, 'engines'):
engines = decoration.engines
else:
return
for content_type, content_engine in engines.iteritems():
template = template.split(':', 1)
template.extend(content_engine[2:])
try:
override_mapping = request._override_mapping
except AttributeError:
override_mapping = request._override_mapping = {}
override_mapping[controller.im_func] = {content_type: template}
@h.monkeypatch(tg, tg.decorators)
@decorator
def without_trailing_slash(func, *args, **kwargs):
'''Monkey-patched to use 301 redirects for SEO, and handle query strings'''
response_type = getattr(request, 'response_type', None)
if (request.method == 'GET' and request.path.endswith('/') and not response_type):
location = request.path_url[:-1]
if request.query_string:
location += '?' + request.query_string
raise webob.exc.HTTPMovedPermanently(location=location)
return func(*args, **kwargs)
@h.monkeypatch(tg, tg.decorators)
@decorator
def with_trailing_slash(func, *args, **kwargs):
'''Monkey-patched to use 301 redirects for SEO, and handle query strings'''
response_type = getattr(request, 'response_type', None)
if (request.method == 'GET' and not request.path.endswith('/') and not response_type):
location = request.path_url + '/'
if request.query_string:
location += '?' + request.query_string
raise webob.exc.HTTPMovedPermanently(location=location)
return func(*args, **kwargs)
# http://blog.watchfire.com/wfblog/2011/10/json-based-xss-exploitation.html
# change < to its unicode escape when rendering JSON out of turbogears
# This is to avoid IE9 and earlier, which don't know the json content type
# and may attempt to render JSON data as HTML if the URL ends in .html
original_tg_jsonify_GenericJSON_encode = tg.jsonify.GenericJSON.encode
escape_pattern_with_lt = re.compile(
simplejson.encoder.ESCAPE.pattern.rstrip(']') + '<' + ']')
@h.monkeypatch(tg.jsonify.GenericJSON)
def encode(self, o):
# ensure_ascii=False forces encode_basestring() to be called instead of
# encode_basestring_ascii() and encode_basestring_ascii may likely be c-compiled
# and thus not monkeypatchable
with h.push_config(self, ensure_ascii=False), \
h.push_config(simplejson.encoder, ESCAPE=escape_pattern_with_lt), \
mock.patch.dict(simplejson.encoder.ESCAPE_DCT, {'<': r'\u003C'}):
return original_tg_jsonify_GenericJSON_encode(self, o)
# must be saved outside the newrelic() method so that multiple newrelic()
# calls (e.g. during tests) don't cause the patching to get applied to itself
# over and over
old_controller_call = tg.controllers.DecoratedController._call
def newrelic():
@h.monkeypatch(tg.controllers.DecoratedController,
tg.controllers.decoratedcontroller.DecoratedController)
def _call(self, controller, *args, **kwargs):
'''Set NewRelic transaction name to actual controller name'''
import newrelic.agent
newrelic.agent.set_transaction_name(
newrelic.agent.callable_name(controller))
return old_controller_call(self, controller, *args, **kwargs)
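# Hedged illustration (standalone, stdlib only): the '<' -> \u003C escaping the
# jsonify patch above achieves, reproduced with a plain post-encode
# substitution. The payload is an assumption for the sketch.
def _demo_escape_lt():
    import json
    raw = json.dumps({'html': '<script>'})
    return raw.replace('<', '\\u003C')  # -> '{"html": "\\u003Cscript>"}'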
| 42.3125
| 94
| 0.675406
| 0
| 0
| 0
| 0
| 3,411
| 0.629801
| 0
| 0
| 1,932
| 0.356721
|