# Generated by Django 3.0.5 on 2020-04-30 08:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('login', '0003_login_new'),
]
operations = [
migrations.RemoveField(
model_name='login_new',
name='image_url',
),
]
|
# -*- coding: utf-8 -*-
from .base import App
# ...
__all__ = [
'App'
]
|
class A73:
pass
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import pyroapi
import pytest
import torch
from pyro.infer.autoguide import AutoNormal
from tests.common import assert_equal
# put all funsor-related imports here, so test collection works without funsor
try:
import funsor
import pyro.contrib.funsor
funsor.set_backend("torch")
from pyroapi import distributions as dist
from pyroapi import handlers, infer, pyro
except ImportError:
pytestmark = pytest.mark.skip(reason="funsor is not installed")
logger = logging.getLogger(__name__)
_PYRO_BACKEND = os.environ.get("TEST_ENUM_PYRO_BACKEND", "contrib.funsor")
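# Note: infer_discrete(temperature=1) samples discrete sites from their exact
# conditional posterior, while temperature=0 returns a MAP (argmax) assignment;
# the tests below exercise both behaviors.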
@pytest.mark.parametrize('length', [1, 2, 10, 100])
@pytest.mark.parametrize('temperature', [0, 1])
@pyroapi.pyro_backend(_PYRO_BACKEND)
def test_hmm_smoke(length, temperature):
# This should match the example in the infer_discrete docstring.
def hmm(data, hidden_dim=10):
transition = 0.3 / hidden_dim + 0.7 * torch.eye(hidden_dim)
means = torch.arange(float(hidden_dim))
states = [0]
for t in pyro.markov(range(len(data))):
states.append(pyro.sample("states_{}".format(t),
dist.Categorical(transition[states[-1]])))
data[t] = pyro.sample("obs_{}".format(t),
dist.Normal(means[states[-1]], 1.),
obs=data[t])
return states, data
true_states, data = hmm([None] * length)
assert len(data) == length
assert len(true_states) == 1 + len(data)
decoder = infer.infer_discrete(infer.config_enumerate(hmm), temperature=temperature)
inferred_states, _ = decoder(data)
assert len(inferred_states) == len(true_states)
logger.info("true states: {}".format(list(map(int, true_states))))
logger.info("inferred states: {}".format(list(map(int, inferred_states))))
@pyroapi.pyro_backend(_PYRO_BACKEND)
@pytest.mark.parametrize("temperature", [0, 1])
def test_distribution_1(temperature):
# +-------+
# z --|--> x |
# +-------+
num_particles = 10000
data = torch.tensor([1., 2., 3.])
@infer.config_enumerate
def model(z=None):
p = pyro.param("p", torch.tensor([0.75, 0.25]))
iz = pyro.sample("z", dist.Categorical(p), obs=z)
z = torch.tensor([0., 1.])[iz]
logger.info("z.shape = {}".format(z.shape))
with pyro.plate("data", 3):
pyro.sample("x", dist.Normal(z, 1.), obs=data)
first_available_dim = -3
vectorized_model = model if temperature == 0 else \
pyro.plate("particles", size=num_particles, dim=-2)(model)
sampled_model = infer.infer_discrete(
vectorized_model,
first_available_dim,
temperature
)
sampled_trace = handlers.trace(sampled_model).get_trace()
conditioned_traces = {z: handlers.trace(model).get_trace(z=torch.tensor(z).long()) for z in [0., 1.]}
# Check posterior over z.
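# With temperature=1 the sampled z values come from the exact posterior, so
# their mean over particles should approach
# P(z=1 | data) = 1 / (1 + exp(log p(z=0, data) - log p(z=1, data))).
# With temperature=0 infer_discrete returns the MAP value, so the mean is 0 or 1.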
actual_z_mean = sampled_trace.nodes["z"]["value"].float().mean()
if temperature:
expected_z_mean = 1 / (1 + (conditioned_traces[0].log_prob_sum() -
conditioned_traces[1].log_prob_sum()).exp())
else:
expected_z_mean = (conditioned_traces[1].log_prob_sum() >
conditioned_traces[0].log_prob_sum()).float()
expected_max = max(t.log_prob_sum() for t in conditioned_traces.values())
actual_max = sampled_trace.log_prob_sum()
assert_equal(expected_max, actual_max, prec=1e-5)
assert_equal(actual_z_mean, expected_z_mean, prec=1e-2 if temperature else 1e-5)
@pyroapi.pyro_backend(_PYRO_BACKEND)
@pytest.mark.parametrize("temperature", [0, 1])
def test_distribution_2(temperature):
# +--------+
# z1 --|--> x1 |
# | | |
# V | |
# z2 --|--> x2 |
# +--------+
num_particles = 10000
data = torch.tensor([[-1., -1., 0.], [-1., 1., 1.]])
@infer.config_enumerate
def model(z1=None, z2=None):
p = pyro.param("p", torch.tensor([[0.25, 0.75], [0.1, 0.9]]))
loc = pyro.param("loc", torch.tensor([-1., 1.]))
z1 = pyro.sample("z1", dist.Categorical(p[0]), obs=z1)
z2 = pyro.sample("z2", dist.Categorical(p[z1]), obs=z2)
logger.info("z1.shape = {}".format(z1.shape))
logger.info("z2.shape = {}".format(z2.shape))
with pyro.plate("data", 3):
pyro.sample("x1", dist.Normal(loc[z1], 1.), obs=data[0])
pyro.sample("x2", dist.Normal(loc[z2], 1.), obs=data[1])
first_available_dim = -3
vectorized_model = model if temperature == 0 else \
pyro.plate("particles", size=num_particles, dim=-2)(model)
sampled_model = infer.infer_discrete(
vectorized_model,
first_available_dim,
temperature
)
sampled_trace = handlers.trace(sampled_model).get_trace()
conditioned_traces = {(z1, z2): handlers.trace(model).get_trace(z1=torch.tensor(z1),
z2=torch.tensor(z2))
for z1 in [0, 1] for z2 in [0, 1]}
# Check joint posterior over (z1, z2).
actual_probs = torch.empty(2, 2)
expected_probs = torch.empty(2, 2)
for (z1, z2), tr in conditioned_traces.items():
expected_probs[z1, z2] = tr.log_prob_sum().exp()
actual_probs[z1, z2] = ((sampled_trace.nodes["z1"]["value"] == z1) &
(sampled_trace.nodes["z2"]["value"] == z2)).float().mean()
if temperature:
expected_probs = expected_probs / expected_probs.sum()
else:
expected_max, argmax = expected_probs.reshape(-1).max(0)
actual_max = sampled_trace.log_prob_sum()
assert_equal(expected_max.log(), actual_max, prec=1e-5)
expected_probs[:] = 0
expected_probs.reshape(-1)[argmax] = 1
assert_equal(expected_probs, actual_probs, prec=1e-2 if temperature else 1e-5)
@pyroapi.pyro_backend(_PYRO_BACKEND)
@pytest.mark.parametrize("temperature", [0, 1])
def test_distribution_3_simple(temperature):
# +---------------+
# | z2 ---> x2 |
# | 2 |
# +---------------+
num_particles = 10000
data = torch.tensor([-1., 1.])
@infer.config_enumerate
def model(z2=None):
p = pyro.param("p", torch.tensor([0.25, 0.75]))
loc = pyro.param("loc", torch.tensor([-1., 1.]))
with pyro.plate("data", 2):
z2 = pyro.sample("z2", dist.Categorical(p), obs=z2)
pyro.sample("x2", dist.Normal(loc[z2], 1.), obs=data)
first_available_dim = -3
vectorized_model = model if temperature == 0 else \
pyro.plate("particles", size=num_particles, dim=-2)(model)
sampled_model = infer.infer_discrete(
vectorized_model,
first_available_dim,
temperature
)
sampled_trace = handlers.trace(sampled_model).get_trace()
conditioned_traces = {(z20, z21): handlers.trace(model).get_trace(z2=torch.tensor([z20, z21]))
for z20 in [0, 1] for z21 in [0, 1]}
# Check joint posterior over (z2[0], z2[1]).
actual_probs = torch.empty(2, 2)
expected_probs = torch.empty(2, 2)
for (z20, z21), tr in conditioned_traces.items():
expected_probs[z20, z21] = tr.log_prob_sum().exp()
actual_probs[z20, z21] = ((sampled_trace.nodes["z2"]["value"][..., :1] == z20) &
(sampled_trace.nodes["z2"]["value"][..., 1:] == z21)).float().mean()
if temperature:
expected_probs = expected_probs / expected_probs.sum()
else:
expected_max, argmax = expected_probs.reshape(-1).max(0)
actual_max = sampled_trace.log_prob_sum()
assert_equal(expected_max.log(), actual_max, prec=1e-5)
expected_probs[:] = 0
expected_probs.reshape(-1)[argmax] = 1
assert_equal(expected_probs.reshape(-1), actual_probs.reshape(-1), prec=1e-2)
@pyroapi.pyro_backend(_PYRO_BACKEND)
@pytest.mark.parametrize("temperature", [0, 1])
def test_distribution_3(temperature):
# +---------+ +---------------+
# z1 --|--> x1 | | z2 ---> x2 |
# | 3 | | 2 |
# +---------+ +---------------+
num_particles = 10000
data = [torch.tensor([-1., -1., 0.]), torch.tensor([-1., 1.])]
@infer.config_enumerate
def model(z1=None, z2=None):
p = pyro.param("p", torch.tensor([0.25, 0.75]))
loc = pyro.param("loc", torch.tensor([-1., 1.]))
z1 = pyro.sample("z1", dist.Categorical(p), obs=z1)
with pyro.plate("data[0]", 3):
pyro.sample("x1", dist.Normal(loc[z1], 1.), obs=data[0])
with pyro.plate("data[1]", 2):
z2 = pyro.sample("z2", dist.Categorical(p), obs=z2)
pyro.sample("x2", dist.Normal(loc[z2], 1.), obs=data[1])
first_available_dim = -3
vectorized_model = model if temperature == 0 else \
pyro.plate("particles", size=num_particles, dim=-2)(model)
sampled_model = infer.infer_discrete(
vectorized_model,
first_available_dim,
temperature
)
sampled_trace = handlers.trace(sampled_model).get_trace()
conditioned_traces = {(z1, z20, z21): handlers.trace(model).get_trace(z1=torch.tensor(z1),
z2=torch.tensor([z20, z21]))
for z1 in [0, 1] for z20 in [0, 1] for z21 in [0, 1]}
# Check joint posterior over (z1, z2[0], z2[1]).
actual_probs = torch.empty(2, 2, 2)
expected_probs = torch.empty(2, 2, 2)
for (z1, z20, z21), tr in conditioned_traces.items():
expected_probs[z1, z20, z21] = tr.log_prob_sum().exp()
actual_probs[z1, z20, z21] = ((sampled_trace.nodes["z1"]["value"] == z1) &
(sampled_trace.nodes["z2"]["value"][..., :1] == z20) &
(sampled_trace.nodes["z2"]["value"][..., 1:] == z21)).float().mean()
if temperature:
expected_probs = expected_probs / expected_probs.sum()
else:
expected_max, argmax = expected_probs.reshape(-1).max(0)
actual_max = sampled_trace.log_prob_sum().exp()
assert_equal(expected_max, actual_max, prec=1e-5)
expected_probs[:] = 0
expected_probs.reshape(-1)[argmax] = 1
assert_equal(expected_probs.reshape(-1), actual_probs.reshape(-1), prec=1e-2)
def model_zzxx():
# loc,scale
# / \
# +-------/-+ +--------\------+
# z1 --|--> x1 | | z2 ---> x2 |
# | 3 | | 2 |
# +---------+ +---------------+
data = [torch.tensor([-1., -1., 0.]), torch.tensor([-1., 1.])]
p = pyro.param("p", torch.tensor([0.25, 0.75]))
loc = pyro.sample("loc", dist.Normal(0, 1).expand([2]).to_event(1))
# FIXME results in infinite loop in transformeddist_to_funsor.
# scale = pyro.sample("scale", dist.LogNormal(0, 1))
scale = pyro.sample("scale", dist.Normal(0, 1)).exp()
z1 = pyro.sample("z1", dist.Categorical(p))
with pyro.plate("data[0]", 3):
pyro.sample("x1", dist.Normal(loc[z1], scale), obs=data[0])
with pyro.plate("data[1]", 2):
z2 = pyro.sample("z2", dist.Categorical(p))
pyro.sample("x2", dist.Normal(loc[z2], scale), obs=data[1])
def model2():
data = [torch.tensor([-1., -1., 0.]), torch.tensor([-1., 1.])]
p = pyro.param("p", torch.tensor([0.25, 0.75]))
loc = pyro.sample("loc", dist.Normal(0, 1).expand([2]).to_event(1))
# FIXME results in infinite loop in transformeddist_to_funsor.
# scale = pyro.sample("scale", dist.LogNormal(0, 1))
z1 = pyro.sample("z1", dist.Categorical(p))
scale = pyro.sample("scale", dist.Normal(torch.tensor([0., 1.])[z1], 1)).exp()
with pyro.plate("data[0]", 3):
pyro.sample("x1", dist.Normal(loc[z1], scale), obs=data[0])
with pyro.plate("data[1]", 2):
z2 = pyro.sample("z2", dist.Categorical(p))
pyro.sample("x2", dist.Normal(loc[z2], scale), obs=data[1])
@pyroapi.pyro_backend(_PYRO_BACKEND)
@pytest.mark.parametrize("model", [model_zzxx, model2])
@pytest.mark.parametrize("temperature", [0, 1])
def test_svi_model_side_enumeration(model, temperature):
# Perform fake inference.
# This has the wrong distribution but the right type for tests.
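# The guide only models the continuous sites ("loc", "scale"):
# handlers.block(..., expose=["loc", "scale"]) hides the discrete sites,
# so AutoNormal never sees them.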
guide = AutoNormal(handlers.enum(handlers.block(infer.config_enumerate(model), expose=["loc", "scale"])))
guide() # Initialize but don't bother to train.
guide_trace = handlers.trace(guide).get_trace()
guide_data = {
name: site["value"]
for name, site in guide_trace.nodes.items() if site["type"] == "sample"
}
# MAP estimate discretes, conditioned on posterior sampled continuous latents.
actual_trace = handlers.trace(
infer.infer_discrete(
# TODO support replayed sites in infer_discrete.
# handlers.replay(infer.config_enumerate(model), guide_trace)
handlers.condition(infer.config_enumerate(model), guide_data),
temperature=temperature
)
).get_trace()
# Check site names and shapes.
expected_trace = handlers.trace(model).get_trace()
assert set(actual_trace.nodes) == set(expected_trace.nodes)
assert "z1" not in actual_trace.nodes["scale"]["funsor"]["value"].inputs
@pyroapi.pyro_backend(_PYRO_BACKEND)
@pytest.mark.parametrize("model", [model_zzxx, model2])
@pytest.mark.parametrize("temperature", [0, 1])
def test_mcmc_model_side_enumeration(model, temperature):
# Perform fake inference.
# Draw from prior rather than trying to sample from mcmc posterior.
# This has the wrong distribution but the right type for tests.
mcmc_trace = handlers.trace(
handlers.block(
handlers.enum(infer.config_enumerate(model)),
expose=["loc", "scale"]
)
).get_trace()
mcmc_data = {
name: site["value"]
for name, site in mcmc_trace.nodes.items() if site["type"] == "sample"
}
# MAP estimate discretes, conditioned on posterior sampled continuous latents.
actual_trace = handlers.trace(
infer.infer_discrete(
# TODO support replayed sites in infer_discrete.
# handlers.replay(infer.config_enumerate(model), mcmc_trace),
handlers.condition(infer.config_enumerate(model), mcmc_data),
temperature=temperature
),
).get_trace()
# Check site names and shapes.
expected_trace = handlers.trace(model).get_trace()
assert set(actual_trace.nodes) == set(expected_trace.nodes)
assert "z1" not in actual_trace.nodes["scale"]["funsor"]["value"].inputs
@pytest.mark.parametrize('temperature', [0, 1])
@pyroapi.pyro_backend(_PYRO_BACKEND)
def test_distribution_masked(temperature):
# +-------+
# z --|--> x |
# +-------+
num_particles = 10000
data = torch.tensor([1., 2., 3.])
mask = torch.tensor([True, False, False])
@infer.config_enumerate
def model(z=None):
p = pyro.param("p", torch.tensor([0.75, 0.25]))
z = pyro.sample("z", dist.Categorical(p), obs=z)
logger.info("z.shape = {}".format(z.shape))
with pyro.plate("data", 3), handlers.mask(mask=mask):
pyro.sample("x", dist.Normal(z.type_as(data), 1.), obs=data)
first_available_dim = -3
vectorized_model = model if temperature == 0 else \
pyro.plate("particles", size=num_particles, dim=-2)(model)
sampled_model = infer.infer_discrete(
vectorized_model,
first_available_dim,
temperature
)
sampled_trace = handlers.trace(sampled_model).get_trace()
conditioned_traces = {z: handlers.trace(model).get_trace(z=torch.tensor(z)) for z in [0., 1.]}
# Check posterior over z.
actual_z_mean = sampled_trace.nodes["z"]["value"].type_as(data).mean()
if temperature:
expected_z_mean = 1 / (1 + (conditioned_traces[0].log_prob_sum() -
conditioned_traces[1].log_prob_sum()).exp())
else:
expected_z_mean = (conditioned_traces[1].log_prob_sum() >
conditioned_traces[0].log_prob_sum()).float()
assert_equal(actual_z_mean, expected_z_mean, prec=1e-2)
|
#Cristina Borges 08-Mar-18
arquivo = open("Data/iris.csv", "r")
for linha in arquivo:
coluna = linha.split(",")
print(coluna[0], " ", " ", coluna[1], " ", coluna[2]," ", coluna[3])
arquivo.close()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Plot influence of V_s30 predicted by CY14 model."""
import matplotlib.pyplot as plt
import pygmm
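# Scenario parameters (rough meanings): mag is moment magnitude; dist_jb,
# dist_rup and dist_x are the Joyner-Boore, rupture and site-coordinate
# distances in km; dip is the fault dip in degrees; v_s30 is the time-averaged
# shear-wave velocity over the top 30 m of the site (m/s), the quantity varied here.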
fig, ax = plt.subplots()
for v_s30 in [300, 600, 900]:
s = pygmm.model.Scenario(
mag=7, dist_jb=20, dist_x=20, dist_rup=25, dip=90, v_s30=v_s30)
m = pygmm.ChiouYoungs2014(s)
ax.plot(m.periods, m.spec_accels, label=str(v_s30))
ax.set_xlabel('Period (s)')
ax.set_xscale('log')
ax.set_ylabel('5%-Damped Spectral Accel. (g)')
ax.set_yscale('log')
ax.grid()
ax.legend(title='$V_{s30}$ (m/s)')
plt.show()
|
import os
ON_DEV = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')
engineauth = {
# Login URI. The user will be returned here if an error occurs.
'login_uri': '/', # default 'login/'
# The user is sent here after successful authentication.
'success_uri': '/',
'secret_key': 'CHANGE_TO_A_SECRET_KEY',
# Comment out the following lines to use default
# User and UserProfile models.
'user_model': 'models.CustomUser',
}
engineauth['provider.google'] = {
'client_id': '673072897993.apps.googleusercontent.com',
'client_secret': '4bNhP7gETK0DLOvIhC0rO39b',
'api_key': '',
'scope': 'https://www.googleapis.com/auth/plus.me',
}
engineauth['provider.github'] = {
'client_id': '7c9a74ca5fd7bdb149c2',
'client_secret': 'a6dbb9f8db8f881290db3bdc32c8f2ac3d5b2535',
}
engineauth['provider.linkedin'] = {
'client_id': 'jfsgpazuxzb2',
'client_secret': 'LxGBTeCpQlb4Ad2R',
}
engineauth['provider.twitter'] = {
'client_id': 'l8nfb1saEW4mlTOARqunKg',
'client_secret': 'LCQweRuuGndhtNWihnwiDxs9npkNRII8GAgpGkYFi5c',
}
if ON_DEV:
# Facebook settings for Development
FACEBOOK_APP_KEY = '343417275669983'
FACEBOOK_APP_SECRET = 'fec59504f33b238a5d7b5f3b35bd958a'
else:
# Facebook settings for Production
FACEBOOK_APP_KEY = '109551039166233'
FACEBOOK_APP_SECRET = 'f929abbc0c5092164df693d047f880ec'
engineauth['provider.facebook'] = {
'client_id': FACEBOOK_APP_KEY,
'client_secret': FACEBOOK_APP_SECRET,
'scope': 'email',
}
def webapp_add_wsgi_middleware(app):
from engineauth import middleware
return middleware.AuthMiddleware(app)
|
import setuptools
from version import VERSION
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="tkhtmlview",
version=VERSION,
author="Palash Bauri",
author_email="hey@palashbauri.in",
description="View Simple HTML docs on tkinter",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/bauripalash/tkhtmlview",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.4",
install_requires=['Pillow>=5.3.0','requests>=2.22.0'],
)
|
age = 12
if age < 4:
price = 0
elif age < 18:
price = 25
elif age < 65:
price = 40
elif age >= 65:
price = 20
print(f"Your admission cost is ${price}.")
|
import paho.mqtt.client as mqtt
import time
import json
def main():
def on_connect(client, userdata, flags, rc):
print("CONNECTED!")
client = mqtt.Client()
client.on_connect = on_connect
client.connect("46.101.168.60", 1883, 60)
client.loop_start()
f = open("testDrive.csv")
for line in f.readlines():
if line.startswith(","):
#skip first line
continue
values = line.split(",")
speed = '{"value":' + values[1] + ', "utc":1}'
break_ = '{"value":' + values[2] + ', "utc":1}'
accel = '{"value":' + values[3] + ', "utc":1}'
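# The payloads above are JSON built by string concatenation; an equivalent,
# arguably safer construction with the (already imported) json module would be,
# for example: speed = json.dumps({"value": float(values[1]), "utc": 1})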
client.publish("/signal/ESP_v_Signal", speed, qos=0, retain=False)
client.publish("/signal/ESP_Bremsdruck", break_, qos=0, retain=False)
client.publish("/signal/ESP_Laengsbeschl", accel, qos=0, retain=False)
time.sleep(0.01)
client.loop_stop()
if __name__=="__main__":
main()
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Options for BigMLer reify option
"""
def get_reify_options(defaults=None):
"""Reify-related options
"""
if defaults is None:
defaults = {}
options = {
# Resource ID
'--id': {
'dest': 'resource_id',
'default': defaults.get('resource_id', None),
'help': ("ID for the resource to be reified.")},
# Language for the output (currently only python available)
'--language': {
'dest': 'language',
'default': defaults.get('language', 'python'),
'choices': ["python", "whizzml", "nb"],
'help': ("Language for the resource to be reified in.")},
# Name of the file to output the code.
"--output": {
'action': 'store',
'dest': 'output',
'default': defaults.get('output', None),
'help': "Path to the file to output the reify code."},
# Add updatable field structure information to the source update call
'--add-fields': {
'action': 'store_true',
'dest': 'add_fields',
'default': defaults.get('add_fields', False),
'help': ("Add the updatable fields structure information"
" to the source update call.")},
# Don't add the updatable field structure information to the
# source update call. As opposed to --add-fields
'--no-add-fields': {
'action': 'store_false',
'dest': 'add_fields',
'default': defaults.get('add_fields', False),
'help': ("Don't add the updatable fields structure information"
" to the source update call.")},
# Forces the scripts to be upgraded
'--upgrade': {
'action': 'store_true',
'dest': 'upgrade',
'default': defaults.get('upgrade', False),
'help': ("Force the scripts used in the retraining to be"
" upgraded.")}
}
return options
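# Note: each entry above mirrors argparse's add_argument() keyword interface,
# so a parser could be assembled roughly as (a sketch, assuming argparse):
#   parser = argparse.ArgumentParser()
#   for flag, opts in get_reify_options().items():
#       parser.add_argument(flag, **opts)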
|
"""reverse track coords
Revision ID: 6cca33764ed5
Revises: c206a3641567
Create Date: 2016-02-04 22:58:51.312968
"""
# revision identifiers, used by Alembic.
revision = "6cca33764ed5"
down_revision = "c206a3641567"
import re
from alembic import op
import sqlalchemy as sa
_linestring_rx = re.compile(r"^LINESTRING\((.+)\)$")
def parse_linestring(wkt):
"""
Parses LINESTRING WKT into a list of lon/lat (str) tuples.
:param wkt: The WKT for the LINESTRING
:type wkt: str
:return: List of (lon,lat) tuples.
:rtype: list[(str,str)]
"""
return [
lonlat.split(" ") for lonlat in _linestring_rx.match(wkt).group(1).split(",")
]
def linestring_wkt(points):
wkt_dims = ["{} {}".format(lon, lat) for (lon, lat) in points]
return "LINESTRING({})".format(", ".join(wkt_dims))
def reverse_coordinates():
conn = op.get_bind()
results = conn.execute(
"select ride_id, AsWkt(gps_track) as gps_track from ride_tracks"
)
i = 0
for row in results:
i += 1
if i % 100 == 0:
print("Row: {}".format(i))
orig_points = parse_linestring(row["gps_track"])
new_points = [(lon, lat) for (lat, lon) in orig_points]
new_wkt = linestring_wkt(new_points)
conn.execute(
"update ride_tracks set gps_track=ST_GeomFromText(%s) where ride_id=%s",
[new_wkt, row["ride_id"]],
)
def upgrade():
reverse_coordinates()
def downgrade():
upgrade()
|
"""Function to find all matching nodes in a linked list."""
from .linked_list import LinkedList
from .node import Node
from .k_tree import KTree
def find_matches(ktree, target_value):
"""
Perform search for all nodes.
If a node's value matches target_value, the node is
added to the list.
"""
ls = list()
def _walk(curr=None):
"""Define a helper function which traverses ktree."""
if curr is None:
return
if curr.val == target_value:
ls.append(curr)
for child in curr.children:
_walk(child)
_walk(ktree.root)
return ls
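# Example usage (a sketch, assuming a KTree populated elsewhere):
#   tree = KTree()
#   ...  # insert nodes into tree
#   matches = find_matches(tree, 5)  # -> list of Node objects whose val == 5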
|
## FuzzingTool
#
# Authors:
# Vitor Oriel C N Borges <https://github.com/VitorOriel>
# License: MIT (LICENSE.md)
# Copyright (c) 2021 Vitor Oriel
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
## https://github.com/NESCAU-UFLA/FuzzingTool
from ..BaseDictionary import BaseDictionary
from ....conn.Request import Request
from ....conn.Response import Response
from ....IO.OutputHandler import outputHandler as oh
from ....exceptions.RequestExceptions import RequestException
from ....exceptions.MainExceptions import MissingParameter
from bs4 import BeautifulSoup
import re
class CrtDictionary(BaseDictionary):
__name__ = "CrtDictionary"
__author__ = ("Vitor Oriel C N Borges")
__params__ = "DOMAIN"
__desc__ = "Build the wordlist based on the content of the site crt.sh"
__type__ = "SubdomainFuzzing"
def __init__(self):
super().__init__()
def setWordlist(self, sourceParam: str):
if not sourceParam:
raise MissingParameter("target host")
requester = Request(
url="https://crt.sh/",
method='GET',
data={
'PARAM': {
'q': sourceParam,
},
'BODY': {},
},
headers={
'Host': "crt.sh",
'User-Agent': "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:87.0) Gecko/20100101 Firefox/87.0",
'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
'Accept-Language': "en-US,en;q=0.5",
'Accept-Encoding': "gzip, deflate",
'Connection': "keep-alive",
'Referer': "https://crt.sh/",
'Upgrade-Insecure-Requests': "1",
'TE': "Trailers",
},
)
try:
response = requester.request("")
except RequestException as e:
raise Exception(str(e))
if 'None found' in response.text:
raise Exception(f"No certified domains was found for '{sourceParam}'")
contentList = [element.string for element in BeautifulSoup(response.text, "lxml")('td')]
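# Build a regex that only accepts entries ending in the target domain,
# i.e. '<label>(.<label>)*.<sourceParam>', so unrelated certificate rows
# returned by crt.sh are discarded.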
regex = r"([a-zA-Z0-9]+\.)*[a-zA-Z0-9]+"
for splited in sourceParam.split('.'):
regex += r"\."+splited
regexer = re.compile(regex)
domainList = sorted(set([element for element in contentList if regexer.match(str(element))]))
self._wordlist = [domain.split(f'.{sourceParam}')[0] for domain in domainList]
|
# -*- coding: utf-8 -*-
"""Models for the SLiM Flask application."""
from __future__ import absolute_import
import datetime
from flask_sqlalchemy import SQLAlchemy
from flask_security import UserMixin, RoleMixin
db = SQLAlchemy()
# Security
roles_users = db.Table(
'roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('users.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('roles.id')))
class Role(db.Model, RoleMixin):
"""User role model."""
__tablename__ = 'roles'
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
def __str__(self):
"""Return str(self)."""
return self.name
def __repr__(self):
"""Return repr(self)."""
return '<Role: id=%d, name=%r, description=%r>' % (
self.id, self.name, self.description)
class User(db.Model, UserMixin):
"""User model."""
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
# first_name = db.Column(db.String(255))
# last_name = db.Column(db.String(255))
email = db.Column(db.String(64), unique=True)
password = db.Column(db.String(255))
# password = db.Column(db.PasswordType(255))
active = db.Column(db.Boolean())
# confirmed_at = db.Column(db.DateTime())
last_login_at = db.Column(db.DateTime())
current_login_at = db.Column(db.DateTime())
last_login_ip = db.Column(db.String(45))
current_login_ip = db.Column(db.String(45))
login_count = db.Column(db.Integer)
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users')) # , lazy='dynamic'))
def __str__(self):
"""Return str(self)."""
return self.email
def __repr__(self):
"""Return repr(self)."""
roles = ', '.join([role.name for role in self.roles])
return '<User: id=%d, email=%r, roles=%r, active=%s>' % (
self.id, self.email, roles, self.active)
# SLiM
class Product(db.Model):
"""Product model."""
__tablename__ = 'products'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
description = db.Column(db.String(255), default='')
url = db.Column(db.String(255), default='')
def __str__(self):
"""Return str(self)."""
return self.name
def __repr__(self):
"""Return repr(self)."""
return '<Product: id=%d, name=%r>' % (self.id, self.name)
class Purchase(db.Model):
"""Product purchase model."""
__tablename__ = 'purchases'
id = db.Column(db.Integer, primary_key=True) # @TODO: check
user_id = db.Column(db.Integer(), db.ForeignKey('users.id'))
product_id = db.Column(db.Integer(), db.ForeignKey('products.id'))
quantity = db.Column(db.Integer(), default=1)
# purchase_date = db.Column(db.DateTime())
# expiration_date = db.Column(db.DateTime())
# active = db.Column(db.Boolean())
user = db.relationship(
'User', backref=db.backref('purchases', lazy='dynamic'))
product = db.relationship(
'Product', backref=db.backref('purchases', lazy='dynamic'))
def __str__(self):
"""Return str(self)."""
return 'Purchase(id=%d, user=%r, product=%r, quantity=%d)' % (
self.id, self.user.email, self.product.name, self.quantity)
def __repr__(self):
"""Return repr(self)."""
return '<Purchase: id=%d, user_id=%r, product_id=%r, quantity=%d>' % (
self.id, self.user_id, self.product_id, self.quantity)
@classmethod
def count(cls, user_id=None, product_id=None):
"""Return the count of purchases according to specified parameters."""
query = cls.query
if user_id:
query = query.filter_by(user_id=user_id)
if product_id:
query = query.filter_by(product_id=product_id)
return sum(item.quantity for item in query.all())
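# Example: Purchase.count(user_id=1) returns the total quantity purchased by
# user 1 across all products; Purchase.count() sums over every purchase.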
class License(db.Model):
"""Product license model."""
__tablename__ = 'licenses'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
product_id = db.Column(db.Integer, db.ForeignKey('products.id'))
description = db.Column(db.String(255))
request = db.Column(db.LargeBinary)
request_date = db.Column(db.DateTime)
license = db.Column(db.LargeBinary)
user = db.relationship(
'User', backref=db.backref('licenses', lazy='dynamic'))
product = db.relationship(
'Product', backref=db.backref('licenses', lazy='dynamic'))
def __init__(self, **kwargs):
"""License model initializer."""
kwargs.setdefault('request_date', datetime.datetime.now())
super(License, self).__init__(**kwargs)
def __str__(self):
"""Return str(self)."""
return ('License(id=%d, user=%r, product=%r, request_date=%s)' % (
self.id, self.user.email, self.product.name,
self.request_date.isoformat()))
def __repr__(self):
"""Return repr(self)."""
return ('<License: id=%d, user_id=%r, product_id=%r, '
'request_date=%s>' % (self.id, self.user_id, self.product_id,
self.request_date.isoformat()))
|
from pytube import YouTube
# prompt for the video link and the directory to save the video to
link = input("digite o link do video que deseja baixar: ")
path = input("digite o diretório que deseja salvar o video: ")
yt = YouTube(link)
# Show the video details
print("Titulo: ", yt.title)
print("Número de views: ", yt.views)
print(f"Tamanho do vídeo: {yt.length} segundos")
print("Avaliação do vídeo: ", yt.rating)
# Use the highest available resolution
ys = yt.streams.get_highest_resolution()
# Start downloading the video
print("Baixando...")
ys.download(path)
print("Download completo!")
|
# fa20-516-238 E.Cloudmesh.Common.5
from cloudmesh.common.util import banner
from cloudmesh.common.Shell import Shell
from cloudmesh.common.StopWatch import StopWatch
class CloudmeshCommon:
def demo_stopwatch(self):
count = 1
StopWatch.start("demo")
while (count < 5):
ping = Shell.ping(host='localhost', count=count)
print(ping)
count += count
StopWatch.stop("demo")
print("\n Total time elapsed:",StopWatch.get("demo", digits = 5))
if __name__ == '__main__':
demo = CloudmeshCommon()
banner("Stopwatch Demo", c="x", prefix="#", color="BLUE")
demo.demo_stopwatch()
|
"""Module that interacts with the Riot API
and transforms the received data in
a user readable way.
"""
import shelve
import matplotlib.pyplot as plt
import logging
from discord.ext import commands
import discord
import pandas as pd
from core import (
timers,
exceptions,
config
)
from core.state import GeneralState
from . import (
image_transformation,
riot_utility as utility
)
logger = logging.getLogger(__name__)
_timers = []
# FIXME: this is trash
# === BAN CALCULATION === #
def get_best_ban(summoner):
ban_list = []
most_played_champs = list(summoner.get_most_played_champs(10))
for champ in most_played_champs:
if summoner.has_played_champ_by_name_in_last_n_days(champ, 30):
ban_list.append(champ)
if len(ban_list) == 5:
return ban_list
return ban_list
def get_best_bans_for_team(team) -> list:
ban_list = []
ranks = []
best_bans_for_player = []
for player in team:
ranks.append(player.rank_value['RANKED_SOLO_5x5'])
best_bans_for_player.append(get_best_ban(player))
average_rank = utility.get_average_rank(ranks)
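# Heuristic: skip players more than 500 LP below the team average, take one
# ban from players near the average, and three bans from the remaining
# players (smurfs or well above the average).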
for i in range(0, len(team)):
if ranks[i] <= average_rank - 500:
continue
elif ranks[i] > average_rank - 500 and ranks[i] <= average_rank + 800:
ban_list.append(best_bans_for_player[i][0])
elif team[i].is_smurf() or ranks[i] > average_rank + 800:
ban_list.append(best_bans_for_player[i][0])
ban_list.append(best_bans_for_player[i][1])
ban_list.append(best_bans_for_player[i][2])
while len(ban_list) > 5:
del ban_list[-1]
return ban_list
# === INTERFACE === #
def get_player_stats(discord_user_name, summoner_name, guild_config: config.GuildConfig, general_config: config.GeneralConfig, guild_id: int, queue_type='RANKED_SOLO_5x5') -> str:
summoner = get_or_create_summoner(discord_user_name, summoner_name, guild_config, general_config, guild_id=guild_id)
return f'Rank: {summoner.get_rank_string(queue_type)}, Winrate {summoner.get_winrate(queue_type)}%.'
def get_smurf(discord_user_name, summoner_name, guild_config: config.GuildConfig, general_config: config.GeneralConfig, guild_id: int) -> str:
summoner = get_or_create_summoner(discord_user_name, summoner_name, guild_config, general_config, guild_id)
is_smurf_word = 'kein'
if summoner.is_smurf():
is_smurf_word = 'ein'
return f'Der Spieler **{utility.format_summoner_name(summoner.name)}** ist sehr wahrscheinlich **{is_smurf_word}** Smurf.'
def calculate_bans_for_team(bot_config: config.BotConfig, *names) -> str:
utility.update_champion_json()
if len(names[0]) != 5:
logger.exception('Check Failure')
raise commands.CheckFailure()
team = list(utility.create_summoners(list(names[0]), bot_config.general_config))
output = get_best_bans_for_team(team)
image_transformation.create_new_image(output, bot_config)
op_url = f'https://euw.op.gg/multi/query={team[0].name}%2C{team[1].name}%2C{team[2].name}%2C{team[3].name}%2C{team[4].name}'
return f'Team OP.GG: {op_url}\nBest Bans for Team:\n{utility.pretty_print_list(output)}'
def link_account(discord_user_name, summoner_name, guild_config: config.GuildConfig, general_config: config.GeneralConfig, guild_id: int):
summoner = utility.create_summoner(summoner_name, general_config, guild_config=guild_config)
summoner.discord_user_name = discord_user_name
folder_name = guild_config.folders_and_files.database_directory_summoners.format(guild_id=guild_id)
with shelve.open(f'{folder_name}/{guild_config.folders_and_files.database_name_summoners}', 'rc') as database:
for key in database.keys():
if key == str(discord_user_name):
logger.exception('DataBaseException')
raise exceptions.DataBaseException('Your discord account already has a lol account linked to it')
if database[key] is not None:
if database[key].name == summoner.name:
logger.exception('DataBaseException')
raise exceptions.DataBaseException('This lol account already has a discord account that is linked to it')
database[str(discord_user_name)] = summoner
def update_linked_account_data_by_discord_user_name(discord_user_name, guild_config: config.GuildConfig, general_config: config.GeneralConfig, guild_id: int):
summoner = utility.create_summoner(utility.read_account(discord_user_name, general_config, guild_id).name, general_config, guild_config=guild_config)
summoner.discord_user_name = discord_user_name
folder_name = general_config.database_directory_summoners.format(guild_id=guild_id)
with shelve.open(f'{folder_name}/{general_config.database_name_summoners}', 'rc') as database:
database[str(discord_user_name)] = summoner
return summoner
def update_linked_summoners_data(summoners, guild_config: config.GuildConfig, general_config: config.GeneralConfig, guild_id: int):
for summoner in summoners:
yield update_linked_account_data_by_discord_user_name(summoner.discord_user_name, guild_config, general_config, guild_id)
def get_or_create_summoner(discord_user_name, summoner_name, guild_config: config.GuildConfig, general_config: config.GeneralConfig, guild_id: int):
if summoner_name is None:
summoner = utility.read_account(discord_user_name, general_config, guild_id)
if utility.is_in_need_of_update(summoner):
update_linked_account_data_by_discord_user_name(discord_user_name, guild_config, general_config, guild_id)
return summoner
else:
return utility.create_summoner(summoner_name, general_config, guild_config=guild_config)
def unlink_account(discord_user_name, guild_config: config.GuildConfig, guild_id: int):
folder_name = guild_config.folders_and_files.database_directory_summoners.format(guild_id=guild_id)
with shelve.open(f'{folder_name}/{guild_config.folders_and_files.database_name_summoners}', 'rc') as database:
for key in database.keys():
if key == str(discord_user_name):
del database[key]
def create_leaderboard_embed(guild_config: config.GuildConfig, general_config: config.GeneralConfig, guild_id):
summoners = list(utility.read_all_accounts(general_config, guild_config.unsorted_config.guild_id))
old_summoners = summoners.copy()
summoners = list(update_linked_summoners_data(summoners, guild_config, general_config, guild_id))
summoners.sort(key=lambda x: x.rank_value['RANKED_SOLO_5x5'], reverse=True)
for summoner in summoners:
for old_summoner in old_summoners:
if old_summoner.name == summoner.name:
summoner.rank_dt = summoner.rank_value['RANKED_SOLO_5x5'] - old_summoner.rank_value['RANKED_SOLO_5x5']
if summoner.rank_dt > 0:
summoner.rank_dt = f'+{summoner.rank_dt}'
elif summoner.rank_dt < 0:
summoner.rank_dt = f'{summoner.rank_dt}'
elif summoner.rank_dt == 0:
summoner.rank_dt = f'\u00B1{summoner.rank_dt}'
data = [[summoner.discord_user_name, summoner.name, summoner.get_rank_string(), f'{summoner.get_winrate()}%', summoner.rank_dt, summoner.get_promo_string() if summoner.get_promo_string() is not None else '-' ] for summoner in summoners if summoner.has_played_rankeds() ]
fig, ax = plt.subplots()
# hide axes
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
df = pd.DataFrame(data, columns=['Discord User', 'Summoner', 'Rank', 'Winrate', 'Progress in LP', 'Promo Progress'])
inner_cell_colours = []
col_colors = []
for i in range(0, len(df.columns)):
inner_cell_colours.append('#2c2f33')
col_colors.append('#23272a')
outer_cell_colours = []
for i in range(0, len(data)):
outer_cell_colours.append(inner_cell_colours)
table = ax.table(cellText=df.values, colLabels=df.columns, loc='center', cellLoc='center', colColours=col_colors, cellColours=outer_cell_colours)
table_props = table.properties()
table_cells = table_props['child_artists']
for cell in table_cells:
cell.get_text().set_fontsize(30)
cell.get_text().set_color('white')
fig.tight_layout()
plt.savefig('./temp/leaderboard.png')
op_url = 'https://euw.op.gg/multi/query='
for summoner in summoners:
op_url = op_url + f'{summoner.name}%2C'
_embed = discord.Embed(
title='Kraut9 Leaderboard',
colour=discord.Color.from_rgb(62, 221, 22),
url=op_url[:-3])
return _embed
def update_state_clash_dates(state: GeneralState, general_config: config.GeneralConfig):
clash_dates = utility.get_upcoming_clash_dates(general_config, state)
for clash_date in clash_dates:
if clash_date not in state.clash_dates:
state.clash_dates.append(clash_date)
return
# === INTERFACE END === #
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import annotations
from copy import deepcopy
from itertools import chain
from re import search
from typing import List, Optional
from numpy import ndarray, linspace, inf
from . import Group, Report
class Collection(object):
"""Collection storage class.
This class stores a collection of :class:`~araucaria.main.group.Group` objects.
Parameters
----------
name : :class:`str`
Name for the collection. The default is None.
Attributes
----------
tags : :class:`dict`
Dictionary with available groups in the collection based on tag keys.
Notes
-----
Each group will be stored as an attribute of the collection.
The ``tags`` attribute classifies group names based on a
``tag`` key, which is useful for joint manipulation of groups.
The following methods are currently implemented:
.. list-table::
:widths: auto
:header-rows: 1
* - Method
- Description
* - :func:`add_group`
- Adds a group to the collection.
* - :func:`apply`
- Applies a function to groups in the collection.
* - :func:`copy`
- Returns a copy of the collection.
* - :func:`del_group`
- Deletes a group from the collection.
* - :func:`get_group`
- Returns a group in the collection.
* - :func:`get_mcer`
- Returns the minimum common energy range for the collection.
* - :func:`get_names`
- Return group names in the collection.
* - :func:`get_tag`
- Returns tag of a group in the collection.
* - :func:`rename_group`
- Renames a group in the collection.
* - :func:`retag`
- Modifies tag of a group in the collection.
* - :func:`summary`
- Returns a summary report of the collection.
Warning
-------
Each group can only have a single ``tag`` key.
Example
-------
>>> from araucaria import Collection
>>> collection = Collection()
>>> type(collection)
<class 'araucaria.main.collection.Collection'>
"""
def __init__(self, name: str=None):
if name is None:
name = hex(id(self))
self.name = name
self.tags: dict = {}
def __repr__(self):
if self.name is not None:
return '<Collection %s>' % self.name
else:
return '<Collection>'
def add_group(self, group: Group, tag: str='scan') -> None:
"""Adds a group dataset to the collection.
Parameters
----------
group
The data group to add to the collection.
tag
Key for the ``tags`` attribute of the collection.
The default is 'scan'.
Returns
-------
:
Raises
------
TypeError
If ``group`` is not a valid Group instance.
ValueError
If ``group.name`` is already in the collection.
Example
-------
>>> from araucaria import Collection, Group
>>> from araucaria.utils import check_objattrs
>>> collection = Collection()
>>> g1 = Group(**{'name': 'group1'})
>>> g2 = Group(**{'name': 'group2'})
>>> for group in (g1, g2):
... collection.add_group(group)
>>> check_objattrs(collection, Collection, attrlist=['group1','group2'])
[True, True]
>>> # using tags
>>> g3 = Group(**{'name': 'group3'})
>>> collection.add_group(g3, tag='ref')
>>> for key, value in collection.tags.items():
... print(key, value, type(value))
scan ['group1', 'group2'] <class 'list'>
ref ['group3'] <class 'list'>
"""
if not isinstance(group, Group):
raise TypeError('group is not a valid Group instance.')
name = group.name
if name in self.get_names():
raise ValueError('group name already in the Collection.')
else:
setattr(self, name, group)
# updating tags
if tag in self.tags:
self.tags[tag].append(name)
self.tags[tag].sort()
else:
self.tags[tag] = [name]
def apply(self, func, taglist: List[str]=['all'], **kwargs: dict) -> None:
"""
Applies a function to groups in a collection.
Parameters
----------
func
Function to apply to the collection.
Must accept ``update=True`` as an argument.
taglist
List with keys to filter groups in the collection based
on the ``tags`` attribute. The default is ['all'].
**kwargs
Additional keyword arguments to pass to ``func``.
Returns
-------
:
Raises
------
ValueError
If any item in ``taglist`` is not a key of the ``tags`` attribute.
Example
-------
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import read_collection_hdf5
>>> from araucaria.xas import pre_edge
>>> fpath = get_testpath('Fe_database.h5')
>>> collection = read_collection_hdf5(fpath)
>>> collection.apply(pre_edge)
>>> report = collection.summary(optional=['e0'])
>>> report.show()
===============================================
id dataset tag mode n e0
===============================================
1 FeIISO4_20K scan mu 5 7124.7
2 Fe_Foil scan mu_ref 5 7112
3 Ferrihydrite_20K scan mu 5 7127.4
4 Goethite_20K scan mu 5 7127.3
===============================================
"""
# retrieving list with group names
names = self.get_names(taglist=taglist)
for name in names:
group = self.get_group(name=name)
func(group, update=True, **kwargs)
return
def copy(self) -> Collection:
"""Returns a deep copy of the collection.
Parameters
----------
None
Returns
-------
:
Copy of the collection.
Example
-------
>>> from numpy import allclose
>>> from araucaria import Group, Collection
>>> collection1 = Collection()
>>> content = {'name': 'group', 'energy': [1,2,3,4,5,6]}
>>> group = Group(**content)
>>> collection1.add_group(group)
>>> collection2 = collection1.copy()
>>> energy1 = collection1.get_group('group').energy
>>> energy2 = collection2.get_group('group').energy
>>> allclose(energy1, energy2)
True
"""
return deepcopy(self)
def del_group(self, name) -> None:
"""Removes a group dataset from the collection.
Parameters
----------
name
Name of group to remove.
Returns
-------
:
Raises
------
AttributeError
If ``name`` is not a group in the collection.
Example
-------
>>> from araucaria import Collection, Group
>>> from araucaria.utils import check_objattrs
>>> collection = Collection()
>>> g1 = Group(**{'name': 'group1'})
>>> g2 = Group(**{'name': 'group2'})
>>> for group in (g1, g2):
... collection.add_group(group)
>>> check_objattrs(collection, Collection, attrlist=['group1','group2'])
[True, True]
>>> collection.del_group('group2')
>>> check_objattrs(collection, Collection, attrlist=['group1','group2'])
[True, False]
>>> # verifying that the deleted group has no tag
>>> for key, value in collection.tags.items():
... print(key, value)
scan ['group1']
"""
if not hasattr(self, name):
raise AttributeError('collection has no %s group.' % name)
# retrieving original tag key
for key, val in self.tags.items():
if name in val:
initag = key
break
# removing groupname from original tag
self.tags[initag].remove(name)
# removing entire key if group list is empty
if not self.tags[initag]:
del self.tags[initag]
# removing group
delattr(self, name)
def get_group(self, name) -> Group:
"""Returns a group dataset from the collection.
Parameters
----------
name
Name of group to retrieve.
Returns
-------
:
Requested group.
Raises
------
AttributeError
If ``name`` is not a group in the collection.
Important
---------
Changes made to the group will be propagated to the collection.
If you need a copy of the group use the :func:`copy` method.
Example
-------
>>> from araucaria import Collection, Group
>>> from araucaria.utils import check_objattrs
>>> collection = Collection()
>>> g1 = Group(**{'name': 'group1'})
>>> collection.add_group(g1)
>>> gcopy = collection.get_group('group1')
>>> check_objattrs(gcopy, Group)
True
>>> print(gcopy.name)
group1
"""
if not hasattr(self, name):
raise AttributeError('collection has no %s group.' % name)
return getattr(self, name)
def get_mcer(self, num: int=None, taglist: List[str]=['all']) -> ndarray:
"""Returns the minimum common energy range for the collection.
Parameters
----------
num
Number of equally-spaced points for the energy array.
taglist
List with keys to filter groups in the collection based
on the ``tags`` attribute. The default is ['all'].
Returns
-------
:
Array containing the minimum common energy range
Raises
------
AttributeError
If ``energy`` is not an attribute of the requested groups.
ValueError
If any item in ``taglist`` is not a key of the ``tags`` attribute.
Notes
-----
By default the returned array contains the lowest number of points
available in the minimum common energy range of the groups.
Providing a value for ``num`` will return the desired number
of equally-spaced points for the minimum common energy range.
Examples
--------
>>> from numpy import linspace
>>> from araucaria import Collection, Group
>>> collection = Collection()
>>> g1 = Group(**{'name': 'group1', 'energy': linspace(1000, 2000, 6)})
>>> g2 = Group(**{'name': 'group2', 'energy': linspace(1500, 2500, 11)})
>>> tags = ('scan', 'ref')
>>> for i, group in enumerate([g1, g2]):
... collection.add_group(group, tag=tags[i])
>>> # mcer for tag 'scan'
>>> print(collection.get_mcer(taglist=['scan']))
[1000. 1200. 1400. 1600. 1800. 2000.]
>>> # mcer for tag 'ref'
>>> print(collection.get_mcer(taglist=['ref']))
[1500. 1600. 1700. 1800. 1900. 2000. 2100. 2200. 2300. 2400. 2500.]
>>> # mcer for 'all' groups
>>> print(collection.get_mcer())
[1600. 1800. 2000.]
>>> # mcer for 'all' groups explicitly
>>> print(collection.get_mcer(taglist=['scan', 'ref']))
[1600. 1800. 2000.]
>>> # mcer with given number of points
>>> print(collection.get_mcer(num=11))
[1500. 1550. 1600. 1650. 1700. 1750. 1800. 1850. 1900. 1950. 2000.]
"""
# retrieving list with group names
names = self.get_names(taglist=taglist)
for item in names:
if not hasattr(getattr(self, item), 'energy'):
raise AttributeError('%s has no energy attribute.' % item)
# finding the maximum of minimum energy values
emc_min = max([getattr(self, item).energy[0] for item in names])
# finding the minimum of maximum energy values
emc_max = min([getattr(self, item).energy[-1] for item in names])
if num is not None:
# returning a formatted array
earray = linspace(emc_min, emc_max, num)
else:
# returning an array with the least amount of points
for i, item in enumerate(names):
energy = getattr(self, item).energy
energy = energy[(energy >= emc_min ) & (energy <= emc_max)]
if i == 0:
earray = energy
else:
if len(energy) < len(earray):
earray = energy
return earray
def get_names(self, taglist: List[str]=['all']) -> List[str]:
"""Returns group names in the collection.
Parameters
----------
taglist
List with keys to filter groups in the collection based
on the ``tags`` attribute. The default is ['all'].
Returns
-------
:
List with group names in the collection.
Raises
------
ValueError
If any item in ``taglist`` is not a key of the ``tags`` attribute.
Example
-------
>>> from araucaria import Collection, Group
>>> collection = Collection()
>>> g1 = Group(**{'name': 'group1'})
>>> g2 = Group(**{'name': 'group2'})
>>> g3 = Group(**{'name': 'group3'})
>>> g4 = Group(**{'name': 'group4'})
>>> tags = ('scan', 'ref', 'ref', 'scan')
>>> for i, group in enumerate([g1, g2, g3, g4]):
... collection.add_group(group, tag=tags[i])
>>> collection.get_names()
['group1', 'group2', 'group3', 'group4']
>>> collection.get_names(taglist=['scan'])
['group1', 'group4']
>>> collection.get_names(taglist=['ref'])
['group2', 'group3']
"""
names = []
iterchain = False
for tag in taglist:
if tag == 'all':
# retrieving all groups
names = self.tags.values()
names = [item for sublist in names for item in sublist]
break
elif tag not in self.tags:
raise ValueError('%s is not a valid key for the collection.' % tag)
else:
# retrieving selected tag
names = names + self.tags[tag]
names.sort()
return names
def get_tag(self, name) -> str:
"""Returns tag of a group in the collection.
Parameters
----------
name
Name of group to retrieve tag.
Returns
-------
:
Tag of the group.
Raises
------
AttributeError
If ``name`` is not in a group in the collection.
Example
-------
>>> from araucaria import Collection, Group
>>> collection = Collection()
>>> g1 = Group(**{'name': 'group1'})
>>> g2 = Group(**{'name': 'group2'})
>>> tags = ('scan', 'ref')
>>> for i, group in enumerate([g1, g2]):
... collection.add_group(group, tag=tags[i])
>>> print(collection.get_tag('group1'))
scan
>>> print(collection.get_tag('group2'))
ref
"""
if not hasattr(self, name):
raise AttributeError('collection has no %s group.' % name)
# retrieving original tag key
for key, val in self.tags.items():
if name in val:
tag = key
break
return tag
def rename_group(self, name: str, newname: str) -> None:
"""Renames a group in the collection.
Parameters
-----------
name
Name of group to modify.
newname
New name for the group.
Returns
-------
:
Raises
------
AttributeError
If ``name`` is not a group in the collection.
TypeError
If ``newname`` is not a string.
Example
-------
>>> from araucaria import Collection, Group
>>> collection = Collection()
>>> g1 = Group(**{'name': 'group1'})
>>> g2 = Group(**{'name': 'group2'})
>>> for i, group in enumerate([g1, g2]):
... collection.add_group(group)
>>> collection.rename_group('group1', 'group3')
>>> print(collection.get_names())
['group2', 'group3']
>>> print(collection.group3.name)
group3
"""
if not hasattr(self, name):
raise AttributeError('collection has no %s group.' % name)
elif not isinstance(newname, str):
raise TypeError('newname is not a valid string.')
else:
self.__dict__[newname] = self.__dict__.pop(name)
# retrieving original tag key
for key, val in self.tags.items():
if name in val:
tag = key
break
# replacing record name with new name
self.tags[tag].remove(name)
self.tags[tag].append(newname)
self.tags[tag].sort()
# modifying name of group
self.__dict__[newname].name = newname
def retag(self, name: str, tag: str) -> None:
"""Modifies tag of a group in the collection.
Parameters
----------
name
Name of group to modify.
tag
New tag for the group.
Returns
-------
:
Raises
------
AttributeError
If ``name`` is not a group in the collection.
Example
-------
>>> from araucaria import Collection, Group
>>> collection = Collection()
>>> g1 = Group(**{'name': 'group1'})
>>> g2 = Group(**{'name': 'group2'})
>>> tags = ('scan', 'ref')
>>> for i, group in enumerate([g1, g2]):
... collection.add_group(group, tag=tags[i])
>>> collection.retag('group1', 'ref')
>>> for key, value in collection.tags.items():
... print(key, value)
ref ['group1', 'group2']
"""
# retrieving original tag key
initag = self.get_tag(name)
if initag == tag:
# nothing needs to be changed
return
else:
# removing groupname from original tag
self.tags[initag].remove(name)
# removing entire key if group list is empty
if not self.tags[initag]:
del self.tags[initag]
# reassigning groupname to new tag
if tag in self.tags:
self.tags[tag].append(name)
self.tags[tag].sort()
else:
self.tags[tag] = [name]
def summary(self, taglist: List[str]=['all'], regex: str=None,
optional: Optional[list]=None) -> Report:
"""Returns a summary report of groups in a collection.
Parameters
----------
taglist
List with keys to filter groups in the collection based
on the ``tags`` attribute. The default is ['all'].
regex
Search string to filter results by group name. See Notes for details.
The default is None.
optional
List with optional parameters. See Notes for details.
The default is None.
Returns
-------
:
Report for datasets in the HDF5 file.
Raises
------
ValueError
If any item in ``taglist`` is not a key of the ``tags`` attribute.
Notes
-----
Summary data includes the following:
1. Group index.
2. Group name.
3. Group tag.
4. Measurement mode.
5. Numbers of scans.
6. Merged scans, if ``optional=['merged_scans']``.
7. Optional parameters if they exist as attributes in the group.
A ``regex`` value can be used to filter group names based
on a regular expression (regex). For valid regex syntax, please
check the documentation of the module :mod:`re`.
The number of scans and names of merged files are retrieved
from the ``merged_scans`` attribute of ``collection``.
Optional parameters will be retrieved from the groups as
attributes. Currently only :class:`str`, :class:`float` or
:class:`int` will be retrieved. Otherwise an empty string
will be printed in the report.
See also
--------
:class:`~araucaria.main.report.Report`
Examples
--------
>>> from araucaria.testdata import get_testpath
>>> from araucaria.io import read_collection_hdf5
>>> fpath = get_testpath('Fe_database.h5')
>>> collection = read_collection_hdf5(fpath)
>>> # printing default summary
>>> report = collection.summary()
>>> report.show()
=======================================
id dataset tag mode n
=======================================
1 FeIISO4_20K scan mu 5
2 Fe_Foil scan mu_ref 5
3 Ferrihydrite_20K scan mu 5
4 Goethite_20K scan mu 5
=======================================
>>> # printing summary of dnd file with merged scans
>>> report = collection.summary(regex='Goe', optional=['merged_scans'])
>>> report.show()
=============================================================
id dataset tag mode n merged_scans
=============================================================
1 Goethite_20K scan mu 5 20K_GOE_Fe_K_240.00000.xdi
20K_GOE_Fe_K_240.00001.xdi
20K_GOE_Fe_K_240.00002.xdi
20K_GOE_Fe_K_240.00003.xdi
20K_GOE_Fe_K_240.00004.xdi
=============================================================
>>> # printing custom summary
>>> from araucaria.testdata import get_testpath
>>> from araucaria import Collection
>>> from araucaria.io import read_xmu
>>> fpath = get_testpath('xmu_testfile.xmu')
>>> # extracting mu and mu_ref scans
>>> group_mu = read_xmu(fpath, scan='mu')
>>> # adding additional attributes
>>> group_mu.symbol = 'Zn'
>>> group_mu.temp = 25.0
>>> # saving in a collection
>>> collection = Collection()
>>> collection.add_group(group_mu)
>>> report = collection.summary(optional=['symbol','temp'])
>>> report.show()
===================================================
id dataset tag mode n symbol temp
===================================================
1 xmu_testfile.xmu scan mu 1 Zn 25
===================================================
"""
# list with parameter names
field_names = ['id', 'dataset', 'tag', 'mode', 'n']
# verifying optional values
if optional is not None:
for opt_val in optional:
field_names.append(opt_val)
# instantiating report class
report = Report()
report.set_columns(field_names)
        # group names, optionally filtered by regex
        names = self.get_names(taglist=taglist)
        if regex is not None:
            names = [name for name in names if search(regex, name) is not None]
        ngroups = len(names)
for i, name in enumerate(names):
data = self.get_group(name)
scanval = data.get_mode()
tag = self.get_tag(name)
extra_content = False # aux variable for 'merged_scans'
try:
# number of merged_scans
nscans = len(data.merged_scans)
            except AttributeError:
nscans = 1
field_vals = [i+1, name, tag, scanval, nscans]
if optional is not None:
for j, opt_val in enumerate(optional):
if opt_val == 'merged_scans':
if i == 0:
# storing the col merge_index
merge_index = len(field_vals)
try:
list_scans = data.merged_scans
field_vals.append(data.merged_scans[0])
extra_content = True
                        except AttributeError:
field_vals.append('None')
else:
# custom optional field
                        try:
                            val = getattr(data, opt_val)
                            if isinstance(val, (int, float, str)):
                                # only plain str, int or float values are reported
                                field_vals.append(val)
                            else:
                                field_vals.append('')
                        except AttributeError:
                            field_vals.append('')
report.add_row(field_vals)
if extra_content:
for item in list_scans[1:]:
field_vals = []
                    for j in range(len(field_names)):
if j != merge_index:
field_vals.append('')
else:
field_vals.append(item)
report.add_row(field_vals)
                if i < (ngroups - 1):
report.add_midrule()
return report
if __name__ == '__main__':
import doctest
doctest.testmod()
|
#
# @lc app=leetcode id=105 lang=python3
#
# [105] Construct Binary Tree from Preorder and Inorder Traversal
#
# https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/description/
#
# algorithms
# Medium (51.38%)
# Likes: 4990
# Dislikes: 128
# Total Accepted: 480K
# Total Submissions: 916.3K
# Testcase Example: '[3,9,20,15,7]\n[9,3,15,20,7]'
#
# Given two integer arrays preorder and inorder where preorder is the preorder
# traversal of a binary tree and inorder is the inorder traversal of the same
# tree, construct and return the binary tree.
#
#
# Example 1:
#
#
# Input: preorder = [3,9,20,15,7], inorder = [9,3,15,20,7]
# Output: [3,9,20,null,null,15,7]
#
#
# Example 2:
#
#
# Input: preorder = [-1], inorder = [-1]
# Output: [-1]
#
#
#
# Constraints:
#
#
# 1 <= preorder.length <= 3000
# inorder.length == preorder.length
# -3000 <= preorder[i], inorder[i] <= 3000
# preorder and inorder consist of unique values.
# Each value of inorder also appears in preorder.
# preorder is guaranteed to be the preorder traversal of the tree.
# inorder is guaranteed to be the inorder traversal of the tree.
#
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from typing import List
class Solution:
    def buildTree(self, preorder: List[int], inorder: List[int]) -> 'TreeNode':
        return self.constructTree(0, 0, len(inorder) - 1, preorder, inorder)
def constructTree(self, pre_start, in_start, in_end, preorder, inorder):
        # base case: nothing left to place in this subtree
        if pre_start >= len(preorder) or in_start > in_end:
return None
root = TreeNode(preorder[pre_start])
in_index = inorder.index(preorder[pre_start])
root.left = self.constructTree(pre_start + 1, in_start, in_index - 1, preorder, inorder)
root.right = self.constructTree(pre_start + (in_index - in_start) + 1, in_index + 1, in_end, preorder, inorder)
return root
# @lc code=end
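# Local sketch (not part of the submission): LeetCode normally supplies TreeNode,
# so a minimal stand-in is defined here only to let the file run stand-alone with
# the values from Example 1 above.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
if __name__ == '__main__':
    root = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
    print(root.val, root.left.val, root.right.val)  # expected: 3 9 20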
|
import logging
from pygdbmi.printcolor import fmt_cyan
class StringStream:
"""A simple class to hold text so that when passed
between functions, the object is passed by reference
and memory does not need to be repeatedly allocated for the string.
This class was written here to avoid adding a dependency
to the project.
"""
def __init__(self, raw_text, debug=False):
self.raw_text = raw_text
self.index = 0
self.len = len(raw_text)
if debug:
level = logging.DEBUG
else:
level = logging.ERROR
logging.basicConfig(format="%(funcName)20s %(message)s", level=level)
def read(self, count):
"""Read count characters starting at self.index,
and return those characters as a string
"""
new_index = self.index + count
if new_index > self.len:
buf = self.raw_text[self.index :] # return to the end, don't fail
else:
buf = self.raw_text[self.index : new_index]
self.index = new_index
return buf
def seek(self, offset):
"""Advance the index of this StringStream by offset characters"""
self.index = self.index + offset
def advance_past_chars(self, chars):
"""Advance the index past specific chars
Args chars (list): list of characters to advance past
Return substring that was advanced past
"""
start_index = self.index
while True:
current_char = self.raw_text[self.index]
self.index += 1
if current_char in chars:
break
elif self.index == self.len:
break
return self.raw_text[start_index : self.index - 1]
def advance_past_string_with_gdb_escapes(self, chars_to_remove_gdb_escape=None):
"""characters that gdb escapes that should not be
escaped by this parser
"""
if chars_to_remove_gdb_escape is None:
chars_to_remove_gdb_escape = ['"']
buf = ""
while True:
c = self.raw_text[self.index]
self.index += 1
logging.debug("%s", fmt_cyan(c))
if c == "\\":
# We are on a backslash and there is another character after the backslash
# to parse. Handle this case specially since gdb escaped it for us
# Get the next char that is being escaped
c2 = self.raw_text[self.index]
self.index += 1
# only store the escaped character in the buffer; don't store the backslash
# (don't leave it escaped)
buf += c2
elif c == '"':
# Quote is closed. Exit (and don't include the end quote).
break
else:
# capture this character, and keep capturing
buf += c
return buf
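# Illustrative usage (assumed sample input; a sketch, not part of pygdbmi's docs):
if __name__ == '__main__':
    stream = StringStream('"hello \\"world\\"",done')
    stream.seek(1)  # step past the opening quote
    print(stream.advance_past_string_with_gdb_escapes())  # -> hello "world"
    stream.advance_past_chars([','])                      # consume the comma
    print(stream.read(4))                                 # -> done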
|
def enabled():
return True
def title():
return "Calvin and Hobbes"
def subtitle():
return "View Calvin's adventures with good old Hobbes"
def run():
import feedparser
import re
import os
d = feedparser.parse('http://calvinhobbesdaily.tumblr.com/rss')
strip = re.match(r'<img[^>]*\ssrc="(.*?)"' , d['entries'][0]['summary_detail']['value'], re.IGNORECASE).groups(0)[0]
# tweak, must be done by tumblr I suppose
strip = strip.replace("_500.gif", "_1280.gif")
    os.system('curl -s ' + strip + ' -o strip.png')
os.system('qlmanage -p strip.png')
|
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E08000010"
addresses_name = "parl.2019-12-12/Version 2/Democracy_Club__12December2019Wig.tsv"
stations_name = "parl.2019-12-12/Version 2/Democracy_Club__12December2019Wig.tsv"
elections = ["parl.2019-12-12"]
csv_delimiter = "\t"
csv_encoding = "windows-1252"
allow_station_point_from_postcode = False
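    # The overrides below patch known-bad postcodes for individual UPRNs and
    # explicitly accept or reject the suggested corrections noted in the comments.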
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.property_urn.strip().lstrip("0")
if uprn == "200004805060":
rec["postcode"] = "WN7 1BT"
if uprn == "10091702455":
rec["postcode"] = "WN6 0GU"
if uprn == "10091700365":
rec["postcode"] = "WN7 1LS"
if uprn in [
"10014065653", # WN60TE -> WN60UL : 57 Granny Flat School Lane
]:
rec["accept_suggestion"] = True
if uprn in [
"10014060608", # WN25TA -> WN25NY : 10 Caravan Site
"10014060609", # WN25TA -> WN25NY : 11 Caravan Site
"10014060610", # WN25TA -> WN25NY : 12 Caravan Site
"10014060611", # WN25TA -> WN25NY : 13 Caravan Site
"10014060612", # WN25TA -> WN25NY : 14 Caravan Site
"10014060613", # WN25TA -> WN25NY : 15 Caravan Site
"10014060614", # WN25TA -> WN25NY : 16 Caravan Site
"200001924721", # WN40JH -> WN40JA : High Brooks Stables High Brooks
"100012500742", # WN24XR -> WN24XS : The Old Barn Smiths Lane
"100011798794", # WN59DL -> WN59DN : Flat Above 301-305 Ormskirk Road
]:
rec["accept_suggestion"] = False
# 17 Chester Street, Leigh WN7 2LS. NB addressbase UPRN is "10091700365"
if uprn == "100011763908":
rec["postcode"] = "WN71LS"
return rec
|
# for PDF miner use
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LAParams, LTTextBox, LTTextLine
from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
# for regex use
import re
# for xlsx output
from datetime import datetime
import xlsxwriter
# for file system use
import os
# for argv use
import sys, getopt
# qr code and invoice code info refer to https://zhuanlan.zhihu.com/p/32315595
class ChineseAmount():
chinese_amount_num = {
'〇' : 0,
'一' : 1,
'二' : 2,
'三' : 3,
'四' : 4,
'五' : 5,
'六' : 6,
'七' : 7,
'八' : 8,
'九' : 9,
'零' : 0,
'壹' : 1,
'贰' : 2,
'叁' : 3,
'肆' : 4,
'伍' : 5,
'陆' : 6,
'柒' : 7,
'捌' : 8,
'玖' : 9,
'貮' : 2,
#'两' : 2,
}
chinese_amount_unit = {
'分' : 0.01,
'角' : 0.1,
'元' : 1,
'圆' : 1,
'十' : 10,
'拾' : 10,
'百' : 100,
'佰' : 100,
'千' : 1000,
'仟' : 1000,
'万' : 10000,
'萬' : 10000,
'亿' : 100000000,
'億' : 100000000,
'兆' : 1000000000000,
}
chinese_amount_exclude_char = {
'整',
}
def convert_chinese_amount_to_number(self, chinese_amount):
#chinese_amount = chinese_amount.strip('整') #remove unused char
amount_number = 0
for key, value in self.chinese_amount_unit.items():
re_string = "(.{1})" + key
regex = re.compile(re_string)
result = re.search(regex, chinese_amount)
if(result):
if(result.group(1) in self.chinese_amount_num):
amount_number = amount_number + self.chinese_amount_num[result.group(1)] * value
return amount_number
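# Illustrative check (sample string assumed, not taken from a real invoice): the
# converter pairs each unit character with the digit in front of it, so
# '贰拾陆元肆角贰分' resolves to 20 + 6 + 0.4 + 0.02 = 26.42.
def _demo_chinese_amount():
    print(round(ChineseAmount().convert_chinese_amount_to_number('贰拾陆元肆角贰分'), 2))  # 26.42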
class eInvoicePDFParse():
_debug_output_textfile = False
def parse_pdf(self, pdf_path):
with open(pdf_path, 'rb') as fp:
parser = PDFParser(fp)
doc = PDFDocument(parser)
parser.set_document(doc)
rsrcmgr = PDFResourceManager()
laparams = LAParams(all_texts=True, boxes_flow=0.2, word_margin=0.5, detect_vertical=True)
device = PDFPageAggregator(rsrcmgr, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
extracted_text = ''
for page in PDFPage.create_pages(doc):
interpreter.process_page(page)
layout = device.get_result()
for lt_obj in layout:
if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):
#print(lt_obj)
#print(lt_obj.get_text())
extracted_text += lt_obj.get_text()
#else:
#print(lt_obj)
return extracted_text
einvoice_patten = {
"invoice_code" : r"\n([0-2][0-9]{11})\n", #发票代码 04403180xxx1
"invoice_number" : r"\n([0-9]{8})\n", #发票号码 1249xxx7
"date" : r"\n([0-9]{4})[^0-9/<>\+\-\*]+([0-9]{2})[^0-9/<>\+\-\*]+([0-9]{2}).*\n",
#"checksum" : r"\n([0-9]{5} *[0-9]{5} *[0-9]{5} *[0-9]{5})\n", #校验码 12157 2xxx4 8xxx0 73378
#"passcode" : r"\n[0-9/<>\+\-\*]{28}\n", #密码区 03597///23xxx146/6xxx6>5>-/50 {0-9,"/<>+-*"}
#"amount" : r"\n[¥¥]+ *([0-9.]+)\n", #合计税额 < 合计金额 < 价税合计 ¥ 26.42
"chinese_amount" : r"\n([壹贰叁肆伍陆柒捌玖零亿万仟佰拾圆元角分整]+)\n",
"itemName" : r"\n(\*+[^0-9/<>\+\-\*]+\*+.+)\n", #*日用杂品*日用品
}
einvoice_result = [
"invoice_code", "invoice_number",
"date_year", "date_month", "date_day",
"chinese_amount", "number_amount", "itemName",
"file_name"
]
def parse_einvoice_items(self, text):
results = {}
for key, patten in self.einvoice_patten.items():
result = re.findall(patten, text)
if(key == 'date'):
results["date_year"] = result[0][0]
results["date_month"] = result[0][1]
results["date_day"] = result[0][2]
else:
results[key] = result[0]
if(key == 'chinese_amount'):
results["number_amount"] = ChineseAmount().convert_chinese_amount_to_number(result[0])
return results
def parse_einvoice_item_by_pdf(self, pdfFilePath):
text = self.parse_pdf(pdfFilePath)
if(self._debug_output_textfile):
textFilePath = pdfFilePath.replace(".pdf", ".txt")
with open(textFilePath, "w", encoding="utf-8") as f:
f.write(text)
einvoice_item_result = self.parse_einvoice_items(text)
#einvoice_item_result["file_name"] = os.fsdecode(pdfFilePath)
return einvoice_item_result
class eInvoicePDFtoExcel():
def load_pdf_dir_get_einvoice_items(self, pdf_dir):
eInvoicePDFParser = eInvoicePDFParse()
pdfFiles = FileSystem().enumerate_pdf_in_folder(pdf_dir)
eInvoice_results = []
for pdfFile in pdfFiles:
eInvoice_result = eInvoicePDFParser.parse_einvoice_item_by_pdf(pdfFile)
eInvoice_results.append(eInvoice_result)
return eInvoice_results
einvoice_sheet_header = {
"invoice_code" : "发票代码",
"invoice_number": "发票号码",
"date" : "开票日期",
"chinese_amount": "价税合计 (大写)",
"number_amount" : "价税合计 (小写)",
"itemName" : "货物或应税劳务、服务名称",
#"file_name" : "PDF文件名称",
}
def extract_items_to_xlsx(self, eInvoice_data, output_path):
workbook = xlsxwriter.Workbook(output_path)
worksheet = workbook.add_worksheet("invoice")
date_format = workbook.add_format({'num_format': 'yyyy-mm-dd'})
row = 0
col = 0
# write header
worksheet.write_string(row, col, self.einvoice_sheet_header["invoice_code"])
worksheet.write_string(row, col + 1, self.einvoice_sheet_header["invoice_number"])
worksheet.write_string(row, col + 2, self.einvoice_sheet_header["date"])
worksheet.write_string(row, col + 3, self.einvoice_sheet_header["chinese_amount"])
worksheet.write_string(row, col + 4, self.einvoice_sheet_header["number_amount"])
worksheet.write_string(row, col + 5, self.einvoice_sheet_header["itemName"])
#worksheet.write_string(row, col + 6, self.einvoice_sheet_header["file_name"])
row = 1
for eInvoiceItem in eInvoice_data:
dateStr = eInvoiceItem["date_year"] + '-' + eInvoiceItem["date_month"] + '-' + eInvoiceItem["date_day"]
# Convert the date string into a datetime object.
date = datetime.strptime(dateStr, "%Y-%m-%d")
worksheet.write_string (row, col, eInvoiceItem["invoice_code"])
worksheet.write_string (row, col + 1, eInvoiceItem["invoice_number"])
worksheet.write_datetime(row, col + 2, date, date_format)
worksheet.write_string (row, col + 3, eInvoiceItem["chinese_amount"])
worksheet.write_number (row, col + 4, eInvoiceItem["number_amount"])
worksheet.write_string (row, col + 5, eInvoiceItem["itemName"])
#worksheet.write_string (row, col + 6, eInvoiceItem["file_name"])
row += 1
workbook.close()
def load_pdf_dir_output_xlsx(self, pdf_dir, output_xlsx):
eInvoiceItems = self.load_pdf_dir_get_einvoice_items(pdf_dir)
xlsxPath = os.path.join(pdf_dir, output_xlsx)
self.extract_items_to_xlsx(eInvoiceItems, xlsxPath)
class FileSystem:
def enumerate_pdf_in_folder(self, pdf_dir):
pdfFileList = []
directory = os.fsencode(pdf_dir)
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".pdf"):
pdfFilePath = os.path.join(pdf_dir, filename)
pdfFileList.append(pdfFilePath)
return pdfFileList
def main(argv):
inputdir = None
helpstr = 'Usage:\neinvoice_pdf_to_xlsx.py -d <pdf_dir>'
try:
opts, args = getopt.getopt(argv,"hd:",["pdf_dir="])
except getopt.GetoptError:
print(helpstr)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print(helpstr)
sys.exit()
elif opt in ("-d", "--pdf_dir"):
inputdir = arg
if not inputdir:
print(helpstr)
sys.exit(2)
pdf2excel = eInvoicePDFtoExcel()
pdf2excel.load_pdf_dir_output_xlsx(inputdir, "output.xlsx")
if __name__ == "__main__":
main(sys.argv[1:])
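# Example invocation (directory path is a placeholder):
#   python einvoice_pdf_to_xlsx.py -d ./invoices
# parses every .pdf found under ./invoices and writes ./invoices/output.xlsx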
|
import asyncio
import logging
from aioredlock import Aioredlock, LockError, LockAcquiringError
async def basic_lock():
lock_manager = Aioredlock([{
'host': 'localhost',
'port': 6379,
'db': 0,
'password': None
}])
if await lock_manager.is_locked("resource"):
print('The resource is already acquired')
try:
lock = await lock_manager.lock("resource")
except LockAcquiringError:
print('Something happened during normal operation. We just log it.')
except LockError:
print('Something is really wrong and we prefer to raise the exception')
raise
assert lock.valid is True
assert await lock_manager.is_locked("resource") is True
# Do your stuff having the lock
await lock_manager.unlock(lock)
assert lock.valid is False
assert await lock_manager.is_locked("resource") is False
await lock_manager.destroy()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
    asyncio.run(basic_lock())
|
# -*- coding: utf-8 -*-
'''
Plots numerical results
'''
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def linear(df, x, y, ci=None):
'''
df: DataFrame with numerical values
    x: string, column name to use on the x-axis
    y: string, column name to use on the y-axis
ci: confidence interval around regression line
'''
f, ax = plt.subplots(figsize=(10,10))
    if ci is None:
ax = sns.regplot(x=x, y=y, data=df, marker='.')
else:
ax = sns.regplot(x=x, y=y, data=df, marker='.', ci=ci)
plt.show()
def pairwise(df, names=None, category=None):
'''
df: DataFrame with numerical columns to explore pairwise
    names: list of columns to investigate, including category if used
category: optional string, column name that serves to classify results
'''
    if names is not None:
        df = df[names]
        df = df.dropna()
    if category is None:
ax = sns.pairplot(df, markers='.')
else:
ax = sns.pairplot(df, hue=category, markers='.')
plt.show()
def violin(df, x, bins, labels, y, category=None):
'''
df: DataFrame with columns to investigate
x: string, column name with data to bin
    bins: list of bin edges for column x (nr. of bins + 1)
labels: list of labels to bin to use as categories
y: string, column name with numerical values
category: optional string, column name with second category
'''
assert ( len(bins) == len(labels) + 1 ),'wrong nr. of bins and labels'
df['bins'] = pd.cut(df[x], bins=bins, labels=labels)
f, ax = plt.subplots(figsize=(12,8))
    if category is None:
sns.violinplot(x='bins', y=y, data=df, order=labels)
else:
sns.violinplot(x='bins', y=y, data=df, hue=category, order=labels)
plt.show()
def distribution(df, names=None):
'''
df: DataFrame with columns to plot
    names: list of columns to investigate
    '''
    if names is not None:
df = df[names]
df = df.dropna()
f, ax = plt.subplots(figsize=(10,10))
for col in df.columns:
ax = sns.distplot(df[col], bins=20, norm_hist=True, kde=True, label=col)
ax.set(xlabel='Value', ylabel='Probability density [-]')
plt.legend()
plt.show()
def heatmap(data, xnames, ynames):
'''
data: 2D numpy array with numerical data
    xnames: list of row labels (one per row of data)
    ynames: list of column labels (one per column of data)
'''
(r, c) = np.shape(data)
assert (len(xnames) == r) & (len(ynames) == c), 'check data & names'
df = pd.DataFrame(data, index=xnames, columns=ynames)
sns.heatmap(df, annot=True)
plt.show()
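# Minimal usage sketch (synthetic data, purely illustrative):
if __name__ == '__main__':
    demo = np.arange(6).reshape(2, 3)
    heatmap(demo, xnames=['row_a', 'row_b'], ynames=['col_1', 'col_2', 'col_3'])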
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Filename: Mail.py
# Mail travels from user -> MUA -> MTA -> another user.
# This script uses SMTP (Simple Mail Transfer Protocol) to send a message.
from email.mime.text import MIMEText
msg=MIMEText('hello,send by Frank...','plain','utf-8')
#input the Email address
from_addr=input('From: ')
password=input('Password: ')
#input receiver address
to_addr=input('To: ')
#input SMTP server address
smtp_server=input('SMTP server: ')
import smtplib
server=smtplib.SMTP(smtp_server,25) #default port is 25
server.set_debuglevel(1)
server.login(from_addr,password)
server.sendmail(from_addr,[to_addr],msg.as_string())
server.quit()
# This code is still incomplete; full send/receive support will be finished in August.
# The SMTP part still needs refinement, and a POP3 counterpart for receiving mail is planned.
|
import pandas as pd
#read the data
data = pd.read_csv('diabetes.csv')
#labels for discretization
labels = ['low','medium','high']
#Preprocessing
for j in data.columns[:-1]:
mean = data[j].mean()
data[j] = data[j].replace(0,mean)
data[j] = pd.cut(data[j],bins=len(labels),labels=labels)
#train test split
split_per = [80,70,60]
def count(data,colname,label,target):
condition = (data[colname] == label) & (data['Outcome'] == target)
return len(data[condition])
#Process starts here
for i in split_per:
#result list to store predicted values
predicted = []
#dictionary to store probabilities
probabilities = {0:{},1:{}}
#calculate training length
train_len = int((i*len(data))/100)
#Split training and testing data
train_X = data.iloc[:train_len,:]
    test_X = data.iloc[train_len:,:-1]
    test_y = data.iloc[train_len:,-1]
#count total number of 0s and 1s
count_0 = count(train_X,'Outcome',0,0)
count_1 = count(train_X,'Outcome',1,1)
prob_0 = count_0/len(train_X)
prob_1 = count_1/len(train_X)
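    # Naive Bayes: P(outcome | features) is proportional to P(outcome) multiplied by
    # the product of P(feature bin | outcome); the loops below build those tables.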
#Train the model
for j in train_X.columns[:-1]:
probabilities[0][j] = {}
probabilities[1][j] = {}
for k in labels:
count_k_0 = count(train_X,j,k,0)
count_k_1 = count(train_X,j,k,1)
probabilities[0][j][k] = count_k_0 / count_0
probabilities[1][j][k] = count_k_1 / count_1
#Test the model
for row in range(0,len(test_X)):
prod_0 = prob_0
prod_1 = prob_1
for feature in test_X.columns:
prod_0 *= probabilities[0][feature][test_X[feature].iloc[row]]
prod_1 *= probabilities[1][feature][test_X[feature].iloc[row]]
#Predict the outcome
if prod_0 > prod_1:
predicted.append(0)
else:
predicted.append(1)
    #confusion matrix counts (class 0 is treated as the positive class here)
tp,tn,fp,fn = 0,0,0,0
for j in range(0,len(predicted)):
if predicted[j] == 0:
if test_y.iloc[j] == 0:
tp += 1
else:
fp += 1
else:
if test_y.iloc[j] == 1:
tn += 1
else:
fn += 1
print('Accuracy for training length '+str(i)+'% : ',((tp+tn)/len(test_y))*100)
|
"""
Precisely APIs
Enhance & enrich your data, applications, business processes, and workflows with rich location, information, and identify APIs. # noqa: E501
The version of the OpenAPI document: 11.9.3
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from com.precisely.apis.api_client import ApiClient, Endpoint as _Endpoint
from com.precisely.apis.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from com.precisely.apis.model.demographics import Demographics
from com.precisely.apis.model.demographics_advanced_request import DemographicsAdvancedRequest
from com.precisely.apis.model.error_info import ErrorInfo
from com.precisely.apis.model.segmentation import Segmentation
class DemographicsServiceApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.get_demographics_advanced_endpoint = _Endpoint(
settings={
'response_type': (Demographics,),
'auth': [
'oAuth2Password'
],
'endpoint_path': '/demographics-segmentation/v1/advanced/demographics',
'operation_id': 'get_demographics_advanced',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'demographics_advanced_request',
],
'required': [
'demographics_advanced_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'demographics_advanced_request':
(DemographicsAdvancedRequest,),
},
'attribute_map': {
},
'location_map': {
'demographics_advanced_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'application/xml'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.get_demographics_basic_endpoint = _Endpoint(
settings={
'response_type': (Demographics,),
'auth': [
'oAuth2Password'
],
'endpoint_path': '/demographics-segmentation/v1/basic/demographics',
'operation_id': 'get_demographics_basic',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'address',
'longitude',
'latitude',
'search_radius',
'search_radius_unit',
'travel_time',
'travel_time_unit',
'travel_distance',
'travel_distance_unit',
'travel_mode',
'country',
'profile',
'filter',
'include_geometry',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'address':
(str,),
'longitude':
(str,),
'latitude':
(str,),
'search_radius':
(str,),
'search_radius_unit':
(str,),
'travel_time':
(str,),
'travel_time_unit':
(str,),
'travel_distance':
(str,),
'travel_distance_unit':
(str,),
'travel_mode':
(str,),
'country':
(str,),
'profile':
(str,),
'filter':
(str,),
'include_geometry':
(str,),
},
'attribute_map': {
'address': 'address',
'longitude': 'longitude',
'latitude': 'latitude',
'search_radius': 'searchRadius',
'search_radius_unit': 'searchRadiusUnit',
'travel_time': 'travelTime',
'travel_time_unit': 'travelTimeUnit',
'travel_distance': 'travelDistance',
'travel_distance_unit': 'travelDistanceUnit',
'travel_mode': 'travelMode',
'country': 'country',
'profile': 'profile',
'filter': 'filter',
'include_geometry': 'includeGeometry',
},
'location_map': {
'address': 'query',
'longitude': 'query',
'latitude': 'query',
'search_radius': 'query',
'search_radius_unit': 'query',
'travel_time': 'query',
'travel_time_unit': 'query',
'travel_distance': 'query',
'travel_distance_unit': 'query',
'travel_mode': 'query',
'country': 'query',
'profile': 'query',
'filter': 'query',
'include_geometry': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'application/xml'
],
'content_type': [],
},
api_client=api_client
)
self.get_demographics_by_address_endpoint = _Endpoint(
settings={
'response_type': (Demographics,),
'auth': [
'oAuth2Password'
],
'endpoint_path': '/demographics-segmentation/v1/demographics/byaddress',
'operation_id': 'get_demographics_by_address',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'address',
'country',
'profile',
'filter',
'value_format',
'variable_level',
],
'required': [
'address',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'address':
(str,),
'country':
(str,),
'profile':
(str,),
'filter':
(str,),
'value_format':
(str,),
'variable_level':
(str,),
},
'attribute_map': {
'address': 'address',
'country': 'country',
'profile': 'profile',
'filter': 'filter',
'value_format': 'valueFormat',
'variable_level': 'variableLevel',
},
'location_map': {
'address': 'query',
'country': 'query',
'profile': 'query',
'filter': 'query',
'value_format': 'query',
'variable_level': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'application/xml'
],
'content_type': [],
},
api_client=api_client
)
self.get_demographics_by_boundary_ids_endpoint = _Endpoint(
settings={
'response_type': (Demographics,),
'auth': [
'oAuth2Password'
],
'endpoint_path': '/demographics-segmentation/v1/demographics/byboundaryids',
'operation_id': 'get_demographics_by_boundary_ids',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'boundary_ids',
'profile',
'filter',
'value_format',
'variable_level',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'boundary_ids':
(str,),
'profile':
(str,),
'filter':
(str,),
'value_format':
(str,),
'variable_level':
(str,),
},
'attribute_map': {
'boundary_ids': 'boundaryIds',
'profile': 'profile',
'filter': 'filter',
'value_format': 'valueFormat',
'variable_level': 'variableLevel',
},
'location_map': {
'boundary_ids': 'query',
'profile': 'query',
'filter': 'query',
'value_format': 'query',
'variable_level': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'application/xml'
],
'content_type': [],
},
api_client=api_client
)
self.get_demographics_by_location_endpoint = _Endpoint(
settings={
'response_type': (Demographics,),
'auth': [
'oAuth2Password'
],
'endpoint_path': '/demographics-segmentation/v1/demographics/bylocation',
'operation_id': 'get_demographics_by_location',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'longitude',
'latitude',
'profile',
'filter',
'value_format',
'variable_level',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'longitude':
(str,),
'latitude':
(str,),
'profile':
(str,),
'filter':
(str,),
'value_format':
(str,),
'variable_level':
(str,),
},
'attribute_map': {
'longitude': 'longitude',
'latitude': 'latitude',
'profile': 'profile',
'filter': 'filter',
'value_format': 'valueFormat',
'variable_level': 'variableLevel',
},
'location_map': {
'longitude': 'query',
'latitude': 'query',
'profile': 'query',
'filter': 'query',
'value_format': 'query',
'variable_level': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'application/xml'
],
'content_type': [],
},
api_client=api_client
)
self.get_segmentation_by_address_endpoint = _Endpoint(
settings={
'response_type': (Segmentation,),
'auth': [
'oAuth2Password'
],
'endpoint_path': '/demographics-segmentation/v1/segmentation/byaddress',
'operation_id': 'get_segmentation_by_address',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'address',
'country',
],
'required': [
'address',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'address':
(str,),
'country':
(str,),
},
'attribute_map': {
'address': 'address',
'country': 'country',
},
'location_map': {
'address': 'query',
'country': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'application/xml'
],
'content_type': [],
},
api_client=api_client
)
self.get_segmentation_by_location_endpoint = _Endpoint(
settings={
'response_type': (Segmentation,),
'auth': [
'oAuth2Password'
],
'endpoint_path': '/demographics-segmentation/v1/segmentation/bylocation',
'operation_id': 'get_segmentation_by_location',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'longitude',
'latitude',
],
'required': [
'longitude',
'latitude',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'longitude':
(str,),
'latitude':
(str,),
},
'attribute_map': {
'longitude': 'longitude',
'latitude': 'latitude',
},
'location_map': {
'longitude': 'query',
'latitude': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'application/xml'
],
'content_type': [],
},
api_client=api_client
)
def get_demographics_advanced(
self,
demographics_advanced_request,
**kwargs
):
"""Demographics Advanced Endpoint # noqa: E501
Demographics Advanced Endpoint will return the aggregated values of the selected demographics variables of the regions falling inside a user provided geometry or travel time/distance boundaries. All the intersecting demographic boundaries will be snapped completely, and user will have option to request these boundaries in response. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_demographics_advanced(demographics_advanced_request, async_req=True)
>>> result = thread.get()
Args:
demographics_advanced_request (DemographicsAdvancedRequest):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Demographics
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['demographics_advanced_request'] = \
demographics_advanced_request
return self.get_demographics_advanced_endpoint.call_with_http_info(**kwargs)
def get_demographics_basic(
self,
**kwargs
):
"""Demographics Basic # noqa: E501
Demographics Basic Endpoint will return the aggregated values of the selected demographics variables of the regions falling inside the search radius. All the intersecting demographic boundaries will be snapped completely and user will have option to request these boundaries in response. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_demographics_basic(async_req=True)
>>> result = thread.get()
Keyword Args:
address (str): Address to be searched. [optional]
longitude (str): Longitude of the location. [optional]
latitude (str): Latitude of the location. [optional]
search_radius (str): Radius within which demographics details are required. Max. value is 52800 Feet or 10 miles. [optional]
search_radius_unit (str): Radius unit such as Feet, Kilometers, Miles or Meters . [optional]
travel_time (str): Travel Time based on ‘travelMode’ within which demographics details are required. Max. value is 1 hour.. [optional]
travel_time_unit (str): minutes,hours,seconds,milliseconds. Default is meters.Default is minutes.. [optional]
travel_distance (str): Travel Distance based on ‘travelMode’ within which demographics details are required. Max. value is 10 miles.. [optional]
travel_distance_unit (str): feet,kilometers,miles,meters. Default is feet.. [optional]
travel_mode (str): Default is driving.. [optional]
country (str): 3 digit ISO country code (Used in case address is mentioned).. [optional]
profile (str): Applicable on ranged variables. Returns top sorted result based on the input value.. [optional]
filter (str): If Y, demographic boundaries are returned in response.. [optional]
include_geometry (str): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Demographics
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_demographics_basic_endpoint.call_with_http_info(**kwargs)
def get_demographics_by_address(
self,
address,
**kwargs
):
"""Demographics By Address. # noqa: E501
Provides the demographic details around a specified address. GeoLife 'byaddress' service accepts address as an input to return a specific population segment's age group, ethnicity, income, purchasing behaviour, commuter patterns and more. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_demographics_by_address(address, async_req=True)
>>> result = thread.get()
Args:
address (str): The address to be searched.
Keyword Args:
country (str): 3 letter ISO code of the country to be searched.Allowed values USA,CAN,GBR,AUS.. [optional]
profile (str): Retrieves the sorted demographic data on the basis of pre-defined profiles that can display the top 3 or top 5 results (by location) either in ascending or descending order.Allowed values Top5Ascending,Top5Descending,Top3Ascending,Top3Descending. [optional]
filter (str): The 'filter' parameter retrieves the demographic data based upon specified input themes.. [optional]
value_format (str): The 'valueFormat' parameter is applicable for few ranged variables where percent & count both are available and filter response based on the input value.. [optional]
variable_level (str): The 'variableLevel' retrieves demographic facts in response based on the input value. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Demographics
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['address'] = \
address
return self.get_demographics_by_address_endpoint.call_with_http_info(**kwargs)
def get_demographics_by_boundary_ids(
self,
**kwargs
):
"""Demographics By Boundaryids. # noqa: E501
This endpoint will allow the user to request demographics details by census boundary id. Multiple comma separated boundary ids will be accepted. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_demographics_by_boundary_ids(async_req=True)
>>> result = thread.get()
Keyword Args:
boundary_ids (str): Accepts comma separated multiple boundary ids.. [optional]
profile (str): Applicable on ranged variables. Returns top sorted result based on the input value.. [optional]
filter (str): Accept the comma separated theme names and filter response based on value. Maximum 10 can be provided.. [optional]
value_format (str): Applicable for few ranged variables where percent & count both are available and filter response based on the input value.. [optional]
variable_level (str): Retrieves demographic facts in response based on the input value.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Demographics
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_demographics_by_boundary_ids_endpoint.call_with_http_info(**kwargs)
def get_demographics_by_location(
self,
**kwargs
):
"""Demographics By Location. # noqa: E501
Provides the demographic details around a specified location. GeoLife 'bylocation' service accepts longitude and latitude as an input to return a specific population segment's age group, ethnicity, income, purchasing behaviour, commuter patterns and more. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_demographics_by_location(async_req=True)
>>> result = thread.get()
Keyword Args:
longitude (str): Longitude of the location.. [optional]
latitude (str): Latitude of the location.. [optional]
profile (str): Retrieves the sorted demographic data on the basis of pre-defined profiles that can display the top 3 or top 5 results (by location) either in ascending or descending order.Allowed values Top5Ascending,Top5Descending,Top3Ascending,Top3Descending. [optional]
filter (str): The 'filter' parameter retrieves the demographic data based upon specified input themes.. [optional]
value_format (str): The 'valueFormat' parameter is applicable for few ranged variables where percent & count both are available and filter response based on the input value.. [optional]
variable_level (str): The 'variableLevel' retrieves demographic facts in response based on the input value. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Demographics
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_demographics_by_location_endpoint.call_with_http_info(**kwargs)
def get_segmentation_by_address(
self,
address,
**kwargs
):
"""Segmentation By Address. # noqa: E501
Provides the segmentation details around a specified address. GeoLife 'Segmentation by Address' service accepts address as an input to return the lifestyle characteristics of households in terms of their family status, children characteristics, income behaviors, financial preferences and interests. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_segmentation_by_address(address, async_req=True)
>>> result = thread.get()
Args:
address (str): The address to be searched.
Keyword Args:
country (str): 3 letter ISO code of the country to be searched.Allowed values USA,CAN,GBR,FRA,ITA,AUS,DEU.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Segmentation
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['address'] = \
address
return self.get_segmentation_by_address_endpoint.call_with_http_info(**kwargs)
def get_segmentation_by_location(
self,
longitude,
latitude,
**kwargs
):
"""Segmentation By Location. # noqa: E501
Provides the segmentation details around a specified location. GeoLife 'segmentation bylocation' service accepts longitude and latitude as an input to return the lifestyle characteristics of households in terms of their family status, children characteristics, income behaviors, financial preferences and interests. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_segmentation_by_location(longitude, latitude, async_req=True)
>>> result = thread.get()
Args:
longitude (str): Longitude of the location.
latitude (str): Latitude of the location.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Segmentation
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['longitude'] = \
longitude
kwargs['latitude'] = \
latitude
return self.get_segmentation_by_location_endpoint.call_with_http_info(**kwargs)
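# Usage sketch based on the docstrings above (client configuration and credentials
# are assumed and omitted; see the Precisely API documentation):
#
#   api = DemographicsServiceApi(configured_api_client)
#   segmentation = api.get_segmentation_by_location('<longitude>', '<latitude>')
#   demographics = api.get_demographics_by_address('<address>', country='USA')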
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import cv2
import imgproc
from scipy.spatial import distance
from PIL import ImageFont, ImageDraw, Image
###################
font_dir = "arial.ttf"
# ###################
def count_rec(box):
dis1 = int(np.ceil(distance.euclidean(box[0], box[1])))
dis2 = int(np.ceil(distance.euclidean(box[1], box[2])))
dis3 = int(np.ceil(distance.euclidean(box[2], box[3])))
dis4 = int(np.ceil(distance.euclidean(box[3], box[0])))
max_1 = max(dis1, dis3)
max_2 = max(dis2, dis4)
width = max(max_1,max_2)
height = min(max_1,max_2)
return width, height
def plus_box(box, plus_pixel):
box[0][1] -= plus_pixel
box[1][1] -= plus_pixel
box[2][1] += plus_pixel
box[3][1] += plus_pixel
return box
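# Geometry sketch (synthetic box, illustrative only): count_rec returns the longer
# and shorter side of a quadrilateral, and plus_box pads it vertically in place.
#
#   >>> box = np.array([[0, 0], [100, 0], [100, 20], [0, 20]])
#   >>> count_rec(box)
#   (100, 20)
#   >>> plus_box(box, 2)[0]
#   array([ 0, -2])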
def vietocr_text(img, boxes, detector):
pre_texts = []
img = np.array(img)
for i, box in enumerate(boxes):
width,height = count_rec(box)
box = plus_box(box, int(height * 0.1))
pts1 = np.float32(np.vstack((box[0], box[1], box[3], box[2])))
pts2 = np.float32([[0,0],[width,0],[0,height],[width,height]])
matrix = cv2.getPerspectiveTransform(pts1, pts2)
img_output = cv2.warpPerspective(img, matrix, (width, height))
rgb = cv2.cvtColor(img_output, cv2.COLOR_BGR2RGB)
rgb = Image.fromarray(rgb)
s = detector.predict(rgb)
pre_texts.append(str(s).upper())
return pre_texts
# borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/utils.py
def get_files(img_dir):
imgs, masks, xmls = list_files(img_dir)
return imgs, masks, xmls
def list_files(in_path):
img_files = []
mask_files = []
gt_files = []
for (dirpath, dirnames, filenames) in os.walk(in_path):
for file in filenames:
filename, ext = os.path.splitext(file)
ext = str.lower(ext)
if ext == '.jpg' or ext == '.jpeg' or ext == '.gif' or ext == '.png' or ext == '.pgm':
img_files.append(os.path.join(dirpath, file))
elif ext == '.bmp':
mask_files.append(os.path.join(dirpath, file))
elif ext == '.xml' or ext == '.gt' or ext == '.txt':
gt_files.append(os.path.join(dirpath, file))
elif ext == '.zip':
continue
# img_files.sort()
# mask_files.sort()
# gt_files.sort()
return img_files, mask_files, gt_files
def saveResult(img_file, img, boxes, pre_texts_, dirname='./result/', verticals=None, texts=True):
    """ Save text detection and recognition results for one image.
    Args:
        img_file (str): image file name
        img (array): raw image context
        boxes (array): detected regions, one quadrilateral (4 points) per detection
        pre_texts_ (list): recognised text for each box, written alongside it
        dirname (str): output directory for the annotated image and the .txt file
        verticals (list): optional per-box flags marking vertical text
        texts (bool): if True, draw the recognised text onto the image
    Return:
        None
    """
img = np.array(img)
# make result file list
filename, file_ext = os.path.splitext(os.path.basename(img_file))
# result directory
res_file = dirname + filename + '.jpg.txt'
res_img_file = dirname + filename + '.jpg'
if not os.path.isdir(dirname):
os.mkdir(dirname)
with open(res_file, 'w') as f:
for i, box in enumerate(boxes):
poly = np.array(box).astype(np.int32).reshape((-1))
strResult = ','.join([str(p) for p in poly])+ ','+ pre_texts_[i] + '\r\n'
f.write(strResult)
poly = poly.reshape(-1, 2)
cv2.polylines(img, [poly.reshape((-1, 1, 2))], True, color=(0, 255, 0), thickness=2)
ptColor = (0, 255, 255)
if verticals is not None:
if verticals[i]:
ptColor = (255, 0, 0)
if texts:
width,height = count_rec(box)
# font = cv2.FONT_HERSHEY_SIMPLEX
# font_scale = 0.5
b,g,r,a = 0,0,255,0
font = ImageFont.truetype(font_dir, int(height * 0.6))
img_pil = Image.fromarray(img)
draw = ImageDraw.Draw(img_pil)
draw.text((poly[0][0]+1, poly[0][1]-10), "{}".format(pre_texts_[i]), font = font, fill = (b, g, r, a))
# cv2.putText(img, "{}".format(pre_texts[i]), (poly[0][0]+1, poly[0][1]+1), font, font_scale, (0, 0, 0), thickness=1)
# cv2.putText(img, "{}".format(pre_texts[i]), tuple(poly[0]), font, font_scale, (0, 255, 255), thickness=1)
img = np.array(img_pil)
# Save result image
cv2.imwrite(res_img_file, img)
|
# this is for login and logout authentication
from rest_framework.renderers import JSONRenderer
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.authtoken.models import Token
from django.contrib.auth import login as django_login
from rest_framework.views import APIView
from django.shortcuts import render
# this is to send response to client
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt # to resolve csrf issue
from rest_framework.parsers import JSONParser
from rest_framework import status
from rest_framework.response import Response
from rest_framework import viewsets
# model and serializer import
from .serializer import *
from .models import *
from django.core.mail import send_mail
# importing "random" for random operations
import random
from django.db import Error
from userprofile.views import *
# ****************************************************
# USER REGISTRATION PROCESS
# ****************************************************
@csrf_exempt
def UserViewSet(request, format=None):
if request.method == "POST":
json_parser = JSONParser()
data = json_parser.parse(request)
print('ok')
serializer = MyUserSerializer(data=data)
if serializer.is_valid():
# generate otp
serializer.save()
# email_address = serializer.data.get('email_address')
# name = serializer.data.get('name')
# otp = GenerateOtp(phone_number)
# send mail to the email address
            # create an entry for the user in the profile table (a helper function to insert the data is still needed)
            # insert_user(phone_number, name)
            # redirect to the verification page -- handled on the front end
return JsonResponse(serializer.data, status=200)
else:
return JsonResponse(serializer.errors, status=400)
# **********************************************************
# post view for login; returns a token and basic user information
# *********************************************************
class LoginViewSet(ObtainAuthToken):
def post(self, request, *args, **kwargs):
# TO get data from the request object
email_address = request.data.get('username')
# validate that user exists in the database
email_address = User.objects.filter(email_address=email_address)
        # if the user does not exist in the database, ask them to register
if (email_address.count() == 0):
            content = 'User does not exist, please register'
return Response(content, status=status.HTTP_404_NOT_FOUND)
        # if the user exists in the database, check the verification status
else:
email_active = Varification.objects.filter(
email_address_id__in=email_address)
            # if there is no entry for the user in the verification table
if (email_active.count() == 0):
                # generate and send an OTP, then redirect to the verification page
# otp = GenerateOtp(user)
content = 'Email is not validated please validate'
return Response(content, status=status.HTTP_403_FORBIDDEN)
            # if an entry exists in the verification table, check its verification status
else:
                # check the verification status and respond accordingly
data = Varification.objects.get(
email_address_id__in=email_address)
print(data.email_varification)
                # if the verification table shows the email is not yet verified
if (data.email_varification != 'done'):
                    # generate and send an OTP, then redirect to the verification page
# otp = GenerateOtp(user)
content = 'Email is not validated please validate'
return Response(content, status=status.HTTP_403_FORBIDDEN)
                # if the verification is already recorded, proceed with login
else:
user_profile = Profile.objects.get(
email_address_id__in=email_address)
serializer = self.serializer_class(
data=request.data, context={'request': request})
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
token, created = Token.objects.get_or_create(user=user)
return Response({
'token': token.key,
'email_address': user.email_address,
'role': user_profile.role,
})
# **********************************************************
# post view for the email verification
# *********************************************************
class VarificationViewSet(viewsets.ViewSet):
permission_classes = [AllowAny]
def post_auth(self, request, *args, **kwargs):
# post method to validate the key
phone_number = request.data.get('phone_number')
otp = request.data.get('phone_otp')
otp = int(otp)
# get otp from the table
try:
data = PhoneOtp.objects.get(phone_number_id=phone_number)
# compare the otp
if (otp == data.phone_otp):
                # create an entry in the verification table
Varification.objects.filter(phone_number_id=phone_number).create(
phone_number_id=phone_number,
phone_varification='done'
)
                # verification successful, please login
                return Response('Verification successful, please login')
else:
                content = 'Validation unsuccessful - please check your OTP'
return Response(content, status.HTTP_401_UNAUTHORIZED)
except PhoneOtp.DoesNotExist:
content = 'No Data in the table'
return Response(content, status.HTTP_401_UNAUTHORIZED)
# ******************************************************************
# send mail function
# *******************************************************************
def SendEmail(email_address):
subject = "Email Varification"
message = "Please find the key here"
from_email = "alok_kumar@nanduniversity.com"
to_email = email_address
send_mail(subject, message, from_email, [to_email], fail_silently=False,)
# ******************************************************************
# generate OTP function
# *******************************************************************
def GenerateOtp(phone_number):
# return (random.randrange(100000, 999999))
# check otp exists in the otp table or not
try:
data = PhoneOtp.objects.get(phone_number_id=phone_number)
otp = (random.randrange(100000, 999999))
PhoneOtp.objects.filter(phone_number_id=phone_number).update(
phone_otp=otp,
counter=data.counter+1
)
return otp
except PhoneOtp.DoesNotExist:
# otp generated
otp = (random.randrange(100000, 999999))
# save otp to the otp table
PhoneOtp.objects.create(
phone_number_id=phone_number,
phone_otp=otp,
counter=1
)
return otp
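# ******************************************************************
# hedged usage sketch (added for illustration, not part of the original flow):
# one way GenerateOtp and send_mail could be combined so the generated OTP
# actually reaches the user's inbox. SendOtpEmail is a hypothetical helper
# name introduced here, not an existing function of this project.
# ******************************************************************
def SendOtpEmail(email_address, phone_number):
    # generate (or refresh) the OTP for this phone number
    otp = GenerateOtp(phone_number)
    subject = "Email Verification"
    message = "Your verification code is {}".format(otp)
    from_email = "alok_kumar@nanduniversity.com"
    send_mail(subject, message, from_email, [email_address], fail_silently=False)
    return otp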
|
from web3 import HTTPProvider
from .quorum import Web3Quorum, Web3QuorumMock
from .utils import enode_to_raft_id
HTTPProvider = HTTPProvider
Web3Quorum = Web3Quorum
Web3QuorumMock = Web3QuorumMock
enode_to_raft_id = enode_to_raft_id
|
# Maciej Izydorek
# program counts how many times it was run
import os.path
filename = 'count.txt'
def readNumber():
    try:
        with open(filename, 'r') as reader:
            number = int(reader.read()) # converts str to int
            return number
    except IOError:
        # no file - first run
        return 0
def writeNumber(number):
    with open(filename, 'w') as writer:
        writer.write(str(number)) # write takes str
# main
if not os.path.isfile(filename):
    # first run: the counter file does not exist yet, so initialise it
    # (this check must come after writeNumber is defined)
    print('File does not exist')
    writeNumber(0)
number = readNumber()
number += 1
print('Program has been run {} times'.format(number))
writeNumber(number)
|
import unittest
import os
import logging
import gzip
from htseq_tools.tools.merge_counts import merge_files
logger = logging.getLogger()
class TestMergeCounts(unittest.TestCase):
len_file = os.path.join(os.path.dirname(__file__),
'etc/test_lengths.txt')
ct_file_1 = os.path.join(os.path.dirname(__file__),
'etc/test_counts_1.txt')
ct_file_2 = os.path.join(os.path.dirname(__file__),
'etc/test_counts_2.txt')
exp_counts_file = os.path.join(os.path.dirname(__file__),
'etc/test_counts_merge_2_expected.txt.gz')
out_counts_file = os.path.join(os.path.dirname(__file__),
'etc/test_counts_merge.txt.gz')
def test_merge_files_one(self):
merge_files([self.ct_file_1], self.out_counts_file, logger)
with open(self.ct_file_1, 'rt') as fh, gzip.open(
self.out_counts_file, 'rt') as ofh:
exp = fh.read()
found = ofh.read()
self.assertEqual(exp, found)
os.remove(self.out_counts_file)
def test_merge_files_two(self):
merge_files([self.ct_file_1, self.ct_file_2],
self.out_counts_file, logger)
with gzip.open(self.exp_counts_file, 'rt') as fh, gzip.open(
self.out_counts_file, 'rt') as ofh:
exp = fh.read()
found = ofh.read()
self.assertEqual(exp, found)
os.remove(self.out_counts_file)
def setUp(self):
pass
def tearDown(self):
if os.path.exists(self.out_counts_file):
os.remove(self.out_counts_file)
|
import cairo
import os
from libqtile import bar
import base
BAT_DIR = '/sys/class/power_supply'
CHARGED = 'Full'
CHARGING = 'Charging'
DISCHARGING = 'Discharging'
UNKNOWN = 'Unknown'
BATTERY_INFO_FILES = {
'energy_now_file': ['energy_now', 'charge_now'],
'energy_full_file': ['energy_full', 'charge_full'],
'power_now_file': ['power_now', 'current_now'],
'status_file': ['status'],
}
def default_icon_path():
# default icons are in libqtile/resources/battery-icons
root = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2])
return os.path.join(root, 'resources', 'battery-icons')
class _Battery(base._TextBox):
''' Base battery class '''
filenames = {}
defaults = [
('battery_name', 'BAT0', 'ACPI name of a battery, usually BAT0'),
(
'status_file',
'status',
'Name of status file in'
' /sys/class/power_supply/battery_name'
),
(
'energy_now_file',
None,
'Name of file with the '
'current energy in /sys/class/power_supply/battery_name'
),
(
'energy_full_file',
None,
'Name of file with the maximum'
' energy in /sys/class/power_supply/battery_name'
),
(
'power_now_file',
None,
'Name of file with the current'
' power draw in /sys/class/power_supply/battery_name'
),
('update_delay', 1, 'The delay in seconds between updates'),
]
def __init__(self, **config):
base._TextBox.__init__(self, "BAT", bar.CALCULATED, **config)
self.add_defaults(_Battery.defaults)
def _load_file(self, name):
try:
path = os.path.join(BAT_DIR, self.battery_name, name)
with open(path, 'r') as f:
return f.read().strip()
except IOError:
if name == 'current_now':
return 0
return False
except Exception:
self.log.exception("Failed to get %s" % name)
def _get_param(self, name):
if name in self.filenames:
return self._load_file(self.filenames[name])
else:
## Don't have the file name cached, figure it out
file_list = BATTERY_INFO_FILES.get(name, [])
if getattr(self, name, None):
## If a file is manually specified, check it first
file_list.insert(0, getattr(self, name))
## Iterate over the possibilities, and return the first valid value
for file in file_list:
value = self._load_file(file)
if not (value in (False, None)):
self.filenames[name] = file
return value
## If we made it this far, we don't have a valid file. Just return 0.
return 0
def _get_info(self):
try:
info = {
'stat': self._get_param('status_file'),
'now': float(self._get_param('energy_now_file')),
'full': float(self._get_param('energy_full_file')),
'power': float(self._get_param('power_now_file')),
}
except TypeError:
return False
return info
class Battery(_Battery):
"""
A simple but flexible text-based battery widget.
"""
defaults = [
('low_foreground', 'FF0000', 'font color when battery is low'),
(
'format',
'{char} {percent:2.0%} {hour:d}:{min:02d}',
'Display format'
),
('charge_char', '^', 'Character to indicate the battery is charging'),
(
'discharge_char',
'V',
'Character to indicate the battery'
' is discharging'
),
(
'low_percentage',
0.10,
"0 < x < 1 at which to indicate battery is low with low_foreground"
),
('hide_threshold', None, 'Hide the text when there is enough energy'),
]
def __init__(self, **config):
_Battery.__init__(self, **config)
self.add_defaults(Battery.defaults)
self.timeout_add(self.update_delay, self.update)
self.update()
def _get_text(self):
info = self._get_info()
if info is False:
return 'Error'
## Set the charging character
try:
# hide the text when it's higher than threshold, but still
# display `full` when the battery is fully charged.
if self.hide_threshold and \
info['now'] / info['full'] * 100.0 >= \
self.hide_threshold and \
info['stat'] != CHARGED:
return ''
elif info['stat'] == DISCHARGING:
char = self.discharge_char
time = info['now'] / info['power']
elif info['stat'] == CHARGING:
char = self.charge_char
time = (info['full'] - info['now']) / info['power']
else:
return 'Full'
except ZeroDivisionError:
time = -1
## Calculate the battery percentage and time left
if time >= 0:
hour = int(time)
min = int(time * 60) % 60
else:
hour = -1
min = -1
percent = info['now'] / info['full']
if info['stat'] == DISCHARGING and percent < self.low_percentage:
self.layout.colour = self.low_foreground
else:
self.layout.colour = self.foreground
return self.format.format(
char=char,
percent=percent,
hour=hour,
min=min
)
def update(self):
if self.configured:
ntext = self._get_text()
if ntext != self.text:
self.text = ntext
self.bar.draw()
return True
class BatteryIcon(_Battery):
''' Battery life indicator widget '''
defaults = [
('theme_path', default_icon_path(), 'Path of the icons'),
('custom_icons', {}, 'dict containing key->filename icon map'),
]
def __init__(self, **config):
_Battery.__init__(self, **config)
self.add_defaults(BatteryIcon.defaults)
if self.theme_path:
self.width_type = bar.STATIC
self.width = 0
self.surfaces = {}
self.current_icon = 'battery-missing'
self.icons = dict([(x, '{0}.png'.format(x)) for x in (
'battery-missing',
'battery-caution',
'battery-low',
'battery-good',
'battery-full',
'battery-caution-charging',
'battery-low-charging',
'battery-good-charging',
'battery-full-charging',
'battery-full-charged',
)])
self.icons.update(self.custom_icons)
self.timeout_add(self.update_delay, self.update)
def _configure(self, qtile, bar):
base._TextBox._configure(self, qtile, bar)
self.setup_images()
def _get_icon_key(self):
key = 'battery'
info = self._get_info()
if info is False or not info.get('full'):
key += '-missing'
else:
percent = info['now'] / info['full']
if percent < .2:
key += '-caution'
elif percent < .4:
key += '-low'
elif percent < .8:
key += '-good'
else:
key += '-full'
if info['stat'] == CHARGING:
key += '-charging'
elif info['stat'] == CHARGED:
key += '-charged'
return key
def update(self):
if self.configured:
icon = self._get_icon_key()
if icon != self.current_icon:
self.current_icon = icon
self.draw()
return True
def draw(self):
if self.theme_path:
self.drawer.clear(self.background or self.bar.background)
self.drawer.ctx.set_source(self.surfaces[self.current_icon])
self.drawer.ctx.paint()
self.drawer.draw(self.offset, self.width)
else:
self.text = self.current_icon[8:]
base._TextBox.draw(self)
def setup_images(self):
for key, name in self.icons.iteritems():
try:
path = os.path.join(self.theme_path, name)
img = cairo.ImageSurface.create_from_png(path)
except cairo.Error:
self.theme_path = None
self.qtile.log.warning('Battery Icon switching to text mode')
return
input_width = img.get_width()
input_height = img.get_height()
sp = input_height / float(self.bar.height - 1)
width = input_width / sp
if width > self.width:
self.width = int(width) + self.actual_padding * 2
imgpat = cairo.SurfacePattern(img)
scaler = cairo.Matrix()
scaler.scale(sp, sp)
scaler.translate(self.actual_padding * -1, 0)
imgpat.set_matrix(scaler)
imgpat.set_filter(cairo.FILTER_BEST)
self.surfaces[key] = imgpat
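# A hedged usage sketch (kept commented out so this widget module stays
# import-safe): roughly how these widgets would be placed in a qtile
# config.py. `Screen` comes from libqtile.config; the exact import path
# for Battery/BatteryIcon depends on the qtile version in use.
#
# from libqtile import bar
# from libqtile.config import Screen
# screens = [
#     Screen(top=bar.Bar([
#         Battery(format='{char} {percent:2.0%} {hour:d}:{min:02d}'),
#         BatteryIcon(),
#     ], 24)),
# ]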
|
# create a path
path = BezierPath()
# move to a point
path.moveTo((100, 100))
# line to a point
path.lineTo((100, 200))
path.lineTo((120, 100))
# set stroke color to black
stroke(0)
# set no fill
fill(None)
# set the width of the stroke
strokeWidth(10)
# draw the path
drawPath(path)
# move the canvas
translate(100, 0)
# set a miter limit
miterLimit(50)
# draw the same path again
drawPath(path)
|
#!/usr/bin/env python
#
# A lightweight Telegram Bot running on Flask
#
# Copyright 2020 Rodion Nehoroshev <rodion.nehoroshev@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Telegram bot command data model"""
from typing import TYPE_CHECKING
from common.db import BaseModel, BigIntegerType, PersistableMixin, db
if TYPE_CHECKING:
from .telegram_bot import TelegramBot # noqa: F401
class BotCommand(BaseModel, PersistableMixin):
"""BotCommand object model from Telegram Bot API
https://core.telegram.org/bots/api#botcommand
This object represents a bot command.
"""
__tablename__ = "bot_command"
bot_id = db.Column(
BigIntegerType,
db.ForeignKey("telegram_bot.user_id"),
primary_key=True,
index=True,
autoincrement=False,
)
command = db.Column(db.String(50), primary_key=True, index=True, autoincrement=False)
description = db.Column(db.String(500))
bot = db.relationship("TelegramBot", back_populates="commands")
@staticmethod
def from_dict(d: dict, bot_id: int) -> "BotCommand": # pylint: disable=invalid-name
"""Constructs an object from a dict
:param d: A dictionary to construct the object from. Should contain a valid primary key
and an arbitrary number of model attributes. If an object with the given primary key
is found in the persistent storage, it will first be loaded, otherwise a new object
with default attribute values will be created. After that, any attribute values present
in `d` will update corresponding object fields.
:param bot_id: ID of a bot that owns the command
:return: An up-to-date data object.
"""
cmd = BotCommand.query.filter_by(
bot_id=bot_id, command=d["command"]
).first() or BotCommand(bot_id=bot_id, command=d["command"])
cmd.description = d.get("description", None)
return cmd
def to_dict(self) -> dict:
"""Returns a dict representation of an object"""
return {"command": self.command, "description": self.description}
BaseModel.BotCommand = BotCommand
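# A hedged usage sketch (assumption: `db` is a Flask-SQLAlchemy instance and an
# application context with created tables is active; the bot_id below is a
# placeholder). Kept commented out so importing this module has no side effects.
#
# cmd = BotCommand.from_dict({"command": "start", "description": "Start the bot"}, bot_id=123456789)
# db.session.add(cmd)
# db.session.commit()
# assert cmd.to_dict() == {"command": "start", "description": "Start the bot"}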
|
import json
import os
import boto3
from botocore.exceptions import ClientError, ProfileNotFound
from tweet.twitter import TwitterCredentials, TwitterClient
def get_secret(boto3_session, secret_name):
client = boto3_session.client(
service_name='secretsmanager',
region_name=boto3_session.region_name,
)
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
print("The requested secret " + secret_name + " was not found")
elif e.response['Error']['Code'] == 'InvalidRequestException':
print("The request was invalid due to:", e)
elif e.response['Error']['Code'] == 'InvalidParameterException':
print("The request had invalid params:", e)
else:
# Decrypted secret using the associated KMS CMK
# Depending on whether the secret was a string or binary, one of these fields will be populated
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
return secret
else:
binary_secret_data = get_secret_value_response['SecretBinary']
return binary_secret_data
def generate_boto3_session(aws_profile_name):
try:
boto3_session = boto3.Session(profile_name=aws_profile_name)
except ProfileNotFound:
region = os.environ['AWS_DEFAULT_REGION']
boto3_session = boto3.Session(region_name=region)
return boto3_session
def twitter_client_from_aws(boto3_session, aws_secret_name):
secret = json.loads(get_secret(boto3_session, aws_secret_name))
twitter_credentials = TwitterCredentials(secret['twitter-consumer-key'],
secret['twitter-consumer-secret'],
secret['twitter-access-token'],
secret['twitter-access-secret'])
return TwitterClient(twitter_credentials)
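# A hedged usage sketch: wiring the helpers above together. The profile name and
# secret name are hypothetical placeholders; running this requires valid AWS
# credentials and a secret containing the four twitter-* keys used above.
if __name__ == "__main__":
    session = generate_boto3_session("my-aws-profile")
    client = twitter_client_from_aws(session, "prod/twitter-credentials")
    print(type(client))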
|
"""Amazon S3 Module."""
import concurrent.futures
import csv
import logging
import time
import uuid
from itertools import repeat
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
import boto3 # type: ignore
import botocore.exceptions # type: ignore
import pandas as pd # type: ignore
import pandas.io.parsers # type: ignore
import pyarrow as pa # type: ignore
import pyarrow.lib # type: ignore
import pyarrow.parquet # type: ignore
import s3fs # type: ignore
from boto3.s3.transfer import TransferConfig # type: ignore
from pandas.io.common import infer_compression # type: ignore
from awswrangler import _data_types, _utils, catalog, exceptions
_COMPRESSION_2_EXT: Dict[Optional[str], str] = {None: "", "gzip": ".gz", "snappy": ".snappy"}
_logger: logging.Logger = logging.getLogger(__name__)
def get_bucket_region(bucket: str, boto3_session: Optional[boto3.Session] = None) -> str:
"""Get bucket region name.
Parameters
----------
bucket : str
Bucket name.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
str
Region code (e.g. 'us-east-1').
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> region = wr.s3.get_bucket_region('bucket-name')
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> region = wr.s3.get_bucket_region('bucket-name', boto3_session=boto3.Session())
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
_logger.debug(f"bucket: {bucket}")
region: str = client_s3.get_bucket_location(Bucket=bucket)["LocationConstraint"]
region = "us-east-1" if region is None else region
_logger.debug(f"region: {region}")
return region
def does_object_exist(path: str, boto3_session: Optional[boto3.Session] = None) -> bool:
"""Check if object exists on S3.
Parameters
----------
path: str
S3 path (e.g. s3://bucket/key).
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
bool
True if exists, False otherwise.
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> wr.s3.does_object_exist('s3://bucket/key_real')
True
>>> wr.s3.does_object_exist('s3://bucket/key_unreal')
False
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> wr.s3.does_object_exist('s3://bucket/key_real', boto3_session=boto3.Session())
True
>>> wr.s3.does_object_exist('s3://bucket/key_unreal', boto3_session=boto3.Session())
False
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
bucket: str
key: str
bucket, key = path.replace("s3://", "").split("/", 1)
try:
client_s3.head_object(Bucket=bucket, Key=key)
return True
except botocore.exceptions.ClientError as ex:
if ex.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
return False
raise ex # pragma: no cover
def list_objects(path: str, boto3_session: Optional[boto3.Session] = None) -> List[str]:
"""List Amazon S3 objects from a prefix.
Parameters
----------
path : str
S3 path (e.g. s3://bucket/prefix).
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
List[str]
List of objects paths.
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> wr.s3.list_objects('s3://bucket/prefix')
['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> wr.s3.list_objects('s3://bucket/prefix', boto3_session=boto3.Session())
['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
paginator = client_s3.get_paginator("list_objects_v2")
bucket: str
prefix: str
bucket, prefix = _utils.parse_path(path=path)
response_iterator = paginator.paginate(Bucket=bucket, Prefix=prefix, PaginationConfig={"PageSize": 1000})
paths: List[str] = []
for page in response_iterator:
contents: Optional[List] = page.get("Contents")
if contents is not None:
for content in contents:
if (content is not None) and ("Key" in content):
key: str = content["Key"]
paths.append(f"s3://{bucket}/{key}")
return paths
def _path2list(path: Union[str, List[str]], boto3_session: Optional[boto3.Session]) -> List[str]:
if isinstance(path, str): # prefix
paths: List[str] = list_objects(path=path, boto3_session=boto3_session)
elif isinstance(path, list):
paths = path
else:
raise exceptions.InvalidArgumentType(f"{type(path)} is not a valid path type. Please, use str or List[str].")
return paths
def delete_objects(
path: Union[str, List[str]], use_threads: bool = True, boto3_session: Optional[boto3.Session] = None
) -> None:
"""Delete Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.delete_objects(['s3://bucket/key0', 's3://bucket/key1']) # Delete both objects
>>> wr.s3.delete_objects('s3://bucket/prefix') # Delete all objects under the received prefix
"""
paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
if len(paths) < 1:
return
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
buckets: Dict[str, List[str]] = _split_paths_by_bucket(paths=paths)
for bucket, keys in buckets.items():
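        # S3's DeleteObjects API accepts at most 1,000 keys per request, hence the chunking below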
chunks: List[List[str]] = _utils.chunkify(lst=keys, max_length=1_000)
if use_threads is False:
for chunk in chunks:
_delete_objects(bucket=bucket, keys=chunk, client_s3=client_s3)
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
executor.map(_delete_objects, repeat(bucket), chunks, repeat(client_s3))
def _split_paths_by_bucket(paths: List[str]) -> Dict[str, List[str]]:
buckets: Dict[str, List[str]] = {}
bucket: str
key: str
for path in paths:
bucket, key = _utils.parse_path(path=path)
if bucket not in buckets:
buckets[bucket] = []
buckets[bucket].append(key)
return buckets
def _delete_objects(bucket: str, keys: List[str], client_s3: boto3.client) -> None:
_logger.debug(f"len(keys): {len(keys)}")
batch: List[Dict[str, str]] = [{"Key": key} for key in keys]
client_s3.delete_objects(Bucket=bucket, Delete={"Objects": batch})
def describe_objects(
path: Union[str, List[str]],
wait_time: Optional[Union[int, float]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Dict[str, Any]]:
"""Describe Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
Fetch attributes like ContentLength, DeleteMarker, LastModified, ContentType, etc
The full list of attributes can be explored under the boto3 head_object documentation:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
wait_time : Union[int,float], optional
        How much time (seconds) Wrangler should try to reach these objects.
        Very useful to overcome eventual consistency issues.
`None` means only a single try will be done.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Dict[str, Dict[str, Any]]
Return a dictionary of objects returned from head_objects where the key is the object path.
The response object can be explored here:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object
Examples
--------
>>> import awswrangler as wr
>>> descs0 = wr.s3.describe_objects(['s3://bucket/key0', 's3://bucket/key1']) # Describe both objects
>>> descs1 = wr.s3.describe_objects('s3://bucket/prefix') # Describe all objects under the prefix
    >>> descs2 = wr.s3.describe_objects('s3://bucket/prefix', wait_time=30) # Overcoming eventual consistency issues
"""
paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
if len(paths) < 1:
return {}
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
resp_list: List[Tuple[str, Dict[str, Any]]]
if use_threads is False:
resp_list = [_describe_object(path=p, wait_time=wait_time, client_s3=client_s3) for p in paths]
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
resp_list = list(executor.map(_describe_object, paths, repeat(wait_time), repeat(client_s3)))
desc_list: Dict[str, Dict[str, Any]] = dict(resp_list)
return desc_list
def _describe_object(
path: str, wait_time: Optional[Union[int, float]], client_s3: boto3.client
) -> Tuple[str, Dict[str, Any]]:
wait_time = int(wait_time) if isinstance(wait_time, float) else wait_time
tries: int = wait_time if (wait_time is not None) and (wait_time > 0) else 1
bucket: str
key: str
bucket, key = _utils.parse_path(path=path)
desc: Dict[str, Any] = {}
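    # Retry head_object roughly once per second for up to `wait_time` seconds to ride out eventual consistency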
for i in range(tries, 0, -1):
try:
desc = client_s3.head_object(Bucket=bucket, Key=key)
break
except botocore.exceptions.ClientError as e: # pragma: no cover
if e.response["ResponseMetadata"]["HTTPStatusCode"] == 404: # Not Found
_logger.debug(f"Object not found. {i} seconds remaining to wait.")
if i == 1: # Last try, there is no more need to sleep
break
time.sleep(1)
else:
raise e
return path, desc
def size_objects(
path: Union[str, List[str]],
wait_time: Optional[Union[int, float]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Optional[int]]:
"""Get the size (ContentLength) in bytes of Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
wait_time : Union[int,float], optional
        How much time (seconds) Wrangler should try to reach these objects.
        Very useful to overcome eventual consistency issues.
`None` means only a single try will be done.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Dict[str, Optional[int]]
Dictionary where the key is the object path and the value is the object size.
Examples
--------
>>> import awswrangler as wr
>>> sizes0 = wr.s3.size_objects(['s3://bucket/key0', 's3://bucket/key1']) # Get the sizes of both objects
>>> sizes1 = wr.s3.size_objects('s3://bucket/prefix') # Get the sizes of all objects under the received prefix
    >>> sizes2 = wr.s3.size_objects('s3://bucket/prefix', wait_time=30) # Overcoming eventual consistency issues
"""
desc_list: Dict[str, Dict[str, Any]] = describe_objects(
path=path, wait_time=wait_time, use_threads=use_threads, boto3_session=boto3_session
)
size_list: Dict[str, Optional[int]] = {k: d.get("ContentLength", None) for k, d in desc_list.items()}
return size_list
def to_csv( # pylint: disable=too-many-arguments
df: pd.DataFrame,
path: str,
sep: str = ",",
index: bool = True,
columns: Optional[List[str]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
dataset: bool = False,
partition_cols: Optional[List[str]] = None,
mode: Optional[str] = None,
database: Optional[str] = None,
table: Optional[str] = None,
dtype: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
**pandas_kwargs,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
"""Write CSV file or dataset on Amazon S3.
    The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).
Note
----
    The table name and all column names will be automatically sanitized using
`wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
Amazon S3 path (e.g. s3://bucket/filename.csv).
sep : str
String of length 1. Field delimiter for the output file.
index : bool
Write row names (index).
columns : List[str], optional
Columns to write.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 Session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
dataset: bool
        If True store a CSV dataset instead of a single file.
        If True, enables all of the following arguments:
        partition_cols, mode, database, table, description, parameters, columns_comments.
partition_cols: List[str], optional
List of column names that will be used to create partitions. Only takes effect if dataset=True.
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
database : str, optional
Glue/Athena catalog: Database name.
table : str, optional
Glue/Athena catalog: Table name.
dtype: Dict[str, str], optional
        Dictionary of column names and Athena/Glue types to be cast.
Useful when you have columns with undetermined or mixed data types.
Only takes effect if dataset=True.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
description: str, optional
Glue/Athena catalog: Table description
parameters: Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments: Dict[str, str], optional
Glue/Athena catalog:
        Column names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
pandas_kwargs:
keyword arguments forwarded to pandas.DataFrame.to_csv()
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_csv.html
Returns
-------
    Dict[str, Union[List[str], Dict[str, List[str]]]]
        Dictionary with:
        'paths': List of all stored files paths on S3.
        'partitions_values': Dictionary of partitions added with keys as S3 path locations
        and values as a list of partitions values as str.
Examples
--------
Writing single file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing single file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
... }
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing partitioned dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2']
... )
{
'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
        'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset to S3 with metadata on Athena/Glue Catalog.
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2'],
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... )
{
'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
        'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset casting empty column data type
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B'],
... 'col3': [None, None, None]
... }),
... path='s3://bucket/prefix',
... dataset=True,
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... dtype={'col3': 'date'}
... )
{
'paths': ['s3://.../x.csv'],
        'partitions_values': {}
}
"""
if (database is None) ^ (table is None):
raise exceptions.InvalidArgumentCombination(
"Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
)
if df.empty is True:
raise exceptions.EmptyDataFrame()
session: boto3.Session = _utils.ensure_session(session=boto3_session)
partition_cols = partition_cols if partition_cols else []
dtype = dtype if dtype else {}
columns_comments = columns_comments if columns_comments else {}
partitions_values: Dict[str, List[str]] = {}
fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
if dataset is False:
if partition_cols:
raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
if mode is not None:
raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
if any(arg is not None for arg in (database, table, description, parameters)):
raise exceptions.InvalidArgumentCombination(
"Please pass dataset=True to be able to use any one of these "
"arguments: database, table, description, parameters, "
"columns_comments."
)
pandas_kwargs["sep"] = sep
pandas_kwargs["index"] = index
pandas_kwargs["columns"] = columns
_to_text(file_format="csv", df=df, path=path, fs=fs, **pandas_kwargs)
paths = [path]
else:
mode = "append" if mode is None else mode
exist: bool = False
if columns:
df = df[columns]
if (database is not None) and (table is not None): # Normalize table to respect Athena's standards
df = catalog.sanitize_dataframe_columns_names(df=df)
partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}
exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)
if (exist is True) and (mode in ("append", "overwrite_partitions")):
for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():
dtype[k] = v
df = catalog.drop_duplicated_columns(df=df)
paths, partitions_values = _to_csv_dataset(
df=df,
path=path,
index=index,
sep=sep,
fs=fs,
use_threads=use_threads,
partition_cols=partition_cols,
dtype=dtype,
mode=mode,
boto3_session=session,
)
if (database is not None) and (table is not None):
columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
df=df, index=index, partition_cols=partition_cols, dtype=dtype, index_left=True
)
if (exist is False) or (mode == "overwrite"):
catalog.create_csv_table(
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
mode="overwrite",
sep=sep,
)
if partitions_values:
_logger.debug(f"partitions_values:\n{partitions_values}")
catalog.add_csv_partitions(
database=database, table=table, partitions_values=partitions_values, boto3_session=session, sep=sep
)
return {"paths": paths, "partitions_values": partitions_values}
def _to_csv_dataset(
df: pd.DataFrame,
path: str,
index: bool,
sep: str,
fs: s3fs.S3FileSystem,
use_threads: bool,
mode: str,
dtype: Dict[str, str],
partition_cols: Optional[List[str]] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[List[str], Dict[str, List[str]]]:
paths: List[str] = []
partitions_values: Dict[str, List[str]] = {}
path = path if path[-1] == "/" else f"{path}/"
if mode not in ["append", "overwrite", "overwrite_partitions"]:
raise exceptions.InvalidArgumentValue(
f"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions."
)
if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)
df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)
_logger.debug(f"dtypes: {df.dtypes}")
if not partition_cols:
file_path: str = f"{path}{uuid.uuid4().hex}.csv"
_to_text(
file_format="csv",
df=df,
path=file_path,
fs=fs,
quoting=csv.QUOTE_NONE,
escapechar="\\",
header=False,
date_format="%Y-%m-%d %H:%M:%S.%f",
index=index,
sep=sep,
)
paths.append(file_path)
else:
for keys, subgroup in df.groupby(by=partition_cols, observed=True):
subgroup = subgroup.drop(partition_cols, axis="columns")
keys = (keys,) if not isinstance(keys, tuple) else keys
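            # Build a Hive-style partition path: col_name=value/col_name=value/...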
subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
prefix: str = f"{path}{subdir}/"
if mode == "overwrite_partitions":
delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)
file_path = f"{prefix}{uuid.uuid4().hex}.csv"
_to_text(
file_format="csv",
df=subgroup,
path=file_path,
fs=fs,
quoting=csv.QUOTE_NONE,
escapechar="\\",
header=False,
date_format="%Y-%m-%d %H:%M:%S.%f",
index=index,
sep=sep,
)
paths.append(file_path)
partitions_values[prefix] = [str(k) for k in keys]
return paths, partitions_values
def to_json(
df: pd.DataFrame,
path: str,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
**pandas_kwargs,
) -> None:
"""Write JSON file on Amazon S3.
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
Amazon S3 path (e.g. s3://bucket/filename.csv).
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 Session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
pandas_kwargs:
        keyword arguments forwarded to pandas.DataFrame.to_json()
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html
Returns
-------
None
None.
Examples
--------
Writing JSON file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_json(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/filename.json',
... )
    Writing JSON file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_json(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/filename.json',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
... }
... )
"""
return _to_text(
file_format="json",
df=df,
path=path,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
**pandas_kwargs,
)
def _to_text(
file_format: str,
df: pd.DataFrame,
path: str,
fs: Optional[s3fs.S3FileSystem] = None,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
**pandas_kwargs,
) -> None:
if df.empty is True: # pragma: no cover
raise exceptions.EmptyDataFrame()
if fs is None:
fs = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
with fs.open(path, "w") as f:
if file_format == "csv":
df.to_csv(f, **pandas_kwargs)
elif file_format == "json":
df.to_json(f, **pandas_kwargs)
def to_parquet( # pylint: disable=too-many-arguments
df: pd.DataFrame,
path: str,
index: bool = False,
compression: Optional[str] = "snappy",
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
dataset: bool = False,
partition_cols: Optional[List[str]] = None,
mode: Optional[str] = None,
database: Optional[str] = None,
table: Optional[str] = None,
dtype: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
"""Write Parquet file or dataset on Amazon S3.
    The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).
Note
----
    The table name and all column names will be automatically sanitized using
`wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
S3 path (for file e.g. ``s3://bucket/prefix/filename.parquet``) (for dataset e.g. ``s3://bucket/prefix``).
index : bool
True to store the DataFrame index in file, otherwise False to ignore it.
compression: str, optional
Compression style (``None``, ``snappy``, ``gzip``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
dataset: bool
If True store a parquet dataset instead of a single file.
        If True, enables all of the following arguments:
        partition_cols, mode, database, table, description, parameters, columns_comments.
partition_cols: List[str], optional
List of column names that will be used to create partitions. Only takes effect if dataset=True.
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
database : str, optional
Glue/Athena catalog: Database name.
table : str, optional
Glue/Athena catalog: Table name.
dtype: Dict[str, str], optional
        Dictionary of column names and Athena/Glue types to be cast.
Useful when you have columns with undetermined or mixed data types.
Only takes effect if dataset=True.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
description: str, optional
Glue/Athena catalog: Table description
parameters: Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments: Dict[str, str], optional
Glue/Athena catalog:
        Column names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
Returns
-------
Dict[str, Union[List[str], Dict[str, List[str]]]]
Dictionary with:
'paths': List of all stored files paths on S3.
'partitions_values': Dictionary of partitions added with keys as S3 path locations
and values as a list of partitions values as str.
Examples
--------
Writing single file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.parquet',
... )
{
'paths': ['s3://bucket/prefix/my_file.parquet'],
'partitions_values': {}
}
Writing single file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.parquet',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
... }
... )
{
'paths': ['s3://bucket/prefix/my_file.parquet'],
'partitions_values': {}
}
Writing partitioned dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2']
... )
{
'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
        'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset to S3 with metadata on Athena/Glue Catalog.
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2'],
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... )
{
'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
        'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset casting empty column data type
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B'],
... 'col3': [None, None, None]
... }),
... path='s3://bucket/prefix',
... dataset=True,
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... dtype={'col3': 'date'}
... )
{
'paths': ['s3://.../x.parquet'],
        'partitions_values': {}
}
"""
if (database is None) ^ (table is None):
raise exceptions.InvalidArgumentCombination(
"Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
)
if df.empty is True:
raise exceptions.EmptyDataFrame()
session: boto3.Session = _utils.ensure_session(session=boto3_session)
partition_cols = partition_cols if partition_cols else []
dtype = dtype if dtype else {}
columns_comments = columns_comments if columns_comments else {}
partitions_values: Dict[str, List[str]] = {}
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
compression_ext: Optional[str] = _COMPRESSION_2_EXT.get(compression, None)
if compression_ext is None:
raise exceptions.InvalidCompression(f"{compression} is invalid, please use None, snappy or gzip.")
if dataset is False:
if partition_cols:
raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
if mode is not None:
raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
if any(arg is not None for arg in (database, table, description, parameters)):
raise exceptions.InvalidArgumentCombination(
"Please pass dataset=True to be able to use any one of these "
"arguments: database, table, description, parameters, "
"columns_comments."
)
paths = [
_to_parquet_file(
df=df, path=path, schema=None, index=index, compression=compression, cpus=cpus, fs=fs, dtype={}
)
]
else:
mode = "append" if mode is None else mode
exist: bool = False
if (database is not None) and (table is not None): # Normalize table to respect Athena's standards
df = catalog.sanitize_dataframe_columns_names(df=df)
partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}
exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)
if (exist is True) and (mode in ("append", "overwrite_partitions")):
for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():
dtype[k] = v
df = catalog.drop_duplicated_columns(df=df)
paths, partitions_values = _to_parquet_dataset(
df=df,
path=path,
index=index,
compression=compression,
compression_ext=compression_ext,
cpus=cpus,
fs=fs,
use_threads=use_threads,
partition_cols=partition_cols,
dtype=dtype,
mode=mode,
boto3_session=session,
)
if (database is not None) and (table is not None):
columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
df=df, index=index, partition_cols=partition_cols, dtype=dtype
)
if (exist is False) or (mode == "overwrite"):
catalog.create_parquet_table(
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
compression=compression,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
mode="overwrite",
)
if partitions_values:
_logger.debug(f"partitions_values:\n{partitions_values}")
catalog.add_parquet_partitions(
database=database,
table=table,
partitions_values=partitions_values,
compression=compression,
boto3_session=session,
)
return {"paths": paths, "partitions_values": partitions_values}
def _to_parquet_dataset(
df: pd.DataFrame,
path: str,
index: bool,
compression: Optional[str],
compression_ext: str,
cpus: int,
fs: s3fs.S3FileSystem,
use_threads: bool,
mode: str,
dtype: Dict[str, str],
partition_cols: Optional[List[str]] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[List[str], Dict[str, List[str]]]:
paths: List[str] = []
partitions_values: Dict[str, List[str]] = {}
path = path if path[-1] == "/" else f"{path}/"
if mode not in ["append", "overwrite", "overwrite_partitions"]:
raise exceptions.InvalidArgumentValue(
f"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions."
)
if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)
df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)
schema: pa.Schema = _data_types.pyarrow_schema_from_pandas(
df=df, index=index, ignore_cols=partition_cols, dtype=dtype
)
_logger.debug(f"schema: {schema}")
if not partition_cols:
file_path: str = f"{path}{uuid.uuid4().hex}{compression_ext}.parquet"
_to_parquet_file(
df=df, schema=schema, path=file_path, index=index, compression=compression, cpus=cpus, fs=fs, dtype=dtype
)
paths.append(file_path)
else:
for keys, subgroup in df.groupby(by=partition_cols, observed=True):
subgroup = subgroup.drop(partition_cols, axis="columns")
keys = (keys,) if not isinstance(keys, tuple) else keys
subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
prefix: str = f"{path}{subdir}/"
if mode == "overwrite_partitions":
delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)
file_path = f"{prefix}{uuid.uuid4().hex}{compression_ext}.parquet"
_to_parquet_file(
df=subgroup,
schema=schema,
path=file_path,
index=index,
compression=compression,
cpus=cpus,
fs=fs,
dtype=dtype,
)
paths.append(file_path)
partitions_values[prefix] = [str(k) for k in keys]
return paths, partitions_values
def _to_parquet_file(
df: pd.DataFrame,
path: str,
schema: pa.Schema,
index: bool,
compression: Optional[str],
cpus: int,
fs: s3fs.S3FileSystem,
dtype: Dict[str, str],
) -> str:
table: pa.Table = pyarrow.Table.from_pandas(df=df, schema=schema, nthreads=cpus, preserve_index=index, safe=True)
for col_name, col_type in dtype.items():
if col_name in table.column_names:
col_index = table.column_names.index(col_name)
pyarrow_dtype = _data_types.athena2pyarrow(col_type)
field = pa.field(name=col_name, type=pyarrow_dtype)
table = table.set_column(col_index, field, table.column(col_name).cast(pyarrow_dtype))
_logger.debug(f"Casting column {col_name} ({col_index}) to {col_type} ({pyarrow_dtype})")
pyarrow.parquet.write_table(
table=table,
where=path,
write_statistics=True,
use_dictionary=True,
filesystem=fs,
coerce_timestamps="ms",
compression=compression,
flavor="spark",
)
return path
def read_csv(
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read CSV file(s) from from a received S3 prefix or list of S3 objects paths.
Note
----
For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
chunksize: int, optional
        If specified, return a generator where chunksize is the number of rows to include in each chunk.
pandas_kwargs:
keyword arguments forwarded to pandas.read_csv().
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunksize != None`.
Examples
--------
Reading all CSV files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_csv(path='s3://bucket/prefix/')
Reading all CSV files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_csv(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
... }
... )
Reading all CSV files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_csv(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'])
Reading in chunks of 100 lines
>>> import awswrangler as wr
>>> dfs = wr.s3.read_csv(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'], chunksize=100)
>>> for df in dfs:
>>> print(df) # 100 lines Pandas DataFrame
"""
return _read_text(
parser_func=pd.read_csv,
path=path,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
chunksize=chunksize,
**pandas_kwargs,
)
def read_fwf(
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read fixed-width formatted file(s) from from a received S3 prefix or list of S3 objects paths.
Note
----
For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
chunksize: int, optional
        If specified, return a generator where chunksize is the number of rows to include in each chunk.
pandas_kwargs:
keyword arguments forwarded to pandas.read_fwf().
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_fwf.html
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunksize != None`.
Examples
--------
Reading all fixed-width formatted (FWF) files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_fwf(path='s3://bucket/prefix/')
Reading all fixed-width formatted (FWF) files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_fwf(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
... }
... )
Reading all fixed-width formatted (FWF) files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_fwf(path=['s3://bucket/filename0.txt', 's3://bucket/filename1.txt'])
Reading in chunks of 100 lines
>>> import awswrangler as wr
>>> dfs = wr.s3.read_fwf(path=['s3://bucket/filename0.txt', 's3://bucket/filename1.txt'], chunksize=100)
>>> for df in dfs:
>>> print(df) # 100 lines Pandas DataFrame
"""
return _read_text(
parser_func=pd.read_fwf,
path=path,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
chunksize=chunksize,
**pandas_kwargs,
)
def read_json(
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read JSON file(s) from from a received S3 prefix or list of S3 objects paths.
Note
----
For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session is None.
    s3_additional_kwargs:
        Forwarded to s3fs; useful for server-side encryption.
        https://s3fs.readthedocs.io/en/latest/#serverside-encryption
    chunksize: int, optional
        If specified, return a generator where chunksize is the number of rows to include in each chunk.
    pandas_kwargs:
        Keyword arguments forwarded to pandas.read_json().
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_json.html
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunksize != None`.
Examples
--------
Reading all JSON files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_json(path='s3://bucket/prefix/')
Reading all JSON files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_json(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
    ...         'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading all JSON files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_json(path=['s3://bucket/filename0.json', 's3://bucket/filename1.json'])
Reading in chunks of 100 lines
>>> import awswrangler as wr
>>> dfs = wr.s3.read_json(path=['s3://bucket/filename0.json', 's3://bucket/filename1.json'], chunksize=100)
>>> for df in dfs:
>>> print(df) # 100 lines Pandas DataFrame
"""
return _read_text(
parser_func=pd.read_json,
path=path,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
chunksize=chunksize,
**pandas_kwargs,
)
def _read_text(
parser_func: Callable,
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
if "iterator" in pandas_kwargs:
raise exceptions.InvalidArgument("Please, use chunksize instead of iterator.")
paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
if chunksize is not None:
dfs: Iterator[pd.DataFrame] = _read_text_chunksize(
parser_func=parser_func,
paths=paths,
boto3_session=boto3_session,
chunksize=chunksize,
pandas_args=pandas_kwargs,
s3_additional_kwargs=s3_additional_kwargs,
)
return dfs
if use_threads is False:
df: pd.DataFrame = pd.concat(
objs=[
_read_text_full(
parser_func=parser_func,
path=p,
boto3_session=boto3_session,
pandas_args=pandas_kwargs,
s3_additional_kwargs=s3_additional_kwargs,
)
for p in paths
],
ignore_index=True,
sort=False,
)
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
df = pd.concat(
objs=executor.map(
_read_text_full,
repeat(parser_func),
paths,
repeat(boto3_session),
repeat(pandas_kwargs),
repeat(s3_additional_kwargs),
),
ignore_index=True,
sort=False,
)
return df
def _read_text_chunksize(
parser_func: Callable,
paths: List[str],
boto3_session: boto3.Session,
chunksize: int,
pandas_args: Dict[str, Any],
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Iterator[pd.DataFrame]:
fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
for path in paths:
_logger.debug(f"path: {path}")
if pandas_args.get("compression", "infer") == "infer":
pandas_args["compression"] = infer_compression(path, compression="infer")
with fs.open(path, "rb") as f:
reader: pandas.io.parsers.TextFileReader = parser_func(f, chunksize=chunksize, **pandas_args)
for df in reader:
yield df
def _read_text_full(
parser_func: Callable,
path: str,
boto3_session: boto3.Session,
pandas_args: Dict[str, Any],
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> pd.DataFrame:
fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
if pandas_args.get("compression", "infer") == "infer":
pandas_args["compression"] = infer_compression(path, compression="infer")
with fs.open(path, "rb") as f:
return parser_func(f, **pandas_args)
def _read_parquet_init(
path: Union[str, List[str]],
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
categories: List[str] = None,
validate_schema: bool = True,
dataset: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> pyarrow.parquet.ParquetDataset:
"""Encapsulate all initialization before the use of the pyarrow.parquet.ParquetDataset."""
if dataset is False:
path_or_paths: Union[str, List[str]] = _path2list(path=path, boto3_session=boto3_session)
elif isinstance(path, str):
path_or_paths = path[:-1] if path.endswith("/") else path
else:
path_or_paths = path
_logger.debug(f"path_or_paths: {path_or_paths}")
fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
data: pyarrow.parquet.ParquetDataset = pyarrow.parquet.ParquetDataset(
path_or_paths=path_or_paths,
filesystem=fs,
metadata_nthreads=cpus,
filters=filters,
read_dictionary=categories,
validate_schema=validate_schema,
)
return data
def read_parquet(
path: Union[str, List[str]],
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
columns: Optional[List[str]] = None,
validate_schema: bool = True,
chunked: bool = False,
dataset: bool = False,
categories: List[str] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read Apache Parquet file(s) from from a received S3 prefix or list of S3 objects paths.
The concept of Dataset goes beyond the simple idea of files and enable more
complex features like partitioning and catalog integration (AWS Glue Catalog).
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
columns : List[str], optional
Names of columns to read from the file(s).
validate_schema:
Check that individual file schemas are all the same / compatible. Schemas within a
folder prefix should all be the same. Disable if you have schemas that are different
and want to disable this check.
chunked : bool
If True will break the data in smaller DataFrames (Non deterministic number of lines).
Otherwise return a single DataFrame with the whole data.
dataset: bool
If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
categories: List[str], optional
List of columns names that should be returned as pandas.Categorical.
Recommended for memory restricted environments.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session is None.
    s3_additional_kwargs:
        Forwarded to s3fs; useful for server-side encryption.
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunked=True`.
Examples
--------
Reading all Parquet files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(path='s3://bucket/prefix/')
Reading all Parquet files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
    ...         'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading all Parquet files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(path=['s3://bucket/filename0.parquet', 's3://bucket/filename1.parquet'])
Reading in chunks
>>> import awswrangler as wr
    >>> dfs = wr.s3.read_parquet(path=['s3://bucket/filename0.parquet', 's3://bucket/filename1.parquet'], chunked=True)
>>> for df in dfs:
>>> print(df) # Smaller Pandas DataFrame
"""
data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
path=path,
filters=filters,
dataset=dataset,
categories=categories,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
validate_schema=validate_schema,
)
if chunked is False:
return _read_parquet(
data=data, columns=columns, categories=categories, use_threads=use_threads, validate_schema=validate_schema
)
return _read_parquet_chunked(data=data, columns=columns, categories=categories, use_threads=use_threads)
def _read_parquet(
data: pyarrow.parquet.ParquetDataset,
columns: Optional[List[str]] = None,
categories: List[str] = None,
use_threads: bool = True,
validate_schema: bool = True,
) -> pd.DataFrame:
tables: List[pa.Table] = []
for piece in data.pieces:
table: pa.Table = piece.read(
columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False
)
tables.append(table)
promote: bool = not validate_schema
table = pa.lib.concat_tables(tables, promote=promote)
return table.to_pandas(
use_threads=use_threads,
split_blocks=True,
self_destruct=True,
integer_object_nulls=False,
date_as_object=True,
ignore_metadata=True,
categories=categories,
types_mapper=_data_types.pyarrow2pandas_extension,
)
def _read_parquet_chunked(
data: pyarrow.parquet.ParquetDataset,
columns: Optional[List[str]] = None,
categories: List[str] = None,
use_threads: bool = True,
) -> Iterator[pd.DataFrame]:
for piece in data.pieces:
table: pa.Table = piece.read(
columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False
)
yield table.to_pandas(
use_threads=use_threads,
split_blocks=True,
self_destruct=True,
integer_object_nulls=False,
date_as_object=True,
ignore_metadata=True,
categories=categories,
types_mapper=_data_types.pyarrow2pandas_extension,
)
def read_parquet_metadata(
path: Union[str, List[str]],
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
dataset: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[Dict[str, str], Optional[Dict[str, str]]]:
"""Read Apache Parquet file(s) metadata from from a received S3 prefix or list of S3 objects paths.
The concept of Dataset goes beyond the simple idea of files and enable more
complex features like partitioning and catalog integration (AWS Glue Catalog).
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
dataset: bool
If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session is None.
Returns
-------
Tuple[Dict[str, str], Optional[Dict[str, str]]]
        columns_types: Dictionary with keys as column names and values as
data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /
partitions_types: Dictionary with keys as partition names
and values as data types (e.g. {'col2': 'date'}).
Examples
--------
Reading all Parquet files (with partitions) metadata under a prefix
>>> import awswrangler as wr
>>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path='s3://bucket/prefix/', dataset=True)
Reading all Parquet files metadata from a list
>>> import awswrangler as wr
>>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path=[
... 's3://bucket/filename0.parquet',
... 's3://bucket/filename1.parquet'
... ])
"""
data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=boto3_session
)
return _data_types.athena_types_from_pyarrow_schema(
schema=data.schema.to_arrow_schema(), partitions=data.partitions
)
def store_parquet_metadata(
path: str,
database: str,
table: str,
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
dataset: bool = False,
use_threads: bool = True,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
compression: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]:
"""Infer and store parquet metadata on AWS Glue Catalog.
    Infer Apache Parquet file(s) metadata from a received S3 prefix or list of S3 objects paths
    and then store it on the AWS Glue Catalog, including all inferred partitions
    (no need to run 'MSCK REPAIR TABLE').
    The concept of Dataset goes beyond the simple idea of files and enables more
    complex features like partitioning and catalog integration (AWS Glue Catalog).
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
database : str
Glue/Athena catalog: Database name.
table : str
Glue/Athena catalog: Table name.
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
dataset: bool
If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
description: str, optional
Glue/Athena catalog: Table description
parameters: Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments: Dict[str, str], optional
Glue/Athena catalog:
Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
compression: str, optional
Compression style (``None``, ``snappy``, ``gzip``, etc).
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session is None.
Returns
-------
Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]
The metadata used to create the Glue Table.
        columns_types: Dictionary with keys as column names and values as
data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /
partitions_types: Dictionary with keys as partition names
and values as data types (e.g. {'col2': 'date'}). /
partitions_values: Dictionary with keys as S3 path locations and values as a
list of partitions values as str (e.g. {'s3://bucket/prefix/y=2020/m=10/': ['2020', '10']}).
Examples
--------
Reading all Parquet files metadata under a prefix
>>> import awswrangler as wr
>>> columns_types, partitions_types, partitions_values = wr.s3.store_parquet_metadata(
... path='s3://bucket/prefix/',
... database='...',
... table='...',
... dataset=True
... )
"""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=session
)
partitions: Optional[pyarrow.parquet.ParquetPartitions] = data.partitions
columns_types, partitions_types = _data_types.athena_types_from_pyarrow_schema(
schema=data.schema.to_arrow_schema(), partitions=partitions
)
catalog.create_parquet_table(
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
)
partitions_values: Dict[str, List[str]] = _data_types.athena_partitions_from_pyarrow_partitions(
path=path, partitions=partitions
)
catalog.add_parquet_partitions(
database=database,
table=table,
partitions_values=partitions_values,
compression=compression,
boto3_session=session,
)
return columns_types, partitions_types, partitions_values
def wait_objects_exist(
paths: List[str],
delay: Optional[Union[int, float]] = None,
max_attempts: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""Wait Amazon S3 objects exist.
Polls S3.Client.head_object() every 5 seconds (default) until a successful
state is reached. An error is returned after 20 (default) failed checks.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectExists
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
paths : List[str]
List of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
delay : Union[int,float], optional
The amount of time in seconds to wait between attempts. Default: 5
max_attempts : int, optional
The maximum number of attempts to be made. Default: 20
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session is None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.wait_objects_exist(['s3://bucket/key0', 's3://bucket/key1']) # wait both objects
"""
return _wait_objects(
waiter_name="object_exists",
paths=paths,
delay=delay,
max_attempts=max_attempts,
use_threads=use_threads,
boto3_session=boto3_session,
)
def wait_objects_not_exist(
paths: List[str],
delay: Optional[Union[int, float]] = None,
max_attempts: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""Wait Amazon S3 objects not exist.
Polls S3.Client.head_object() every 5 seconds (default) until a successful
state is reached. An error is returned after 20 (default) failed checks.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectNotExists
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
paths : List[str]
List of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
delay : Union[int,float], optional
The amount of time in seconds to wait between attempts. Default: 5
max_attempts : int, optional
The maximum number of attempts to be made. Default: 20
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session is None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.wait_objects_not_exist(['s3://bucket/key0', 's3://bucket/key1']) # wait both objects not exist
"""
return _wait_objects(
waiter_name="object_not_exists",
paths=paths,
delay=delay,
max_attempts=max_attempts,
use_threads=use_threads,
boto3_session=boto3_session,
)
def _wait_objects(
waiter_name: str,
paths: List[str],
delay: Optional[Union[int, float]] = None,
max_attempts: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> None:
delay = 5 if delay is None else delay
max_attempts = 20 if max_attempts is None else max_attempts
_delay: int = int(delay) if isinstance(delay, float) else delay
if len(paths) < 1:
return None
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
waiter = client_s3.get_waiter(waiter_name)
_paths: List[Tuple[str, str]] = [_utils.parse_path(path=p) for p in paths]
if use_threads is False:
for bucket, key in _paths:
waiter.wait(Bucket=bucket, Key=key, WaiterConfig={"Delay": _delay, "MaxAttempts": max_attempts})
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
futures: List[concurrent.futures.Future] = []
for bucket, key in _paths:
future: concurrent.futures.Future = executor.submit(
fn=waiter.wait, Bucket=bucket, Key=key, WaiterConfig={"Delay": _delay, "MaxAttempts": max_attempts}
)
futures.append(future)
for future in futures:
future.result()
return None
def read_parquet_table(
table: str,
database: str,
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
columns: Optional[List[str]] = None,
categories: List[str] = None,
chunked: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read Apache Parquet table registered on AWS Glue Catalog.
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
table : str
AWS Glue Catalog table name.
database : str
AWS Glue Catalog database name.
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
columns : List[str], optional
Names of columns to read from the file(s).
categories: List[str], optional
List of columns names that should be returned as pandas.Categorical.
Recommended for memory restricted environments.
chunked : bool
If True will break the data in smaller DataFrames (Non deterministic number of lines).
Otherwise return a single DataFrame with the whole data.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session is None.
    s3_additional_kwargs:
        Forwarded to s3fs; useful for server-side encryption.
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunked=True`.
Examples
--------
Reading Parquet Table
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet_table(database='...', table='...')
Reading Parquet Table encrypted
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet_table(
... database='...',
    ...     table='...',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
    ...         'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading Parquet Table in chunks
>>> import awswrangler as wr
>>> dfs = wr.s3.read_parquet_table(database='...', table='...', chunked=True)
>>> for df in dfs:
>>> print(df) # Smaller Pandas DataFrame
"""
path: str = catalog.get_table_location(database=database, table=table, boto3_session=boto3_session)
return read_parquet(
path=path,
filters=filters,
columns=columns,
categories=categories,
chunked=chunked,
dataset=True,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
)
def merge_datasets(
source_path: str,
target_path: str,
mode: str = "append",
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> List[str]:
"""Merge a source dataset into a target dataset.
Note
----
If you are merging tables (S3 datasets + Glue Catalog metadata),
remember that you will also need to update your partitions metadata in some cases.
(e.g. wr.athena.repair_table(table='...', database='...'))
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
source_path : str,
S3 Path for the source directory.
target_path : str,
S3 Path for the target directory.
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session is None.
Returns
-------
List[str]
List of new objects paths.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.merge_datasets(
... source_path="s3://bucket0/dir0/",
... target_path="s3://bucket1/dir1/",
... mode="append"
... )
["s3://bucket1/dir1/key0", "s3://bucket1/dir1/key1"]
"""
source_path = source_path[:-1] if source_path[-1] == "/" else source_path
target_path = target_path[:-1] if target_path[-1] == "/" else target_path
session: boto3.Session = _utils.ensure_session(session=boto3_session)
paths: List[str] = list_objects(path=f"{source_path}/", boto3_session=session)
_logger.debug(f"len(paths): {len(paths)}")
if len(paths) < 1:
return []
if mode == "overwrite":
_logger.debug(f"Deleting to overwrite: {target_path}/")
delete_objects(path=f"{target_path}/", use_threads=use_threads, boto3_session=session)
elif mode == "overwrite_partitions":
paths_wo_prefix: List[str] = [x.replace(f"{source_path}/", "") for x in paths]
paths_wo_filename: List[str] = [f"{x.rpartition('/')[0]}/" for x in paths_wo_prefix]
partitions_paths: List[str] = list(set(paths_wo_filename))
target_partitions_paths = [f"{target_path}/{x}" for x in partitions_paths]
for path in target_partitions_paths:
_logger.debug(f"Deleting to overwrite_partitions: {path}")
delete_objects(path=path, use_threads=use_threads, boto3_session=session)
elif mode != "append":
        raise exceptions.InvalidArgumentValue(f"{mode} is an invalid mode option.")
new_objects: List[str] = copy_objects(
paths=paths, source_path=source_path, target_path=target_path, use_threads=use_threads, boto3_session=session
)
_logger.debug(f"len(new_objects): {len(new_objects)}")
return new_objects
def copy_objects(
paths: List[str],
source_path: str,
target_path: str,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> List[str]:
"""Copy a list of S3 objects to another S3 directory.
Note
----
    In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
paths : List[str]
List of S3 objects paths (e.g. [s3://bucket/dir0/key0, s3://bucket/dir0/key1]).
source_path : str,
S3 Path for the source directory.
target_path : str,
S3 Path for the target directory.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session is None.
Returns
-------
List[str]
List of new objects paths.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.copy_objects(
    ...     paths=["s3://bucket0/dir0/key0", "s3://bucket0/dir0/key1"],
... source_path="s3://bucket0/dir0/",
... target_path="s3://bucket1/dir1/",
... )
["s3://bucket1/dir1/key0", "s3://bucket1/dir1/key1"]
"""
_logger.debug(f"len(paths): {len(paths)}")
if len(paths) < 1:
return []
source_path = source_path[:-1] if source_path[-1] == "/" else source_path
target_path = target_path[:-1] if target_path[-1] == "/" else target_path
session: boto3.Session = _utils.ensure_session(session=boto3_session)
batch: List[Tuple[str, str]] = []
new_objects: List[str] = []
for path in paths:
path_wo_prefix: str = path.replace(f"{source_path}/", "")
path_final: str = f"{target_path}/{path_wo_prefix}"
new_objects.append(path_final)
batch.append((path, path_final))
_logger.debug(f"len(new_objects): {len(new_objects)}")
_copy_objects(batch=batch, use_threads=use_threads, boto3_session=session)
return new_objects
def _copy_objects(batch: List[Tuple[str, str]], use_threads: bool, boto3_session: boto3.Session) -> None:
_logger.debug(f"len(batch): {len(batch)}")
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
resource_s3: boto3.resource = _utils.resource(service_name="s3", session=boto3_session)
for source, target in batch:
source_bucket, source_key = _utils.parse_path(path=source)
copy_source: Dict[str, str] = {"Bucket": source_bucket, "Key": source_key}
target_bucket, target_key = _utils.parse_path(path=target)
resource_s3.meta.client.copy(
CopySource=copy_source,
Bucket=target_bucket,
Key=target_key,
SourceClient=client_s3,
Config=TransferConfig(num_download_attempts=15, use_threads=use_threads),
)
|
"""
249. Count of Smaller Number before itself
https://www.lintcode.com/problem/count-of-smaller-number-before-itself/description
"""
import math
class Block:
def __init__(self):
self.count = {}
self.total = 0
class BlockArray:
def __init__(self, array_range):
self.array_size = int(math.sqrt(array_range)) + 1
self.blocks = [
Block() for _ in range(self.array_size)
]
def add(self, num):
array_index = num // self.array_size
index_in_array = num % self.array_size
self.blocks[array_index].count[index_in_array] = self.blocks[array_index].count.get(index_in_array, 0) + 1
self.blocks[array_index].total += 1
def count_smaller(self, num):
array_index = num // self.array_size
sum_before_block = 0
for i in range(array_index):
sum_before_block += self.blocks[i].total
count_inside_block = 0
index_inside_block = num % self.array_size
for i in range(index_inside_block):
count_inside_block += self.blocks[array_index].count.get(i, 0)
return sum_before_block + count_inside_block
class Solution:
"""
@param A: an integer array
    @return: A list of integers where the i-th value counts how many numbers before A[i] are smaller than A[i]
"""
def countOfSmallerNumberII(self, A):
# write your code here
ba = BlockArray(10000)
results = []
for num in A:
results.append(ba.count_smaller(num))
ba.add(num)
return results
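# Quick illustrative self-check (added for clarity, not part of the original solution):
# each output entry counts how many earlier elements are strictly smaller.
if __name__ == "__main__":
    print(Solution().countOfSmallerNumberII([1, 2, 7, 8, 5]))  # expected: [0, 1, 2, 3, 2]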
|
# Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See LICENSE
"""
frappe.coverage
~~~~~~~~~~~~~~~~
Coverage settings for frappe
"""
STANDARD_INCLUSIONS = ["*.py"]
STANDARD_EXCLUSIONS = [
"*.js",
"*.xml",
"*.pyc",
"*.css",
"*.less",
"*.scss",
"*.vue",
"*.html",
"*/test_*",
"*/node_modules/*",
"*/doctype/*/*_dashboard.py",
"*/patches/*",
]
FRAPPE_EXCLUSIONS = [
"*/tests/*",
"*/commands/*",
"*/frappe/change_log/*",
"*/frappe/exceptions*",
"*frappe/setup.py",
"*/doctype/*/*_dashboard.py",
"*/patches/*",
]
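# Illustrative sketch (not part of frappe): glob lists like the ones above are typically
# fed to coverage.py's include/omit options, roughly as follows.
#
#   from coverage import Coverage
#   cov = Coverage(include=STANDARD_INCLUSIONS, omit=STANDARD_EXCLUSIONS + FRAPPE_EXCLUSIONS)
#   cov.start()
#   ...  # run the test suite
#   cov.stop()
#   cov.report()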
|
import pathlib
import re
import string
import time, os
# NOTE: the helpers below also expect a gazetteer module bound to the name `gss`
# (city/country lists, prepositions, determiners, special cases); it is not imported in this file.
def PunctuationRemover(wordsstring, normalise_spelling=False, return_list=True):
a = wordsstring
if normalise_spelling == True:
a = wordsstring.replace('oͤ', 'ö').replace('Oͤ', 'Ö').replace('aͤ', 'ä').replace('Aͤ', 'Ä').replace('uͤ', 'ü').replace('Uͤ', 'Ü')\
.replace('ſ', 's').replace('æ', 'ae').replace('ï', 'i').replace('ꝛc', 'etc').replace('Ï', 'I').replace('Æ', 'Ae').replace('/', ' ')\
.replace('-', '').replace('̃n', 'n').replace('̃N', 'N')
    translator = str.maketrans('', '', string.punctuation)  # the 3-argument maketrans maps every character of the third argument (string.punctuation) to None, so translate() strips all punctuation
g = a.translate(translator)
h = re.sub('(«|»)', '', g)
if return_list == True:
h = h.split()
return h
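# Illustrative example (added for clarity): with punctuation stripped and the default
# return_list=True, the helper returns a clean token list.
# >>> PunctuationRemover('Hamburg, Berlin; und Wien!')
# ['Hamburg', 'Berlin', 'und', 'Wien']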
def GermanCityDeLatiniser(wordstring):
string = ''
for word in wordstring.split():
if word in gss.Latin_to_German_cities:
string += gss.Latin_to_German_cities.get(word) + ' '
else:
string += word + ' '
return string
def WordLabeler(wordlist): #make sure punctuation is removed in the wordlist, otherwise it won't return, say, a city if is followed by punctuation
h = [] #ultimately for this to work, the wordlist has to go through the suffixremover first## should the suffixremover be built in?
# lang = [x for x in gss.languages[0]] #list comprehension is needed because str(gss.languages) would match any string-part of the languages in the language list with the word (e.g. 'ich' in 'Österreich'). The [0] is needed because the countries are read as a list from the file via list comprehension which creates a list of lists. Thus e.g. countries[0] is the first list (of the countries list) which is the list containing all countries.
coun = [x for x in gss.countries.split()]
ita = [x for x in gss.Italian_cities.split()]
fre = [x for x in gss.French_cities.split()]
ger = [x for x in gss.German_cities.split()]
aus = [x for x in gss.Austrian_cities.split()]
reg = [x for x in gss.German_regions.split()]
tc = gss.territorial_classifiers#[x for x in gss.territorial_classifiers]
# dia = [x for x in gss.German_dialects.split()]
uk = [x for x in gss.UK_cities.split()]
pol = [x for x in gss.Polish_cities.split()]
swiss = [x for x in gss.Swiss_cities.split()]
span = [x for x in gss.Spanish_cities.split()]
spec = [x for x in gss.special_cases.split()]
for index, word in enumerate((wordlist)): #should this be build in as a second-order conditional?: 'or wordlist[index - 1] not in gss.loc_prepositions and wordlist[index - 1] not in gss.loc_prepositions_caps'
# if word in lang:
# h.extend(('LANGUAGE:', word))
if word in spec:
            if any(w in tc for w in wordlist[index - 8: index + 8]) or wordlist[index - 1] in gss.loc_prepositions \
or wordlist[index - 1] in gss.loc_prepositions_caps:
h.extend(('GERMAN_CITY:', word))
if word[:-1] in spec:
            if any(w in tc for w in wordlist[index - 8: index + 8]) or wordlist[index - 1] in gss.loc_prepositions \
or wordlist[index - 1] in gss.loc_prepositions_caps:
h.extend(('GERMAN_CITY:', word[:-1]))
if wordlist[index - 1] not in gss.determiners and wordlist[index - 1] not in tc \
                or any(w in tc for w in wordlist[index - 5: index + 5]):  # skip words preceded by a determiner ('das Essen' = food vs. the city 'Essen') unless territorial classifiers appear in the surroundings of the word
if word in ger:
h.extend(('GERMAN_CITY:', word))
if word in reg:
h.extend(('REGION_IN_GERMANY:', word))
if word in swiss:
h.extend(('SWISS_CITY:', word))
if word in pol:
h.extend(('POLISH_CITY:', word))
if word[-1:] == 's': #word[:-1] removes the last letter of word to check whether the word is used in genitive case with added 's'
if word[:-1] in ger:
h.extend(('GERMAN_CITY:', word[:-1]))
if word[:-1] in reg:
h.extend(('REGION_IN_GERMANY:', word[:-1]))
if word[:-1] in coun:
h.extend(('COUNTRY:', word[:-1]))
if word[:-1] in span:
h.extend(('SPANISH_CITY:', word[:-1]))
if word[:-1] in aus:
h.extend(('AUSTRIAN_CITY:', word[:-1]))
if word[:-1] in fre:
h.extend(('FRENCH_CITY:', word[:-1]))
if word[:-1] in ita:
h.extend(('ITALIAN_CITY:', word[:-1]))
if word[:-1] in uk:
h.extend(('UK_CITY:', word[:-1]))
if word[:-1] in swiss:
h.extend(('SWISS_CITY:', word[:-1]))
if word[:-1] in pol:
h.extend(('POLISH_CITY:', word[:-1]))
if word in span:
h.extend(('SPANISH_CITY:', word))
if word in aus:
h.extend(('AUSTRIAN_CITY:', word))
if word in fre:
h.extend(('FRENCH_CITY:', word))
if word in ita:
h.extend(('ITALIAN_CITY:', word))
if word in uk:
h.extend(('UK_CITY:', word))
if word in coun:
h.extend(('COUNTRY:', word))
# if word in dia:
# h.extend(('GERMAN_DIALECT:', word))
# if word in tc:
# h.extend(('TERRITORIAL CLASSIFICATION:', word))
# elif word not in h: #comment these two lines for just the list, otherwise gives complete list, not just langs, countries etc.
# h.append(word) #comment these two lines for just the list, otherwise gives complete list, not just langs, countries etc.
# return h
##uncomment all of below, if above lines are commented and vice versa
l = []
counter = 1
for i in range(1, len(h), 2):
if h[i] == '/':
continue
unique = True
l.append(h[0 + i - 1]) # appends the classifier
l.append(h[i]) # append the element in above-specified range (2n+1)
counter = 1
for j in range(i + 2, len(h), 2): # then iterate over all the remaining (2n+1)+2 elements
if h[i] == h[j]:
h[j] = '/'
unique = False
counter += 1
if unique == False: # this operation cannot be integrated into the 2nd loop (e.g. by l[i] = '/'), because then it would not go through the entire list with j to find all the matches with i. so in the following list [0, 'a', 2, 'b', 4, 'b', 6, 'c', 8, 'c', 10, 'a', 12, 'a'] the 'a' at index 11 would be matched in the j loop by the 'a' in index 2, but the 'a' in index 13 would not be ranged over anymore if after the match between index 2 and 11 l[i] would already be appended and replaced
h[i] = '/'
l.append(counter) # to count how many times l[i] and l[j] were matched
return l
def GP_parser(path_to_files: str, csv_filename: str, file_path_text_cleaner_output, text_cleaner=True, csv_header=False, author=False, title=False,
geodata=False, word_count=False, skip_translations=True, flush_buffer_n_write_to_file=True):
###for text cleaning###
most_common_engl_words_custom = 'the be any to of Gutenberg before downloading, copying, displaying, performing, distributing copying, distributing, performing, THE FULL PROJECT GUTENBERG LICENSE permission and without paying copyright royalties. Special rules, redistribution. displaying See paragraph electronic creating derivative particular state visit ways including http://www.gutenberg.net West, Salt Lake City, including checks License Dr. Gregory B. Newby Chief Executive and Director gbnewby@pglaf.org as specified Literary Archive Foundation For additional contact information page often in several formats including plain compressed others online payments please visit works and credit card donations To donate array of equipment including outdated equipment Many small donations Project provide by produce provided produced and a that have I it is was for not on with he as you do at this but his by from they we say her she or of if even an will my one all would there their what so up out if about who get which go me when make can like time no just him know take people into year your good some could them see other than then now look only come its over think also back after use two how our work first well way even new want because any these give day most us'
set_most_common_engl_words_custom = set([x for x in most_common_engl_words_custom.split()])
with open(csv_filename, "a", encoding='utf8', errors='ignore') as a:
with open(csv_filename, "r", encoding='utf8', errors='ignore') as r:
###csv header###
id = -1
counter = -1 #for stop-n-go parsing: to continue from last parsing step
lines = []
for line in r:
counter += 1
print(counter)
                lines.append(line.split(';'))  # the csv separator is a semicolon instead of a comma to avoid conflicts with commas in the xml and the geodata lists
if csv_header:
try:
if lines[0][0] == 'id':
pass
except:
csv_head = 'id'
if title:
csv_head += ';title'
if author:
csv_head += ';author name'
if not skip_translations:
csv_head += ';translator name'
if text_cleaner:
print(
'Note that the texts will be saved as numbered .txt files, followed by author name and title, from the csv in your CWD.')
if geodata:
csv_head += ';geodata'
                        print('Please make sure to first clean up the texts before extracting metadata and creating a csv. '
                              'To parse for metadata and create the csv, run this parser again with path_to_files pointed at the folder with the cleaned-up Gutenberg txts.')
if word_count:
csv_head += ';word count of text'
csv_head += '\n'
a.write(csv_head)
###parser starts here###
for txt in pathlib.Path(path_to_files).iterdir():
if txt.is_file():
id += 1
athr = ''
gdata = ''
ttle = ''
string = ''
trnslator = ''
skipper = False
memoriser = True
memoriser2 = True
memoriser3 = True
if counter == -1 or counter == 0 or id >= counter: # to pick up at the last point
if geodata:
print('Parsing document ' + str(id) + ' for geodata')
start1 = time.time()
###csv metadata###
if title or author or geodata or word_count:
with open(txt, 'r', encoding='utf8') as g:
if word_count:
word_counter = 0
for line in g:
string += line
for i in range(len(line.split())):
if skip_translations:
if line.split()[i] == 'Translator:':
skipper = True
print('Skipping translation')
break
else:
if line.split()[i] == 'Translator:':
trnslator += ';(Translator:) '
trnslator += ' '.join(line.split()[i + 1:])
memoriser = False
if word_count:
word_counter += 1
if author:
if line.split()[i] == 'Author:':
athr += ';'
athr += ' '.join(line.split()[i + 1:])
memoriser2 = False
if title:
if line.split()[i] == 'Title:':
ttle += ';'
temp_string4 = ' '.join(line.split()[i + 1:])
temp_string4 = temp_string4.replace(';', '')
ttle += temp_string4
memoriser3 = False
if geodata and skipper == False:
try:
temp_string = PunctuationRemover(string, normalise_spelling=True, return_list=False)
temp_string2 = GermanCityDeLatiniser(temp_string)
temp_string3 = WordLabeler(temp_string2.split())
gdata += ';' + str(temp_string3)
except:
gdata += ';'
continue
###write metadata to file###
if skipper == False:
a.write(str(id))
if title:
if memoriser3:
ttle += ';-99999'
a.write(ttle)
if author:
if memoriser2:
athr += ';-99999'
a.write(athr)
if not skip_translations:
if memoriser:
trnslator += ';-99999'
a.write(trnslator)
if geodata:
a.write(gdata)
duration1 = time.time() - start1
print('finished parsing text for geolocations. duration:', duration1)
if word_count:
a.write(';' + str(word_counter))
a.write('\n')
if flush_buffer_n_write_to_file:
a.flush() # flushing internal buffers
os.fsync(a.fileno()) # force-writing buffers to file
###text###
if text_cleaner:
try:
with open(txt, 'r', encoding='utf8') as f:
txts = ''
ttle = ''
athr = ''
for line in f:
line_split = line.split()
for i in range(len(line_split)):
if line_split[i] == 'Title:':
ttle += ' '.join(line_split[i + 1:])
ttle = PunctuationRemover(ttle, normalise_spelling=True, return_list=False)
ttle = ttle.replace('(', ' ')
ttle = ttle.replace(')', ' ')
ttle = ttle.replace(' ', ' ')
ttle = ttle.replace(';', '.')
if line_split[i] == 'Author:':
athr += ', '
athr += ' '.join(line_split[i + 1:])
athr += ', '
counter = 0
for word in line.split():
if word.lower() in set_most_common_engl_words_custom or word in set_most_common_engl_words_custom or word.upper() in set_most_common_engl_words_custom:
counter += 1
if counter >= 2:
continue
if counter < 2:
txts += line
print('writing text ' + str(id) + ' to file')
try:
with open(file_path_text_cleaner_output + str(id) + str(athr) + str(ttle) + '.txt', 'w+', encoding='utf8', errors='ignore') as w:
w.write(str(txts))
except:
try:
with open(file_path_text_cleaner_output + str(id) + str(athr) + '.txt', 'w+', encoding='utf8', errors='ignore') as w:
w.write(str(txts))
except:
with open(file_path_text_cleaner_output + str(id) + '.txt', 'w+', encoding='utf8', errors='ignore') as w:
w.write(str(txts))
#in case encoding of text is different
except:
with open(txt, 'r', encoding='ISO-8859-1') as f:
txts = ''
                                    ttle = ''
                                    athr = ''
for line in f:
line_split = line.split()
for i in range(len(line_split)):
if line_split[i] == 'Title:':
ttle += ' '.join(line_split[i + 1:])
ttle = PunctuationRemover(ttle, normalise_spelling=True, return_list=False)
ttle = ttle.replace('(', ' ')
ttle = ttle.replace(')', ' ')
ttle = ttle.replace(' ', ' ')
ttle = ttle.replace(';', '.')
if line_split[i] == 'Author:':
athr += ', '
athr += ' '.join(line_split[i + 1:])
athr += ', '
counter = 0
for word in line.split():
if word.lower() in set_most_common_engl_words_custom or word in set_most_common_engl_words_custom or word.upper() in set_most_common_engl_words_custom:
counter += 1
if counter >= 2:
continue
if counter < 2:
txts += line
print('writing text ' + str(id) + ' to file')
try:
with open(file_path_text_cleaner_output + str(id) + str(athr) + str(ttle) + '.txt', 'w+', encoding='utf8', errors='ignore') as w:
w.write(str(txts))
except:
try:
with open(file_path_text_cleaner_output + str(id) + str(athr) + '.txt', 'w+', encoding='utf8', errors='ignore') as w:
w.write(str(txts))
except:
with open(file_path_text_cleaner_output + str(id) + '.txt', 'w+', encoding='utf8', errors='ignore') as w:
w.write(str(txts))
path = './Gutenberg cleaned files/'
# path2 = './Gutenberg ebooks/'
csv = './DTA outputs/GP_csv.txt'
text_clnr_output = './Gutenberg cleaned files/'
x = GP_parser(path_to_files=path, csv_filename=csv, file_path_text_cleaner_output=text_clnr_output, csv_header=True, text_cleaner=False, author=True, title=True, word_count=True, geodata=True, skip_translations=False)
|
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from gensim.models import Word2Vec
import numpy as np
import gc
def train_word2vec(documents, embedding_dim):
"""
    Train word2vec over the training documents.
    Args:
        documents (list): list of tokenized documents
        embedding_dim (int): output word vector size
    Returns:
        word_vectors (gensim KeyedVectors): mapping from words to their respective vectors
"""
model = Word2Vec(documents, min_count=1, size=embedding_dim)
word_vectors = model.wv
del model
return word_vectors
def create_embedding_matrix(tokenizer, word_vectors, embedding_dim):
"""
Create embedding matrix containing word indexes and respective vectors from word vectors
Args:
tokenizer (keras.preprocessing.text.Tokenizer): keras tokenizer object containing word indexes
        word_vectors (gensim KeyedVectors): mapping from words to their respective vectors
        embedding_dim (int): dimension of word vector
    Returns:
        embedding_matrix (np.array): matrix of shape (nb_words, embedding_dim) mapping word indexes to their vectors
"""
nb_words = len(tokenizer.word_index) + 1
word_index = tokenizer.word_index
embedding_matrix = np.zeros((nb_words, embedding_dim))
print("Embedding matrix shape: %s" % str(embedding_matrix.shape))
for word, i in word_index.items():
try:
embedding_vector = word_vectors[word]
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
except KeyError:
print("vector not found for word - %s" % word)
print('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))
return embedding_matrix
def word_embed_meta_data(documents, embedding_dim):
"""
    Fit a Keras tokenizer on the given documents and build the corresponding embedding matrix.
Args:
documents (list): list of document
embedding_dim (int): embedding dimension
Returns:
tokenizer (keras.preprocessing.text.Tokenizer): keras tokenizer object
        embedding_matrix (np.array): matrix mapping word indexes to their word2vec vectors
"""
documents = [x.lower().split() for x in documents]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(documents)
word_vector = train_word2vec(documents, embedding_dim)
embedding_matrix = create_embedding_matrix(tokenizer, word_vector, embedding_dim)
del word_vector
gc.collect()
return tokenizer, embedding_matrix
def create_train_dev_set(tokenizer, sentences_pair, is_similar, max_sequence_length, validation_split_ratio):
"""
Create training and validation dataset
Args:
tokenizer (keras.preprocessing.text.Tokenizer): keras tokenizer object
sentences_pair (list): list of tuple of sentences pairs
is_similar (list): list containing labels if respective sentences in sentence1 and sentence2
are same or not (1 if same else 0)
max_sequence_length (int): max sequence length of sentences to apply padding
validation_split_ratio (float): contain ratio to split training data into validation data
Returns:
train_data_1 (list): list of input features for training set from sentences1
train_data_2 (list): list of input features for training set from sentences2
labels_train (np.array): array containing similarity score for training data
leaks_train(np.array): array of training leaks features
val_data_1 (list): list of input features for validation set from sentences1
        val_data_2 (list): list of input features for validation set from sentences2
labels_val (np.array): array containing similarity score for validation data
leaks_val (np.array): array of validation leaks features
"""
sentences1 = [x[0].lower() for x in sentences_pair]
sentences2 = [x[1].lower() for x in sentences_pair]
train_sequences_1 = tokenizer.texts_to_sequences(sentences1)
train_sequences_2 = tokenizer.texts_to_sequences(sentences2)
leaks = [[len(set(x1)), len(set(x2)), len(set(x1).intersection(x2))]
for x1, x2 in zip(train_sequences_1, train_sequences_2)]
train_padded_data_1 = pad_sequences(train_sequences_1, maxlen=max_sequence_length)
train_padded_data_2 = pad_sequences(train_sequences_2, maxlen=max_sequence_length)
train_labels = np.array(is_similar)
leaks = np.array(leaks)
shuffle_indices = np.random.permutation(np.arange(len(train_labels)))
train_data_1_shuffled = train_padded_data_1[shuffle_indices]
train_data_2_shuffled = train_padded_data_2[shuffle_indices]
train_labels_shuffled = train_labels[shuffle_indices]
leaks_shuffled = leaks[shuffle_indices]
dev_idx = max(1, int(len(train_labels_shuffled) * validation_split_ratio))
del train_padded_data_1
del train_padded_data_2
gc.collect()
train_data_1, val_data_1 = train_data_1_shuffled[:-dev_idx], train_data_1_shuffled[-dev_idx:]
train_data_2, val_data_2 = train_data_2_shuffled[:-dev_idx], train_data_2_shuffled[-dev_idx:]
labels_train, labels_val = train_labels_shuffled[:-dev_idx], train_labels_shuffled[-dev_idx:]
leaks_train, leaks_val = leaks_shuffled[:-dev_idx], leaks_shuffled[-dev_idx:]
return train_data_1, train_data_2, labels_train, leaks_train, val_data_1, val_data_2, labels_val, leaks_val
def create_test_data(tokenizer, test_sentences_pair, max_sequence_length):
"""
    Create test dataset from sentence pairs.
Args:
tokenizer (keras.preprocessing.text.Tokenizer): keras tokenizer object
test_sentences_pair (list): list of tuple of sentences pairs
max_sequence_length (int): max sequence length of sentences to apply padding
Returns:
        test_data_1 (list): list of input features for the test set from sentences1
        test_data_2 (list): list of input features for the test set from sentences2
        leaks_test (np.array): array of test leaks features
"""
test_sentences1 = [x[0].lower() for x in test_sentences_pair]
test_sentences2 = [x[1].lower() for x in test_sentences_pair]
test_sequences_1 = tokenizer.texts_to_sequences(test_sentences1)
test_sequences_2 = tokenizer.texts_to_sequences(test_sentences2)
leaks_test = [[len(set(x1)), len(set(x2)), len(set(x1).intersection(x2))]
for x1, x2 in zip(test_sequences_1, test_sequences_2)]
leaks_test = np.array(leaks_test)
test_data_1 = pad_sequences(test_sequences_1, maxlen=max_sequence_length)
test_data_2 = pad_sequences(test_sequences_2, maxlen=max_sequence_length)
return test_data_1, test_data_2, leaks_test
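# Minimal usage sketch (added for illustration; toy data, all names below are arbitrary).
# Assumes gensim < 4.0 for the `size=` keyword used in train_word2vec above.
if __name__ == "__main__":
    sentence_pairs = [("how are you", "how do you do"), ("good morning", "see you later")]
    labels = [1, 0]
    flat_sentences = [s for pair in sentence_pairs for s in pair]
    tokenizer, embedding_matrix = word_embed_meta_data(flat_sentences, embedding_dim=50)
    splits = create_train_dev_set(tokenizer, sentence_pairs, labels,
                                  max_sequence_length=10, validation_split_ratio=0.5)
    train_data_1, train_data_2, labels_train, leaks_train = splits[:4]
    print(train_data_1.shape, embedding_matrix.shape)  # e.g. (1, 10) and (vocab_size + 1, 50)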
|
import requests
import json
key = "9247ef2009ba4e86a10874c39a1f1868"
# path to your query file (not used by the GET request below)
queryFilePath = '/path/to/your/query.json'
headerDict = {}
paramDict = {}
#baseUrl = 'https' + '://' + 'api.yuuvis.io'
objectId = '805715d6-9ed9-4069-8922-bb587ce9d652'
baseUrl = 'https' + '://' + 'api.yuuvis.io'
header_name = 'Content-Type'
headerDict['Content-Type'] = 'application/json'
header_name = 'Ocp-Apim-Subscription-Key'
headerDict['Ocp-Apim-Subscription-Key'] = key
session = requests.Session()
response = session.get(str(baseUrl+'/dms/objects/'+objectId+'/contents/file'), headers=headerDict)
print(response.text)
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from userena.forms import SignupForm
from .models import MyProfile
class SignupFormExtra(SignupForm):
blank_choice = (('', _('Please choose an option')),)
gender = forms.ChoiceField(label=_(u'gender'), choices=blank_choice+MyProfile.GENDER_CHOICES, required=False)
birthday = forms.DateField(label=_(u'birthday'), required=False)
education = forms.ChoiceField(label=_(u'education'), choices=blank_choice+MyProfile.EDUCATION_CHOICES, required=False)
annual_income = forms.ChoiceField(label=_(u'annual income'), choices=((0, _('Please choose an option')),)+MyProfile.ANNUAL_INCOME_CHOICES, required=False)
def __init__(self, *args, **kw):
"""
        Kept as a hook for tweaking the form (e.g. re-ordering fields); currently it
        only delegates to the parent form.
"""
super(SignupFormExtra, self).__init__(*args, **kw)
def save(self):
"""
        Override the save method to store the extra fields (gender, education,
        birthday and annual income) on the user's profile.
"""
# First save the parent form and get the user.
new_user = super(SignupFormExtra, self).save()
# Get the profile, the `save` method above creates a profile for each
# user because it calls the manager method `create_user`.
# See: https://github.com/django-userena-ce/django-userena-ce/blob/master/userena/managers.py#L65
profile = new_user.my_profile
profile.gender = self.cleaned_data['gender']
profile.education = self.cleaned_data['education']
profile.birthday = self.cleaned_data['birthday']
profile.annual_income = self.cleaned_data['annual_income']
profile.save()
# Userena expects to get the new user from this form, so return the new
# user.
return new_user
|
#!/usr/bin/env python
## HiC-Pro
## Copyright (c) 2015 Institut Curie
## Author(s): Nicolas Servant, Eric Viara
## Contact: nicolas.servant@curie.fr
## This software is distributed without any guarantee under the terms of the BSD-3 licence.
## See the LICENCE file for details
"""
Script to split valid interactions into G1/G2 interaction files and calculate statistics
"""
import getopt
import sys
# from itertools import izip
def usage():
"""Usage function"""
print("Usage : python split_valid_interactions.py")
print("-i/--input <valid interaction file>")
print("[-s/--stats] <stats file>")
print("[-v/--verbose] <Verbose>")
print("[-h/--help] <Help>")
return
def get_args():
"""Get argument"""
try:
opts, args = getopt.getopt(
sys.argv[1:],
"i:s:vh",
["input=", "stats=",
"verbose", "help"])
except getopt.GetoptError:
usage()
sys.exit(-1)
return opts
if __name__ == "__main__":
## Read command line arguments
opts = get_args()
inputfile = None
statsFile = None
verbose = False
if len(opts) == 0:
usage()
sys.exit()
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-s", "--stats"):
statsFile = arg
elif opt in ("-i", "--input"):
inputfile = arg
elif opt in ("-v", "--verbose"):
verbose = True
else:
assert False, "unhandled option"
## Verbose mode
if verbose:
print("## split_valid_interactions.py")
print("## input=", inputfile)
print("## statsFile=", statsFile)
print("## verbose=", verbose)
## AS counter
vp_counter = 0
G1G1_ascounter = 0
G2G2_ascounter = 0
G1U_ascounter = 0
UG1_ascounter = 0
G2U_ascounter = 0
UG2_ascounter = 0
G1G2_ascounter = 0
G2G1_ascounter = 0
UU_ascounter = 0
CF_ascounter = 0
G1cis_s = 0
G1cis_l = 0
G1trans = 0
G2cis_s = 0
G2cis_l = 0
G2trans = 0
## Init output
handle_g1 = open(inputfile.replace(".allValidPairs", "_G1.allValidPairs"), 'w')
handle_g2 = open(inputfile.replace(".allValidPairs", "_G2.allValidPairs"), 'w')
if verbose:
print("## Splitting valid pairs interactions ...")
with open(inputfile) as hr:
for line in hr:
isG1 = False
isG2 = False
vp_counter += 1
h = line.rstrip().split("\t")
haplotype = h[len(h) - 1].split("-") ## always last column
r1as = int(haplotype[0])
r2as = int(haplotype[1])
chr1 = h[1]
chr2 = h[4]
## counter
if r1as == 1 and r2as == 1:
isG1 = True
G1G1_ascounter += 1
handle_g1.write(line)
elif r1as == 1 and r2as == 0:
isG1 = True
G1U_ascounter += 1
handle_g1.write(line)
elif r1as == 0 and r2as == 1:
isG1 = True
UG1_ascounter += 1
handle_g1.write(line)
elif r1as == 2 and r2as == 2:
isG2 = True
G2G2_ascounter += 1
handle_g2.write(line)
elif r1as == 2 and r2as == 0:
isG2 = True
G2U_ascounter += 1
handle_g2.write(line)
elif r1as == 0 and r2as == 2:
isG2 = True
UG2_ascounter += 1
handle_g2.write(line)
elif r1as == 1 and r2as == 2:
G1G2_ascounter += 1
elif r1as == 2 and r2as == 1:
G2G1_ascounter += 1
elif r1as == 3 or r2as == 3:
CF_ascounter += 1
else:
UU_ascounter += 1
            ## Stats on genomic distance
            if isG1:
if chr1 == chr2:
d = abs(int(h[5]) - int(h[2]))
if d <= 20000:
G1cis_s += 1
else:
G1cis_l += 1
else:
G1trans += 1
            elif isG2:
if chr1 == chr2:
d = abs(int(h[5]) - int(h[2]))
if d <= 20000:
G2cis_s += 1
else:
G2cis_l += 1
else:
G2trans += 1
            if vp_counter % 100000 == 0 and verbose:
                print("##", vp_counter)
    ## Close the per-genome output files
    handle_g1.close()
    handle_g2.close()
if statsFile is not None:
handle_stat = open(statsFile, 'w')
handle_stat.write("## HiC-Pro\n")
handle_stat.write("## Allele specific information\n")
handle_stat.write("Valid_pairs\t" + str(vp_counter) + "\n")
handle_stat.write("Valid_pairs_from_ref_genome_(1-1)\t" + str(G1G1_ascounter) + "\n")
handle_stat.write("Valid_pairs_from_ref_genome_with_one_unassigned_mate_(0-1/1-0)\t" + str(
UG1_ascounter + G1U_ascounter) + "\n")
handle_stat.write("Valid_pairs_from_alt_genome_(2-2)\t" + str(G2G2_ascounter) + "\n")
handle_stat.write("Valid_pairs_from_alt_genome_with_one_unassigned_mate_(0-2/2-0)\t" + str(
UG2_ascounter + G2U_ascounter) + "\n")
handle_stat.write(
"Valid_pairs_from_alt_and_ref_genome_(1-2/2-1)\t" + str(G1G2_ascounter + G2G1_ascounter) + "\n")
handle_stat.write("Valid_pairs_with_both_unassigned_mated_(0-0)\t" + str(UU_ascounter) + "\n")
handle_stat.write("Valid_pairs_with_at_least_one_conflicting_mate_(3-)\t" + str(CF_ascounter) + "\n")
handle_stat.write("cis_short_G1\t" + str(G1cis_s) + "\n")
handle_stat.write("cis_long_G1\t" + str(G1cis_l) + "\n")
handle_stat.write("trans_G1\t" + str(G1trans) + "\n")
handle_stat.write("cis_short_G2\t" + str(G2cis_s) + "\n")
handle_stat.write("cis_long_G2\t" + str(G2cis_l) + "\n")
handle_stat.write("trans_G2\t" + str(G2trans) + "\n")
handle_stat.close()
|
import itertools
import multiprocessing
import os
from datetime import datetime as dt
import jinja2
import networkx as nx
import numpy as np
import pandas as pd
import yaml
from tqdm import tqdm
from classifier import classifier_output
from graphs import random_delaunay, random_grid, random_realization
from planner import (CellPlanner, DoorPlanner, Planner)
def all_realizations(size):
ls = size * [[True, False]]
return itertools.product(*ls)
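# e.g. all_realizations(2) yields (True, True), (True, False), (False, True), (False, False)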
def policy_label(policy, th):
if policy == 'optimal':
return policy
else:
return f'{policy}@{th}'
def _chunks(size, chunk_size=None, number_of_chunks=None, index=0):
if number_of_chunks is None:
number_of_chunks = size // chunk_size
return _chunks(size, number_of_chunks=number_of_chunks, index=index)
chunk_size = size // number_of_chunks
rem = size % number_of_chunks
chunks = (np.array([chunk_size] * number_of_chunks) +
np.array([1] * rem + [0] * (number_of_chunks - rem)))
r = np.concatenate([[0], np.cumsum(chunks)]) + index
return [range(*x) for x in zip(r[:-1], r[1:])]
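# e.g. _chunks(10, number_of_chunks=3) -> [range(0, 4), range(4, 7), range(7, 10)]
# (the remainder is spread over the first chunks)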
def samples_for_classifier(samples, sigma=None, accuracy=None, **kwargs):
if accuracy in [0.5, 1] or sigma in [0, 1]:
return 1
else:
return samples
def dict_product(**config):
ps = [[(k, v) for v in vs] for k, vs in config.items() if isinstance(vs, list)]
rs = [(k, v) for k, v in config.items() if not isinstance(v, list)]
return [dict(list(v) + rs) for v in itertools.product(*ps)]
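# e.g. dict_product(a=[1, 2], b=3) -> [{'a': 1, 'b': 3}, {'a': 2, 'b': 3}]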
def all_policies(thresholds=[], **kwargs):
return [('optimal', 0)] + [('optimistic', th) for th in thresholds]
def classifier_sample(planner, realization, sources, classifier_config={}, policy_config={}):
policies = all_policies(**policy_config)
row = []
cols = (['source', 'sigma', 'gamma', 'classification'] +
[policy_label(*policy) for policy in policies])
while True:
ps, sigma, gamma = classifier_output(realization, **classifier_config)
cs = [[] for _ in sources]
valid = True
for i, source in enumerate(sources):
for policy, th in policies:
c, r = planner.cost(ps, realization, source, policy=policy,
optimistic_threshold=th)
if r:
cs[i].append(c)
else:
valid = False
break
if not valid:
break
if not valid:
continue
min_cs = [planner.min_cost(realization, source) for source in sources]
crs = np.array(cs) / np.array(min_cs)[:, np.newaxis]
r = pd.DataFrame(
[row + [source, sigma, gamma, ps] + list(cr) for source, cr in zip(sources, crs)],
columns=cols)
yield r
def all_classifier_samples(realization, planner, sources=[], classifier_config={},
policy_config={}):
ns = [source for source in sources if planner.is_connected(source, realization)]
configs = dict_product(**classifier_config)
gens = [itertools.islice(classifier_sample(planner, realization, ns, classifier_config=config,
policy_config=policy_config),
samples_for_classifier(**config))
for config in configs]
try:
data = pd.concat(itertools.chain(*gens))
except ValueError as e:
print(f'Exception {e}')
data = pd.DataFrame()
return data
# TODO: get/set/save the seeds. For the moment the seed is set to None.
# Cannot be directly retrieved but could save the result of np.random.get_state()
# Setting the seed is simpler: np.random.seed(<int>). Scipy random draws use numpy.
# I'm also using the python random module, which also has functions random.seed and random.getstate()
# to get/set the seed.
class Experiment(object):
@classmethod
def new_experiment(cls, name, data, save=False, pool=6, return_data=None, seed=None):
t = data['map']['type']
return _experimentTypes[t](name, data, save=save, pool=pool, return_data=return_data,
seed=seed)
def __init__(self, name, data, save=False, return_data=None, pool=6, seed=None):
self.name = name
self.data = data
self.save = save
if return_data is None:
self.return_data = not self.save
else:
self.return_data = return_data
self.pool = pool
self.classifier_config = data['classifier']
self.policy_config = data['policy']
self.map_config = dict(data['map'])
self.number = self.map_config.pop('number', 1)
self.map_config.pop('type')
if save:
os.makedirs(name)
with open(f'{self.name}/experiment.yaml', 'w') as f:
yaml.dump({self.name: self.data}, f)
def compute(self):
# indices = _chunks(self.number, chunk, index=0)
indices = ([x] for x in range(self.number))
if self.pool > 0:
with multiprocessing.Pool(self.pool) as p:
return pd.concat(
tqdm(
p.imap_unordered(
self.compute_samples, indices), # optional arg chunk_size=1
total=self.number, desc=f'Experiment {self.name}'))
else:
return pd.concat(map(self.compute_samples, indices))
def compute_samples(self, indices):
return pd.concat([self.compute_sample(i) for i in indices])
def compute_sample(self, index):
np.random.seed(index)
if self.save:
os.makedirs(f'{self.name}/{index}')
data = None
while data is None:
try:
realization, planner, sources = self.sample(index)
data = all_classifier_samples(realization, planner, sources=sources,
classifier_config=self.classifier_config,
policy_config=self.policy_config)
except NameError as e:
# print(e)
continue
if self.save and not data.empty:
data.to_csv(f'{self.name}/{index}/data.csv')
if self.return_data:
return data
else:
return pd.DataFrame()
class RandomGraphExperiment(Experiment):
def sample(self, index):
g, hidden_state, s, t, cut, pruned = self.sample_map(index)
realization = random_realization(g, hidden_state, s, t)
planner = Planner(g, t, hidden_state)
if self.save:
self.save_experiment(index, g, cut, pruned, hidden_state, s, t, realization)
return realization, planner, [s]
def save_experiment(self, index, g, cut, pruned, hidden_state, s, t, realization):
r = {'hidden_state': hidden_state,
's': s,
't': t,
'realization': realization}
cut = nx.Graph(cut)
pruned = nx.Graph(pruned)
for _, _, data in cut.edges(data=True):
data['cut'] = True
for _, _, data in pruned.edges(data=True):
data['pruned'] = True
try:
pos = g.pos
except AttributeError:
pos = None
g = nx.compose(nx.compose(cut, pruned), g)
if pos is not None:
for i, p in enumerate(pos):
g.node[i]['pos'] = p
nx.write_gpickle(g, f'{self.name}/{index}/graph.gpickle')
with open(f'{self.name}/{index}/map.yaml', 'w') as f:
yaml.dump(r, f)
class RandomGridExperiment(RandomGraphExperiment):
def sample_map(self, index):
return random_grid(**self.map_config)
class RandomDelaunayExperiment(RandomGraphExperiment):
def sample_map(self, index):
return random_delaunay(**self.map_config)
class CellGraphExperiment(Experiment):
def __init__(self, *args, **kwargs):
super(CellGraphExperiment, self).__init__(*args, **kwargs)
self.sources = self.map_config['sources']
self.planner = CellPlanner(
layer_id=self.map_config['layer'], target_id=self.map_config['target'])
size = len(self.planner.hidden_state)
self.rs = list(all_realizations(size))
self.number = len(self.rs)
def sample(self, index):
realization = self.rs[index]
if self.save:
with open(f'{self.name}/{index}/map.yaml', 'w') as f:
yaml.dump({'realization': realization}, f)
return realization, self.planner, self.sources
class DoorGraphExperiment(CellGraphExperiment):
def __init__(self, *args, **kwargs):
super(CellGraphExperiment, self).__init__(*args, **kwargs)
self.sources = [self.map_config['source_id']]
planner = DoorPlanner(**self.map_config)
if self.pool < 1:
self.planner = planner
size = len(planner.hidden_state)
self.rs = list(all_realizations(size))
self.number = len(self.rs)
def sample(self, index):
if self.pool < 1:
planner = self.planner
else:
planner = DoorPlanner(**self.map_config)
realization = self.rs[index]
if self.save:
with open(f'{self.name}/{index}/map.yaml', 'w') as f:
yaml.dump({'realization': realization}, f)
return realization, planner, self.sources
def edge_from_r_graph(data):
return {'u': (data['certainty'] < 1), 'length': data['cost']}
def import_graph(path, s, t, traversable=[], prune=[], **kwargs):
original_graph = nx.read_gpickle(path)
es = [(x, y, edge_from_r_graph(data)) for x, y, data in original_graph.edges(data=True)
if [x, y] not in prune and [y, x] not in prune]
hidden_state = [[(x, y)] for x, y, d in es
if d['u'] and ([x, y] not in traversable and [y, x] not in traversable)]
g = nx.Graph(es)
for n, data in g.nodes(data=True):
data['observe'] = []
data['pos'] = original_graph.node[n]['pos']
for i, es in enumerate(hidden_state):
for x, y in es:
g.node[x]['observe'].append(i)
g.node[y]['observe'].append(i)
g[x][y]['hidden'] = True
return g, hidden_state, s, t
class RealGraphExperiment(Experiment):
def __init__(self, *args, **kwargs):
super(RealGraphExperiment, self).__init__(*args, **kwargs)
g, hs, s, t = import_graph(**self.map_config)
planner = Planner(g, t, hs)
size = len(planner.hidden_state)
self.rs = list(all_realizations(size))
self.number = len(self.rs)
self.sources = [s]
def sample(self, index):
g, hs, s, t = import_graph(**self.map_config)
planner = Planner(g, t, hs)
realization = self.rs[index]
if self.save:
with open(f'{self.name}/{index}/map.yaml', 'w') as f:
yaml.dump({'realization': realization}, f)
return realization, planner, self.sources
_experimentTypes = {'grid': RandomGridExperiment,
'delaunay': RandomDelaunayExperiment,
'cells': CellGraphExperiment,
'real': RealGraphExperiment,
'doors': DoorGraphExperiment
}
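# Illustrative sketch of one entry in experiment.yaml as consumed by execute_all_experiments below.
# 'map', 'classifier' and 'policy' mirror the keys read in Experiment.__init__; 'map.type' selects one
# of the _experimentTypes above and 'map.number' sets how many maps are sampled. The remaining map and
# classifier keys are hypothetical placeholders, since they are forwarded to random_grid and
# classifier_output, whose signatures are not shown here.
#
#   my_grid_experiment:
#     description: 'A toy grid run'
#     map:
#       type: grid
#       number: 10
#     classifier:
#       samples: 10
#       sigma: [0.1, 0.5]     # lists are expanded to a cartesian product by dict_product
#     policy:
#       thresholds: [0.5, 0.9]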
def execute_all_experiments(config_file='./experiment.yaml', pool=6):
if os.path.splitext(config_file)[1] == '.j2':
        print('Load Jinja template')
        jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
        template = jinja_env.get_template(config_file)
experiments = yaml.load(template.render())
else:
with open(config_file) as f:
experiments = yaml.load(f)
for name, data in tqdm(experiments.items(), desc='All experiments'):
if os.path.exists(name):
print(f'Experiment {name} already computed')
continue
print(f'Starting to compute experiment {name}')
description = data.get('description', '')
if description:
print(f'***\n\t{description}\n***')
start_time = dt.now()
Experiment.new_experiment(name, data, save=True, pool=pool).compute()
duration = dt.now() - start_time
secs = round(duration.total_seconds())
print(f'Experiment {name} computed in {secs} seconds')
def load_map(folder, **kwargs):
from graphs import draw_graph
g = nx.read_gpickle(f'{folder}/graph.gpickle')
cut = nx.Graph([e for e in g.edges(data=True) if 'cut' in e[2]])
pruned = nx.Graph([e for e in g.edges(data=True) if 'pruned' in e[2]])
for n, d in (list(cut.nodes(data=True)) + list(pruned.nodes(data=True))):
d.update(g.node[n])
g.remove_edges_from(list(cut.edges()) + list(pruned.edges()))
g.remove_nodes_from([n for n in g if len(g[n]) == 0])
with open(f'{folder}/map.yaml') as f:
map_config = yaml.load(f)
draw_graph(g, realization=map_config['realization'],
hidden_state=map_config['hidden_state'], cut=cut, pruned=pruned,
s=map_config['s'],
t=map_config['t'], **kwargs)
|
# Generated by Django 4.0 on 2022-02-13 11:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('orders', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Sales',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('unit_price_sale', models.DecimalField(decimal_places=2, max_digits=10)),
('commision_sale', models.FloatField(default='0')),
('payment_method_sale', models.CharField(max_length=100)),
('id_payment_sale', models.CharField(max_length=100)),
('status_sale', models.TextField()),
('date_created_sale', models.DateTimeField(auto_now_add=True)),
('date_updated_sale', models.DateTimeField(auto_now=True)),
('id_order_sale', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.orders')),
],
options={
'verbose_name_plural': 'Sales',
'ordering': ('commision_sale',),
},
),
]
|
# Copyright 2020 Sorunome
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
from twisted.web.server import Request
from synapse.api.constants import Membership
from synapse.api.errors import SynapseError
from synapse.http.servlet import (
RestServlet,
parse_json_object_from_request,
parse_strings_from_args,
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import set_tag
from synapse.rest.client.transactions import HttpTransactionCache
from synapse.types import JsonDict, RoomAlias, RoomID
if TYPE_CHECKING:
from synapse.app.homeserver import HomeServer
from ._base import client_patterns
logger = logging.getLogger(__name__)
class KnockRoomAliasServlet(RestServlet):
"""
POST /xyz.amorgan.knock/{roomIdOrAlias}
"""
PATTERNS = client_patterns(
"/xyz.amorgan.knock/(?P<room_identifier>[^/]*)", releases=()
)
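    # Illustrative request (path prefix assumed from client_patterns' unstable default):
    #   POST /_matrix/client/unstable/xyz.amorgan.knock/!abc123:example.org?server_name=example.org
    #   {"reason": "Let me in, please"}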
def __init__(self, hs: "HomeServer"):
super().__init__()
self.txns = HttpTransactionCache(hs)
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
async def on_POST(
self,
request: SynapseRequest,
room_identifier: str,
txn_id: Optional[str] = None,
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
content = parse_json_object_from_request(request)
event_content = None
if "reason" in content:
event_content = {"reason": content["reason"]}
if RoomID.is_valid(room_identifier):
room_id = room_identifier
# twisted.web.server.Request.args is incorrectly defined as Optional[Any]
args: Dict[bytes, List[bytes]] = request.args # type: ignore
remote_room_hosts = parse_strings_from_args(
args, "server_name", required=False
)
elif RoomAlias.is_valid(room_identifier):
handler = self.room_member_handler
room_alias = RoomAlias.from_string(room_identifier)
room_id_obj, remote_room_hosts = await handler.lookup_room_alias(room_alias)
room_id = room_id_obj.to_string()
else:
raise SynapseError(
400, "%s was not legal room ID or room alias" % (room_identifier,)
)
await self.room_member_handler.update_membership(
requester=requester,
target=requester.user,
room_id=room_id,
action=Membership.KNOCK,
txn_id=txn_id,
third_party_signed=None,
remote_room_hosts=remote_room_hosts,
content=event_content,
)
return 200, {"room_id": room_id}
def on_PUT(self, request: Request, room_identifier: str, txn_id: str):
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
request, self.on_POST, request, room_identifier, txn_id
)
def register_servlets(hs, http_server):
KnockRoomAliasServlet(hs).register(http_server)
|
import json
import os
from typing import Iterator, Tuple
import pytest
from altair_saver.savers import BasicSaver
from altair_saver._utils import JSONDict
def get_testcases() -> Iterator[Tuple[str, str, JSONDict]]:
directory = os.path.join(os.path.dirname(__file__), "testcases")
cases = set(f.split(".")[0] for f in os.listdir(directory))
for case in sorted(cases):
for mode, filename in [
("vega-lite", f"{case}.vl.json"),
("vega", f"{case}.vg.json"),
]:
with open(os.path.join(directory, filename)) as f:
spec = json.load(f)
yield case, mode, spec
@pytest.mark.parametrize("case, mode, spec", get_testcases())
def test_basic_saver(case: str, mode: str, spec: JSONDict) -> None:
saver = BasicSaver(spec)
bundle = saver.mimebundle([mode, "json"])
for output in bundle.values():
assert output == spec
def test_bad_format() -> None:
saver = BasicSaver({})
with pytest.raises(ValueError):
saver.mimebundle("vega")
|
#!/usr/bin/python
from clients.keystone import Client
from core import agent
__author__ = 'beb'
if __name__ == '__main__':
ksclient = Client()
print 'token: %s' % ksclient.get_token()
print 'endpoint: %s' % ksclient.get_endpoint()
print 'network endpoint: %s' % agent._get_endpoint('network')
|
"""
build_SIM_with_deposits.py
Create an augmented version of the SIM model (Godley & Lavoie Chapter 3), but with deposits added.
Since we do not have a central bank sector, this falls short of the full Model PC specification (Chapter 4).
Copyright 2016 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# from sfc_models.utils import register_standard_logs
import sfc_models
from sfc_models.examples.Quick2DPlot import Quick2DPlot
from sfc_models.models import Model, Country
from sfc_models.sector import Market
from sfc_models.sector_definitions import Household, ConsolidatedGovernment, TaxFlow, FixedMarginBusiness, DepositMarket, MoneyMarket
def main():
    # The next line of code sets the name of the output files based on this code file's name.
    # This means that if you paste this code into a new file, you get a new log name.
sfc_models.register_standard_logs('output', __file__)
# Create model, which holds all entities
mod = Model()
# Create first country - Canada. (This model only has one country.)
can = Country(mod, 'CA', 'Canada')
# Create sectors
gov = ConsolidatedGovernment(can, 'GOV', 'Government')
hh = Household(can, 'HH', 'Household')
# A literally non-profit business sector
bus = FixedMarginBusiness(can, 'BUS', 'Business Sector')
# Create the linkages between sectors - tax flow, markets - labour ('LAB'), goods ('GOOD')
tax = TaxFlow(can, 'TF', 'TaxFlow', .2)
labour = Market(can, 'LAB', 'Labour market')
goods = Market(can, 'GOOD', 'Goods market')
# Add the financial markets
# GOV -> issuing sector
mm = MoneyMarket(can, issuer_short_code='GOV')
dep = DepositMarket(can, issuer_short_code='GOV')
# --------------------------------------------
# Financial asset demand equations
# Need the full variable name for 'F' in household
hh_F = hh.GetVariableName('F')
hh.AddVariable('DEM_MON', 'Demand for Money', '0.5 * ' + hh_F)
hh.AddVariable('DEM_DEP', 'Demand for deposits', '0.5 * ' + hh_F)
# -----------------------------------------------------------------
# Need to set the exogenous variables
# Government demand for Goods ("G" in economist symbology)
mod.AddExogenous('GOV', 'DEM_GOOD', '[20.,] * 105')
mod.AddExogenous('DEP', 'r', '[0.0,] * 5 + [0.04]*100')
mod.AddInitialCondition('HH', 'F', 80.)
mod.main()
mod.TimeSeriesSupressTimeZero = True
mod.TimeSeriesCutoff = 20
Quick2DPlot(mod.GetTimeSeries('t'), mod.GetTimeSeries('GOOD__SUP_GOOD'),
'Goods supplied (national production Y)')
Quick2DPlot(mod.GetTimeSeries('t'), mod.GetTimeSeries('HH__F'),
'Household Financial Assets (F)')
if __name__ == '__main__':
main()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import numpy
import numpy.ma as ma
import os
import urllib
import netCDF4
import datetime
import inspect
import test_local # Import test_local so we can use inspect to get the path
import ocw.data_source.local as local
class test_load_file(unittest.TestCase):
def setUp(self):
#Read netCDF file
self.file_path = create_netcdf_object()
self.netCDF_file = netCDF4.Dataset(self.file_path, 'r')
self.latitudes = self.netCDF_file.variables['latitude'][:]
self.longitudes = self.netCDF_file.variables['longitude'][:]
self.values = self.netCDF_file.variables['value'][:]
self.variable_name_list = ['latitude', 'longitude', 'time', 'level', 'value']
self.possible_value_name = ['latitude', 'longitude', 'time', 'level']
def tearDown(self):
os.remove(self.file_path)
def test_function_load_file_lats(self):
'''To test load_file function for latitudes'''
self.assertItemsEqual(local.load_file(self.file_path, "value").lats, self.latitudes)
def test_function_load_file_lons(self):
'''To test load_file function for longitudes'''
self.assertItemsEqual(local.load_file(self.file_path, "value").lons, self.longitudes)
def test_function_load_file_times(self):
'''To test load_file function for times'''
        newTimes = datetime.datetime(2001, 1, 1), datetime.datetime(2001, 2, 1), datetime.datetime(2001, 3, 1)
self.assertItemsEqual(local.load_file(self.file_path, "value").times, newTimes)
def test_function_load_file_values(self):
'''To test load_file function for values'''
new_values = self.values[0,:,:,:]
self.assertTrue(numpy.allclose(local.load_file(self.file_path, "value").values, new_values))
def test_custom_dataset_name(self):
'''Test adding a custom name to a dataset'''
ds = local.load_file(self.file_path, 'value', name='foo')
self.assertEqual(ds.name, 'foo')
class test_get_netcdf_variable_names(unittest.TestCase):
file_path = "http://zipper.jpl.nasa.gov/dist/"
test_model = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
def setUp(self):
urllib.urlretrieve(self.file_path + self.test_model, self.test_model)
self.invalid_netcdf_path = create_invalid_dimensions_netcdf_object()
self.netcdf = netCDF4.Dataset(self.test_model, mode='r')
def tearDown(self):
os.remove(self.invalid_netcdf_path)
os.remove(self.test_model)
def test_valid_latitude(self):
self.lat = local._get_netcdf_variable_name(
local.LAT_NAMES,
self.netcdf,
"tasmax")
self.assertEquals(self.lat, "rlat")
def test_invalid_dimension_latitude(self):
self.netcdf = netCDF4.Dataset(self.invalid_netcdf_path, mode='r')
self.lat = local._get_netcdf_variable_name(
local.LAT_NAMES,
self.netcdf,
"value")
self.assertEquals(self.lat, "latitude")
def test_dimension_variable_name_mismatch(self):
self.netcdf = netCDF4.Dataset(self.invalid_netcdf_path, mode='r')
self.lat = local._get_netcdf_variable_name(
["lat_dim"] + local.LAT_NAMES,
self.netcdf,
"value")
self.assertEquals(self.lat, "latitude")
def test_no_match_latitude(self):
with self.assertRaises(ValueError):
self.lat = local._get_netcdf_variable_name(
['notAVarName'],
self.netcdf,
"tasmax")
def create_netcdf_object():
#To create the temporary netCDF file
file_path = '/tmp/temporaryNetcdf.nc'
netCDF_file = netCDF4.Dataset(file_path, 'w', format='NETCDF4')
#To create dimensions
netCDF_file.createDimension('lat_dim', 5)
netCDF_file.createDimension('lon_dim', 5)
netCDF_file.createDimension('time_dim', 3)
netCDF_file.createDimension('level_dim', 2)
#To create variables
latitudes = netCDF_file.createVariable('latitude', 'd', ('lat_dim',))
longitudes = netCDF_file.createVariable('longitude', 'd', ('lon_dim',))
times = netCDF_file.createVariable('time', 'd', ('time_dim',))
levels = netCDF_file.createVariable('level', 'd', ('level_dim',))
values = netCDF_file.createVariable('value', 'd', ('level_dim', 'time_dim', 'lat_dim', 'lon_dim'))
    # Latitude and longitude data for five grid points
    lat_data = range(0, 5)
    lon_data = range(200, 205)
    # Three months of data
    time_data = range(3)
    # Two levels
    level_data = [100, 200]
    # Create 150 values
    value_data = numpy.array([i for i in range(150)])
    # Reshape values to a 4D array (level, time, lats, lons)
    value_data = value_data.reshape(len(level_data), len(time_data), len(lat_data), len(lon_data))
    # Ingest the data into the netCDF variables (keep the variable handles created above)
    latitudes[:] = lat_data
    longitudes[:] = lon_data
    times[:] = numpy.array(time_data)
    levels[:] = level_data
    values[:] = value_data
#Assign time info to time variable
netCDF_file.variables['time'].units = 'months since 2001-01-01 00:00:00'
netCDF_file.close()
return file_path
def create_invalid_dimensions_netcdf_object():
#To create the temporary netCDF file
file_path = '/tmp/temporaryNetcdf.nc'
netCDF_file = netCDF4.Dataset(file_path, 'w', format='NETCDF4')
#To create dimensions
netCDF_file.createDimension('lat_dim', 5)
netCDF_file.createDimension('lon_dim', 5)
netCDF_file.createDimension('time_dim', 3)
netCDF_file.createDimension('level_dim', 2)
#To create variables
latitudes = netCDF_file.createVariable('latitude', 'd', ('lat_dim',))
longitudes = netCDF_file.createVariable('longitude', 'd', ('lon_dim',))
times = netCDF_file.createVariable('time', 'd', ('time_dim',))
levels = netCDF_file.createVariable('level', 'd', ('level_dim',))
values = netCDF_file.createVariable('value', 'd', ('level_dim', 'time_dim', 'lat_dim', 'lon_dim'))
    # Latitude and longitude data for five grid points
    lat_data = range(0, 5)
    lon_data = range(200, 205)
    # Three months of data
    time_data = range(3)
    # Two levels
    level_data = [100, 200]
    # Create 150 values
    value_data = numpy.array([i for i in range(150)])
    # Reshape values to a 4D array (level, time, lats, lons)
    value_data = value_data.reshape(len(level_data), len(time_data), len(lat_data), len(lon_data))
    # Ingest the data into the netCDF variables (keep the variable handles created above)
    latitudes[:] = lat_data
    longitudes[:] = lon_data
    times[:] = time_data
    levels[:] = level_data
    values[:] = value_data
#Assign time info to time variable
netCDF_file.variables['time'].units = 'months since 2001-01-01 00:00:00'
netCDF_file.close()
return file_path
if __name__ == '__main__':
unittest.main()
|
import os
import time
import pytest
import numpy as np
from jina import Document, DocumentArray
from .. import MongoDBStorage
NUM_DOCS = 10
@pytest.fixture
def storage():
return MongoDBStorage()
@pytest.fixture
def docs_to_index():
docu_array = DocumentArray()
for idx in range(0, NUM_DOCS):
d = Document(text=f'hello {idx}')
d.embedding = np.random.random(20)
docu_array.append(d)
return docu_array
@pytest.fixture
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(5)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from app import app, logging
from flask import (request, make_response)
@app.route('/api/airtime/dlr/', methods=['POST'])
def airtime_dlr_callback():
if request.method == 'POST':
# Reads the variables sent via POST from our gateway
_from = request.values.get('from', None)
to = request.values.get('to', None)
id_ = request.values.get('text', None)
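        # The gateway posts 'from', 'to' and 'text'; the 'text' value is treated as the DLR id here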
print id_, _from, to
try:
# write to redis
pass
except Exception as e:
            logging.error('Storing failed -> %s', e)
resp = make_response('Ok', 200)
resp.headers['Content-Type'] = 'application/json'
resp.cache_control.no_cache = True
return resp
else:
resp = make_response('Error', 400)
resp.headers['Content-Type'] = 'application/json'
resp.cache_control.no_cache = True
return resp
|
"""Use to transfer a MySQL database to SQLite."""
from __future__ import division
import logging
import re
import sqlite3
from datetime import timedelta
from decimal import Decimal
from math import ceil
from os.path import realpath
from sys import stdout
import mysql.connector
import six
from mysql.connector import errorcode
from tqdm import tqdm, trange
from mysql_to_sqlite3.sqlite_utils import (
CollatingSequences,
adapt_decimal,
adapt_timedelta,
convert_decimal,
convert_timedelta,
encode_data_for_sqlite,
)
if six.PY2:
from .sixeptions import * # pylint: disable=W0401
class MySQLtoSQLite:
"""Use this class to transfer a MySQL database to SQLite."""
COLUMN_PATTERN = re.compile(r"^[^(]+")
COLUMN_LENGTH_PATTERN = re.compile(r"\(\d+\)$")
def __init__(self, **kwargs):
"""Constructor."""
if not kwargs.get("mysql_database"):
raise ValueError("Please provide a MySQL database")
if not kwargs.get("mysql_user"):
raise ValueError("Please provide a MySQL user")
self._mysql_database = str(kwargs.get("mysql_database"))
self._mysql_tables = (
tuple(kwargs.get("mysql_tables"))
if kwargs.get("mysql_tables") is not None
else tuple()
)
self._limit_rows = int(kwargs.get("limit_rows") or 0)
if kwargs.get("collation") is not None and kwargs.get("collation").upper() in {
CollatingSequences.BINARY,
CollatingSequences.NOCASE,
CollatingSequences.RTRIM,
}:
self._collation = kwargs.get("collation").upper()
else:
self._collation = CollatingSequences.BINARY
self._prefix_indices = kwargs.get("prefix_indices") or False
self._without_foreign_keys = (
True
if len(self._mysql_tables) > 0
else (kwargs.get("without_foreign_keys") or False)
)
self._without_data = kwargs.get("without_data") or False
self._mysql_user = str(kwargs.get("mysql_user"))
self._mysql_password = (
str(kwargs.get("mysql_password")) if kwargs.get("mysql_password") else None
)
self._mysql_host = str(kwargs.get("mysql_host") or "localhost")
self._mysql_port = int(kwargs.get("mysql_port") or 3306)
self._mysql_ssl_disabled = kwargs.get("mysql_ssl_disabled") or False
self._current_chunk_number = 0
self._chunk_size = int(kwargs.get("chunk")) if kwargs.get("chunk") else None
self._sqlite_file = kwargs.get("sqlite_file") or None
self._buffered = kwargs.get("buffered") or False
self._vacuum = kwargs.get("vacuum") or False
self._quiet = kwargs.get("quiet") or False
self._logger = self._setup_logger(
log_file=kwargs.get("log_file") or None, quiet=self._quiet
)
sqlite3.register_adapter(Decimal, adapt_decimal)
sqlite3.register_converter("DECIMAL", convert_decimal)
sqlite3.register_adapter(timedelta, adapt_timedelta)
sqlite3.register_converter("TIME", convert_timedelta)
self._sqlite = sqlite3.connect(
realpath(self._sqlite_file), detect_types=sqlite3.PARSE_DECLTYPES
)
self._sqlite.row_factory = sqlite3.Row
self._sqlite_cur = self._sqlite.cursor()
self._json_as_text = kwargs.get("json_as_text") or False
self._sqlite_json1_extension_enabled = (
not self._json_as_text and self._check_sqlite_json1_extension_enabled()
)
try:
self._mysql = mysql.connector.connect(
user=self._mysql_user,
password=self._mysql_password,
host=self._mysql_host,
port=self._mysql_port,
ssl_disabled=self._mysql_ssl_disabled,
)
if not self._mysql.is_connected():
raise ConnectionError("Unable to connect to MySQL")
self._mysql_cur = self._mysql.cursor(buffered=self._buffered, raw=True)
self._mysql_cur_prepared = self._mysql.cursor(prepared=True)
self._mysql_cur_dict = self._mysql.cursor(
buffered=self._buffered,
dictionary=True,
)
try:
self._mysql.database = self._mysql_database
except (mysql.connector.Error, Exception) as err:
if hasattr(err, "errno") and err.errno == errorcode.ER_BAD_DB_ERROR:
self._logger.error("MySQL Database does not exist!")
raise
self._logger.error(err)
raise
except mysql.connector.Error as err:
self._logger.error(err)
raise
@classmethod
def _setup_logger(cls, log_file=None, quiet=False):
formatter = logging.Formatter(
fmt="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
logger = logging.getLogger(cls.__name__)
logger.setLevel(logging.DEBUG)
if not quiet:
screen_handler = logging.StreamHandler(stream=stdout)
screen_handler.setFormatter(formatter)
logger.addHandler(screen_handler)
if log_file:
file_handler = logging.FileHandler(realpath(log_file), mode="w")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
@classmethod
def _valid_column_type(cls, column_type):
return cls.COLUMN_PATTERN.match(column_type.strip())
@classmethod
def _column_type_length(cls, column_type):
suffix = cls.COLUMN_LENGTH_PATTERN.search(column_type)
if suffix:
return suffix.group(0)
return ""
@classmethod
def _translate_type_from_mysql_to_sqlite(
cls, column_type, sqlite_json1_extension_enabled=False
):
"""Handle MySQL 8."""
try:
column_type = column_type.decode()
except (UnicodeDecodeError, AttributeError):
pass
        # This could be optimized even further, however it seems adequate.
match = cls._valid_column_type(column_type)
if not match:
raise ValueError("Invalid column_type!")
data_type = match.group(0).upper()
if data_type.endswith(" UNSIGNED"):
data_type = data_type.replace(" UNSIGNED", "")
if data_type in {
"BIGINT",
"BLOB",
"BOOLEAN",
"DATE",
"DATETIME",
"DECIMAL",
"DOUBLE",
"FLOAT",
"INTEGER",
"MEDIUMINT",
"NUMERIC",
"REAL",
"SMALLINT",
"TIME",
"TINYINT",
"YEAR",
}:
return data_type
if data_type in {
"BIT",
"BINARY",
"LONGBLOB",
"MEDIUMBLOB",
"TINYBLOB",
"VARBINARY",
}:
return "BLOB"
if data_type in {"NCHAR", "NVARCHAR", "VARCHAR"}:
return data_type + cls._column_type_length(column_type)
if data_type == "CHAR":
return "CHARACTER" + cls._column_type_length(column_type)
if data_type == "INT":
return "INTEGER"
        if data_type == "TIMESTAMP":
return "DATETIME"
if data_type == "JSON" and sqlite_json1_extension_enabled:
return "JSON"
return "TEXT"
@classmethod
def _translate_default_from_mysql_to_sqlite(
cls, column_default=None, column_type=None
):
try:
column_default = column_default.decode()
except (UnicodeDecodeError, AttributeError):
pass
if column_default is None:
return ""
if isinstance(column_default, bool):
if column_type == "BOOLEAN" and sqlite3.sqlite_version >= "3.23.0":
if column_default:
return "DEFAULT(TRUE)"
return "DEFAULT(FALSE)"
return "DEFAULT '{}'".format(int(column_default))
if (
six.PY2
and isinstance(
column_default, unicode # noqa: ignore=F405 pylint: disable=E0602
)
) or isinstance(column_default, str):
if column_default.upper() in {
"CURRENT_TIME",
"CURRENT_DATE",
"CURRENT_TIMESTAMP",
}:
return "DEFAULT {}".format(column_default.upper())
return "DEFAULT '{}'".format(column_default)
@classmethod
def _data_type_collation_sequence(
cls, collation=CollatingSequences.BINARY, column_type=None
):
if column_type and collation != CollatingSequences.BINARY:
if column_type.startswith(
(
"CHARACTER",
"NCHAR",
"NVARCHAR",
"TEXT",
"VARCHAR",
)
):
return "COLLATE {collation}".format(collation=collation)
return ""
def _check_sqlite_json1_extension_enabled(self):
try:
self._sqlite_cur.execute("PRAGMA compile_options")
return "ENABLE_JSON1" in set(row[0] for row in self._sqlite_cur.fetchall())
except sqlite3.Error:
return False
def _build_create_table_sql(self, table_name):
sql = 'CREATE TABLE IF NOT EXISTS "{}" ('.format(table_name)
primary = ""
indices = ""
self._mysql_cur_dict.execute("SHOW COLUMNS FROM `{}`".format(table_name))
for row in self._mysql_cur_dict.fetchall():
column_type = self._translate_type_from_mysql_to_sqlite(
column_type=row["Type"],
sqlite_json1_extension_enabled=self._sqlite_json1_extension_enabled,
)
sql += '\n\t"{name}" {type} {notnull} {default} {collation},'.format(
name=row["Field"],
type=column_type,
notnull="NULL" if row["Null"] == "YES" else "NOT NULL",
default=self._translate_default_from_mysql_to_sqlite(
row["Default"], column_type
),
collation=self._data_type_collation_sequence(
self._collation, column_type
),
)
self._mysql_cur_dict.execute(
"""
SELECT INDEX_NAME AS `name`,
IF (NON_UNIQUE = 0 AND INDEX_NAME = 'PRIMARY', 1, 0) AS `primary`,
IF (NON_UNIQUE = 0 AND INDEX_NAME <> 'PRIMARY', 1, 0) AS `unique`,
GROUP_CONCAT(COLUMN_NAME ORDER BY SEQ_IN_INDEX) AS `columns`
FROM information_schema.STATISTICS
WHERE TABLE_SCHEMA = %s
AND TABLE_NAME = %s
GROUP BY INDEX_NAME, NON_UNIQUE
""",
(self._mysql_database, table_name),
)
for index in self._mysql_cur_dict.fetchall():
if int(index["primary"]) == 1:
primary += "\n\tPRIMARY KEY ({columns})".format(
columns=", ".join(
'"{}"'.format(column) for column in index["columns"].split(",")
)
)
else:
indices += """CREATE {unique} INDEX IF NOT EXISTS "{name}" ON "{table}" ({columns});""".format(
unique="UNIQUE" if int(index["unique"]) == 1 else "",
name="{table}_{name}".format(table=table_name, name=index["name"])
if self._prefix_indices
else index["name"],
table=table_name,
columns=", ".join(
'"{}"'.format(column) for column in index["columns"].split(",")
),
)
sql += primary
sql = sql.rstrip(", ")
if not self._without_foreign_keys:
server_version = self._mysql.get_server_version()
self._mysql_cur_dict.execute(
"""
SELECT k.COLUMN_NAME AS `column`,
k.REFERENCED_TABLE_NAME AS `ref_table`,
k.REFERENCED_COLUMN_NAME AS `ref_column`,
c.UPDATE_RULE AS `on_update`,
c.DELETE_RULE AS `on_delete`
FROM information_schema.TABLE_CONSTRAINTS AS i
{JOIN} information_schema.KEY_COLUMN_USAGE AS k
ON i.CONSTRAINT_NAME = k.CONSTRAINT_NAME
{JOIN} information_schema.REFERENTIAL_CONSTRAINTS AS c
ON c.CONSTRAINT_NAME = i.CONSTRAINT_NAME
WHERE i.TABLE_SCHEMA = %s
AND i.TABLE_NAME = %s
AND i.CONSTRAINT_TYPE = %s
GROUP BY i.CONSTRAINT_NAME,
k.COLUMN_NAME,
k.REFERENCED_TABLE_NAME,
k.REFERENCED_COLUMN_NAME,
c.UPDATE_RULE,
c.DELETE_RULE
""".format(
JOIN="JOIN"
if (server_version[0] == 8 and server_version[2] > 19)
else "LEFT JOIN"
),
(self._mysql_database, table_name, "FOREIGN KEY"),
)
for foreign_key in self._mysql_cur_dict.fetchall():
sql += """,\n\tFOREIGN KEY("{column}") REFERENCES "{ref_table}" ("{ref_column}") ON UPDATE {on_update} ON DELETE {on_delete}""".format(
**foreign_key
)
sql += "\n);"
sql += indices
return sql
def _create_table(self, table_name, attempting_reconnect=False):
try:
if attempting_reconnect:
self._mysql.reconnect()
self._sqlite_cur.executescript(self._build_create_table_sql(table_name))
self._sqlite.commit()
except mysql.connector.Error as err:
if err.errno == errorcode.CR_SERVER_LOST:
if not attempting_reconnect:
self._logger.warning(
"Connection to MySQL server lost." "\nAttempting to reconnect."
)
self._create_table(table_name, True)
else:
self._logger.warning(
"Connection to MySQL server lost."
"\nReconnection attempt aborted."
)
raise
self._logger.error(
"MySQL failed reading table definition from table %s: %s",
table_name,
err,
)
raise
except sqlite3.Error as err:
self._logger.error("SQLite failed creating table %s: %s", table_name, err)
raise
def _transfer_table_data(
self, table_name, sql, total_records=0, attempting_reconnect=False
):
if attempting_reconnect:
self._mysql.reconnect()
try:
if self._chunk_size is not None and self._chunk_size > 0:
for chunk in trange(
self._current_chunk_number,
int(ceil(total_records / self._chunk_size)),
disable=self._quiet,
):
self._current_chunk_number = chunk
self._sqlite_cur.executemany(
sql,
(
tuple(
encode_data_for_sqlite(col) if col is not None else None
for col in row
)
for row in self._mysql_cur.fetchmany(self._chunk_size)
),
)
else:
self._sqlite_cur.executemany(
sql,
(
tuple(
encode_data_for_sqlite(col) if col is not None else None
for col in row
)
for row in tqdm(
self._mysql_cur.fetchall(),
total=total_records,
disable=self._quiet,
)
),
)
self._sqlite.commit()
except mysql.connector.Error as err:
if err.errno == errorcode.CR_SERVER_LOST:
if not attempting_reconnect:
self._logger.warning(
"Connection to MySQL server lost." "\nAttempting to reconnect."
)
self._transfer_table_data(
table_name=table_name,
sql=sql,
total_records=total_records,
attempting_reconnect=True,
)
else:
self._logger.warning(
"Connection to MySQL server lost."
"\nReconnection attempt aborted."
)
raise
self._logger.error(
"MySQL transfer failed reading table data from table %s: %s",
table_name,
err,
)
raise
except sqlite3.Error as err:
self._logger.error(
"SQLite transfer failed inserting data into table %s: %s",
table_name,
err,
)
raise
def transfer(self):
"""The primary and only method with which we transfer all the data."""
if len(self._mysql_tables) > 0:
# transfer only specific tables
self._mysql_cur_prepared.execute(
"""
SELECT TABLE_NAME
FROM information_schema.TABLES
WHERE TABLE_SCHEMA = SCHEMA()
AND TABLE_NAME IN ({placeholders})
""".format(
placeholders=("%s, " * len(self._mysql_tables)).rstrip(" ,")
),
self._mysql_tables,
)
tables = (row[0] for row in self._mysql_cur_prepared.fetchall())
else:
# transfer all tables
self._mysql_cur.execute(
"""
SELECT TABLE_NAME
FROM information_schema.TABLES
WHERE TABLE_SCHEMA = SCHEMA()
"""
)
tables = (row[0].decode() for row in self._mysql_cur.fetchall())
try:
# turn off foreign key checking in SQLite while transferring data
self._sqlite_cur.execute("PRAGMA foreign_keys=OFF")
for table_name in tables:
self._logger.info(
"%sTransferring table %s",
"[WITHOUT DATA] " if self._without_data else "",
table_name,
)
# reset the chunk
self._current_chunk_number = 0
# create the table
self._create_table(table_name)
if not self._without_data:
# get the size of the data
if self._limit_rows > 0:
# limit to the requested number of rows
self._mysql_cur_dict.execute(
"""
SELECT COUNT(*) AS `total_records`
FROM (SELECT * FROM `{table_name}` LIMIT {limit}) AS `table`
""".format(
table_name=table_name, limit=self._limit_rows
)
)
else:
# get all rows
self._mysql_cur_dict.execute(
"SELECT COUNT(*) AS `total_records` FROM `{table_name}`".format(
table_name=table_name
)
)
total_records = int(
self._mysql_cur_dict.fetchone()["total_records"]
)
# only continue if there is anything to transfer
if total_records > 0:
# populate it
self._mysql_cur.execute(
"SELECT * FROM `{table_name}` {limit}".format(
table_name=table_name,
limit="LIMIT {}".format(self._limit_rows)
if self._limit_rows > 0
else "",
)
)
columns = [column[0] for column in self._mysql_cur.description]
# build the SQL string
sql = """
INSERT OR IGNORE
INTO "{table}" ({fields})
VALUES ({placeholders})
""".format(
table=table_name,
fields=('"{}", ' * len(columns))
.rstrip(" ,")
.format(*columns),
placeholders=("?, " * len(columns)).rstrip(" ,"),
)
self._transfer_table_data(
table_name=table_name, sql=sql, total_records=total_records
)
except Exception: # pylint: disable=W0706
raise
finally:
# re-enable foreign key checking once done transferring
self._sqlite_cur.execute("PRAGMA foreign_keys=ON")
if self._vacuum:
self._logger.info(
"Vacuuming created SQLite database file.\nThis might take a while."
)
self._sqlite_cur.execute("VACUUM")
self._logger.info("Done!")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Material related operations
"""
import numpy as np
import mechkit
tensors = mechkit.tensors.Basic()
class SpecificMaterial(object):
def __init__(self):
self.con = mechkit.notation.VoigtConverter(silent=True)
def _copy_upper_triangle(self, matrix):
r"""Copy upper triangle to lower triangle, i.e. make symmetric"""
index_lower_triangle = np.tril_indices(6, -1)
matrix[index_lower_triangle] = matrix.T[index_lower_triangle]
return matrix
def _voigt_to_tensor(self, stiffness):
return self.con.to_tensor(
self.con.voigt_to_mandel6(stiffness, voigt_type="stiffness")
)
class AlignedStiffnessFactory(SpecificMaterial):
def __init__(self):
self.number_independent_param = {"hexagonal_axis1": 5}
self.func = {"hexagonal_axis1": self.hexagonal_axis1}
super().__init__()
def positiv_definit(self, label):
nbr_param = self.number_independent_param[label]
func = self.func[label]
eigen = np.array([-1])
while not all(eigen > 0):
C = func(*np.random.rand(nbr_param).tolist())
eigen = np.linalg.eig(C)[0]
return C
def hexagonal_axis1(self, C1111, C1122, C1133, C2222, C1313):
voigt_half = np.array(
[
[C1111, C1122, C1122, 0, 0, 0],
[0, C2222, C1133, 0, 0, 0],
[0, 0, C2222, 0, 0, 0],
[0, 0, 0, C2222 - C1133, 0, 0],
[0, 0, 0, 0, C1313, 0],
[0, 0, 0, 0, 0, C1313],
],
dtype="float64",
)
voigt = self._copy_upper_triangle(matrix=voigt_half)
return self.con.voigt_to_mandel6(voigt, voigt_type="stiffness")
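# Minimal usage sketch (illustrative): draw a random positive-definite hexagonal (axis-1)
# stiffness in Mandel6 notation.
#
#     factory = AlignedStiffnessFactory()
#     C = factory.positiv_definit("hexagonal_axis1")  # 6x6 numpy array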
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TimeDistributed wrapper."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
# isort: off
from tensorflow.python.training.tracking import (
util as trackable_util,
)
class TimeDistributedTest(test_combinations.TestCase):
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_timedistributed_dense(self):
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2), input_shape=(3, 4)
)
)
model.compile(optimizer="rmsprop", loss="mse")
model.fit(
np.random.random((10, 3, 4)),
np.random.random((10, 3, 2)),
epochs=1,
batch_size=10,
)
# test config
model.get_config()
# check whether the model variables are present in the
# trackable list of objects
checkpointed_object_ids = {
id(o) for o in trackable_util.list_objects(model)
}
for v in model.variables:
self.assertIn(id(v), checkpointed_object_ids)
def test_timedistributed_static_batch_size(self):
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2), input_shape=(3, 4), batch_size=10
)
)
model.compile(optimizer="rmsprop", loss="mse")
model.fit(
np.random.random((10, 3, 4)),
np.random.random((10, 3, 2)),
epochs=1,
batch_size=10,
)
def test_timedistributed_invalid_init(self):
x = tf.constant(np.zeros((1, 1)).astype("float32"))
with self.assertRaisesRegex(
ValueError,
"Please initialize `TimeDistributed` layer with a "
"`tf.keras.layers.Layer` instance.",
):
keras.layers.TimeDistributed(x)
def test_timedistributed_conv2d(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Conv2D(5, (2, 2), padding="same"),
input_shape=(2, 4, 4, 3),
)
)
model.add(keras.layers.Activation("relu"))
model.compile(optimizer="rmsprop", loss="mse")
model.train_on_batch(
np.random.random((1, 2, 4, 4, 3)),
np.random.random((1, 2, 4, 4, 5)),
)
model = keras.models.model_from_json(model.to_json())
model.summary()
def test_timedistributed_stacked(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2), input_shape=(3, 4)
)
)
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.add(keras.layers.Activation("relu"))
model.compile(optimizer="rmsprop", loss="mse")
model.fit(
np.random.random((10, 3, 4)),
np.random.random((10, 3, 3)),
epochs=1,
batch_size=10,
)
def test_regularizers(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(
2, kernel_regularizer="l1", activity_regularizer="l1"
),
input_shape=(3, 4),
)
)
model.add(keras.layers.Activation("relu"))
model.compile(optimizer="rmsprop", loss="mse")
self.assertEqual(len(model.losses), 2)
def test_TimeDistributed_learning_phase(self):
with self.cached_session():
# test layers that need learning_phase to be set
np.random.seed(1234)
x = keras.layers.Input(shape=(3, 2))
y = keras.layers.TimeDistributed(keras.layers.Dropout(0.999))(
x, training=True
)
model = keras.models.Model(x, y)
y = model.predict(np.random.random((10, 3, 2)))
self.assertAllClose(np.mean(y), 0.0, atol=1e-1, rtol=1e-1)
def test_TimeDistributed_batchnorm(self):
with self.cached_session():
# test that wrapped BN updates still work.
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.BatchNormalization(center=True, scale=True),
name="bn",
input_shape=(10, 2),
)
)
model.compile(optimizer="rmsprop", loss="mse")
# Assert that mean and variance are 0 and 1.
td = model.layers[0]
self.assertAllClose(td.get_weights()[2], np.array([0, 0]))
assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
# Train
model.train_on_batch(
np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
np.broadcast_to(np.array([0, 1]), (1, 10, 2)),
)
# Assert that mean and variance changed.
assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
def test_TimeDistributed_trainable(self):
# test layers that need learning_phase to be set
x = keras.layers.Input(shape=(3, 2))
layer = keras.layers.TimeDistributed(keras.layers.BatchNormalization())
_ = layer(x)
self.assertEqual(len(layer.trainable_weights), 2)
layer.trainable = False
assert not layer.trainable_weights
layer.trainable = True
assert len(layer.trainable_weights) == 2
def test_TimeDistributed_with_masked_embedding_and_unspecified_shape(self):
with self.cached_session():
# test with unspecified shape and Embeddings with mask_zero
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Embedding(5, 6, mask_zero=True),
input_shape=(None, None),
)
) # N by t_1 by t_2 by 6
model.add(
keras.layers.TimeDistributed(
keras.layers.SimpleRNN(7, return_sequences=True)
)
)
model.add(
keras.layers.TimeDistributed(
keras.layers.SimpleRNN(8, return_sequences=False)
)
)
model.add(keras.layers.SimpleRNN(1, return_sequences=False))
model.compile(optimizer="rmsprop", loss="mse")
model_input = np.random.randint(
low=1, high=5, size=(10, 3, 4), dtype="int32"
)
for i in range(4):
model_input[i, i:, i:] = 0
model.fit(
model_input, np.random.random((10, 1)), epochs=1, batch_size=10
)
mask_outputs = [model.layers[0].compute_mask(model.input)]
for layer in model.layers[1:]:
mask_outputs.append(
layer.compute_mask(layer.input, mask_outputs[-1])
)
func = keras.backend.function([model.input], mask_outputs[:-1])
mask_outputs_val = func([model_input])
ref_mask_val_0 = model_input > 0 # embedding layer
ref_mask_val_1 = ref_mask_val_0 # first RNN layer
ref_mask_val_2 = np.any(ref_mask_val_1, axis=-1) # second RNN layer
ref_mask_val = [ref_mask_val_0, ref_mask_val_1, ref_mask_val_2]
for i in range(3):
self.assertAllEqual(mask_outputs_val[i], ref_mask_val[i])
self.assertIs(mask_outputs[-1], None) # final layer
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_TimeDistributed_with_masking_layer(self):
# test with Masking layer
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Masking(
mask_value=0.0,
),
input_shape=(None, 4),
)
)
model.add(keras.layers.TimeDistributed(keras.layers.Dense(5)))
model.compile(optimizer="rmsprop", loss="mse")
model_input = np.random.randint(low=1, high=5, size=(10, 3, 4))
for i in range(4):
model_input[i, i:, :] = 0.0
model.compile(optimizer="rmsprop", loss="mse")
model.fit(
model_input, np.random.random((10, 3, 5)), epochs=1, batch_size=6
)
mask_outputs = [model.layers[0].compute_mask(model.input)]
mask_outputs += [
model.layers[1].compute_mask(
model.layers[1].input, mask_outputs[-1]
)
]
func = keras.backend.function([model.input], mask_outputs)
mask_outputs_val = func([model_input])
self.assertEqual((mask_outputs_val[0]).all(), model_input.all())
self.assertEqual((mask_outputs_val[1]).all(), model_input.all())
def test_TimeDistributed_with_different_time_shapes(self):
time_dist = keras.layers.TimeDistributed(keras.layers.Dense(5))
ph_1 = keras.backend.placeholder(shape=(None, 10, 13))
out_1 = time_dist(ph_1)
self.assertEqual(out_1.shape.as_list(), [None, 10, 5])
ph_2 = keras.backend.placeholder(shape=(None, 1, 13))
out_2 = time_dist(ph_2)
self.assertEqual(out_2.shape.as_list(), [None, 1, 5])
ph_3 = keras.backend.placeholder(shape=(None, 1, 18))
with self.assertRaisesRegex(ValueError, "is incompatible with"):
time_dist(ph_3)
def test_TimeDistributed_with_invalid_dimensions(self):
time_dist = keras.layers.TimeDistributed(keras.layers.Dense(5))
ph = keras.backend.placeholder(shape=(None, 10))
with self.assertRaisesRegex(
ValueError,
"`TimeDistributed` Layer should be passed an `input_shape `",
):
time_dist(ph)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_TimeDistributed_reshape(self):
class NoReshapeLayer(keras.layers.Layer):
def call(self, inputs):
return inputs
# Built-in layers that aren't stateful use the reshape implementation.
td1 = keras.layers.TimeDistributed(keras.layers.Dense(5))
self.assertTrue(td1._always_use_reshape)
# Built-in layers that are stateful don't use the reshape
# implementation.
td2 = keras.layers.TimeDistributed(
keras.layers.RNN(keras.layers.SimpleRNNCell(10), stateful=True)
)
self.assertFalse(td2._always_use_reshape)
# Custom layers are not allowlisted for the fast reshape implementation.
td3 = keras.layers.TimeDistributed(NoReshapeLayer())
self.assertFalse(td3._always_use_reshape)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_TimeDistributed_output_shape_return_types(self):
class TestLayer(keras.layers.Layer):
def call(self, inputs):
return tf.concat([inputs, inputs], axis=-1)
def compute_output_shape(self, input_shape):
output_shape = tf.TensorShape(input_shape).as_list()
output_shape[-1] = output_shape[-1] * 2
output_shape = tf.TensorShape(output_shape)
return output_shape
class TestListLayer(TestLayer):
def compute_output_shape(self, input_shape):
shape = super().compute_output_shape(input_shape)
return shape.as_list()
class TestTupleLayer(TestLayer):
def compute_output_shape(self, input_shape):
shape = super().compute_output_shape(input_shape)
return tuple(shape.as_list())
# Layers can specify output shape as list/tuple/TensorShape
test_layers = [TestLayer, TestListLayer, TestTupleLayer]
for layer in test_layers:
input_layer = keras.layers.TimeDistributed(layer())
inputs = keras.backend.placeholder(shape=(None, 2, 4))
output = input_layer(inputs)
self.assertEqual(output.shape.as_list(), [None, 2, 8])
self.assertEqual(
input_layer.compute_output_shape([None, 2, 4]).as_list(),
[None, 2, 8],
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
# TODO(scottzhu): check why v1 session failed.
def test_TimeDistributed_with_mask_first_implementation(self):
np.random.seed(100)
rnn_layer = keras.layers.LSTM(4, return_sequences=True, stateful=True)
data = np.array(
[
[[[1.0], [1.0]], [[0.0], [1.0]]],
[[[1.0], [0.0]], [[1.0], [1.0]]],
[[[1.0], [0.0]], [[1.0], [1.0]]],
]
)
x = keras.layers.Input(shape=(2, 2, 1), batch_size=3)
x_masking = keras.layers.Masking()(x)
y = keras.layers.TimeDistributed(rnn_layer)(x_masking)
model_1 = keras.models.Model(x, y)
model_1.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
output_with_mask = model_1.predict(data, steps=1)
y = keras.layers.TimeDistributed(rnn_layer)(x)
model_2 = keras.models.Model(x, y)
model_2.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
output = model_2.predict(data, steps=1)
self.assertNotAllClose(output_with_mask, output, atol=1e-7)
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
layer=[keras.layers.LSTM, keras.layers.Dense]
)
)
def test_TimeDistributed_with_ragged_input(self, layer):
if tf.executing_eagerly():
self.skipTest("b/143103634")
np.random.seed(100)
layer = layer(4)
ragged_data = tf.ragged.constant(
[
[[[1.0], [1.0]], [[2.0], [2.0]]],
[[[4.0], [4.0]], [[5.0], [5.0]], [[6.0], [6.0]]],
[[[7.0], [7.0]], [[8.0], [8.0]], [[9.0], [9.0]]],
],
ragged_rank=1,
)
x_ragged = keras.Input(shape=(None, 2, 1), dtype="float32", ragged=True)
y_ragged = keras.layers.TimeDistributed(layer)(x_ragged)
model_1 = keras.models.Model(x_ragged, y_ragged)
model_1._run_eagerly = test_utils.should_run_eagerly()
output_ragged = model_1.predict(ragged_data, steps=1)
x_dense = keras.Input(shape=(None, 2, 1), dtype="float32")
masking = keras.layers.Masking()(x_dense)
y_dense = keras.layers.TimeDistributed(layer)(masking)
model_2 = keras.models.Model(x_dense, y_dense)
dense_data = ragged_data.to_tensor()
model_2._run_eagerly = test_utils.should_run_eagerly()
output_dense = model_2.predict(dense_data, steps=1)
output_ragged = convert_ragged_tensor_value(output_ragged)
self.assertAllEqual(output_ragged.to_tensor(), output_dense)
@test_combinations.run_all_keras_modes
def test_TimeDistributed_with_ragged_input_with_batch_size(self):
np.random.seed(100)
layer = keras.layers.Dense(16)
ragged_data = tf.ragged.constant(
[
[[[1.0], [1.0]], [[2.0], [2.0]]],
[[[4.0], [4.0]], [[5.0], [5.0]], [[6.0], [6.0]]],
[[[7.0], [7.0]], [[8.0], [8.0]], [[9.0], [9.0]]],
],
ragged_rank=1,
)
# Use the first implementation by specifying batch_size
x_ragged = keras.Input(
shape=(None, 2, 1), batch_size=3, dtype="float32", ragged=True
)
y_ragged = keras.layers.TimeDistributed(layer)(x_ragged)
model_1 = keras.models.Model(x_ragged, y_ragged)
output_ragged = model_1.predict(ragged_data, steps=1)
x_dense = keras.Input(shape=(None, 2, 1), batch_size=3, dtype="float32")
masking = keras.layers.Masking()(x_dense)
y_dense = keras.layers.TimeDistributed(layer)(masking)
model_2 = keras.models.Model(x_dense, y_dense)
dense_data = ragged_data.to_tensor()
output_dense = model_2.predict(dense_data, steps=1)
output_ragged = convert_ragged_tensor_value(output_ragged)
self.assertAllEqual(output_ragged.to_tensor(), output_dense)
def test_TimeDistributed_set_static_shape(self):
layer = keras.layers.TimeDistributed(keras.layers.Conv2D(16, (3, 3)))
inputs = keras.Input(batch_shape=(1, None, 32, 32, 1))
outputs = layer(inputs)
# Make sure the batch dim is not lost after array_ops.reshape.
self.assertListEqual(outputs.shape.as_list(), [1, None, 30, 30, 16])
@test_combinations.run_all_keras_modes
def test_TimeDistributed_with_mimo(self):
dense_1 = keras.layers.Dense(8)
dense_2 = keras.layers.Dense(16)
class TestLayer(keras.layers.Layer):
def __init__(self):
super().__init__()
self.dense_1 = dense_1
self.dense_2 = dense_2
def call(self, inputs):
return self.dense_1(inputs[0]), self.dense_2(inputs[1])
def compute_output_shape(self, input_shape):
output_shape_1 = self.dense_1.compute_output_shape(
input_shape[0]
)
output_shape_2 = self.dense_2.compute_output_shape(
input_shape[1]
)
return output_shape_1, output_shape_2
np.random.seed(100)
layer = TestLayer()
data_1 = tf.constant(
[
[[[1.0], [1.0]], [[2.0], [2.0]]],
[[[4.0], [4.0]], [[5.0], [5.0]]],
[[[7.0], [7.0]], [[8.0], [8.0]]],
]
)
data_2 = tf.constant(
[
[[[1.0], [1.0]], [[2.0], [2.0]]],
[[[4.0], [4.0]], [[5.0], [5.0]]],
[[[7.0], [7.0]], [[8.0], [8.0]]],
]
)
x1 = keras.Input(shape=(None, 2, 1), dtype="float32")
x2 = keras.Input(shape=(None, 2, 1), dtype="float32")
y1, y2 = keras.layers.TimeDistributed(layer)([x1, x2])
model_1 = keras.models.Model([x1, x2], [y1, y2])
model_1.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
output_1 = model_1.predict((data_1, data_2), steps=1)
y1 = dense_1(x1)
y2 = dense_2(x2)
model_2 = keras.models.Model([x1, x2], [y1, y2])
output_2 = model_2.predict((data_1, data_2), steps=1)
self.assertAllClose(output_1, output_2)
model_1.fit(
x=[
np.random.random((10, 2, 2, 1)),
np.random.random((10, 2, 2, 1)),
],
y=[
np.random.random((10, 2, 2, 8)),
np.random.random((10, 2, 2, 16)),
],
epochs=1,
batch_size=3,
)
def test_TimeDistributed_Attention(self):
query_input = keras.layers.Input(shape=(None, 1, 10), dtype="float32")
value_input = keras.layers.Input(shape=(None, 4, 10), dtype="float32")
# Query-value attention of shape [batch_size, Tq, filters].
query_value_attention_seq = keras.layers.TimeDistributed(
keras.layers.Attention()
)([query_input, value_input])
model = keras.models.Model(
[query_input, value_input], query_value_attention_seq
)
model.compile(optimizer="rmsprop", loss="mse")
model.fit(
[
np.random.random((10, 8, 1, 10)),
np.random.random((10, 8, 4, 10)),
],
np.random.random((10, 8, 1, 10)),
epochs=1,
batch_size=10,
)
# test config and serialization/deserialization
model.get_config()
model = keras.models.model_from_json(model.to_json())
model.summary()
def convert_ragged_tensor_value(inputs):
if isinstance(inputs, tf.compat.v1.ragged.RaggedTensorValue):
flat_values = tf.convert_to_tensor(
value=inputs.flat_values, name="flat_values"
)
return tf.RaggedTensor.from_nested_row_splits(
flat_values, inputs.nested_row_splits, validate=False
)
return inputs
if __name__ == "__main__":
tf.test.main()
|
import tensorflow as tf
class Blur:
"""
Base class for Blur objects.
"""
def __init__(self, size=1):
"""
Initialize a Blur object with a specified kernel size.
https://en.wikipedia.org/wiki/Kernel_(image_processing)
        Note: an odd size is recommended so the kernel has a well-defined centre pixel.
Args:
size: pixel height and width of a square kernel
"""
self.size = max(size, 1)
self.kernel = self.create_kernel(size=self.size)
def create_kernel(self, size=1):
"""
Create kernel to apply blurring. Must be implemented by child class
Args:
size: pixel height and width of a square kernel
Returns:
kernel of shape [filter_height, filter_width, in_channels, channel_multiplier]
"""
raise NotImplementedError
def apply(self, img):
"""
Apply blurring to an image or list of images
Args:
img: an image or list of images. Format is [height, width, channels]
or [number of images, height, width, channels]. Note: all images must be
of the same dimension.
Returns:
processed image of the same dimensions as the input
"""
img_dim = img.shape
img = self.format_input(img)
img = tf.nn.depthwise_conv2d(img, self.kernel, strides=[1, 1, 1, 1], padding="SAME")
img = self.format_output(img, img_dim)
return img
@staticmethod
def format_input(img):
"""
Format an input prior to blurring. Single images are reshaped to
contain a batch dimension. Output is shape: [number of images, height, width, channels]
Args:
img: image or list of images
Returns:
input image(s) with shape = [number of images, height, width, channels]
"""
        img = tf.convert_to_tensor(img)
        num_dims = img.shape.ndims
        if num_dims not in (3, 4):
            raise ValueError('Input image must have shape '
                             '[ batch height width channels ] or [ height width channels ]. '
                             f'Current input has {num_dims} dimensions: {img.shape}')
        if num_dims == 3:
            img = tf.expand_dims(img, 0)
        img = tf.cast(img, tf.float32)
        return img
@staticmethod
def format_output(img, img_dim):
"""
        Format the output after blurring to have the same shape as the initial input.
Args:
img: output image or list of images
img_dim: initial dimensions of image
Returns:
reshaped output image(s)
"""
img = tf.reshape(img, img_dim)
return img
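# --- Hypothetical usage sketch (not part of the original module) -------------
# Minimal concrete subclass: a box (mean) blur. The channel count of 3 below is
# an assumption for RGB input; tf.nn.depthwise_conv2d expects a kernel of shape
# [filter_height, filter_width, in_channels, channel_multiplier].
class BoxBlur(Blur):
    def create_kernel(self, size=1, channels=3):
        # Uniform averaging kernel; each channel is filtered independently.
        return tf.ones([size, size, channels, 1], dtype=tf.float32) / float(size * size)
if __name__ == "__main__":
    # Blur a random 32x32 RGB image and check that the shape is preserved.
    image = tf.random.uniform([32, 32, 3])
    blurred = BoxBlur(size=3).apply(image)
    print(blurred.shape)  # -> (32, 32, 3)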
|
i = 4
d = 4.0
s = 'HackerRank '
# Declare second integer, double, and String variables.
# In Python, a variable is declared simply by assigning it an initial value.
# Read and save an integer, double, and String to your variables.
inte=int(input())
do=float(input())
st=input()
# Print the sum of both integer variables on a new line.
print(i+inte)
# Print the sum of the double variables on a new line.
print(d+do)
# Concatenate and print the String variables on a new line
# The 's' variable above should be printed first.
print(s+st)
|
# _*_ coding: utf-8 _*_
__author__ = 'nick'
__date__ = '2019/2/20 1:06'
import xadmin
from xadmin import views
from .models import EmailVerifyRecord, ViewPage, UserProfiles
class BaseSetting:
    # Allow switching the admin backend theme
enable_themes = True
use_bootswatch = True
class GlobalSettings:
    # Customize the admin site title and footer
site_title = u"点点后台管理系统"
site_footer = u"点点在线网"
    # Show app models in the navigation bar as a collapsible (accordion) menu
menu_style = "accordion"
# Do not inherit from anything here; the class implicitly inherits from Python's object
class EmailVerifyRecordAdmin:
    # Admin view for the email verification code table
    # Display the email, code, send_type and send_time columns
list_display = ['email', 'code', 'send_type', 'send_time']
    # search_fields enables search (lookup) over these columns
search_fields = ['email', 'code', 'send_type']
    # list_filter enables sidebar filtering
list_filter = ['email', 'code', 'send_type', 'send_time']
class ViewPageAdmin:
list_display = ['index', 'title', 'image', 'url', 'add_time']
search_fields = ['index', 'title', 'image', 'url']
list_filter = ['index', 'title', 'image', 'url', 'add_time']
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(ViewPage, ViewPageAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSettings)
|
import mock
from zeep import exceptions
def get_side_effect(
returned_token="RETURNED_TOKEN",
returned_status=100,
raise_zeep_fault=False,
raise_zeep_error=False,
):
# noinspection PyPep8Naming
# noinspection PyMethodMayBeStatic
# noinspection PyUnusedLocal
class ClientService:
class PaymentRequest:
Status = returned_status
Authority = returned_token
def __init__(self, *args, **kwargs):
if raise_zeep_fault:
raise exceptions.Fault("FAKE ZEEP FAULT")
if raise_zeep_error:
raise exceptions.Error("FAKE ZEEP ERROR")
class PaymentVerification:
Status = returned_status
def __init__(self, *args, **kwargs):
if raise_zeep_error:
raise exceptions.Error("FAKE ZEEP ERROR")
class Client:
service = ClientService()
transport = mock.MagicMock()
def __init__(self, *args, **kwargs):
pass
def side_effect(*args, **kwargs):
return Client(*args, **kwargs)
return side_effect
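# --- Hypothetical usage sketch (not part of the original module) -------------
# get_side_effect() is intended to be fed to mock.patch as a side_effect so that
# code constructing zeep.Client talks to a fake SOAP service. The patch target
# "zeep.Client" is an assumption; in a real test, patch wherever Client is
# actually looked up.
def _example_patch_usage():
    with mock.patch("zeep.Client", side_effect=get_side_effect(returned_token="TOKEN-1")):
        import zeep
        client = zeep.Client("https://example.invalid/service?wsdl")
        response = client.service.PaymentRequest()
        assert response.Status == 100
        assert response.Authority == "TOKEN-1"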
|
class TeampassHttpException(Exception):
def __init__(self, http_code, msg=None):
self.msg = msg
self.http_code = http_code
def __str__(self):
return u"HTTP CODE: {}, Error: {}".format(self.http_code, self.msg)
def __unicode__(self):
return u"HTTP CODE: {}, Error: {}".format(self.http_code, self.msg)
class TeampassApiException(Exception):
def __init__(self, msg=None):
self.msg = msg
def __str__(self):
return u"Error: {}".format(self.msg)
def __unicode__(self):
return u"Error: {}".format(self.msg)
|
"""
Tests for cogdb.eddn
"""
import datetime
import pathlib
import shutil
import tempfile
try:
import rapidjson as json
except ImportError:
import json
import cogdb.eddn
import cogdb.schema
from cogdb.schema import TrackByID
from cogdb.eddb import FactionActiveState
EXAMPLE_JOURNAL_CARRIER = """{
"$schemaRef": "https://eddn.edcd.io/schemas/journal/1",
"header": {
"gatewayTimestamp": "2020-08-03T11:03:25.661784Z",
"softwareName": "E:D Market Connector [Windows]",
"softwareVersion": "3.4.6.0",
"uploaderID": "337ea068329694dde54f7b868cd6bc48e1622753"
},
"message": {
"Body": "GCRV 1568 A",
"BodyID": 2,
"BodyType": "Star",
"Conflicts": [
{
"Faction1": {
"Name": "Future of Lan Gundi",
"Stake": "Gagnan Hub",
"WonDays": 0
},
"Faction2": {
"Name": "Silver Bridge PLC",
"Stake": "",
"WonDays": 0
},
"Status": "active",
"WarType": "war"
}
],
"Docked": true,
"Factions": [
{
"ActiveStates": [
{
"State": "CivilLiberty"
}
],
"Allegiance": "Federation",
"FactionState": "CivilLiberty",
"Government": "Democracy",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.086,
"Name": "Independents of GCRV 1568"
},
{
"ActiveStates": [
{
"State": "War"
}
],
"Allegiance": "Federation",
"FactionState": "War",
"Government": "Democracy",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.121,
"Name": "Future of Lan Gundi"
},
{
"ActiveStates": [
{
"State": "Boom"
},
{
"State": "War"
}
],
"Allegiance": "Federation",
"FactionState": "War",
"Government": "Corporate",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.121,
"Name": "Silver Bridge PLC"
},
{
"Allegiance": "Independent",
"FactionState": "None",
"Government": "Corporate",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.05,
"Name": "GCRV 1568 Incorporated"
},
{
"Allegiance": "Independent",
"FactionState": "None",
"Government": "Dictatorship",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.055,
"Name": "GCRV 1568 Focus"
},
{
"Allegiance": "Independent",
"FactionState": "None",
"Government": "Corporate",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.065,
"Name": "GCRV 1568 Natural Interstellar"
},
{
"Allegiance": "Independent",
"FactionState": "None",
"Government": "Dictatorship",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.054,
"Name": "GCRV 1568 Law Party"
},
{
"ActiveStates": [
{
"State": "Boom"
}
],
"Allegiance": "Independent",
"FactionState": "Boom",
"Government": "Cooperative",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.448,
"Name": "Aseveljet",
"RecoveringStates": [
{
"State": "PirateAttack",
"Trend": 0
}
]
}
],
"MarketID": 3700062976,
"Population": 377684748,
"PowerplayState": "Exploited",
"Powers": [
"Li Yong-Rui"
],
"StarPos": [
-33.90625,
-63,
-82.875
],
"StarSystem": "GCRV 1568",
"StationEconomies": [
{
"Name": "$economy_Carrier;",
"Proportion": 1
}
],
"StationEconomy": "$economy_Carrier;",
"StationFaction": {
"Name": "FleetCarrier"
},
"StationGovernment": "$government_Carrier;",
"StationName": "H8X-0VZ",
"StationServices": [
"dock",
"autodock",
"blackmarket",
"commodities",
"contacts",
"exploration",
"outfitting",
"crewlounge",
"rearm",
"refuel",
"repair",
"shipyard",
"engineer",
"flightcontroller",
"stationoperations",
"stationMenu",
"carriermanagement",
"carrierfuel",
"voucherredemption"
],
"StationType": "FleetCarrier",
"SystemAddress": 2862335641955,
"SystemAllegiance": "Independent",
"SystemEconomy": "$economy_Agri;",
"SystemFaction": {
"FactionState": "Boom",
"Name": "Aseveljet"
},
"SystemGovernment": "$government_Cooperative;",
"SystemSecondEconomy": "$economy_Industrial;",
"SystemSecurity": "$SYSTEM_SECURITY_high;",
"event": "Location",
"timestamp": "2020-08-03T11:03:24Z"
}
}
"""
EXAMPLE_JOURNAL_STATION = """{
"$schemaRef": "https://eddn.edcd.io/schemas/journal/1",
"header": {
"gatewayTimestamp": "2020-08-03T11:04:12.802484Z",
"softwareName": "E:D Market Connector [Windows]",
"softwareVersion": "4.0.4",
"uploaderID": "e0dcd76cabca63a40bb58e97a5d98ce2efe0be10"
},
"message": {
"Body": "Mattingly Port",
"BodyID": 65,
"BodyType": "Station",
"Conflicts": [
{
"Faction1": {
"Name": "Udegobo Silver Power Int",
"Stake": "Haarsma Keep",
"WonDays": 1
},
"Faction2": {
"Name": "Revolutionary Mpalans Confederation",
"Stake": "",
"WonDays": 0
},
"Status": "active",
"WarType": "war"
}
],
"Docked": true,
"Factions": [
{
"Allegiance": "Federation",
"FactionState": "None",
"Government": "Corporate",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.102386,
"Name": "Ochosag Federal Company"
},
{
"ActiveStates": [
{
"State": "Boom"
}
],
"Allegiance": "Federation",
"FactionState": "Boom",
"Government": "Democracy",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.643141,
"Name": "Social Ahemakino Green Party"
},
{
"ActiveStates": [
{
"State": "War"
}
],
"Allegiance": "Federation",
"FactionState": "War",
"Government": "Corporate",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.078529,
"Name": "Udegobo Silver Power Int"
},
{
"Allegiance": "Independent",
"FactionState": "None",
"Government": "Dictatorship",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.014911,
"Name": "Defence Party of Ahemakino"
},
{
"ActiveStates": [
{
"State": "War"
}
],
"Allegiance": "Federation",
"FactionState": "War",
"Government": "Confederacy",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.078529,
"Name": "Revolutionary Mpalans Confederation"
},
{
"Allegiance": "Independent",
"FactionState": "None",
"Government": "Corporate",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.037773,
"Name": "Ahemakino Bridge Organisation"
},
{
"Allegiance": "Independent",
"FactionState": "None",
"Government": "Dictatorship",
"Happiness": "$Faction_HappinessBand2;",
"Influence": 0.044732,
"Name": "Natural Ahemakino Defence Party"
}
],
"MarketID": 3229716992,
"Population": 9165120,
"PowerplayState": "Controlled",
"Powers": [
"Felicia Winters"
],
"StarPos": [
123.25,
-3.21875,
-97.4375
],
"StarSystem": "Ahemakino",
"StationAllegiance": "Federation",
"StationEconomies": [
{
"Name": "$economy_Industrial;",
"Proportion": 0.8
},
{
"Name": "$economy_Refinery;",
"Proportion": 0.2
}
],
"StationEconomy": "$economy_Industrial;",
"StationFaction": {
"FactionState": "Boom",
"Name": "Social Ahemakino Green Party"
},
"StationGovernment": "$government_Democracy;",
"StationName": "Mattingly Port",
"StationServices": [
"dock",
"autodock",
"blackmarket",
"commodities",
"contacts",
"exploration",
"missions",
"outfitting",
"crewlounge",
"rearm",
"refuel",
"repair",
"shipyard",
"tuning",
"engineer",
"missionsgenerated",
"flightcontroller",
"stationoperations",
"powerplay",
"searchrescue",
"materialtrader",
"stationMenu",
"shop"
],
"StationType": "Coriolis",
"SystemAddress": 6131367809730,
"SystemAllegiance": "Federation",
"SystemEconomy": "$economy_Industrial;",
"SystemFaction": {
"FactionState": "Boom",
"Name": "Social Ahemakino Green Party"
},
"SystemGovernment": "$government_Democracy;",
"SystemSecondEconomy": "$economy_Refinery;",
"SystemSecurity": "$SYSTEM_SECURITY_high;",
"event": "Location",
"timestamp": "2020-08-03T11:04:11Z"
}
}
"""
EXAMPLE_CARRIER_DISC = """{
"$schemaRef": "https://eddn.edcd.io/schemas/journal/1",
"header": {
"gatewayTimestamp": "2021-05-18T00:34:39.006381Z",
"softwareName": "EDDiscovery",
"softwareVersion": "12.0.2.0",
"uploaderID": "e2e46eabd77f4eea0f8cd655183b4d980fb08338"
},
"message": {
"Body": "Cha Eohm XN-X a69-1",
"BodyID": 0,
"BodyType": "Star",
"Docked": true,
"MarketID": 3703705600,
"Population": 0,
"StarPos": [
-9207.15625,
-39.9375,
58557.125
],
"StarSystem": "Nanomam",
"StationEconomies": [
{
"Name": "$economy_Carrier;",
"Proportion": 1.0
}
],
"StationEconomy": "$economy_Carrier;",
"StationFaction": {
"Name": "FleetCarrier"
},
"StationGovernment": "$government_Carrier;",
"StationName": "KLG-9TL",
"StationServices": [
"dock",
"autodock",
"commodities",
"contacts",
"exploration",
"outfitting",
"crewlounge",
"rearm",
"refuel",
"repair",
"shipyard",
"engineer",
"flightcontroller",
"stationoperations",
"stationMenu",
"carriermanagement",
"carrierfuel"
],
"StationType": "FleetCarrier",
"SystemAddress": 21970368135760,
"SystemAllegiance": "",
"SystemEconomy": "$economy_None;",
"SystemGovernment": "$government_None;",
"SystemSecondEconomy": "$economy_None;",
"SystemSecurity": "$GAlAXY_MAP_INFO_state_anarchy;",
"event": "Location",
"timestamp": "2021-05-20T19:03:20.11111Z"
}
}"""
EXAMPLE_CARRIER_EDMC = """{
"$schemaRef": "https://eddn.edcd.io/schemas/journal/1",
"header": {
"gatewayTimestamp": "2021-05-18T00:34:42.526845Z",
"softwareName": "E:D Market Connector [Windows]",
"softwareVersion": "5.0.1",
"uploaderID": "70787c46bbd4497e1af3c5f04609be60f09d0835"
},
"message": {
"Body": "Prua Phoe EQ-Z b45-7 A",
"BodyID": 1,
"BodyType": "Star",
"Docked": true,
"MarketID": 3701618176,
"Population": 0,
"StarPos": [
-5497.5625,
-462.3125,
11445.25
],
"StarSystem": "Nanomam",
"StationEconomies": [
{
"Name": "$economy_Carrier;",
"Proportion": 1.0
}
],
"StationEconomy": "$economy_Carrier;",
"StationFaction": {
"Name": "FleetCarrier"
},
"StationGovernment": "$government_Carrier;",
"StationName": "OVE-111",
"StationServices": [
"dock",
"autodock",
"blackmarket",
"commodities",
"contacts",
"exploration",
"outfitting",
"crewlounge",
"rearm",
"refuel",
"repair",
"shipyard",
"engineer",
"flightcontroller",
"stationoperations",
"stationMenu",
"carriermanagement",
"carrierfuel",
"voucherredemption"
],
"StationType": "FleetCarrier",
"SystemAddress": 15990296033161,
"SystemAllegiance": "",
"SystemEconomy": "$economy_None;",
"SystemGovernment": "$government_None;",
"SystemSecondEconomy": "$economy_None;",
"SystemSecurity": "$GAlAXY_MAP_INFO_state_anarchy;",
"event": "Location",
"odyssey": false,
"timestamp": "2021-05-20T19:03:20.11111Z"
}
}"""
def test_create_id_maps(eddb_session):
maps = cogdb.eddn.create_id_maps(eddb_session)
assert 'Thargoid' in maps['Allegiance']
def test_edmcjournal_header():
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
assert parser.header["softwareName"] == "E:D Market Connector [Windows]"
def test_edmcjournal_body():
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
assert parser.body["BodyID"] == 65
def test_edmcjournal_date_obj():
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
assert parser.date_obj == datetime.datetime(2020, 8, 3, 11, 4, 11, tzinfo=datetime.timezone.utc)
def test_edmcjournal_timestamp():
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
assert parser.timestamp == 1596452651
def test_edmcjournal_system_is_useful():
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
assert not parser.system_is_useful
parser.parse_msg()
assert parser.system_is_useful
def test_edmcjournal_parse_msg_journal():
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
result = parser.parse_msg()
assert result['system']
assert result['station']
assert result['factions']
assert result['influences']
assert result['conflicts']
def test_edmcjournal_parse_msg_carrier():
msg = json.loads(EXAMPLE_CARRIER_EDMC)
parser = cogdb.eddn.create_parser(msg)
result = parser.parse_msg()
parser.parse_system()
parser.parse_and_flush_carrier()
assert result['system']
assert result['carriers']
assert result['station']
assert not result.get('factions')
assert not result.get('influences')
assert not result.get('conflicts')
def test_edmcjournal_update_database():
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
parser.parse_msg()
parser.update_database()
result = parser.parsed
# Since updating EDDB, these already exist in db so just test valid IDs were set.
assert result['system']['id'] == 569
assert result['station']['id'] == 35712
assert result['factions']['Ahemakino Bridge Organisation']['id'] == 55927
assert result['influences'][0]['faction_id'] == 26800
assert result['influences'][0]['system_id'] == 569
assert result['conflicts'][0]['faction1_id'] == 68340
def test_edmcjournal_parse_system():
expected = {
'controlling_minor_faction_id': 55925,
'id': 569,
'name': 'Ahemakino',
'population': 9165120,
'power_id': 6,
'power_state_id': 16,
'primary_economy_id': 4,
'secondary_economy_id': 6,
'security_id': 48,
'updated_at': 1596452651,
'x': 123.25,
'y': -3.21875,
'z': -97.4375
}
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
result = parser.parse_system()
assert result == expected
def test_edmcjournal_flush_system_to_db():
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
parser.parse_system()
    # TODO: At the moment this is implicit in parse_system; potentially separate it.
parser.flush_system_to_db()
assert parser.flushed
def test_edmcjournal_parse_and_flush_carrier_edmc_id(session, f_track_testbed):
msg = json.loads(EXAMPLE_CARRIER_EDMC)
parser = cogdb.eddn.create_parser(msg)
parser.parsed['system'] = {
"name": "Rana",
"updated_at": "2021-05-20T19:03:20.11111Z",
}
result = parser.parse_and_flush_carrier()
id = 'OVE-111'
expected = {
id: {
'id': id,
'system': 'Rana',
'updated_at': parser.date_obj.replace(tzinfo=None),
}
}
assert result == expected
session.commit()
tracked = session.query(TrackByID).filter(TrackByID.id == id).one()
assert tracked.system == "Rana"
parser.session.rollback()
parser.eddb_session.rollback()
def test_edmcjournal_parse_and_flush_carrier_disc_system(session, f_track_testbed):
msg = json.loads(EXAMPLE_CARRIER_DISC)
parser = cogdb.eddn.create_parser(msg)
parser.parsed['system'] = {
"name": "Nanomam",
"updated_at": "2021-05-20 19:03:20",
}
result = parser.parse_and_flush_carrier()
id = 'KLG-9TL'
expected = {
id: {
'id': 'KLG-9TL',
'override': False,
'system': 'Nanomam',
'updated_at': parser.date_obj.replace(tzinfo=None),
}
}
assert result == expected
session.commit()
tracked = session.query(TrackByID).filter(TrackByID.id == id).one()
assert tracked.system == "Nanomam"
parser.session.rollback()
parser.eddb_session.rollback()
def test_edmcjournal_parse_station():
expected = {
'controlling_minor_faction_id': 55925,
'economies': [{'economy_id': 4, 'primary': True, 'proportion': 0.8},
{'economy_id': 6, 'primary': False, 'proportion': 0.2}],
'features': {'blackmarket': True,
'commodities': True,
'dock': True,
'market': False,
'outfitting': True,
'rearm': True,
'refuel': True,
'repair': True,
'shipyard': True,
'update': False},
'name': 'Mattingly Port',
'system_id': 569,
'type_id': 3,
'updated_at': 1596452651
}
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
parser.parse_system()
result = parser.parse_station()
assert result == expected
def test_edmcjournal_flush_station_to_db():
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
parser.parse_system()
result = parser.parse_station()
assert result
assert len(parser.flushed) == 1
parser.flush_station_to_db()
assert parser.flushed[1].name == "Mattingly Port"
def test_edmcjournal_parse_factions():
expect = ({
'Ahemakino Bridge Organisation': {'allegiance_id': 4,
'government_id': 64,
'id': 55927,
'name': 'Ahemakino Bridge Organisation',
'state_id': 80,
'updated_at': 1596452651},
'Defence Party of Ahemakino': {'allegiance_id': 4,
'government_id': 112,
'id': 55926,
'name': 'Defence Party of Ahemakino',
'state_id': 80,
'updated_at': 1596452651},
'Natural Ahemakino Defence Party': {'allegiance_id': 4,
'government_id': 112,
'id': 55928,
'name': 'Natural Ahemakino Defence Party',
'state_id': 80,
'updated_at': 1596452651},
'Ochosag Federal Company': {'allegiance_id': 3,
'government_id': 64,
'id': 26800,
'name': 'Ochosag Federal Company',
'state_id': 80,
'updated_at': 1596452651},
'Revolutionary Mpalans Confederation': {'active_states': [FactionActiveState(system_id=569, faction_id=58194, state_id=73)],
'allegiance_id': 3,
'government_id': 48,
'id': 58194,
'name': 'Revolutionary Mpalans '
'Confederation',
'state_id': 73,
'updated_at': 1596452651},
'Social Ahemakino Green Party': {'active_states': [FactionActiveState(system_id=569, faction_id=55925, state_id=16)],
'allegiance_id': 3,
'government_id': 96,
'id': 55925,
'name': 'Social Ahemakino Green Party',
'state_id': 16,
'updated_at': 1596452651},
'Udegobo Silver Power Int': {'active_states': [FactionActiveState(system_id=569, faction_id=68340, state_id=73)],
'allegiance_id': 3,
'government_id': 64,
'id': 68340,
'name': 'Udegobo Silver Power Int',
'state_id': 73,
'updated_at': 1596452651}
},
[
{'faction_id': 26800,
'happiness_id': 2,
'influence': 0.102386,
'is_controlling_faction': False,
'system_id': 569,
'updated_at': 1596452651},
{'faction_id': 55925,
'happiness_id': 2,
'influence': 0.643141,
'is_controlling_faction': True,
'system_id': 569,
'updated_at': 1596452651},
{'faction_id': 68340,
'happiness_id': 2,
'influence': 0.078529,
'is_controlling_faction': False,
'system_id': 569,
'updated_at': 1596452651},
{'faction_id': 55926,
'happiness_id': 2,
'influence': 0.014911,
'is_controlling_faction': False,
'system_id': 569,
'updated_at': 1596452651},
{'faction_id': 58194,
'happiness_id': 2,
'influence': 0.078529,
'is_controlling_faction': False,
'system_id': 569,
'updated_at': 1596452651},
{'faction_id': 55927,
'happiness_id': 2,
'influence': 0.037773,
'is_controlling_faction': False,
'system_id': 569,
'updated_at': 1596452651},
{'faction_id': 55928,
'happiness_id': 2,
'influence': 0.044732,
'is_controlling_faction': False,
'system_id': 569,
'updated_at': 1596452651}
])
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
parser.parse_system()
parser.parse_station()
result = parser.parse_factions()
assert result == expect
def test_edmcjournal_flush_factions_to_db():
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
parser.parse_system()
parser.parse_station()
result = parser.parse_factions()
assert result
parser.flush_factions_to_db()
assert parser.flushed[1].name == "Ochosag Federal Company"
def test_edmcjournal_flush_influences_to_db():
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
parser.parse_system()
parser.parse_station()
result = parser.parse_factions()
assert result
parser.flush_influences_to_db()
assert parser.flushed[2].faction_id == 55925
assert parser.flushed[2].is_controlling_faction
assert parser.flushed[2].happiness_id == 2
def test_edmcjournal_parse_conflicts():
expect = [{
'faction1_days': 1,
'faction1_id': 68340,
'faction1_stake_id': 59829,
'faction2_days': 0,
'faction2_id': 58194,
'faction2_stake_id': None,
'status_id': 2,
'system_id': 569,
'type_id': 6,
'updated_at': 1596452651
}]
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
parser.parse_system()
parser.parse_station()
parser.parse_factions()
result = parser.parse_conflicts()
assert result == expect
def test_edmcjournal_flush_conflicts_to_db():
msg = json.loads(EXAMPLE_JOURNAL_STATION)
parser = cogdb.eddn.create_parser(msg)
parser.parse_system()
parser.parse_station()
parser.parse_factions()
result = parser.parse_conflicts()
assert result
parser.flush_conflicts_to_db()
assert parser.flushed[1].faction1_id == 68340
assert parser.flushed[1].faction2_id == 58194
def test_log_fname():
msg = json.loads(EXAMPLE_JOURNAL_STATION)
expect = "journal_1_2020_08_03T11_04_11Z_E_D_Market_Connector_Windows_"
assert cogdb.eddn.log_fname(msg) == expect
def test_log_msg():
try:
msg = json.loads(EXAMPLE_JOURNAL_STATION)
t_dir = tempfile.mkdtemp()
cogdb.eddn.log_msg(msg, path=t_dir, fname='test.txt')
pat = pathlib.Path(t_dir)
assert list(pat.glob('test.*'))
finally:
shutil.rmtree(t_dir)
|
import contextlib
import json
import mimetypes
import os
import subprocess
import sys
import threading
import time
import traceback
from binaryornot.check import is_binary
from collections import deque
from datetime import datetime, timedelta
from pathlib import Path
from textwrap import dedent
from .server import Server, file_hash, timestamp
class Job:
HEARTBEAT_INTERVAL = timedelta(seconds=60)
def __init__(self, host: str, job_id: int):
self.job_id = job_id
self.server = Server(host)
self.root = Path(str(job_id)).resolve()
self.root.mkdir(parents=True, exist_ok=True)
self.log_file = self.root / f'{self.job_id}.log'
self.done = threading.Event()
self.beat = threading.Thread(target=self.heartbeat)
self.beat.start()
def heartbeat(self):
def send():
try:
                if self.log_file.is_file():
                    with open(self.log_file) as f:
                        tail = ''.join(deque(f, 100))
                else:
                    tail = ''
self.server.post(f'/executor_api/jobs/{self.job_id}/heartbeat',
json=dict(log_tail=tail))
            except Exception:
print(traceback.format_exc())
chkpt = None
while not self.done.is_set():
time.sleep(0.5)
if chkpt is None or datetime.utcnow() >= chkpt:
send()
chkpt = datetime.utcnow() + self.HEARTBEAT_INTERVAL
def download(self):
with open(self.log_file, 'at', buffering=1) as log_file:
with contextlib.redirect_stdout(log_file):
self.job = self.server.get(f'/executor_api/jobs/{self.job_id}')
job_files = self.server.get(f'/executor_api/jobs/{self.job_id}/files')
job_packages = self.server.get(f'/executor_api/jobs/{self.job_id}/packages')
(self.root / 'in').mkdir(parents=True, exist_ok=True)
(self.root / 'in' / 'params.json').write_text(
json.dumps(
{m['name'] : m['value'] for m in self.job['fields']},
ensure_ascii=False))
files = {self.root / f['name']: (self.root, f)
for f in job_files if Path(f['name']).parts[0] != 'out'}
for package in job_packages:
path = self.root / 'in' / str(package['id'])
path.mkdir(parents=True, exist_ok=True)
(path / 'label').write_text(package['label'])
(path / 'fields.json').write_text(
json.dumps(
{m['name'] : m['value'] for m in package['fields']},
ensure_ascii=False))
for p, f in files.values():
self.server.download(f, folder=p)
print(f'[{timestamp()}] Job inputs downloaded')
def execute(self):
        env = os.environ.copy()
        env.pop('RNDFLOW_REFRESH_TOKEN', None)  # do not leak the refresh token to the job script
base_url = os.environ.get('JUPYTER_BASE_URL')
if self.job.get('is_interactive') and base_url:
script = f"$jupyter_interactive --allow-root --no-browser --ip='*' --NotebookApp.base_url={base_url} --NotebookApp.token=''"
else:
script = self.job['node']['script'] or "echo 'Empty script: nothing to do :('\nexit 1"
script_wrapper = dedent(f"""\
if ! command -v ts; then
echo "ts is not installed in the container!" > {self.job_id}.log
exit 1
fi
if ! command -v tee; then
echo "tee is not installed in the container!" > {self.job_id}.log
exit 1
fi
if command -v jupyter-lab; then
jupyter_interactive=jupyter-lab
else
jupyter_interactive=jupyter-notebook
fi
(
{script}
) 2>&1 | ts "[%Y-%m-%d %H:%M:%S]" | tee -a {self.job_id}.log
rc=${{PIPESTATUS[0]}}
exit $rc
""")
        p = subprocess.run(script_wrapper, cwd=self.root, shell=True, executable="/bin/bash", env=env)
self.status = p.returncode
def upload(self):
with open(self.log_file, 'at', buffering=1) as log_file:
with contextlib.redirect_stdout(log_file):
print(f'[{timestamp()}] Uploading job output to server...')
exclude_dirs = ('in', '__pycache__', '.ipynb_checkpoints')
def enumerate_files():
for dir, dirs, files in os.walk(self.root):
path = Path(dir)
dirs[:] = [d for d in dirs
if (path / d).relative_to(self.root).parts[0] not in exclude_dirs]
for f in files:
yield path / f
def upload_files(paths):
p2h = {Path(path) : file_hash(path) for path in paths}
h2p = {h : p for p,h in p2h.items()}
links = self.server.post(f'/executor_api/jobs/{self.job_id}/upload_objects',
json={ 'objects': list(h2p.keys()) })
for item in links:
path = h2p[item['object_id']]
link = item['link']
binary = is_binary(str(path))
type,_ = mimetypes.guess_type(str(path))
if type is None:
type = 'application/x-binary' if binary else 'text/plain'
if link is not None:
with open(path, 'rb') as f:
self.server.raw_session.put(link, data=f, headers={
'Content-Type': type,
'Content-Length': str(path.stat().st_size)
}).raise_for_status()
files = []
for path,h in p2h.items():
binary = is_binary(str(path))
type,_ = mimetypes.guess_type(str(path))
if type is None:
type = 'application/x-binary' if binary else 'text/plain'
files.append(dict(
name = str(path.relative_to(self.root)),
type = type,
content_hash = h,
is_executable = os.access(path, os.X_OK),
is_binary = binary,
size = path.stat().st_size
))
return files
self.done.set()
self.beat.join()
self.server.put(f'/executor_api/jobs/{self.job_id}', json={
'status': str(self.status),
'files': upload_files(enumerate_files())
})
def __enter__(self):
self.download()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.upload()
#---------------------------------------------------------------------------
def main():
import argparse
from getpass import getpass
parser = argparse.ArgumentParser()
parser.add_argument('--host', dest='host', required=True)
parser.add_argument('--job', dest='job', required=True, type=int)
args = parser.parse_args()
if 'RNDFLOW_REFRESH_TOKEN' not in os.environ:
raise Exception('Access token not found in environment')
with Job(args.host, args.job) as job:
job.execute()
|
#!/usr/bin/env python
from __future__ import division, print_function
import pdb
import numpy as np
from scipy.optimize import fsolve
from geometry import Geometry, Strip, Panel
import plotting
import matplotlib.pyplot as plt
from math import sin, cos, asin, exp, pi, atan, tan, log
from numpy import sqrt
class AeroModel(object):
'''Class for the forces on an aerodynamic geometry'''
def __init__(self, altitude=20, M=8, alpha=0.0, dynamic_pressure=None):
## Sea level properties
        self.T0 = 288.15      # sea-level temperature in K
        self.p0 = 101325      # sea-level pressure in Pa
        self.rho0 = 1.225     # sea-level density in kg/m^3
        self.a0 = 340.3       # sea-level speed of sound in m/s
        self.g0 = 9.80665     # gravitational acceleration in m/s^2
self.rE = 6378137 # Earth's mean radius at the equator
self.R = 287.058
self.gamma = 1.4
## Orientation of lift and drag vectors
alpha = alpha*np.pi/180.
Rotmat = np.array([ [np.cos(-1*alpha), 0, np.sin(-1*alpha)],
[0, 1, 0],
[-1*np.sin(-1*alpha), 0, np.cos(-1*alpha)]])
self.drag_nvec = Rotmat.dot(np.array([1, 0, 0]))
self.lift_nvec = Rotmat.dot(np.array([0, 0, 1]))
self.fs_nvec = -1*self.drag_nvec
assert(abs(np.dot(self.drag_nvec, self.lift_nvec)) < 1e-6)
if dynamic_pressure is not None:
def fun(h):
p, T, rho = self.getAtmosphericProperties(h)
a = np.sqrt(self.gamma*self.R*T)
q = 0.5*rho*(M*a)**2
return q - dynamic_pressure
altitude = fsolve(fun, x0=altitude)
## Free stream properties
self.p_fs, self.T_fs, self.rho_fs = \
self.getAtmosphericProperties(altitude)
self.M_fs = M
self.a_fs = sqrt(self.gamma*self.R*self.T_fs)
self.dynamic_pressure = 0.5*self.rho_fs*(self.M_fs*self.a_fs)**2
def analyze_geometries(self, geoms):
try:
iter(geoms)
except TypeError:
geoms = [geoms]
Lift = 0
Drag = 0
for geom in geoms:
L, D = self.analyze_geometry(geom)
Lift += L
Drag += D
            print('Lift: ', L, ' Drag: ', D)
return Lift, Drag
def analyze_geometry(self, geom, coeffs=False):
if not type(geom) == Geometry:
raise TypeError('Analyze requires instance of Geometry class')
Lift = 0
Drag = 0
for strip in geom:
L, D, strip_info = self.strip_analysis(strip, geom.tangent_method)
Lift += L
Drag += D
if coeffs:
Cl = Lift/(self.dynamic_pressure*geom.ref_area)
Cd = Drag/(self.dynamic_pressure*geom.ref_area)
return Cl, Cd
else:
return Lift, Drag
def strip_analysis(self, strip, tangent_method='cone'):
if not type(strip) == Strip:
            raise TypeError('Strip analysis requires an instance of the Strip class')
plifts, flifts = np.zeros(len(strip)), np.zeros(len(strip))
pdrags, fdrags = np.zeros(len(strip)), np.zeros(len(strip))
M1, p1, T1, rho1, i1 = self.M_fs, self.p_fs, self.T_fs, self.rho_fs, 0.
pstrip, Mstrip = np.zeros(len(strip)), np.zeros(len(strip))
istrip, betastrip = np.zeros(len(strip)), np.zeros(len(strip))
sstrip, zstrip = np.zeros(len(strip)), np.zeros(len(strip))
in_shadow = None
turbulent_blayer = False
for ip, panel in enumerate(strip):
## Only work with non-zero area panels
if panel.getArea() < 1e-8:
continue
panel_location = np.dot(panel.getCentroid(), self.drag_nvec)
norm = panel.getNormalVector()
sign = np.sign(np.dot(norm, self.drag_nvec))
## Determine whether panel is in shadow
if in_shadow is None:
if np.dot(norm, self.drag_nvec) > 0:
in_shadow = True
else:
in_shadow = False
else:
if sign == 0:
sign = 1
if sign != prev_sign:
in_shadow = not in_shadow
            ## Use tangent method or PM expansion to find local properties
incidence = np.pi/2 - np.arccos(
np.dot(panel.getNormalVector(), self.fs_nvec))
            if False:  # set True for verbose per-panel debugging
                print('In Shadow: ', in_shadow)
                print('Incidence: ', incidence)
                print('Area: ', panel.getArea())
                print('Norm: ', panel.getNormalVector())
                print('M, p, T, rho: ', M1, p1, T1, rho1)
                print(' ')
if not in_shadow:
if abs(incidence) > 1e-6:
if tangent_method.lower() == 'wedge':
M, p, T, rho, beta = self.tangentWedge(incidence)
else:
M, p, T, rho, beta = self.tangentCone(incidence)
Mw, pw, Tw, rhow, betaw = self.tangentWedge(incidence)
# pdb.set_trace()
else:
M, p, T, rho, beta = M1, p1, T1, rho1, np.pi/2
else:
del_inc = abs(incidence - i1)
M, p, T, rho = self.PMExpansion(del_inc, M1, p1, T1, rho1)
## Determine whether boundary layer transitions
if not turbulent_blayer:
transition_point = self.blayerTransition(M, p, T, rho)
if panel_location > transition_point:
turbulent_blayer = True
## Evaluate skin friction
tau = self.referenceTempFriction(panel_location, M, p, T, rho,
turbulent_blayer, tangent_method)
## Update previous panel's properties
M1, p1, T1, rho1, i1 = M, p, T, rho, incidence
prev_norm, prev_sign = norm, sign
pstrip[ip], Mstrip[ip], istrip[ip] = p, M, incidence
sstrip[ip] = in_shadow
## Evaluate panel forces in format (lift, drag)
plifts[ip] = p*(-1*panel.getAreaVector()).dot(self.lift_nvec)
pdrags[ip] = p*(-1*panel.getAreaVector()).dot(self.drag_nvec)
flifts[ip] = tau*(panel.getArea())*sin(incidence)
fdrags[ip] = tau*(panel.getArea())*cos(incidence)
# plotting.plot_geometry(Geometry([strip]))
# plt.show()
# pdb.set_trace()
pLift, fLift = sum(plifts), sum(flifts)
pDrag, fDrag = sum(pdrags), sum(fdrags)
info = {'Pressures': pstrip, 'Machs': Mstrip}
return pLift + fLift, pDrag + fDrag, info
def blayerTransition(self, M, p, T, rho):
mu = sutherlandViscosity(T)
logRe = 6.421*np.exp(1.209e-4 * M**2.641)
ReT = 10**logRe
return ReT*mu/(rho*M*np.sqrt(self.gamma*self.R*T))
def referenceTempFriction(self, x, M, p, T, rho, turb, method):
g = self.gamma
mu = sutherlandViscosity(T)
Re = M*np.sqrt(g*self.R*T)*rho*x / mu
if not turb:
Pr = 0.72
r = Pr**(1./2.)
Tw = T*(1 + r*((g-1)/2)*M**2)
Tref = T*(0.45 + 0.55*(Tw/T) + 0.16*r*((g-1)/2.)*M**2)
else:
Pr = 0.9
r = Pr**(1./3.)
Tw = T*(1 + r*((g-1)/2)*M**2)
Tref = T*(0.5 + 0.5*(Tw/T) + 0.16*r*((g-1)/2.)*M**2)
rhoref = p/(self.R*Tref)
muref = 1.458*1e-6 * Tref**(3./2)/(Tref + 110.4)
Reref = M*np.sqrt(g*self.R*Tref)*rhoref*x/muref
if not turb:
Cf = 0.664/np.sqrt(Reref)
else:
Cf = 0.02296/(Re**0.139) * (rhoref/rho)**0.861 * (muref/mu)**0.139
tau = Cf*0.5*self.rho_fs*(self.M_fs*self.a_fs)**2
return tau
def getAtmosphericProperties(self, altitude):
fs_props = {}
hkm = altitude # in kilometres
hm = hkm*1000 # in metres
        if hkm < 0 or hkm > 80:
            raise ValueError('altitude must be between 0 and 80 km')
if hkm <= 11:
            fs_props['T'] = self.T0 - 0.0065*hm
            fs_props['p'] = self.p0*(fs_props['T']/self.T0)**(5.2559)
else:
fs_props['T'] = 216
fs_props['p'] = 22630*exp(-0.00015769*(hm-11000))
fs_props['rho'] = fs_props['p']/(self.R*fs_props['T'])
# fs_props['a'] = sqrt(self.gamma*self.R*fs_props['T'])
# fs_props['mu'] = self.sutherland_viscosity(fs_props['T'])
return fs_props['p'], fs_props['T'], fs_props['rho']
def PMExpansion(self, del_theta, M1, p1, T1, rho1):
if del_theta == 0:
return M1, p1, T1, rho1
else:
## Prandtl Meyer function solving
nu1 = prandtlMeyer(M1, self.gamma)
nu2 = del_theta + nu1
fun = lambda M: prandtlMeyer(M, self.gamma) - nu2
fprime = lambda M: prandtlMeyerGradient(M, self.gamma)
# print fprime(M1)
# print (fun(M1+1e-6)-fun(M1))/1e-6
M2 = float(fsolve(fun, x0=M1, fprime=fprime))
g = self.gamma
## Isentropic expansion, from Anderson Section 9.6
T2 = T1*(1+((g-1)/2.)*M1**2)/(1+((g-1)/2.)*M2**2)
p2 = p1*(T2/T1)**( g / (g-1) )
rho2 = rho1*(T2/T1)**( 1/(g-1) )
rho3 = p2 / (self.R * T2)
rho0 = p1 / (self.R * T1)
return M2, p2, T2, rho2
def tangentCone(self, theta):
        '''Flow across a conical shock wave giving surface properties.
        Using Rasmussen (1967) - this is an approximation for high Mach numbers
and small cone angles.
For exact solutions, should solve the Taylor Maccoll (1933) equations
directly, which requires a numerical solution or a lookup table.
'''
g = self.gamma
th = theta
sinth = sin(th)
costh = cos(th)
beta = asin( sinth*sqrt(((g+1)/2) + (self.M_fs*sinth)**(-2)) )
b = beta
sinb = sin(b)
Msq = self.M_fs**2
cosb = cos(b)
M_norm = self.M_fs*sin(b)
__ = ( cosb**2*(1 + 2*(b-th)) ) / \
(1 + ((g-1)/2)*Msq*(sin(b)**2 - 2*(b-th)**2*costh**2) )
M = sqrt( Msq * __ )
p_other = self.p_fs*(1 + (2*g/(g+1))*(M_norm**2-1) )
__ = 1 + (((g+1)*Msq*sinth**2 + 2)/((g-1)*Msq*sinth**2 + 2))* \
log(((g+1)/2) + 1/(self.M_fs*sinth)**2)
Cp = __*sinth**2
p = Cp*0.5*self.rho_fs*(self.M_fs*self.a_fs)**2
        T = self.T_fs*(1 + (2*g/(g+1))*(M_norm**2 - 1)) * \
            ((2 + (g-1)*M_norm**2)/((g+1)*M_norm**2))
rho = self.rho_fs*((g+1)*M_norm**2)/(2+(g-1)*M_norm**2)
return M, p, T, rho, beta
def tangentWedge(self, theta):
        '''Flow solution across an oblique shock wave.
Anderson. Fundamentals of Aerodynamics, 1991'''
## Checked against shock tables - working
g = self.gamma
th = theta
beta = betaFromTM(th, self.M_fs, self.gamma)
M_norm = self.M_fs*sin(beta);
M = sqrt((1 + ((g-1)/2)*M_norm**2) / (g*M_norm**2 - (g-1)/2)) / \
sin(beta-th);
p = self.p_fs*(1 + (2*g/(g+1))*(M_norm**2 - 1))
rho = self.rho_fs*((g+1)*M_norm**2)/(2 + (g-1)*M_norm**2)
T = self.T_fs * (p/self.p_fs) * (self.rho_fs/rho);
return M, p, T, rho, beta
def betaFromTM(theta, M, gamma):
'''Obtains beta (radians) from theta (radians) and M in closed form.
Rudd and Lewis. Journal of Aircraft Vol. 35, No. 4, 1998'''
gam = gamma
n = 0 # weak shock
mu = asin(1/M) # Mach wave angle
c = tan(mu)**2
a = (( gam-1)/2+(gam+1)*c/2)*tan(theta)
b = (( gam+1)/2+(gam+3)*c/2)*tan(theta)
d = sqrt(4*(1-3*a*b)**3/((27*a**2*c+9*a*b-2)**2)-1)
return atan((b+9*a*c)/(2*(1-3*a*b)) - \
(d*(27*a**2*c+9*a*b-2))/(6*a*(1-3*a*b)) * \
tan(n*pi/3+1/3*atan(1/d)))
def thetaFromBM(beta, M, gamma):
'''Obtains theta (radians) from beta (radians) and M in closed form.
Anderson. Fundamentals of Aerodynamics, 1991'''
g = gamma
__ = (M**2*sin(beta)**2 - 1) / (M**2*(g + cos(2*beta)) + 2)
return atan(2*(1./tan(beta))*__)
def sutherlandViscosity(T=298):
mu0 = 1.827*10**-5
T0 = 291.15
C = 120. # Sutherland's constant
mu = mu0*((T0 + C)/(T + C))*(T/T0)**(3./2)
return mu
def prandtlMeyer(M, gamma=1.4):
g = gamma
nu = np.sqrt((g+1)/(g-1)) * np.arctan(np.sqrt(((g-1)/(g+1))*(M**2-1))) - \
np.arctan(np.sqrt(M**2 -1))
return nu
def prandtlMeyerGradient(M, gamma=1.4):
A = np.sqrt((gamma+1)/(gamma-1))
B = (gamma-1)/(gamma+1)
grad = A*( 1/(1 + B*(M**2-1)) )*( B**0.5*M / (M**2-1)**0.5 ) - \
(1/M**2)*(M/(M**2 - 1)**0.5)
return grad
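# --- Hypothetical self-check sketch (not part of the original module) --------
# Invert the Prandtl-Meyer function numerically with fsolve and confirm that the
# expansion angle maps back onto the original Mach number.
def _example_prandtl_meyer_roundtrip():
    M1 = 2.0
    nu = prandtlMeyer(M1)
    M_recovered = float(fsolve(lambda M: prandtlMeyer(M) - nu, x0=1.5))
    print('nu(M=2) [rad]:', nu, '  recovered M:', M_recovered)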
if __name__ == "__main__":
pass
|
import sys
from .cli import main
sys.exit(main()) # pragma: no cover
|
class SlashException(Exception):
@classmethod
def throw(cls, *args, **kwargs):
raise cls(*args, **kwargs)
class TerminatedException(BaseException):
pass
INTERRUPTION_EXCEPTIONS = (KeyboardInterrupt, TerminatedException)
class NoActiveSession(SlashException):
pass
class ParallelServerIsDown(SlashException):
pass
class ParallelTimeout(SlashException):
pass
class InteractiveParallelNotAllowed(SlashException):
pass
class CannotLoadTests(SlashException):
pass
class InvalidConfiguraion(SlashException):
pass
CLI_ABORT_EXCEPTIONS = (CannotLoadTests, InvalidConfiguraion)
class FixtureException(CannotLoadTests):
pass
class CyclicFixtureDependency(FixtureException):
pass
class UnresolvedFixtureStore(FixtureException):
pass
class UnknownFixtures(FixtureException):
pass
class InvalidFixtureScope(FixtureException):
pass
class InvalidFixtureName(FixtureException):
pass
class ParameterException(CannotLoadTests):
pass
class TaggingConflict(CannotLoadTests):
pass
class IncorrectScope(SlashException):
pass
class InvalidTest(SlashException):
pass
class CannotAddCleanup(SlashException):
pass
class TmuxSessionNotExist(SlashException):
pass
class TmuxExecutableNotFound(SlashException):
pass
class SlashInternalError(SlashException):
def __init__(self, *args, **kwargs):
# Internal errors should basically never happen. This is why we use the constructor here to notify the active session that
        # an internal error occurred, for testability.
# It is highly unlikely that such exception objects would ever get constructed without being raised, and this helps overcome accidental
# catch-alls in exception handling
from slash.ctx import context
if context.session is not None:
context.session.notify_internal_error()
super(SlashInternalError, self).__init__(*args, **kwargs)
def __str__(self):
return "\n".join(("INTERNAL ERROR:",
super(SlashInternalError, self).__str__(),
"Please open issue at: https://github.com/getslash/slash/issues/new"))
class TestFailed(AssertionError):
"""
This exception class distinguishes actual test failures (mostly assertion errors,
but possibly other conditions as well) from regular asserts.
This is important, since regular code that is tested can use asserts, and that
should not be considered a test failure (but rather a code failure)
"""
pass
class ExpectedExceptionNotCaught(TestFailed):
def __init__(self, msg, expected_types):
self.expected_types = expected_types
super(ExpectedExceptionNotCaught, self).__init__(msg)
FAILURE_EXCEPTION_TYPES = (TestFailed, AssertionError, ExpectedExceptionNotCaught)
class SkipTest(SlashException):
"""
This exception should be raised in order to interrupt the execution of the currently running test, marking
it as skipped
"""
def __init__(self, reason="Test skipped"):
super(SkipTest, self).__init__(reason)
self.reason = reason
|
import numpy as np
import pandas as pd
class Stat:
# base class of stat objects
def get_constant(self, name, n):
factor = pd.read_csv('factor_table.csv')
return factor[name][factor['n']==n].to_numpy()[0]
def get_params(self):
print(self.params)
def set_params(self, params):
assert len(params) == len(self.params), 'param length must be equal to the length of stat params'
for i, key in enumerate(self.params.keys()):
self.params[key] = params[i]
class x_bar(Stat):
# x_bar stat
def __init__(self, n=30):
self.n = n
self.ddof = 1 if self.n > 1 else 0
self.params = {'mu': np.nan, 'sigma': np.nan}
if self.n <= 25:
self.factor = {'c4': super().get_constant('c4', self.n)}
else:
self.factor = {'c4': 4 * (self.n - 1) / (4 * self.n - 3)}
def estimate_params(self, x):
assert np.ndim(x) == 1, 'x must be 1-d array'
self.params['mu'] = np.mean(x)
if len(x) % self.n == 0:
m_ = len(x) // self.n
else:
x = np.append(x, np.nan * np.ones(self.n - len(x) % self.n))
m_ = len(x) // self.n
        s_bar_ = np.nan_to_num(np.nanstd(x.reshape(m_, self.n), axis=1, ddof=self.ddof)).mean()
        self.params['sigma'] = s_bar_ / (self.factor['c4'] * np.sqrt(self.n))
def __call__(self, x):
assert np.ndim(x) == 1, 'x must be 1-d array'
if len(x) % self.n == 0:
m_ = len(x) // self.n
else:
x = np.append(x, np.nan * np.ones(self.n - len(x) % self.n))
m_ = len(x) // self.n
return np.nanmean(x.reshape(m_, self.n), axis=1)
def __repr__(self):
return 'x bar (w={})'.format(self.n)
class S(Stat):
# S stat
def __init__(self, n=30):
# n is subsample size
self.n = n
self.ddof = 1 if self.n > 1 else 0
self.params = {'mu': np.nan, 'sigma': np.nan}
if self.n <= 25:
self.factor = {'c4': super().get_constant('c4', self.n)}
else:
self.factor = {'c4': 4 * (self.n - 1) / (4 * self.n - 3)}
def estimate_params(self, x):
s_bar_ = self(x).mean()
sigma_hat_ = s_bar_ / self.factor['c4']
self.params['mu'] = s_bar_
self.params['sigma'] = sigma_hat_ * np.sqrt(1 - np.square(self.factor['c4']))
def __call__(self, x):
assert np.ndim(x) == 1, 'x must be 1-d array'
if len(x) % self.n == 0:
m_ = len(x) // self.n
else:
x = np.append(x, np.nan * np.ones(self.n - len(x) % self.n))
m_ = len(x) // self.n
return np.nan_to_num(np.nanstd(x.reshape(m_, self.n), axis=1, ddof=self.ddof))
def __repr__(self):
return 'S (w={})'.format(self.n)
class ma(Stat):
# moving average stat
def __init__(self, n=30):
# n is the window size
self.n = n
self.params = {'mu': np.nan, 'sigma': np.nan}
def estimate_params(self, x):
assert np.ndim(x) == 1, 'x must be 1-d array'
self.params['mu'] = np.mean(x)
self.params['sigma'] = np.nanmean(pd.Series(x).rolling(self.n).std())
def __call__(self, x):
assert np.ndim(x) == 1, 'x must be 1-d array'
mav = np.cumsum(x)
mav[self.n:] = mav[self.n:] - mav[:-self.n]
return mav[self.n - 1:] / self.n
def __repr__(self):
return 'MA (w={})'.format(self.n)
class R(Stat):
# R stat
def __init__(self, n=30):
# n is subsample size
        self.n = n
        self.params = {'mu': np.nan, 'sigma': np.nan}
        self.factor = {'d2': super().get_constant('d2', self.n), 'd3': super().get_constant('d3', self.n)}
def estimate_params(self, x):
R_ = self(x)
self.params['mu'] = np.mean(R_)
self.params['sigma'] = np.mean(R_) * self.factor['d3'] / self.factor['d2']
def __call__(self, x):
assert np.ndim(x) == 1, 'x must be 1-d array'
if len(x) % self.n == 0:
m_ = len(x) // self.n
else:
x = np.append(x, np.nan * np.ones(self.n - len(x) % self.n))
m_ = len(x) // self.n
return np.nanmax(x.reshape(m_, self.n), axis=1) - np.nanmin(x.reshape(m_, self.n), axis=1)
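# --- Hypothetical usage sketch (not part of the original module) -------------
# Estimate control-chart parameters for the subgroup-mean (x-bar) statistic. A
# subgroup size above 25 is used so the c4 factor is computed analytically and
# the 'factor_table.csv' lookup is not needed.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    data = rng.normal(loc=10.0, scale=2.0, size=300)
    stat = x_bar(n=30)            # 10 subgroups of 30 observations
    stat.estimate_params(data)    # fills stat.params['mu'] and stat.params['sigma']
    print(stat, stat.params)
    print(stat(data))             # the 10 subgroup means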
|
from django.apps import AppConfig
class DjkalendarConfig(AppConfig):
name = 'djkalendar'
|
codontable= {"UUU":"F", "UUC":"F", "UUA":"L", "UUG":"L",
"UCU":"S", "UCC":"s", "UCA":"S", "UCG":"S",
"UAU":"Y", "UAC":"Y", "UAA":"STOP", "UAG":"STOP",
"UGU":"C", "UGC":"C", "UGA":"STOP", "UGG":"W",
"CUU":"L", "CUC":"L", "CUA":"L", "CUG":"L",
"CCU":"P", "CCC":"P", "CCA":"P", "CCG":"P",
"CAU":"H", "CAC":"H", "CAA":"Q", "CAG":"Q",
"CGU":"R", "CGC":"R", "CGA":"R", "CGG":"R",
"AUU":"I", "AUC":"I", "AUA":"I", "AUG":"M",
"ACU":"T", "ACC":"T", "ACA":"T", "ACG":"T",
"AAU":"N", "AAC":"N", "AAA":"K", "AAG":"K",
"AGU":"S", "AGC":"S", "AGA":"R", "AGG":"R",
"GUU":"V", "GUC":"V", "GUA":"V", "GUG":"V",
"GCU":"A", "GCC":"A", "GCA":"A", "GCG":"A",
"GAU":"D", "GAC":"D", "GAA":"E", "GAG":"E",
"GGU":"G", "GGC":"G", "GGA":"G", "GGG":"G",}
""" this is the table already with dNA codons, so we don't need .replace bellow on this case
codontable = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',
'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W',
}
"""
def translate(DNA):
excess=0
if len(DNA)/3.0 != int(len(DNA)/3.0):
excess=len(DNA)%3
DNA=DNA[:-excess]
protein=""
    DNA = DNA.upper()
    for a in range(0, len(DNA), 3):
        codon = DNA[a:a+3].replace("T", "U")
protein=protein+codontable[codon]
return protein + "?"*excess
DNA="ATGAAAA"
print(translate(DNA))
|
#!/usr/bin/python
import initExample ## Add path to library (just for examples; you do not need this)
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
from pyqtgraph.ptime import time
app = QtGui.QApplication([])
p = pg.plot()
p.setWindowTitle('pyqtgraph performance: InfiniteLine')
p.setRange(QtCore.QRectF(0, -10, 5000, 20))
p.setLabel('bottom', 'Index', units='B')
curve = p.plot()
# Add a large number of horizontal InfiniteLine to plot
for i in range(100):
line = pg.InfiniteLine(pos=np.random.randint(5000), movable=True)
p.addItem(line)
data = np.random.normal(size=(50, 5000))
ptr = 0
lastTime = time()
fps = None
def update():
global curve, data, ptr, p, lastTime, fps
curve.setData(data[ptr % 10])
ptr += 1
now = time()
dt = now - lastTime
lastTime = now
if fps is None:
fps = 1.0/dt
else:
s = np.clip(dt*3., 0, 1)
fps = fps * (1-s) + (1.0/dt) * s
p.setTitle('%0.2f fps' % fps)
app.processEvents() # force complete redraw for every plot
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
# Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
# -*- coding: utf-8 -*-
# @Author: 何睿
# @Create Date: 2018-08-23 14:13:36
# @Last Modified by: 何睿
# @Last Modified time: 2018-08-23 14:13:36
import random
def DirectInsertionSort():
    # Direct insertion sort, in ascending order:
number_list = random.sample(range(0, 500), 20)
print("%-*s" % (15, "UnSorted List"), number_list)
length = len(number_list)
for index in range(1, length):
before = index-1
compare = number_list[index]
while before >= 0 and compare <= number_list[before]:
number_list[before+1] = number_list[before]
before -= 1
number_list[before+1] = compare
print("%-*s" % (15, "Sorted List"), number_list)
if __name__ == "__main__":
DirectInsertionSort()
|
from pytorch_fit.visuals import plot_history
history = {'Loss': {'train_loss': [0.4477573685088561,
0.15534080869922653,
0.11754136811213749,
0.10495792154564278,
0.09372215639394738,
0.08527283754541294,
0.08190120248899775,
0.07828701003477793,
0.0730938561831276,
0.06850165408220495,
0.0711940613197323,
0.06844800637568947],
'val_loss': [0.4393462081015393,
0.34352790466450817,
0.2998191922891272,
0.2722488125348523,
0.25585710016061564,
0.20031215202452354,
0.20961321681838105,
0.19667473664622212,
0.18720172884832784,
0.1736660957969342,
0.17767173810891476,
0.202568020564585]},
'Accuracy': {'train_acc': [0.8931666666666667,
0.96595,
0.97415,
0.9769,
0.9798833333333333,
0.9814333333333334,
0.9823166666666666,
0.9831333333333333,
0.9840833333333333,
0.9850833333333333,
0.9845833333333334,
0.9853166666666666],
'val_acc': [0.9827,
0.9854,
0.9878,
0.9894,
0.9897,
0.9916,
0.9913,
0.9913,
0.9923,
0.9929,
0.994,
0.9927]}}
def main():
fig = plot_history(history, engine="plotly", title="Plotting History")
fig.show()
fig = plot_history(history, title="Plotting History")
fig.show()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
import unittest
from utils import TaskQueue
from page_crawl import Page
class TestTaskPriorityQueue(unittest.TestCase):
def check_empty_queue(self, queue):
self.assertTrue(queue.total_task_cnt == 0)
for pri in range(10):
self.assertTrue(queue.prio_task_cnt[pri] == 0)
self.assertTrue(queue.prio_task_list[pri] == [])
def test_init(self):
q = TaskQueue()
self.check_empty_queue(q)
def test_simple_enqueue_dequeue(self):
q = TaskQueue()
task = Page('http://www.google.com', 1, 80)
q.en_queue(task)
self.assertTrue(q.total_task_cnt == 1)
self.assertTrue(q.prio_task_cnt[0] == 1)
self.assertTrue(q.prio_task_list[0] == [task])
outtask = q.de_queue()
self.assertTrue(outtask.depth == 1)
self.assertTrue(outtask.score == 80)
self.assertTrue(outtask.url == 'http://www.google.com')
self.check_empty_queue(q)
def test_bulk_enqueue_dequeue(self):
q = TaskQueue()
for cnt in range(10000):
task = Page('http://www.nyu.edu/engineering', 2, 60)
q.en_queue(task)
self.assertTrue(q.total_task_cnt == 10000)
self.assertTrue(q.prio_task_cnt[0] == 10000)
self.assertTrue(len(q.prio_task_list[0]) == 10000)
while 1:
outtask = q.de_queue()
if not outtask:
break
self.assertTrue(outtask.url == 'http://www.nyu.edu/engineering')
self.assertTrue(outtask.depth == 2)
self.assertTrue(outtask.score == 60)
self.check_empty_queue(q)
if __name__ == '__main__':
unittest.main()
|
# Copyright (C) 2013-2021, Stefan Schwarzer
# and ftputil contributors (see `doc/contributors.txt`)
# See the file LICENSE for licensing terms.
"""
tool.py - helper code
"""
import os
__all__ = ["same_string_type_as", "as_str", "as_str_path"]
def same_string_type_as(type_source, string, encoding):
"""
Return a string of the same type as `type_source` with the content from
`string`.
If the `type_source` and `string` don't have the same type, use `encoding`
to encode or decode, whatever operation is needed.
"""
if isinstance(type_source, bytes) and isinstance(string, str):
return string.encode(encoding)
elif isinstance(type_source, str) and isinstance(string, bytes):
return string.decode(encoding)
else:
return string
def as_str(string, encoding):
"""
Return the argument `string` converted to a unicode string if it's a
`bytes` object. Otherwise just return the string.
If a conversion is necessary, use `encoding`.
If `string` is neither `str` nor `bytes`, raise a `TypeError`.
"""
if isinstance(string, bytes):
return string.decode(encoding)
elif isinstance(string, str):
return string
else:
raise TypeError("`as_str` argument must be `bytes` or `str`")
def as_str_path(path, encoding):
"""
Return the argument `path` converted to a unicode string if it's a `bytes`
object. Otherwise just return the string.
If a conversion is necessary, use `encoding`.
Instead of passing a `bytes` or `str` object for `path`, you can pass a
`PathLike` object that can be converted to a `bytes` or `str` object.
If the `path` can't be converted to a `bytes` or `str`, a `TypeError` is
raised.
"""
path = os.fspath(path)
return as_str(path, encoding)
|
from django import forms
from .models import *
class OrderForm(forms.ModelForm):
class Meta:
model = Order
fields = ("customer_name","customer_email","customer_phone","customer_address","delivery","payment","confirm","comments")
|
from Player import Player
from Constants import *
class Computer(Player):
def __init__(self, colour):
super().__init__(colour)
pass
def make_move_decision(self, board, colour):
pass
|
import os
from flask import Flask, flash, request, redirect, url_for, send_from_directory, render_template, send_file
from werkzeug.utils import secure_filename
from redis import Redis
import rq
import math
import time
UPLOAD_FOLDER = 'uploaded'
ALLOWED_EXTENSIONS = {'wav', 'mp3'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = '5UC3M35C0U17735'
def split_this_song(song_path):
os.system("spleeter separate -i " + UPLOAD_FOLDER + '/' + song_path + " -p spleeter:2stems -o output")
def redirect_done(job_id):
return redirect(url_for('dir_listing', req_path='output'))
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/uploaded/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
def convert_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
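    # Choose the unit from the base-1024 logarithm of the size, then scale.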
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
@app.route('/<path:req_path>')
def dir_listing(req_path):
BASE_DIR = '.'
abs_path = os.path.join(BASE_DIR, req_path)
if os.path.isfile(abs_path):
return send_file(abs_path)
passed_info = []
files = os.listdir(abs_path)
for f in files:
info = {}
info['name'] = f[:-4]
info['ext'] = f[-4:]
info['size'] = convert_size(os.stat(abs_path + '/' + f).st_size)
info['action'] = abs_path + '/' + f
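        # Drop the first nine characters of the path (e.g. the './output/' prefix)
        # so the template receives a path relative to the served directory.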
info['action'] = info['action'][9:]
passed_info.append(info)
print(passed_info)
return render_template('result.html', files=passed_info)
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
queue = rq.Queue('split', connection=Redis.from_url('redis://'))
split_job = queue.enqueue(split_this_song, filename)
os.mkdir('./output/'+filename[:-4])
return redirect(url_for('dir_listing', req_path='output/'+filename[:-4]))
return render_template('index.html')
|
# Generated by Django 3.1.7 on 2021-04-11 18:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0003_info_auth'),
]
operations = [
migrations.AlterField(
model_name='info',
name='auth',
field=models.CharField(default=None, max_length=100),
),
]
|
#!/usr/bin/env python
from flask_wtf import Form
from wtforms import StringField, IntegerField, BooleanField, DateField,DateTimeField
from wtforms.validators import DataRequired
class SearchForm(Form):
server = StringField('server', validators = [DataRequired()])
dt = DateField('Date', format = '%Y-%m-%d')
|
# coding: utf-8
from kay.utils.forms import modelform
from static.models import SPage
class SPageForm(modelform.ModelForm):
class Meta:
model = SPage
fields = ('title', 'meta_desc', 'body')
|
class FourConcealedPungs:
name = "Four Concealed Pungs"
points = 64
excluded = ("All Pungs", "Concealed Hand", "Three Concealed Pungs", "Two Concealed Pungs")
def __init__(self, handObj):
self.hand = handObj
def examine_standard_hand(self):
return self.hand.standardhand
def examine_no_chows(self):
pungs_and_kongs = self.hand.pungs + self.hand.kongs
not_pair = self.hand.pungs + self.hand.kongs + self.hand.knitted + self.hand.chows
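        # The hand has no chows (and no knitted sets) exactly when the pungs
        # and kongs alone account for every non-pair tileset.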
no_chows = True if len(pungs_and_kongs) == len(not_pair) else False
return no_chows
def examine_four_concealed(self):
four_concealed = False
concealed_sets = []
limit_value = 4
pungs_and_kongs = self.hand.pungs + self.hand.kongs
#some tilesets are concealed
if type(getattr(self.hand, "concealed", None)) is list:
for tileset in pungs_and_kongs:
concealed_sets.append(tileset.concealed)
four_concealed = all(concealed_sets)
#the hand is concealed
elif getattr(self.hand, "concealed", None) == True:
four_concealed = True if len(pungs_and_kongs) == limit_value else False
#the hand is melded
else: four_concealed = False
return four_concealed
def evaluate(self):
standard_hand = self.examine_standard_hand()
no_chows = self.examine_no_chows()
four_concealed = self.examine_four_concealed()
four_concealed_pungs = all([standard_hand, no_chows, four_concealed])
return four_concealed_pungs
|
from .forms import ContactForm
from django.conf import settings
from django.contrib import messages
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.utils import timezone
import random
import requests
# Create your views here.
BASE_URL = 'https://db.aclark.net'
CLIENT_URL = '%s/api/clients/?format=json' % BASE_URL
SERVICE_URL = '%s/api/services/?format=json' % BASE_URL
TESTIMONIAL_URL = '%s/api/testimonials/?format=json' % BASE_URL
PROFILE_URL = '%s/api/profiles/?format=json' % BASE_URL
def about(request):
context = {}
context['active_nav'] = 'about'
return render(request, 'about.html', context)
def page(request, slug=None):
context = {}
return render(request, 'page.html', context)
def blog(request):
context = {}
context['active_nav'] = 'more'
return render(request, 'blog.html', context)
def book(request):
context = {}
context['active_nav'] = 'more'
return render(request, 'book.html', context)
def clients(request):
context = {}
clients = requests.get(CLIENT_URL).json()
context['clients'] = clients
context['active_nav'] = 'clients'
return render(request, 'clients.html', context)
def community(request):
context = {}
context['active_nav'] = 'more'
return render(request, 'community.html', context)
def contact(request):
context = {}
now = timezone.datetime.now
msg = 'Sent, thank you. Please expect a reply within 24 hours. '
msg += 'For urgent matters please contact Alex Clark <aclark@aclark.net>.'
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
message = form.cleaned_data['message']
sender = form.cleaned_data['email']
message = '\n'.join([message, sender])
recipients = [settings.EMAIL_FROM]
subject = settings.EMAIL_SUBJECT % now().strftime(
'%m/%d/%Y %H:%M:%S')
send_mail(subject, message, settings.EMAIL_FROM, recipients)
messages.add_message(request, messages.SUCCESS, msg)
return HttpResponseRedirect(reverse('home'))
else:
form = ContactForm()
context['form'] = form
context['active_nav'] = 'contact'
return render(request, 'contact.html', context)
def history(request):
context = {}
context['active_nav'] = 'more'
return render(request, 'history.html', context)
def home(request):
context = {}
context['show_carousel'] = True
testimonials = requests.get(TESTIMONIAL_URL).json()
context['testimonial'] = random.choice(testimonials)
return render(request, 'home.html', context)
def location(request):
context = {}
context['active_nav'] = 'more'
return render(request, 'location.html', context)
def opensource(request):
context = {}
context['active_nav'] = 'more'
return render(request, 'opensource.html', context)
def projects(request):
context = {}
context['active_nav'] = 'projects'
return render(request, 'projects.html', context)
def services(request):
context = {}
services = requests.get(SERVICE_URL).json()
context['services'] = services
context['active_nav'] = 'services'
return render(request, 'services.html', context)
def testimonials(request):
context = {}
testimonials = requests.get(TESTIMONIAL_URL).json()
context['testimonials'] = testimonials
context['active_nav'] = 'testimonials'
return render(request, 'testimonials.html', context)
def team(request):
context = {}
profiles = requests.get(PROFILE_URL).json()
context['profiles'] = profiles
context['active_nav'] = 'team'
return render(request, 'team.html', context)
def now(request):
return HttpResponseRedirect('http://blog.aclark.net/now')
|
# Read lines from the user until "0" is entered, append them to Test.txt,
# then print the file contents.
f = open("Test.txt", "a+")
line = input()
while line != "0":
    f.write(line)
    line = input()
f.seek(0, 0)
print("File contents are:")
contents = f.read()
print(contents)
f.close()
|
def run_transaction(driver, function, string, **kwargs):
with driver.session() as session:
return session.read_transaction(function, string=string, **kwargs)
def cleanup_string(string):
try:
return string.strip()
except AttributeError:
return ValueError("search string is invalid")
def strip_tag(string: str):
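    # Split a "name#tag" string into (name, tag); the tag is lower-cased, and
    # an empty tag is returned when no single '#' separator is present.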
splited_string = string.split("#")
if len(splited_string) == 2:
return splited_string[0].strip(), splited_string[1].strip().lower()
else:
return string.strip(), ""
|
'''
author : Plaban Nayak
mail : nayakpplaban@gmail.com
'''
|
# impy - a post-processor for HYADES implosion simulations
# Copyright (c) Massachusetts Institute of Technology / Alex Zylstra
# Distributed under the MIT License
import platform
import os
import pkgutil
import tkinter as tk
import tkinter.ttk as ttk
# from tkinter.filedialog import askdirectory
from impy.gui.WindowManager import WindowManager
from impy.gui.widgets.Progress_Dialog import Progress_Dialog
from impy.gui.widgets.Option_Prompt import Option_Prompt
import matplotlib, matplotlib.pyplot
from impy.util import ImplosionRunner
from impy.util import ModuleRunner
import impy.implosions
import impy.modules
from impy.implosions.Implosion import *
from impy.modules.Module import *
import multiprocessing
# auto import implosions and modules:
for importer, modname, ispkg in pkgutil.iter_modules(impy.implosions.__path__):
module = __import__('impy.implosions.'+modname, fromlist="dummy")
print("Imported implosion: ", module)
for importer, modname, ispkg in pkgutil.iter_modules(impy.modules.__path__):
module = __import__('impy.modules.'+modname, fromlist="dummy")
print("Imported module: ", module)
class impy(tk.Toplevel):
"""Post-processor for hydrodynamic simulation results."""
__author__ = 'Alex Zylstra'
__date__ = '2015-01-02'
__version__ = '0.2.1'
def __init__(self):
super(impy, self).__init__(None)
# Set window background
if platform.system() == 'Darwin':
self.configure(background='#E8E9E8')
else:
self.configure(background='#F1F1F1')
self.grid()
self.modControlVars = dict()
self.modControlChecks = dict()
self.modules = dict()
self.modRedisplay = dict()
self.processes = []
self.windows = []
self.__createWidgets__()
self.minsize(300,200)
self.title('impy')
self.style = ttk.Style()
# Set default font
if platform.system() == 'Darwin':
self.style.configure('.', font=('Helvetica', 14))
else:
self.style.configure('.', font=('Helvetica', 12))
# check theme for Windows
themes = self.style.theme_names()
if 'vista' in themes:
self.style.theme_use('vista')
self.wm = WindowManager(self.winfo_screenwidth(), self.winfo_screenheight())
self.wm.addWindow(self)
# stretch the column to fill all space:
tk.Grid.columnconfigure(self, 0, weight=1)
tk.Grid.columnconfigure(self, 1, weight=1)
tk.Grid.columnconfigure(self, 2, weight=1)
# add a key binding to close:
self.bind('<Escape>', self.close)
self.protocol("WM_DELETE_WINDOW", self.close)
# Text for shortcuts:
if platform.system() == 'Darwin':
shortcutType = '⌘'
shortcutModifier = 'Command-'
else:
shortcutType = 'Ctrl+'
shortcutModifier = 'Control-'
# Top-level menu bar:
menubar = tk.Menu(self)
# File menu:
fileMenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='File', menu=fileMenu)
helpMenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='Help', menu=helpMenu)
self.config(menu=menubar)
# Tabs not supported by Windows
if platform.system() == 'Darwin':
tempText = 'Open File\t\t'
else:
tempText = 'Open File '
fileMenu.add_command(label=tempText + shortcutType + 'O', command=self.openFile)
self.bind('<' + shortcutModifier + 'o>', self.openFile)
createMenu = tk.Menu(fileMenu, tearoff=0)
fileMenu.add_cascade(label='Create...', menu=createMenu)
# Auto generate creation options for each implosion type:
for impType in allImplosions():
createMenu.add_command(label='Create ' + impType.name(), command= lambda a=impType.name(): self.open(a))
# Tabs not supported on windows
if platform.system() == 'Darwin':
tempText = 'Save\t\t\t'
else:
tempText = 'Save '
fileMenu.add_command(label=tempText + shortcutType + 'S', command= lambda: self.save('CSV'))
self.bind('<' + shortcutModifier + 's>', lambda *args: self.save('CSV'))
fileMenu.add_command(label='Save pickle', command= lambda: self.save('pickle'))
fileMenu.add_command(label='Save plots', command= lambda: self.save('plots'))
# Tabs not supported on Windows
if platform.system() == 'Darwin':
tempText = 'Quit\t\t\t'
else:
tempText = 'Quit '
fileMenu.add_command(label=tempText + shortcutType + 'Q', command=self.close)
self.bind('<' + shortcutModifier + 'q>', self.close)
# Options in the help menu:
        helpMenu.add_command(label='About', command=self.__about__)
self.__configureMatplotlib__()
def __createWidgets__(self):
"""Create the UI elements for the main window"""
label1 = ttk.Label(self, text='Type:')
label1.grid(row=0, column=0)
self.typeLabelVar = tk.StringVar()
self.typeLabel = ttk.Label(self, textvar=self.typeLabelVar)
self.typeLabel.grid(row=0, column=1, sticky='ns')
impInfoButton = ttk.Button(self, text='Info', command=self.__impInfo__, width=4)
impInfoButton.grid(row=0, column=2, sticky='ns')
label2 = ttk.Label(self, text='File:')
label2.grid(row=1, column=0)
self.fileLabelVar = tk.StringVar()
self.fileLabel = ttk.Label(self, textvar=self.fileLabelVar)
self.fileLabel.grid(row=1, column=1, sticky='ns')
bar1 = ttk.Separator(self)
bar1.grid(row=2, column=0, columnspan=3, sticky='nsew')
label3 = ttk.Label(self, text='Modules')
label3.grid(row=3, column=0, columnspan=3, sticky='ns')
# Add for each module:
row=4
for mod in allModules():
label = ttk.Label(self, text=mod.name())
label.grid(row=row, column=0)
self.modControlVars[mod.name()] = tk.IntVar()
check = ttk.Checkbutton(self, variable=self.modControlVars[mod.name()], command= lambda a=mod.name():self.__runModule__(a), state=tk.DISABLED)
check.grid(row=row, column=1)
self.modControlChecks[mod.name()] = check
infoButton = ttk.Button(self, text='Info', command= lambda a=mod.name(): self.__modInfo__(a), width=4)
infoButton.grid(row=row, column=2)
row += 1
def __configureMatplotlib__(self):
# set matplotlib backend
if matplotlib.get_backend() != 'tkagg':
matplotlib.pyplot.switch_backend('TkAgg')
matplotlib.pyplot.rc('font', **{'size':'8'})
matplotlib.pyplot.rc('text', **{'usetex':False})
matplotlib.rcParams['toolbar'] = 'None'
def close(self, *args):
"""Handle closing the application."""
matplotlib.pyplot.close("all")
for p in self.processes:
if p.is_alive():
p.terminate()
for w in self.windows:
w.withdraw()
self.withdraw()
self.quit()
def openFile(self, *args):
"""Open an implosion file. Attempts to handle conflicts (two implosions can open same type) gracefully."""
# Get file types:
filetypes = []
correspondingImp = []
for impType in allImplosions():
for type in impType.getFileTypes():
filetypes.append(type)
correspondingImp.append(impType)
# Prompt the user:
from tkinter.filedialog import askopenfilename
FILEOPENOPTIONS = dict(defaultextension='.nc',
filetypes=filetypes,
multiple=False,
parent=self)
filename = askopenfilename(**FILEOPENOPTIONS)
if filename == '':
return
# Create the implosion:
self.filename = filename
ext = '*.' + filename.split('.')[-1]
#TODO: this will not open lilac dump because of its not-fixed extension
# Find out which types this corresponds to:
typeIndex = []
for i in range(len(filetypes)):
if filetypes[i][1] == ext:
typeIndex.append(i)
# If none:
if len(typeIndex) == 0:
return
# Detect 'collisions', i.e. multiple implosions can open this file type
if len(typeIndex) > 1:
# Prompt the user to choose:
from impy.gui.widgets.Option_Prompt import Option_Prompt
opts = []
for i in typeIndex:
opts.append(correspondingImp[i].name())
p = Option_Prompt(self, title='Choose implosion type', options=opts)
# Figure out which was chosen:
chosenName = p.result
if p.result is None:
return
for i in typeIndex:
if correspondingImp[i].name() == p.result:
typeIndex = i
break
# If only one option, convert from list:
else:
typeIndex = typeIndex[0]
# Now one is chosen:
self.impType = correspondingImp[typeIndex].name()
self.imp = correspondingImp[typeIndex](type='File', args=filename)
if not self.imp.ready():
return
# Set info to display:
self.typeLabelVar.set(self.impType)
self.fileLabelVar.set(os.path.split(self.filename)[-1])
self.__runImplosion__()
def open(self, type, *args):
"""Create a specified implosion using its own constructor.
:param type: A string containing the name of the implosion class.
"""
print('Creating '+type)
# Make the correct type:
for impType in allImplosions():
if impType.name() == type:
self.impType = impType.name()
self.imp = impType(type='GUI')
break
if not self.imp.ready():
return
self.typeLabelVar.set(self.impType)
try:
self.fileLabelVar.set(os.path.split(self.imp.filename)[-1])
except:
self.fileLabelVar.set('')
self.__runImplosion__()
def save(self, saveType, *args):
"""Save results from all open modules to a directory.
:param saveType: The type of thing to save: 'plots', 'csv', or 'pickle'.
"""
        # Get a directory to save to:
        from tkinter.filedialog import askdirectory
        dir = askdirectory(parent=self)
        if dir is None or dir == '':
            return
        # Create it if necessary:
        if not os.path.isdir(dir):
            os.makedirs(dir)
# For saving plots, need to get the type:
if saveType == 'plots':
formats = ['']+list(matplotlib.pyplot.gcf().canvas.get_supported_filetypes().keys())
prompt = Option_Prompt(self, title='Plot format', options=formats)
# Loop over modules and save their stuff if the checkbox is selected:
for mod in self.modules.keys():
if self.modControlVars[mod].get() == 1:
if saveType == 'plots':
if prompt.result is not None:
prefix = mod
self.modules[mod].savePlots(dir, prefix, prompt.result)
else:
fname = os.path.join(dir, mod + '.' + saveType)
self.modules[mod].save(fname, type=saveType)
def __runImplosion__(self):
"""Generate the implosion. Computation work done in a different process"""
# Create a progress dialog:
dialog = Progress_Dialog()
dialog.set_text('Implosion generation...')
dialog.update_idletasks()
# Use helper function:
parent_conn, child_conn = multiprocessing.Pipe()
p = multiprocessing.Process(target=ImplosionRunner.run, args=(child_conn,))
self.processes.append(p)
# start the process and send implosion:
p.start()
try:
parent_conn.send(self.imp)
except:
raise Exception('Implosion object passed to ImplosionRunner is not pickleable!')
obj = None
# Loop while the process is active:
def callback():
nonlocal dialog, p, parent_conn
if dialog.cancelled:
dialog.withdraw()
p.terminate()
return
# Try to receive from the Pipe:
if parent_conn.poll():
# Update the progress, or end otherwise:
obj = parent_conn.recv()
if isinstance(obj, Exception):
from tkinter.messagebox import showerror
showerror('Error!', 'A problem occurred generating the implosion (class '+self.imp.name()+')\n'+obj.__str__())
dialog.withdraw()
p.terminate()
return
elif isinstance(obj, float):
dialog.set(100*obj)
elif isinstance(obj, Implosion):
# Pass info back to the main app:
self.imp = obj
self.after(10, self.__postImplosion__)
dialog.withdraw()
p.terminate()
return
self.after(25, callback)
self.after(10, callback)
def __postImplosion__(self):
"""Tasks to execute after the implosion is generated."""
# Enable modules:
for key in self.modControlChecks.keys():
self.modControlChecks[key].configure(state=tk.NORMAL)
# Run any modules that were already checked in refresh mode
for key in self.modControlChecks.keys():
self.modRedisplay[key] = (self.modControlVars[key].get() == 1)
for mod in allModules():
if self.modRedisplay[mod.name()]:
self.__runModule__(mod.name())
def __runModule__(self, modName):
"""Run a specified module.
:param mod: The name of the module class to run
"""
mod = None
for x in allModules():
if x.name() == modName:
mod = x
if mod is None:
return
# Check whether we are loading or unloading:
if self.modControlVars[mod.name()].get() == 1:
# Prepare the module if not done already:
if mod.name() not in self.modules.keys():
self.modules[mod.name()] = mod(type='GUI')
# Create a progress bar:
dialog = Progress_Dialog()
dialog.set_text('Running ' + mod.name() + '...')
dialog.update_idletasks()
# Run using helper function:
parent_conn, child_conn = multiprocessing.Pipe()
p = multiprocessing.Process(target=ModuleRunner.run, args=(child_conn,))
self.processes.append(p)
p.start()
try:
parent_conn.send(self.modules[mod.name()])
except:
print(mod.__dict__)
raise Exception('Module object passed to ModuleRunner is not pickle-able!')
try:
parent_conn.send(self.imp)
except:
raise Exception('Implosion object passed to ModuleRunner is not pickle-able!')
obj = None
            # Callbacks while the process is active:
def callback():
nonlocal p, dialog, parent_conn
if dialog.cancelled:
p.terminate()
dialog.withdraw()
return
# Try to receive from the pipe:
if parent_conn.poll():
# Update the progress, or end otherwise:
obj = parent_conn.recv()
if isinstance(obj, Exception):
from tkinter.messagebox import showerror
showerror('Error!', 'A problem occurred running '+mod.name()+'\n'+obj.__str__())
# Uncheck from main window:
self.modControlVars[mod.name()].set(0)
dialog.withdraw()
p.terminate()
return
elif isinstance(obj, float) or isinstance(obj, int):
dialog.set(100*obj)
elif isinstance(obj, Module):
# Pass back to the main app:
self.modules[mod.name()].copy(obj)
self.after(10, lambda: self.__postModule__(obj))
dialog.withdraw()
p.terminate()
return # Callback loop breaks here
self.after(25, callback) # loop
# Start callback loop in a bit:
self.after(10, callback)
else:
# If a module is unchecked, it is removed and deleted:
self.__deleteModule__(mod)
def __postModule__(self, mod):
"""Handle tasks to be done after completion of a module run."""
self.modules[mod.name()].display(wm=self.wm, refresh=self.modRedisplay[mod.name()])
try:
self.modules[mod.name()].protocol(name='WM_DELETE_WINDOW', func=lambda: self.__deleteModule__(mod))
except:
pass
def __deleteModule__(self, mod):
"""Delete a module"""
if mod.name() in self.modules.keys():
self.modules[mod.name()].withdraw()
# remove all plots generated by that module:
for fig in self.modules[mod.name()].getPlots():
matplotlib.pyplot.close(fig)
self.modules.__delitem__(mod.name())
self.modControlVars[mod.name()].set(0)
def __impInfo__(self):
"""Show some info about the current implosion type."""
title = ''
text = ''
try:
title = self.impType + ' Information'
# Identify the implosion type:
for impType in allImplosions():
if impType.name() == self.impType:
text += 'Author: ' + impType.__author__ + '\n'
text += 'Date: ' + impType.__date__ + '\n'
text += 'Version: ' + impType.__version__ + '\n'
text += '\n'
text += self.imp.info()
except:
pass
if text != '' and title != '':
from tkinter.messagebox import showinfo
showinfo(title=title, message=text)
def __modInfo__(self, type):
"""Show some info about a selected module."""
title = ''
text = ''
try:
title = type + ' Information'
mod = self.modules[type]
text += 'Author: ' + mod.__author__ + '\n'
text += 'Date: ' + mod.__date__ + '\n'
text += 'Version: ' + mod.__version__ + '\n'
text += '\n'
text += mod.detailedInfo()
except:
pass
if text != '' and title != '':
from tkinter.messagebox import showinfo
showinfo(title=title, message=text)
def __about__(self):
from tkinter.messagebox import showinfo
title = 'impy'
text = 'Author: ' + self.__author__ + '\n'
text += 'Date: ' + self.__date__ + '\n'
text += 'Version: ' + self.__version__ + '\n'
showinfo(title=title, message=text)
if __name__ == "__main__":
multiprocessing.freeze_support()
root = tk.Tk()
root.withdraw()
app = impy()
root.mainloop()
|
import numpy as np
import math
import pandas as pd
def rastrigin_FitnessFunc(pop):
print("pop")
print(pop)
def rastr(x):
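        # Two-dimensional Rastrigin function (20 + sum of x_i**2 - 10*cos(2*pi*x_i)),
        # scaled down by a factor of 40.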
summ = 0
summ += x[0]**2 - 10.0 * np.cos(2 * math.pi * x[0])
summ += x[1]**2 - 10.0 * np.cos(2 * math.pi * x[1])
return (20 + summ)/40
genes = []
for i in range(len(pop)):
genes.append(pop[i])
df = pd.DataFrame(data=genes)
print(df)
df_fitness = pd.DataFrame(data=rastr(df))
df_fitness = df_fitness.transpose()
return df_fitness
|
from typing import Tuple, List, Dict, Union
import fiona
from geopandas import GeoDataFrame
from shapely.geometry import Polygon
from map_poster_creator.color_schemes import get_color_schemes
from map_poster_creator.geojson import get_polygon_from_geojson, get_map_geometry_from_poly, MapGeometry
from map_poster_creator.logs import log_processing, logging
from map_poster_creator.plotting import plot_and_save
logger = logging.getLogger(__name__)
@log_processing
def get_roads_data(shp_path: str) -> GeoDataFrame:
roads = GeoDataFrame.from_file(f"{shp_path}/gis_osm_roads_free_1.shp", encoding='utf-8')
return roads # TODO: Add Path or os.path
@log_processing
def get_water_data(shp_path: str) -> GeoDataFrame:
water = GeoDataFrame.from_file(f"{shp_path}/gis_osm_water_a_free_1.shp", encoding='utf-8')
return water # TODO: Add Path or os.path
@log_processing
def get_greens_data(shp_path: str) -> GeoDataFrame:
greens = GeoDataFrame.from_file(f"{shp_path}/gis_osm_pois_a_free_1.shp", encoding='utf-8')
return greens # TODO: Add Path or os.path
@log_processing
def get_boundary_shape(geojson) -> Tuple[Polygon, MapGeometry]:
poly = get_polygon_from_geojson(geojson)
geometry = get_map_geometry_from_poly(poly)
return poly, geometry
@log_processing
def preprocessing_roads(poly: Polygon, roads: GeoDataFrame) -> GeoDataFrame:
town = roads.loc[roads['geometry'].apply(lambda g: poly.contains(g))].copy()
town = town[~town.fclass.isin(['footway', "steps"])]
town['speeds'] = [speed for speed in town['maxspeed']]
return town
@log_processing
def preprocessing_other(poly: Polygon, dataframe: GeoDataFrame) -> GeoDataFrame:
town = dataframe.loc[dataframe['geometry'].apply(lambda g: poly.contains(g))].copy()
return town
def create_poster(
base_shp_path: str,
geojson_path: str,
colors: Union[List[Union[Dict, str]], None],
layers: List[str],
config: dict,
output_prefix: str,
user_color_scheme: bool = False,
):
poly, geometry = get_boundary_shape(geojson=geojson_path)
roads = get_roads_data(base_shp_path)
water = get_water_data(base_shp_path)
greens = get_greens_data(base_shp_path)
roads_df = preprocessing_roads(poly=poly, roads=roads)
water_df = preprocessing_other(poly=poly, dataframe=water)
greens_df = preprocessing_other(poly=poly, dataframe=greens)
# TODO: Support user color scheme
for color in colors:
        if color not in get_color_schemes().keys():
            logger.warning(f"Color {color} not found in base color scheme. "
                           f"Available colors: {', '.join(get_color_schemes().keys())}")
            continue
print("")
print(f"Plot and Save {color} map")
plot_and_save(
roads=roads_df,
            water=water_df,
greens=greens_df,
geometry=geometry,
path=f'{output_prefix}_{color}.png',
dpi=1200,
color=get_color_schemes()[color]
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
import oskar
from mpi4py import MPI
if __name__ == '__main__':
# Check command line arguments.
if len(sys.argv) < 3:
raise RuntimeError(
'Usage: mpiexec -n <np> '
'python sim_mpi_multi_channel.py '
'<freq_start_MHz> <freq_inc_MHz>')
# Global options.
precision = 'single'
phase_centre_ra_deg = 0.0
phase_centre_dec_deg = -60.0
# Get MPI communicator and rank, and set values that depend on the rank.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
frequency_start_hz = 1e6 * float(sys.argv[-2])
frequency_inc_hz = 1e6 * float(sys.argv[-1])
frequency_hz = frequency_start_hz + frequency_inc_hz * rank
filename = ('test_data_%05.1f.ms' % (frequency_hz / 1e6))
# Set up the sky model.
sky = oskar.Sky.generate_grid(phase_centre_ra_deg, phase_centre_dec_deg,
16, 5, precision=precision)
sky.append_sources(phase_centre_ra_deg, phase_centre_dec_deg, 1.0)
# Set up the telescope model.
tel = oskar.Telescope(precision)
tel.set_channel_bandwidth(100e3)
tel.set_time_average(10.0)
tel.set_pol_mode('Scalar')
tel.load('SKA1-LOW_v5_single_random.tm')
# Set station properties after stations have been defined.
tel.set_phase_centre(phase_centre_ra_deg, phase_centre_dec_deg)
tel.set_station_type('Isotropic')
# Set up the basic simulator and run simulation.
simulator = oskar.Interferometer(precision)
simulator.set_settings_path(os.path.abspath(__file__))
simulator.set_max_sources_per_chunk(sky.num_sources+1)
simulator.set_sky_model(sky)
simulator.set_telescope_model(tel)
simulator.set_observation_frequency(frequency_hz)
simulator.set_observation_time(
start_time_mjd_utc=51544.375, length_sec=10800.0, num_time_steps=180)
simulator.set_output_measurement_set(filename)
start = time.time()
simulator.run()
print('Simulation for %05.1f MHz completed after %.3f seconds.' %
(frequency_hz / 1e6, time.time() - start))
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from math import ceil
DEFAULT = {
"second": "second",
"seconds": "seconds",
"minute": "minute",
"minutes": "minutes",
"hour": "hour",
"hours": "hours",
"day": "day",
"days": "days",
}
SHORT = {
"second": "sec",
"seconds": "sec",
"minute": "min",
"minutes": "min",
"hour": "hour",
"hours": "hrs",
"day": "day",
"days": "days",
}
SHORTCAP = {
"second": "Sec",
"seconds": "Sec",
"minute": "Min",
"minutes": "Min",
"hour": "Hr",
"hours": "Hr",
"day": "Day",
"days": "Days",
}
def duration(value, locales=DEFAULT):
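    # Accept either a timedelta or a raw microsecond count; negative raw
    # values are clamped to zero before formatting.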
if not isinstance(value, timedelta):
value = timedelta(microseconds=max(0, value))
if value == timedelta(seconds=1):
return "1 %s" % (locales["second"])
if value < timedelta(minutes=1):
return "%.3g %s" % (value.seconds+float(value.microseconds) / 1000000, locales["seconds"])
if value < timedelta(hours=1):
return "%.3g %s" % (value.seconds / 60, value.seconds >= 120 and locales["minutes"] or locales["minute"])
if value < timedelta(days=1):
return "%d %s" % (ceil(value.seconds / 3600.0), value.seconds > 3600 and locales["hours"] or locales["hour"])
return "%.3g %s" % (value.days, value.days > 1 and locales["days"] or locales["day"])
|
import datetime
import decimal
import time
import unittest
import uuid
import dateutil.parser
import django.test
import django.urls
import django.utils.timezone
import mock
import rest_framework.test
from behave import *
import api.tests.factories
import jetstream.tests.tas_api_mock_utils
@step('we create a new user')
def create_new_user(context):
"""
:type context: behave.runner.Context
"""
assert context.persona
user = api.tests.factories.UserFactory.create(username=context.persona['username'], is_staff=False,
is_superuser=False)
user.set_password(context.persona['password'])
user.save()
context.persona['user'] = user
@step('we create a new admin user')
def create_new_admin_user(context):
"""
:type context: behave.runner.Context
"""
assert context.persona
user = api.tests.factories.UserFactory.create(username=context.persona['username'], is_staff=True,
is_superuser=True)
user.set_password(context.persona['password'])
user.save()
context.persona['user'] = user
@step('I log in')
def i_log_in(context):
"""
:type context: behave.runner.Context
"""
assert context.persona
client = rest_framework.test.APIClient()
context.persona['client'] = client
with django.test.modify_settings(AUTHENTICATION_BACKENDS={
'prepend': 'django.contrib.auth.backends.ModelBackend',
'remove': ['django_cyverse_auth.authBackends.MockLoginBackend']
}):
login_result = client.login(username=context.persona['username'], password=context.persona['password'])
context.test.assertTrue(login_result)
@step('I log in with valid XSEDE project required and default quota plugin enabled')
def i_log_in(context):
"""
:type context: behave.runner.Context
"""
assert context.persona
client = rest_framework.test.APIClient()
context.persona['client'] = client
with django.test.override_settings(
AUTHENTICATION_BACKENDS=['django_cyverse_auth.authBackends.MockLoginBackend'],
ALWAYS_AUTH_USER=context.persona['username'],
DEFAULT_QUOTA_PLUGINS=['jetstream.plugins.quota.default_quota.JetstreamSpecialAllocationQuota'],
):
with django.test.modify_settings(
VALIDATION_PLUGINS={
'prepend': 'jetstream.plugins.auth.validation.XsedeProjectRequired',
'remove': 'atmosphere.plugins.auth.validation.AlwaysAllow'
}
):
login_result = client.login(username=context.persona['username'], password=context.persona['password'])
context.test.assertTrue(login_result)
if 'user' not in context.persona:
import core.models
context.persona['user'] = core.models.AtmosphereUser.objects.get_by_natural_key(
context.persona['username'])
assert context.persona['user'].username == context.persona['username']
@step('I get my allocation sources from the API I should see')
def get_allocation_sources_from_api(context):
assert context.persona
client = context.persona['client']
response = client.get('/api/v2/allocation_sources')
context.persona['response'] = response
context.test.assertEqual(response.status_code, 200)
api_allocation_sources = []
for raw_result in response.data['results']:
api_result = {}
for heading in context.table.headings:
raw_value = raw_result[heading]
cleaned_value = raw_value
if isinstance(raw_value, datetime.datetime):
rounded_datetime = raw_value.replace(microsecond=0)
formatted_datetime = datetime.datetime.strftime(rounded_datetime, u'%Y-%m-%d %H:%M:%S%z')
cleaned_value = formatted_datetime
if heading == 'start_date':
# a datetime formatted as a string
parsed_datetime = dateutil.parser.parse(raw_value)
rounded_datetime = parsed_datetime.replace(microsecond=0)
formatted_datetime = datetime.datetime.strftime(rounded_datetime, u'%Y-%m-%d %H:%M:%S%z')
cleaned_value = formatted_datetime
api_result[heading] = cleaned_value
api_allocation_sources.append(api_result)
raw_expected_allocation_sources = [dict(zip(row.headings, row.cells)) for row in context.table]
expected_allocation_sources = []
transform_map = {
'name': unicode,
'compute_allowed': int,
'start_date': str,
'end_date': lambda x: None if x == 'None' else unicode(x),
'compute_used': decimal.Decimal,
'global_burn_rate': decimal.Decimal,
'updated': str,
'renewal_strategy': unicode,
'user_compute_used': decimal.Decimal,
'user_burn_rate': decimal.Decimal,
'user_snapshot_updated': str
}
for raw_row in raw_expected_allocation_sources:
clean_row = {}
for key, value in raw_row.iteritems():
transform = transform_map[key]
clean_row[key] = transform(value)
expected_allocation_sources.append(clean_row)
context.test.maxDiff = None
context.test.assertItemsEqual(expected_allocation_sources, api_allocation_sources)
@step('we create an allocation source through the API')
def we_create_allocation_source_through_api(context):
assert context.persona
client = context.persona['client']
for row in context.table:
response = client.post('/api/v2/allocation_sources',
{
'renewal_strategy': row['renewal strategy'],
'name': row['name'],
'compute_allowed': row['compute allowed']
})
if 'uuid' in response.data and response.data['uuid']:
allocation_source_ids = context.persona.get('allocation_source_ids', {})
allocation_source_ids[row['name']] = response.data['uuid']
context.persona['allocation_source_ids'] = allocation_source_ids
@step('we assign allocation source "{allocation_source_name}" to user "{username}" via the API')
def assign_allocation_source_to_user_via_api(context, allocation_source_name, username):
assert context.persona
client = context.persona['client']
allocation_source_id = context.persona['allocation_source_ids'][allocation_source_name]
context.persona['response'] = client.post('/api/v2/user_allocation_sources',
{
'username': username,
'source_id': allocation_source_id
})
@step('we create a provider "{provider_location}"')
def set_up_provider(context, provider_location):
assert context.persona
import core.models
provider = api.tests.factories.ProviderFactory.create(location=provider_location, public=True, type__name='mock')
core.models.ProviderCredential.objects.get_or_create(
provider=provider,
key='auth_url',
value='https://localhost/')
core.models.ProviderCredential.objects.get_or_create(
provider=provider,
key='project_name',
value='some_project')
core.models.ProviderCredential.objects.get_or_create(
provider=provider,
key='region_name',
value='some_region')
core.models.ProviderCredential.objects.get_or_create(
provider=provider,
key='admin_url',
value='https://localhost/')
context.persona['provider'] = provider
@step('we create an account for the current persona on provider "{provider_location}"')
def create_account(context, provider_location):
"""This does not use the factory
We want to test the default quota plugin.
"""
assert context.persona
import core.models
provider = core.models.Provider.objects.get(location=provider_location)
user = context.persona['user']
with mock.patch('service.driver.get_account_driver', autospec=True) as mock_get_account_driver:
mock_account_driver = mock.MagicMock(provider)
def mock_create_account_method(username, password=None, project_name=None,
role_name=None, quota=None, max_quota=False):
factory_identity = api.tests.factories.IdentityFactory.create_identity(
created_by=user,
provider=provider,
quota=quota)
return factory_identity
mock_account_driver.create_account = mock.MagicMock(side_effect=mock_create_account_method)
mock_get_account_driver.return_value = mock_account_driver
with mock.patch.multiple('jetstream.allocation',
tacc_api_post=mock.DEFAULT,
tacc_api_get=mock.DEFAULT,
) as mock_methods:
mock_methods['tacc_api_post'].side_effect = jetstream.tests.tas_api_mock_utils._make_mock_tacc_api_post(
context)
mock_methods['tacc_api_get'].side_effect = jetstream.tests.tas_api_mock_utils._make_mock_tacc_api_get(
context)
with django.test.override_settings(
DEFAULT_QUOTA_PLUGINS=['jetstream.plugins.quota.default_quota.JetstreamSpecialAllocationQuota']
):
import core.plugins
core.plugins.DefaultQuotaPluginManager.list_of_classes = getattr(django.conf.settings,
'DEFAULT_QUOTA_PLUGINS', [])
new_identity = core.models.user.create_new_account_for(provider, user)
context.persona['user_identity'] = new_identity
@step('we create an identity for the current persona on provider "{provider_location}"')
def create_identity(context, provider_location):
assert context.persona
import core.models
provider = core.models.Provider.objects.get(location=provider_location)
user_identity = api.tests.factories.IdentityFactory.create_identity(
created_by=context.persona['user'],
provider=provider)
context.persona['user_identity'] = user_identity
@step('I should have the following quota on provider "{}"')
def should_have_quota_on_provider(context, provider_location):
assert context.persona
import core.models
provider = core.models.Provider.objects.get(location=provider_location)
username = context.persona['username']
user_identity = core.models.user.get_default_identity(username, provider)
expected_quota = dict([(row[0], int(row[1])) for row in context.table.rows])
quota_keys = expected_quota.keys()
actual_quota = user_identity.quota
actual_quota_dict = dict([(key, getattr(actual_quota, key)) for key in quota_keys])
context.test.assertDictEqual(expected_quota, actual_quota_dict)
@step('we make the current identity the admin on provider "{provider_location}"')
def create_identity(context, provider_location):
assert context.persona
import core.models
provider = core.models.Provider.objects.get(location=provider_location)
user_identity = context.persona['user_identity']
core.models.AccountProvider.objects.get_or_create(
provider=provider,
identity=user_identity)
core.models.Identity.update_credential(user_identity, 'key', 'admin', replace=True)
core.models.Identity.update_credential(user_identity, 'secret', 'adminsecret', replace=True)
@step('we create a provider machine for current persona')
def create_provider_machine(context):
assert context.persona
user_identity = context.persona['user_identity']
user = context.persona['user']
provider_machine = api.tests.factories.ProviderMachineFactory.create_provider_machine(user, user_identity)
context.persona['provider_machine'] = provider_machine
@step('we create an active instance')
def create_active_instance(context):
assert context.persona
user = context.persona['user']
user_identity = context.persona['user_identity']
provider_machine = context.persona['provider_machine']
import core.models
context.test.assertIsInstance(provider_machine, core.models.ProviderMachine)
provider = provider_machine.provider
active_instance = api.tests.factories.InstanceFactory.create(name='Instance in active', provider_alias=uuid.uuid4(),
source=provider_machine.instance_source,
created_by=user,
created_by_identity=user_identity,
start_date=django.utils.timezone.now())
active_status = api.tests.factories.InstanceStatusFactory.create(name='active')
single_cpu_size = api.tests.factories.InstanceSizeFactory.create(
name='single_cpu_size',
provider=provider,
cpu=1,
disk=100,
root=10,
mem=4096
)
api.tests.factories.InstanceHistoryFactory.create(
status=active_status,
activity='',
instance=active_instance,
size=single_cpu_size
)
context.persona['active_instance'] = active_instance
@step('I set "{key}" to attribute "{attribute}" of "{persona_var}"')
@step('I set "{key}" to another variable "{persona_var}"')
def set_key_to_persona_var_and_attribute(context, key, persona_var, attribute=None):
assert context.persona is not None, u'no persona is setup'
if attribute:
context.persona[key] = getattr(context.persona[persona_var], attribute)
else:
context.persona[key] = context.persona[persona_var]
@step('I set "{key}" to allocation source with name "{allocation_source_name}"')
def set_key_to_persona_var_and_attribute(context, key, allocation_source_name):
assert context.persona is not None, u'no persona is setup'
import core.models
allocation_source = core.models.AllocationSource.objects.get(name=allocation_source_name)
context.persona[key] = allocation_source
@when('we get the details for the active instance via the API')
def get_details_for_active_instance(context):
assert context.persona
client = context.persona['client']
active_instance = context.persona['active_instance']
url = django.urls.reverse('api:v2:instance-detail',
args=(active_instance.provider_alias,))
# Try a few times. Sometimes this does not find the instance on the first try.
    for i in range(10):
        response = client.get(url)
        if 'version' in response.data:
            break
        time.sleep(0.1)
context.persona['response'] = response
context.persona['provider_alias'] = response.data['version']['id']
@when('I assign allocation source "{allocation_source_name}" to active instance')
def assign_allocation_source_to_active_instance(context, allocation_source_name):
assert context.persona
active_instance = context.persona['active_instance']
client = context.persona['client']
response = client.post('/api/v2/instance_allocation_source',
{
'instance_id': active_instance.provider_alias,
'allocation_source_name': allocation_source_name
})
context.persona['response'] = response
@step('the API response code is {response_code:d}')
def api_response_code_is(context, response_code):
assert context.persona
assert isinstance(context.test, unittest.case.TestCase)
context.test.assertEqual(context.persona['response'].status_code, response_code)
@step('a dummy browser')
def dummy_browser(context):
context.single_browser = True
context.browser = 'dummy'
context.is_connected = True
|
from setuptools import setup, find_packages
setup(
# this will be my Library name.
name='option-pricing-python',
# Want to make sure people know who made it.
author='Davis W. Edwards',
# also an email they can use to reach out.
author_email='davis.edwards@understandtrading.com',
# I'm in alpha development still, so a compliant version number is a1.
# read this as MAJOR VERSION 0, MINOR VERSION 1, MAINTENANCE VERSION 0
version='0.1.0',
# here is a simple description of the library, this will appear when
# someone searches for the library on https://pypi.org/search
    description='A library to price financial options using closed-form solutions written in Python.',
# want to make sure that I specify the long description as MARKDOWN.
long_description_content_type="text/markdown",
# here is the URL you can find the code, this is just the GitHub URL.
url='https://github.com/obolary/option-pricing-python',
# there are some dependencies to use the library, so let's list them out.
install_requires=[],
# some keywords for my library.
keywords='finance, options',
# here are the packages I want "build."
packages=find_packages(include=['option_pricing']),
    # you will need python 3.7 to use this library.
python_requires='>=3.7'
)
|
from .list import ProjectListQuery
from .duplication_checks import ProjectDuplicationCheckQuery
class ProjectsQuery(
ProjectDuplicationCheckQuery,
ProjectListQuery,
):
pass
|
from PIL import Image, ImageStat
import numpy as np
from progressbar import ProgressBar
import photo_mosaic.util
from photo_mosaic import dbmanager
class PhotoMosaicGenerator:
def __init__(self, materials_db: str):
self._db = dbmanager.DBManager(dbname=materials_db)
self._materials = self._db.get_materials()
def generate(self, target_image_name: str, n_split: int, output_image_name: str, threshold_near=5):
print('generating photo mosaic...')
target_image = Image.open(target_image_name)
target_image = photo_mosaic.util.convert_to_rgb_image(target_image)
gen_image = Image.new('RGB', target_image.size, 'white')
mat_size = gen_image.size[0] // n_split
target_image = np.array(target_image, 'f')
print(target_image_name, target_image.shape)
pbar = ProgressBar(max_value=n_split*n_split)
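        # Split the target image into an n_split x n_split grid and replace each
        # cell with the material image whose mean colour is closest.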
v_imgs = np.array_split(target_image, n_split, axis=1)
for n_v, v in enumerate(v_imgs):
h_imgs = np.array_split(v, n_split, axis=0)
for n_h, h in enumerate(h_imgs):
                # Compute the mean colour of this block of the target image
mean = np.mean(h, axis=0)
mean = np.mean(mean, axis=0)
                # Fetch the closest-matching material image
mat_image_name = self.get_near_image_name(mean, threshold=threshold_near)
                # Paste the matching material image at this grid position
mat_image = Image.open(mat_image_name)
mat_image = photo_mosaic.util.convert_to_rgb_image(mat_image)
mat_image = photo_mosaic.util.trim_into_square(mat_image)
mat_image = mat_image.resize((mat_size, mat_size))
gen_image.paste(mat_image, (n_v*mat_size, n_h*mat_size))
pbar.update(n_v*n_split+n_h)
gen_image.save(output_image_name)
def get_near_image_name(self, rgb, threshold):
"""
指定のRGB値からもっとも近い画像を返す
しきい値内に収まっている画像が複数存在する場合はランダムで返す
"""
base_point = np.array(rgb)
candidates = list()
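        # Rank every material image by Euclidean distance to the target colour.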
for mat in self._materials:
m_point = np.array([mat.R, mat.G, mat.B])
diff = np.linalg.norm(base_point-m_point)
candidates.append([mat.name, diff])
candidates.sort(key=lambda x:x[1])
most_near_diff = candidates[0][1]
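        # Collect the candidates whose distance is within `threshold` of the
        # best match, then return one of them at random.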
for idx, mat in enumerate(candidates):
if (mat[1]-most_near_diff) > threshold:
break
if idx == 0:
idx = 1
return candidates[np.random.randint(idx)][0]
|
try:
from mac import Mac
from censor import Censor
except ImportError:
from heyyou.mac import Mac
from heyyou.censor import Censor
class Config:
interface: str = None
mac: Mac = None
auth_token: str = None
summary: bool = False
censor: Censor = None
def __init__(self, args):
Config.interface = args.interface
Config.auth_token = args.auth_token
Config.summary = args.summary
Config.censor = Censor(args.censor)
if args.mac_vendors:
Config.mac = Mac(args.mac_vendors)
|
# importing necessary modules in our Program
from tkinter import *
import tkinter.messagebox as msg
# making the root variable a Tk instance and setting the title, minimum and maximum size of our GUI application
root = Tk()
root.geometry("655x333")
root.maxsize(655, 333)
root.minsize(655, 333)
root.title("ListBox using Tkinter")
# creating a function which will be called when the Button is Pressed
def ad():
global i
lbx.insert(ACTIVE, f"{i}")
i = i+1
# creating a global variable i
i = 0
# creating a Listbox and packing It
lbx = Listbox(root)
lbx.pack(ipadx=20)
# inserting a default item in the ListBox
lbx.insert(END, "First Item of this List Box")
# creating a Button and packing it
Button(root, text="Add Item", command=ad).pack()
# making our GUI program run in an Event Loop
root.mainloop()
|
from dottygen.generator.dotty_generator import DottyGenerator
|
from time import sleep
from random import choice
def color_it(text, color):
red = "\033[1;31;40m"
green = "\033[1;32;40m"
yellow = "\033[1;33;40m"
cyan = "\033[1;36;40m"
purple = "\033[1;35;40m"
normal = "\033[0;37;40m"
if color == "red":
return "{}{}{}".format(red, text, normal)
elif color == "green":
return "{}{}{}".format(green, text, normal)
elif color == "yellow":
return "{}{}{}".format(yellow, text, normal)
elif color == "cyan":
return "{}{}{}".format(cyan, text, normal)
elif color == "purple":
return "{}{}{}".format(purple, text, normal)
else:
return text
colors = ["red", "green", "yellow", "cyan", "purple"]
hello = "Hello from Raspberry Pi"
congrats = "Congrats you finished your first Challenge!"
for i in range(10):
rand_color = choice(colors)
print(color_it(hello, rand_color))
print(color_it(congrats, rand_color))
print()
sleep(0.5)
|