content stringlengths 5 1.05M |
|---|
#!/usr/bin/python
#
# Script decode drum table/patterns from Zoom F/W
# (c) Simon Wood, 10 May 2020
#
from optparse import OptionParser
from construct import *
#--------------------------------------------------
# Define drum table/patterns using Construct (v2.9)
# requires:
# https://github.com/construct/construct
# One entry in the firmware's drum-pattern table: the pattern name, the nine
# sample file names it can trigger, time signature, length in bars, and a
# 24-bit little-endian pointer to the pattern's data.  Every record ends with
# a fixed 0xC0 marker byte.
Item = Struct(
    "name" / PaddedString(9, "ascii"),
    "sample1" / PaddedString(12, "ascii"),
    "sample2" / PaddedString(12, "ascii"),
    "sample3" / PaddedString(12, "ascii"),
    "sample4" / PaddedString(12, "ascii"),
    "sample5" / PaddedString(12, "ascii"),
    "sample6" / PaddedString(12, "ascii"),
    "sample7" / PaddedString(12, "ascii"),
    "sample8" / PaddedString(12, "ascii"),
    "sample9" / PaddedString(12, "ascii"),  # Should be 'Click.raw'
    "tsig_top" / Byte,   # time signature numerator
    "tsig_bot" / Byte,   # time signature denominator
    "bars" / Byte,       # pattern length in bars
    "pointer" / Int24ul, # offset of the pattern data (relative; rebased at print time)
    Const(b"\xC0"),      # record terminator
)

# The table is simply Items repeated until parsing fails.
Table = Struct(
    "items" / GreedyRange(Item),
)

# One drum event: a packed sample-number/volume nibble pair followed by the
# number of ticks to wait before the next event.  0xF0 marks end-of-pattern,
# so the Peek/Check pair stops GreedyRange without consuming the terminator.
Sample = Struct(
    "end" / Peek(Byte),
    Check(this.end != 0xF0),
    "element" / BitStruct(
        "sample" / Default(BitsInteger(4), 0),  # which of the 9 samples (1-based)
        "volume" / Default(BitsInteger(4), 0),  # 0..15 velocity
    ),
    "skiptime" / Byte,
)

# A pattern: two config bytes, a zero byte, the events, then the 0xF0 terminator.
Pattern = Struct(
    "config1" / Byte,
    "config2" / Byte,
    Const(b"\x00"),
    "elements" / GreedyRange(Sample),
    Const(b"\xF0"),
)
#-------------------------------------------
usage = "usage: %prog [options] FILENAME"
parser = OptionParser(usage)
parser.add_option("-d", "--dump", help="dump configuration to text",
                  action="store_true", dest="dump")
parser.add_option("-T", "--table",
                  help="offset to pattern table within '129' file (G1Four V2.00 use 407304)",
                  dest="table")
parser.add_option("-D", "--drums",
                  help="offset to drum data within '129' file (G1Four V2.00 use 457078)",
                  dest="drums")
parser.add_option("-P", "--pointer",
                  help="print out the pointers to drum data as a sorted list",
                  action="store_true", dest="pointer")
parser.add_option("-p", "--pattern",
                  help="print drum data representation for a pattern", dest="pattern")

(options, args) = parser.parse_args()
if len(args) != 1:
    parser.error("FILE not specified")

# Read data from file.
# Bug fix: the original tested `if not infile and not options.test`, but
# open() raises rather than returning a falsy value, no "test" option is
# defined (AttributeError), and `sys` was never imported.  Catch the open
# failure instead and report it through the parser.
try:
    with open(args[0], "rb") as infile:
        data = infile.read()
except OSError:
    parser.error("Unable to open config FILE for reading")

table = []
if options.table:
    # Parse the drum-pattern table found at the given byte offset.
    table = Table.parse(data[int(options.table):])
    if options.dump:
        print(table)

# Drum data can only be resolved via the table, so require both offsets
# (the original crashed with a TypeError on `[]['items']` otherwise).
if options.drums and table:
    # Collect (name, pointer) pairs and sort by pointer for later lookups.
    pointers = sorted(
        ([item['name'], int(item['pointer'])] for item in table['items']),
        key=lambda tup: tup[1])

    if options.pointer:
        # Table pointers are relative; rebase them onto the drum-data offset
        # supplied on the command line.
        first = pointers[0][1]
        for item in pointers:
            print("%s %s " % (item[0].ljust(10, ' '),
                              str.format('0x{:08X}', item[1] - first + int(options.drums))))

    # Bug fix: also reject pattern numbers below 1 — Python's negative
    # indexing would otherwise silently select from the end of the table.
    if options.pattern and 1 <= int(options.pattern) <= len(pointers):
        first = pointers[0][1]
        item = table['items'][int(options.pattern) - 1]
        pointer = int(item['pointer']) - first + int(options.drums)

        count = 0
        # 9 drum rows x 200 time steps of text art.  Renamed from 'ascii',
        # which shadowed the builtin of the same name.
        grid = [list(" " * 200) for i in range(9)]
        # Volume 0..15 rendered as characters of increasing visual weight.
        # (A dead first assignment of "0123456789ABCDEF" was removed.)
        graphic = "..,,ooxxOOXX$$##"

        pattern = Pattern.parse(data[pointer:])
        if options.dump:
            print(pattern)

        for element in pattern['elements']:
            grid[element['element']['sample'] - 1][count] = graphic[element['element']['volume']]
            if element['skiptime']:
                count += element['skiptime']

        print("Pattern %s : %s (%s)" % (options.pattern, item['name'],
                                        str.format('0x{:08X}', pointer)))
        print("Bars %d (%d/%d)" % (item['bars'], item['tsig_top'], item['tsig_bot']))
        print("---")
        for key in range(9):
            print("%s :%s:" % (item['sample' + str(key + 1)].ljust(12, ' '),
                               "".join(grid[key][:count])))
|
#
# Lockstep Platform SDK for Python
#
# (c) 2021-2022 Lockstep, Inc.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
# @author Lockstep Network <support@lockstep.io>
# @copyright 2021-2022 Lockstep, Inc.
# @link https://github.com/Lockstep-Network/lockstep-sdk-python
#
from dataclasses import dataclass
@dataclass
class AttachmentModel:
    """
    An Attachment is a file that can be attached to various account
    attributes within Lockstep. This data model contains metadata about
    the attachment. You can upload and download attachments into the
    Lockstep Platform along with this metadata. Attachments can be used
    for invoices, payments, legal documents, or any other external files
    that you wish to track. See [Extensibility](https://developer.lockstep.io/docs/extensibility)
    for more information.
    """

    # NOTE(review): the `str | None` union syntax requires Python 3.10+ (or a
    # `from __future__ import annotations` at the top of the file) — confirm
    # the SDK's minimum supported Python version.
    # All fields default to None so partial API payloads deserialize cleanly.
    attachmentId: str | None = None
    groupKey: str | None = None
    # tableKey/objectKey presumably identify the record the file is attached
    # to (table name + row key) — verify against the Lockstep API schema.
    tableKey: str | None = None
    objectKey: str | None = None
    fileName: str | None = None
    fileExt: str | None = None
    attachmentTypeId: str | None = None
    isArchived: bool | None = None
    originAttachmentId: str | None = None
    viewInternal: bool | None = None
    viewExternal: bool | None = None
    erpKey: str | None = None
    appEnrollmentId: str | None = None
    # Timestamps are carried as ISO strings, not datetime objects.
    created: str | None = None
    createdUserId: str | None = None
    attachmentType: str | None = None
|
from django.test import TestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from myhpom.models import State
class StateModelTestCase(TestCase):
    def test_order_by_ad(self):
        """States with advance-directive templates sort first, alphabetically."""
        # Files with ADs should be at the front of the list, and they should be
        # in alphabetical order (regardless of the template filename)
        NC = State.objects.filter(name='NC')
        SC = State.objects.filter(name='SC')
        AK = State.objects.filter(name='AK')
        # Bug fix: SimpleUploadedFile content must be bytes, not str —
        # passing '' raises a TypeError when Django writes the file.
        NC.update(advance_directive_template=SimpleUploadedFile('afile.txt', b''))
        SC.update(advance_directive_template=SimpleUploadedFile('zfile.txt', b''))
        first_states = State.objects.order_by_ad()[:3]
        self.assertEqual(list(first_states), [NC.first(), SC.first(), AK.first()])
        # Territories are expected to sort to the very end of the ordering.
        last_state = State.objects.order_by_ad().last()
        self.assertTrue(last_state.is_territory)
|
# JDC suggests two tests:
# * Likelihood always zero, confirm that we get uniform distribution
# * Likelihood Gaussian in GB radius, confirm that we get expected result
import numpy as np
import pytest
# Bug fix: `from networkx import nx` fails with ImportError — networkx has no
# `nx` attribute.  The conventional alias import is what the module's usage
# (nx.shortest_path_length) expects.
import networkx as nx
from bayes_implicit_solvent.samplers import tree_rjmc
from bayes_implicit_solvent.typers import GBTypingTree, AtomSpecificationProposal
def assert_self_consistency(initial_tree, max_tries=100):
    """Up to max_tries times, sample a creation proposal followed by a deletion
    proposal; whenever that round trip lands back on the initial tree, check
    that log_prob_forward_over_reverse agrees (up to sign) in both directions."""
    for _attempt in range(max_tries):
        creation = initial_tree.sample_creation_proposal()
        grown_tree = creation['proposal']
        deletion = grown_tree.sample_deletion_proposal()
        shrunk_tree = deletion['proposal']

        # Only a delete that exactly undoes the create gives a comparable pair.
        if tuple(shrunk_tree.ordered_nodes) != tuple(initial_tree.ordered_nodes):
            continue

        forward = creation['log_prob_forward_over_reverse']
        reverse = - deletion['log_prob_forward_over_reverse']
        if np.isclose(forward, reverse):
            return True
        pytest.fail('Inconsistent pair detected \n\t{}\n\t{}'.format(creation, deletion))
    print(RuntimeWarning(
        "Wasn't able to make a reversible pair of jumps in {} attempts for\n{}".format(max_tries, initial_tree)))
def construct_initial_tree():
    """Construct a basic tree with a hydrogen and the ability to specify connectivity.

    Returns a GBTypingTree whose '*', '[#1]' and '[#2]' types cannot be deleted.
    """
    # Cleanup: the original also built an AtomSpecificationProposal over
    # ['X1'..'X4'] here, but it was never attached to the tree nor returned,
    # so the unused locals were removed.
    un_delete_able_types = ['*', '[#1]', '[#2]']
    initial_tree = GBTypingTree(un_delete_able_types=un_delete_able_types)
    # Hang each protected type directly off the wildcard root.
    for base_type in un_delete_able_types[1:]:
        initial_tree.add_child(child_smirks=base_type, parent_smirks='*')
    return initial_tree
def test_proposal_self_consistency_on_random_walk(walk_length=100):
    """Sample a sequence of elaborate trees, then evaluate the self-consistency of create/delete
    proposals for each tree visited in this sequence"""
    print('attempting a random walk')
    current = construct_initial_tree()
    visited = [current]
    # Grow the tree one creation proposal at a time, remembering every state.
    for _step in range(walk_length):
        current = current.sample_creation_proposal()['proposal']
        visited.append(current)
    # Every visited tree must have self-consistent create/delete proposals.
    for tree in visited:
        assert_self_consistency(tree)
def test_atom_specification_proposal(n_tests=50):
    """Check create/delete self-consistency on the base tree (depth 1) and on
    trees elaborated by one creation proposal (depth 2)."""
    np.random.seed(0)
    base_tree = construct_initial_tree()

    # adding and removing a single specifier
    for _ in range(n_tests):
        assert_self_consistency(base_tree)
    print('depth-1 trees okay')

    # adding and removing more than one specifier
    for _ in range(n_tests):
        grown_tree = base_tree.sample_creation_proposal()['proposal']
        assert_self_consistency(grown_tree)
    print('depth-2 trees okay')
from simtk import unit
from scipy.stats import multivariate_normal
@pytest.mark.slow
def test_uniform_sampling_normal(depth_cutoff=2, n_iterations=10000):
    """Test that a sampler targeting these discrete structures and associated continuous parameters jointly
    obtains a uniform distribution over bounded-depth trees when appropriate.
    To do this, we ensure that each discrete tree has the same normalizing constant (namely, 1).
    # TODO: Choice of continuous distribution is arbitrary, as long as normalized. May switch to uniform instead of Gaussian.
    """
    np.random.seed(0)
    # specifiers = ['X1', 'X2', 'X3']
    specifiers = ['X1', 'X2']
    # specifiers = ['X1']
    # TODO: Set up testing fixtures with different numbers of specifiers, depth_cutoffs, etc.
    atom_specification_proposal = AtomSpecificationProposal(atomic_specifiers=specifiers)
    N = len(atom_specification_proposal.atomic_specifiers)
    un_delete_able_types = ['*', '[#1]']
    initial_tree = GBTypingTree(proposal_sigma=1.0 * unit.nanometer, un_delete_able_types=un_delete_able_types)
    for base_type in un_delete_able_types[1:]:
        initial_tree.add_child(child_smirks=base_type, parent_smirks='*')
    from math import factorial
    # Number of distinct ordered selections of `length` specifiers out of N.
    n_trees_at_length = lambda length: int(factorial(N) / factorial(N - length))
    number_of_trees_at_each_length = list(map(n_trees_at_length, range(len(specifiers) + 1)))

    def log_prob(tree):
        """To induce a uniform marginal distribution over *discrete* trees
        up to depth cutoff without duplicated nodes:
        1. check that the discrete tree is valid -- if not, return a log-probability of -inf
        2. define a normalized distribution over each tree's *continuous* parameters,
        namely a multivariate normal distribution
        If we sample the resulting probability distribution properly, we should obtain:
        1. A uniform marginal distribution over valid discrete trees
        2. A gaussian distribution over the continuous parameters within each model
        """
        N_nodes = tree.number_of_nodes
        no_duplicates = (len(set(tree.nodes)) == N_nodes)
        within_depth_limit = (max(nx.shortest_path_length(tree.G, source='*').values()) <= depth_cutoff)
        if no_duplicates and within_depth_limit:
            mean_vector = np.ones(N_nodes)  # a length-N_nodes vector of 1's
            tree_radii = tree.get_radii()  # a length-N_nodes vector of the radii associated with nodes in the tree
            return multivariate_normal.logpdf(x=tree_radii, mean=mean_vector)
        else:
            return - np.inf

    np.random.seed(0)
    result = tree_rjmc(initial_tree, log_prob,
                       n_iterations=n_iterations,
                       fraction_cross_model_proposals=0.5,
                       )
    radii = [tree.get_radii() for tree in result['traj']]
    np.save('sampled_radii.npy', radii)
    print('number of possible distinct discrete trees at each length',
          list(zip(range(len(number_of_trees_at_each_length)), number_of_trees_at_each_length)))
    number_of_possibilities = sum(number_of_trees_at_each_length)
    print('number of possibilities:', number_of_possibilities)
    print('initial tree:')
    print(initial_tree)
    traj = result['traj']
    string_representations = list(map(str, traj))
    print('number of distinct sampled models (as reflected in string representation)', len(set(string_representations)))
    for s in list(set(string_representations))[:5]:
        print(s)
    # Drop the first two (un-deletable) nodes; the remainder identifies the discrete model.
    discrete_models = [tuple(t.ordered_nodes[2:]) for t in traj]
    distinct_discrete_models = sorted(list(set(discrete_models)))
    discrete_model_index_dict = dict(zip(distinct_discrete_models, range(len(distinct_discrete_models))))
    discrete_model_traj = [discrete_model_index_dict[d] for d in discrete_models]
    np.save('discrete_model_traj.npy', discrete_model_traj)
    for d in distinct_discrete_models:
        print(d)
    print("number of distinct sampled models (as reflected in choice of smirks)", len(distinct_discrete_models))
    # Thin the chain to reduce autocorrelation before the statistical tests.
    thinning = 20
    lengths = np.array([len(d) for d in discrete_models[::thinning]])
    expected_length_distribution = len(lengths) * (np.array(number_of_trees_at_each_length) / np.sum(number_of_trees_at_each_length))
    actual_length_distribution = np.zeros(len(expected_length_distribution))
    for t in range(len(expected_length_distribution)):
        actual_length_distribution[t] += sum(lengths == t)
    print('expected_length_distribution', expected_length_distribution)
    print('actual_length_distribution', actual_length_distribution)
    threshold = 0.001
    from scipy.stats import chisquare
    # Uniformity over discrete trees: chi-square test on the model-length histogram.
    chi2_result = chisquare(f_obs=actual_length_distribution, f_exp=expected_length_distribution)
    print(chi2_result)
    assert (chi2_result.pvalue > threshold)
    from scipy.stats import kstest
    for i in range(max(lengths)):
        rvs = np.array([r[i] for r in radii if len(r) > i])
        # check that we're not producing mean-zero Gaussian values
        kstest_result = kstest(rvs[::thinning], 'norm')
        pvalue_should_be_under_threshold = kstest_result.pvalue
        assert (pvalue_should_be_under_threshold < threshold)
        # check that we're producing mean 1.0 Gaussian values
        from scipy.stats import norm
        kstest_result = kstest(rvs[::thinning], norm(loc=1.0).cdf)
        pvalue_should_be_over_threshold = kstest_result.pvalue
        assert (pvalue_should_be_over_threshold > threshold)
    return result
@pytest.mark.slow
def test_uniform_sampling_flat(depth_cutoff=2, n_iterations=100000):
    """Test that a sampler targeting these discrete structures and associated continuous parameters jointly
    obtains a uniform distribution over bounded-depth trees when appropriate.
    To do this, we ensure that each discrete tree has the same normalizing constant (namely, 1).
    # TODO: Choice of continuous distribution is arbitrary, as long as normalized. May switch to uniform instead of Gaussian.
    """
    np.random.seed(0)
    # specifiers = ['X1', 'X2', 'X3']
    specifiers = ['X1', 'X2']
    # specifiers = ['X1']
    # TODO: Set up testing fixtures with different numbers of specifiers, depth_cutoffs, etc.
    atom_specification_proposal = AtomSpecificationProposal(atomic_specifiers=specifiers)
    N = len(atom_specification_proposal.atomic_specifiers)
    un_delete_able_types = ['*', '[#1]']
    initial_tree = GBTypingTree(proposal_sigma=1.0 * unit.nanometer, un_delete_able_types=un_delete_able_types)
    for base_type in un_delete_able_types[1:]:
        initial_tree.add_child(child_smirks=base_type, parent_smirks='*')
    from math import factorial
    # Number of distinct ordered selections of `length` specifiers out of N.
    n_trees_at_length = lambda length: int(factorial(N) / factorial(N - length))
    number_of_trees_at_each_length = list(map(n_trees_at_length, range(len(specifiers) + 1)))

    def log_prob(tree):
        """To induce a uniform marginal distribution over *discrete* trees
        up to depth cutoff without duplicated nodes:
        1. check that the discrete tree is valid -- if not, return a log-probability of -inf
        2. define a normalized distribution over each tree's *continuous* parameters,
        namely a multivariate normal distribution
        If we sample the resulting probability distribution properly, we should obtain:
        1. A uniform marginal distribution over valid discrete trees
        2. A gaussian distribution over the continuous parameters within each model
        """
        # Unlike the normal variant, each tree's continuous density is uniform
        # on [0, L] per radius, so the normalizing constant is L**N_nodes.
        L = 0.5
        N_nodes = tree.number_of_nodes
        no_duplicates = (len(set(tree.nodes)) == N_nodes)
        within_depth_limit = (max(nx.shortest_path_length(tree.G, source='*').values()) <= depth_cutoff)
        if no_duplicates and within_depth_limit:
            tree_radii = tree.get_radii()  # a length-N_nodes vector of the radii associated with nodes in the tree
            if (min(tree_radii) >= 0) and (max(tree_radii) <= L):
                return - np.log(L**N_nodes)
            else:
                return - np.inf
            #return multivariate_normal.logpdf(x=tree_radii, mean=mean_vector)
        else:
            return - np.inf

    np.random.seed(0)
    result = tree_rjmc(initial_tree, log_prob,
                       n_iterations=n_iterations,
                       fraction_cross_model_proposals=0.5,
                       )
    radii = [tree.get_radii() for tree in result['traj']]
    np.save('sampled_radii_flat.npy', radii)
    print('number of possible distinct discrete trees at each length',
          list(zip(range(len(number_of_trees_at_each_length)), number_of_trees_at_each_length)))
    number_of_possibilities = sum(number_of_trees_at_each_length)
    print('number of possibilities:', number_of_possibilities)
    print('initial tree:')
    print(initial_tree)
    traj = result['traj']
    string_representations = list(map(str, traj))
    print('number of distinct sampled models (as reflected in string representation)', len(set(string_representations)))
    for s in list(set(string_representations))[:5]:
        print(s)
    # Drop the first two (un-deletable) nodes; the remainder identifies the discrete model.
    discrete_models = [tuple(t.ordered_nodes[2:]) for t in traj]
    distinct_discrete_models = sorted(list(set(discrete_models)))
    discrete_model_index_dict = dict(zip(distinct_discrete_models, range(len(distinct_discrete_models))))
    discrete_model_traj = [discrete_model_index_dict[d] for d in discrete_models]
    np.save('discrete_model_traj_flat.npy', discrete_model_traj)
    for d in distinct_discrete_models:
        print(d)
    print("number of distinct sampled models (as reflected in choice of smirks)", len(distinct_discrete_models))
    # Thin the chain to reduce autocorrelation before the statistical test.
    thinning = 20
    lengths = np.array([len(d) for d in discrete_models[::thinning]])
    expected_length_distribution = len(lengths) * (np.array(number_of_trees_at_each_length) / np.sum(number_of_trees_at_each_length))
    actual_length_distribution = np.zeros(len(expected_length_distribution))
    for t in range(len(expected_length_distribution)):
        actual_length_distribution[t] += sum(lengths == t)
    print('expected_length_distribution', expected_length_distribution)
    print('actual_length_distribution', actual_length_distribution)
    threshold = 0.001
    from scipy.stats import chisquare
    # Uniformity over discrete trees: chi-square test on the model-length histogram.
    chi2_result = chisquare(f_obs=actual_length_distribution, f_exp=expected_length_distribution)
    print(chi2_result)
    assert (chi2_result.pvalue > threshold)
    return result
|
import random

# Pick, uniformly at random, one person from a user-supplied list to pay the bill.
print("Who is going to pay the bill?")
# Typo fix in the prompt: "coma" -> "comma".
list_name = input("Tell me the names that are going to pay the bill, separated by a comma and a space (, ) ")
names = list_name.split(", ")
# random.choice replaces the manual randint(0, len(names) - 1) indexing —
# same uniform selection, no off-by-one bookkeeping.
print(random.choice(names))
|
"""Test the CO2 Signal config flow."""
from unittest.mock import patch
import pytest
from homeassistant import config_entries
from homeassistant.components.co2signal import DOMAIN, config_flow
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
from . import VALID_PAYLOAD
async def test_form_home(hass: HomeAssistant) -> None:
    """Test we get the form."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] is None

    # Patch the external CO2Signal API and the integration's setup so the
    # flow can complete without network access.
    with patch("CO2Signal.get_latest", return_value=VALID_PAYLOAD,), patch(
        "homeassistant.components.co2signal.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "location": config_flow.TYPE_USE_HOME,
                "api_key": "api_key",
            },
        )
        await hass.async_block_till_done()

    # "Use home" location creates the entry directly with just the api_key.
    assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result2["title"] == "CO2 Signal"
    assert result2["data"] == {
        "api_key": "api_key",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_coordinates(hass: HomeAssistant) -> None:
    """Test we get the form."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] is None

    # Choosing "specify coordinates" leads to a second form asking for them.
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {
            "location": config_flow.TYPE_SPECIFY_COORDINATES,
            "api_key": "api_key",
        },
    )
    assert result2["type"] == RESULT_TYPE_FORM

    # Patch the external CO2Signal API and the integration's setup so the
    # flow can complete without network access.
    with patch("CO2Signal.get_latest", return_value=VALID_PAYLOAD,), patch(
        "homeassistant.components.co2signal.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {
                "latitude": 12.3,
                "longitude": 45.6,
            },
        )
        await hass.async_block_till_done()

    # The entry title is derived from the coordinates; data keeps all inputs.
    assert result3["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result3["title"] == "12.3, 45.6"
    assert result3["data"] == {
        "latitude": 12.3,
        "longitude": 45.6,
        "api_key": "api_key",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_country(hass: HomeAssistant) -> None:
    """Test we get the form."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] is None

    # Choosing "specify country" leads to a second form asking for the code.
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {
            "location": config_flow.TYPE_SPECIFY_COUNTRY,
            "api_key": "api_key",
        },
    )
    assert result2["type"] == RESULT_TYPE_FORM

    # Patch the external CO2Signal API and the integration's setup so the
    # flow can complete without network access.
    with patch("CO2Signal.get_latest", return_value=VALID_PAYLOAD,), patch(
        "homeassistant.components.co2signal.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {
                "country_code": "fr",
            },
        )
        await hass.async_block_till_done()

    # The entry title is the country code; data keeps code and api_key.
    assert result3["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result3["title"] == "fr"
    assert result3["data"] == {
        "country_code": "fr",
        "api_key": "api_key",
    }
    assert len(mock_setup_entry.mock_calls) == 1
# Each known error message from the CO2Signal library maps onto a specific
# form error code; anything unrecognized falls back to "unknown".
@pytest.mark.parametrize(
    "err_str,err_code",
    [
        ("Invalid authentication credentials", "invalid_auth"),
        ("API rate limit exceeded.", "api_ratelimit"),
        ("Something else", "unknown"),
    ],
)
async def test_form_error_handling(hass: HomeAssistant, err_str, err_code) -> None:
    """Test we handle expected errors."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # The library signals these failures by raising ValueError with a message.
    with patch(
        "CO2Signal.get_latest",
        side_effect=ValueError(err_str),
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "location": config_flow.TYPE_USE_HOME,
                "api_key": "api_key",
            },
        )

    # The flow re-shows the form with the mapped error code.
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": err_code}
async def test_form_error_unexpected_error(hass: HomeAssistant) -> None:
    """Test we handle unexpected error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # A non-ValueError exception from the library must not crash the flow.
    with patch(
        "CO2Signal.get_latest",
        side_effect=Exception("Boom"),
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "location": config_flow.TYPE_USE_HOME,
                "api_key": "api_key",
            },
        )

    # The flow re-shows the form with the generic "unknown" error.
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "unknown"}
async def test_form_error_unexpected_data(hass: HomeAssistant) -> None:
    """Test we handle unexpected data."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # The API returning a payload without the expected fields must not crash
    # the flow either.
    with patch(
        "CO2Signal.get_latest",
        return_value={"status": "error"},
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "location": config_flow.TYPE_USE_HOME,
                "api_key": "api_key",
            },
        )

    # The flow re-shows the form with the generic "unknown" error.
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "unknown"}
|
import os
from flask import Flask, request, session
app = Flask(__name__)


@app.before_request
def set_session():
    """Record the serving pod's name in the session before every request."""
    session["pod"] = os.environ.get("POD_NAME", "default")


@app.route("/")
def index():
    """Report which pod handled this request."""
    return "Pod Id: {}".format(session["pod"])


if __name__ == "__main__":
    # Security: sessions are signed with this key.  Read it from the
    # environment when available; the hard-coded fallback preserves the
    # original behaviour for local/demo use but should not ship to production.
    app.secret_key = os.environ.get("FLASK_SECRET_KEY", "session_key")
    app.run(host="0.0.0.0", port=80)
|
import click
from pycobertura.cobertura import Cobertura
from pycobertura.reporters import (
HtmlReporter,
TextReporter,
HtmlReporterDelta,
TextReporterDelta,
)
from pycobertura.filesystem import filesystem_factory
from pycobertura.utils import get_dir_from_file_path
# Root click group that the `show` and `diff` commands attach to.
pycobertura = click.Group()

# Reporters available to the `show` command, keyed by the --format value.
reporters = {
    "html": HtmlReporter,
    "text": TextReporter,
}
class ExitCodes:
    """Process exit codes returned by the `diff` command (see get_exit_code)."""
    OK = 0                       # coverage held up (and all changes covered, if checked)
    EXCEPTION = 1                # an unexpected error occurred
    COVERAGE_WORSENED = 2        # overall coverage decreased
    NOT_ALL_CHANGES_COVERED = 3  # coverage held, but some changed lines are uncovered
def get_exit_code(differ, source):
    """Map a coverage diff onto a process exit code.

    Worsened coverage is reported first.  The stricter all-changes-covered
    check applies only when source code is available (enabled via --source),
    since it cannot be computed otherwise.
    """
    if not differ.has_better_coverage():
        return ExitCodes.COVERAGE_WORSENED
    if source and not differ.has_all_changes_covered():
        return ExitCodes.NOT_ALL_CHANGES_COVERED
    return ExitCodes.OK
@pycobertura.command()
@click.argument("cobertura_file")
@click.option("-f", "--format", default="text", type=click.Choice(list(reporters)))
@click.option(
    "-o",
    "--output",
    metavar="<file>",
    type=click.File("wb"),
    help="Write output to <file> instead of stdout.",
)
@click.option(
    "-s",
    "--source",
    metavar="<source-dir-or-zip>",
    help="Provide path to source code directory for HTML output. The path can "
    "also be a zip archive instead of a directory.",
)
@click.option(
    "-p",
    "--source-prefix",
    metavar="<dir-prefix>",
    help="For every file found in the coverage report, it will use this "
    "prefix to lookup files on disk. This is especially useful when "
    "the --source is a zip archive and the files were zipped under "
    "a directory prefix that is not part of the source.",
)
def show(cobertura_file, format, output, source, source_prefix):
    """show coverage summary of a Cobertura report"""
    # Default the source location to the directory containing the report.
    if not source:
        source = get_dir_from_file_path(cobertura_file)
    cobertura = Cobertura(
        cobertura_file,
        filesystem=filesystem_factory(source, source_prefix=source_prefix),
    )
    Reporter = reporters[format]
    reporter = Reporter(cobertura)
    report = reporter.generate()
    # The output handle is opened in binary mode, so encode text reports.
    if not isinstance(report, bytes):
        report = report.encode("utf-8")
    # Append a trailing newline only when writing to a terminal.
    isatty = True if output is None else output.isatty()
    click.echo(report, file=output, nl=isatty)
# Delta reporters available to the `diff` command, keyed by the --format value.
delta_reporters = {
    "text": TextReporterDelta,
    "html": HtmlReporterDelta,
}
@pycobertura.command(
    help="""\
The diff command compares and shows the changes between two Cobertura reports.
NOTE: Reporting missing lines or showing the source code with the diff command
can only be accurately computed if the versions of the source code used to
generate each of the coverage reports is accessible. By default, the source
will read from the Cobertura report and resolved relatively from the report's
location. If the source is not accessible from the report's location, the
options `--source1` and `--source2` are necessary to point to the source code
directories (or zip archives). If the source is not available at all, pass
`--no-source` but missing lines and source code will not be reported.
"""
)
@click.argument("cobertura_file1")
@click.argument("cobertura_file2")
@click.option(
    "--color/--no-color",
    default=None,
    help="Colorize the output. By default, pycobertura emits color codes only "
    "when standard output is connected to a terminal. This has no effect "
    "with the HTML output format.",
)
@click.option(
    "-f", "--format", default="text", type=click.Choice(list(delta_reporters))
)
@click.option(
    "-o",
    "--output",
    metavar="<file>",
    type=click.File("wb"),
    help="Write output to <file> instead of stdout.",
)
@click.option(
    "-s1",
    "--source1",
    metavar="<source-dir1-or-zip-archive>",
    help="Provide path to source code directory or zip archive of first "
    "Cobertura report. This is necessary if the filename path defined "
    "in the report is not accessible from the location of the report.",
)
@click.option(
    "-s2",
    "--source2",
    metavar="<source-dir2-or-zip-archive>",
    help="Like --source1 but for the second coverage report of the diff.",
)
@click.option(
    "-p1",
    "--source-prefix1",
    metavar="<dir-prefix1>",
    help="For every file found in the coverage report, it will use this "
    "prefix to lookup files on disk. This is especially useful when "
    "the --source1 is a zip archive and the files were zipped under "
    "a directory prefix that is not part of the source",
)
@click.option(
    "-p2",
    "--source-prefix2",
    metavar="<dir-prefix2>",
    help="Like --source-prefix1, but for applies for --source2.",
)
@click.option(
    "--source/--no-source",
    default=True,
    help="Show missing lines and source code. When enabled (default), this "
    "option requires access to the source code that was used to generate "
    "both Cobertura reports (see --source1 and --source2). When "
    "`--no-source` is passed, missing lines and the source code will "
    "not be displayed.",
)
def diff(
    cobertura_file1,
    cobertura_file2,
    color,
    format,
    output,
    source1,
    source2,
    source_prefix1,
    source_prefix2,
    source,
):
    """compare coverage of two Cobertura reports"""
    # Assume that the source is located in the same directory as the provided
    # coverage files if no source directories are provided.
    if not source1:
        source1 = get_dir_from_file_path(cobertura_file1)
    if not source2:
        source2 = get_dir_from_file_path(cobertura_file2)
    filesystem1 = filesystem_factory(source1, source_prefix=source_prefix1)
    cobertura1 = Cobertura(cobertura_file1, filesystem=filesystem1)
    filesystem2 = filesystem_factory(source2, source_prefix=source_prefix2)
    cobertura2 = Cobertura(cobertura_file2, filesystem=filesystem2)
    Reporter = delta_reporters[format]
    reporter_args = [cobertura1, cobertura2]
    reporter_kwargs = {"show_source": source}
    # Auto-colorize text output only when stdout is a terminal, unless the
    # user forced --color/--no-color explicitly.
    isatty = True if output is None else output.isatty()
    if format == "text":
        color = isatty if color is None else color is True
        reporter_kwargs["color"] = color
    reporter = Reporter(*reporter_args, **reporter_kwargs)
    report = reporter.generate()
    # The output handle is opened in binary mode, so encode text reports.
    if not isinstance(report, bytes):
        report = report.encode("utf-8")
    click.echo(report, file=output, nl=isatty, color=color)
    # Exit non-zero when coverage worsened or (with source) changes are uncovered.
    exit_code = get_exit_code(reporter.differ, source)
    raise SystemExit(exit_code)
|
# =============================
# tomato-clock.py
# this is a simple application to arrange your time more properly
# author @NearlyHeadlessJack (https://github.com/NearlyHeadlessJack)
# copyright (c) 2022 N.H.J.
# =============================
# import=======================
from pyecharts.charts import Bar
import time
import os
import subprocess
from copy import copy
import datetime
from pydub import AudioSegment
from pydub.playback import play
import xlrd
from xlrd import xldate_as_tuple
import xlwt
from xlutils.copy import copy
import json
from pathlib import Path
# =============================
# variables====================
localDate = time.strftime("%Y-%m-%d", time.localtime())
curTime = time.localtime(time.time())      # current time
beginTime = time.localtime(time.time())    # start time of the running clock
global numClocks, history
numClocks = 0   # number of completed tomato clocks today
skipTimes = 0   # number of breaks skipped (their minutes accumulate)
cur = 1         # main-loop flag; never cleared, so the app runs until interrupted
# NOTE(review): t1/t2 are computed but appear unused in the visible code.
t1 = time.mktime(curTime)
t2 = time.mktime(beginTime)
song = AudioSegment.from_wav("sound.wav")  # chime played with each notification
path_json = r'/Users/jack/Desktop/repo/Time_Table/data.json'  # per-day history store
history = {localDate: 0}  # maps "YYYY-MM-DD" -> completed clock count
# =============================
# macOS notification===========
def show_notification(title, text):
    """Pop a macOS notification via AppleScript and play the chime."""
    script = 'display notification "{}" with title "{}"'.format(text, title)
    subprocess.call(["osascript", "-e", script])
    play(song)
# =============================
def ReadJson():
    """Load today's tomato count from the JSON history file, creating the
    file with a zeroed entry for today if it does not exist yet."""
    global numClocks, history
    init_data = {localDate: 0}
    # Seed the history file on first run.
    if not Path(path_json).exists():
        with open(path_json, "w+") as f:
            json.dump(init_data, f, indent=4)
    with open(path_json, "r") as f:
        history = json.load(f)
    # Make sure today has an entry, then restore today's count.
    history.setdefault(localDate, 0)
    numClocks = int(history[localDate])
# # =============================
def WriteJson():
    """Increment today's tomato count and persist it.

    Side effects: rewrites the JSON history file, regenerates the chart via
    Visualization(), runs the repo's ./git.sh publish script, clears screen.
    """
    global history
    history[localDate] += 1
    with open(path_json, "w+") as f:
        json.dump(history, f,indent=4)
    Visualization()
    os.system("./git.sh")
    os.system('clear') # macOS
# =============================
# visualize====================
def Visualization():
    """Render per-day study hours (25 min per clock) to render.html and copy it into the site repo."""
    global history
    chart = Bar()
    chart.add_xaxis(list(history.keys()))
    hours = [round(count * 25 / 60.0, 1) for count in history.values()]
    chart.add_yaxis("学习时长(小时)", hours)
    chart.render()
    os.system("cp render.html /Users/jack/Desktop/repo/Time_Table/docs/index.html")
# =============================
ReadJson()
os.system('clear') # macOS
# Countdown to exam day; NOTE(review): the meaning of day-of-year 357 in
# "357 - tm_yday + 1" is assumed (exam date) — confirm.
out1 = int(357 -curTime.tm_yday+1)
out2 = int((357 -curTime.tm_yday+1)/7)
print("Only "+ str(out1) +" days or "+str(out2)+" weeks left before UNGEE!")
print("\nYou've been learning "+str(numClocks*25)+" minutes today!\n")
time.sleep(3.5)
os.system('clear') # macOS
print ("This is tomato clock, enjoy studying!\n")
# Main loop: each pass is one 25-minute pomodoro followed by an optional break.
while cur:
    if input("If you are ready to study, please press enter!\n") == '':
        numClocks = numClocks + 1
        i = datetime.datetime.now()
        timeshift = i + datetime.timedelta(minutes=25)
        show_notification("Study Begins!", "The No." + str(numClocks) + " clock starts.\n")
        beginTime = time.localtime(time.time()) # record the clock's start time
        tB = time.mktime(beginTime)
        tC = time.mktime(curTime)
        # 25-minute countdown, redrawn roughly once per second.
        while tC-tB <= 25 * 60 -4:
            nowTime = datetime.datetime.now()
            curTime = time.localtime(time.time())
            tC = time.mktime(curTime)
            diff = time.gmtime(tC - tB)
            os.system('clear') # macOS
            print("现在是%s:%s"%(nowTime.hour,nowTime.minute))
            print('This is the No.'+ str(numClocks) +' clock!')
            print("番茄钟将在%s:%s结束\n"%(timeshift.hour,timeshift.minute))
            print('{0} mins {1} secs remaining!'.format(24-diff.tm_min,59-diff.tm_sec))
            time.sleep(0.98)
        os.system('clear')
        show_notification("Congratulations!", "The No." + str(numClocks) + " clock is done.")
        print('Congratulations! The No.'+ str(numClocks) + ' clock is done.')
        WriteJson()
        time.sleep(3)
        print('You can have a rest for '+str(skipTimes * 5 + 5 )+' minutes!\n')
        # Break phase: skipped breaks accumulate 5 extra minutes each.
        if input("Start resting, please press enter!\n\
Skip rest (which would be accumulated), please enter the other:\n") == '':
            beginTime = time.localtime(time.time())
            tB = time.mktime(beginTime)
            tC = time.mktime(curTime)
            breakTime = skipTimes * 5 * 60 + 5 * 60 # accumulate break time
            show_notification("Have a Break! ", "You can rest " + str(skipTimes * 5 + 5) + " minutes.")
            while tC - tB <= breakTime - 4 :
                curTime = time.localtime(time.time())
                tC = time.mktime(curTime)
                diff = time.gmtime(tC - tB)
                os.system('clear') # macOS
                print('Rest time!')
                print('{0} mins {1} secs remaining!'.format(skipTimes * 5 + 5 - 1 - diff.tm_min, 59 - diff.tm_sec))
                time.sleep(0.98)
            os.system('clear') # macOS
            show_notification("Let's Study!", "Break time is over!")
            print('\n\nBreak time is done! Clock will re-startup in 4 secs.')
            time.sleep(4)
            skipTimes = 0
            os.system('clear') # macOS
        else:
            skipTimes += 1
    else:
        cur = 0
|
from .imports import *
def to_onehot(y, num_classes=None):
    """Converts label indices to one-hot encoded vectors.

    Parameters
    ----------
    y: list or numpy array
        Contains the labels as integers from 0 to num_classes.
    num_classes: int, optional
        The total number of classes. Inferred as max(y) + 1 when omitted.

    Returns
    -------
    Numpy array of shape (len(y), num_classes) with one-hot rows.
    """
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement and is equivalent here.
    y = np.array(y, dtype=int)
    if not num_classes:
        num_classes = np.max(y) + 1
    num_examples = y.shape[0]
    onehot = np.zeros((num_examples, num_classes))
    onehot[np.arange(num_examples), y] = 1.
    return onehot
def from_onehot(y):
    """Converts one-hot encoded probabilities back to label indices.

    Parameters
    ----------
    y: Tensor or Variable of shape (N, num_classes)

    Returns
    -------
    Tensor or Variable holding the per-row index of the maximal entry.
    """
    return torch.max(y, 1).indices
def label2class(y, class_names):
    """Converts label indices to class names.

    Parameters
    ----------
    y: Tensor, Variable, or numpy array of integer labels
    class_names: list

    Returns
    -------
    List with the class name for each prediction.
    """
    return [class_names[idx] for idx in y]
def data_loader_sample_count(data_loader, max_steps=None):
    """Number of examples *data_loader* will yield in one full pass.

    Honors ``drop_last`` (incomplete final batch discarded) and an optional
    ``max_steps`` cap expressed in batches.
    """
    if isinstance(data_loader, torch.utils.data.DataLoader):
        count = len(data_loader.sampler)
    elif hasattr(data_loader, "num_examples"):
        count = data_loader.num_examples()
    else:
        count = len(data_loader)
    if hasattr(data_loader, "batch_size"):
        # The loader drops the trailing partial batch itself.
        if getattr(data_loader, "drop_last", False):
            count = (count // data_loader.batch_size) * data_loader.batch_size
        # Cap at the number of examples max_steps batches can cover.
        if max_steps:
            count = min(count, max_steps * data_loader.batch_size)
    return count
class SingleTensorDataset(Dataset):
    """Dataset over a single tensor of inputs, with no target tensor.

    Indexing returns a view into the wrapped tensor; nothing is copied.
    """
    def __init__(self, data_tensor):
        self.data_tensor = data_tensor

    def __getitem__(self, index):
        return self.data_tensor[index]

    def __len__(self):
        return self.data_tensor.size(0)
|
import torch
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_curve
from openselfsup.utils import print_log
from .registry import DATASETS
from .base import BaseDataset
from .utils import to_numpy
@DATASETS.register_module
class MultiLabelClassificationDataset(BaseDataset):
    """Dataset for multi-label classification.

    Each sample carries a list of class indices; targets are returned as
    multi-hot float vectors of length ``data_source.total_classes``.
    """
    def __init__(self, data_source, pipeline, prefetch=False):
        super(MultiLabelClassificationDataset, self).__init__(data_source, pipeline, prefetch)
    def __getitem__(self, idx):
        # Multi-hot encode the raw label list before running the image pipeline.
        img, target = self.data_source.get_sample(idx)
        target = self.hot_encode(target)
        img = self.pipeline(img)
        if self.prefetch:
            img = torch.from_numpy(to_numpy(img))
        return dict(img=img, gt_label=target)
    def evaluate(self, scores, keyword, logger=None):
        """Compute per-class and micro-averaged average precision.

        ``scores`` is assumed to be an (N, n_classes) tensor of per-class
        confidences — TODO confirm against the caller.  Returns a dict with
        one AP per class index plus 'mAP' (micro-averaged AP, in percent).
        """
        eval_res = {}
        target = self.get_labels()
        assert scores.size(0) == target.size(0), \
            "Inconsistent length for results and labels, {} vs {}".format(
                scores.size(0), target.size(0))
        n_classes = scores.size(1)
        target = target.cpu().detach().numpy()
        scores = scores.cpu().detach().numpy()
        # For each class
        precision = dict()
        recall = dict()
        average_precision = dict()
        for i in range(n_classes):
            precision[i], recall[i], _ = precision_recall_curve(target[:, i],
                                                                scores[:, i])
            average_precision[i] = average_precision_score(target[:, i], scores[:, i])
        # A "micro-average": quantifying score on all classes jointly
        precision["micro"], recall["micro"], _ = precision_recall_curve(target.ravel(),
                                                                        scores.ravel())
        average_precision["mAP"] = average_precision_score(target, scores,
                                                           average="micro")*100
        if logger is not None and logger != 'silent':
            print_log('Average precision score, '
                      'micro-averaged over all classes: {0:0.2f}'.format(average_precision["mAP"]),
                      logger=logger)
        return average_precision
    def get_labels(self):
        """Multi-hot encode every label list in the data source into one (N, C) tensor."""
        labels = self.data_source.labels
        #empty tensor to store labels with hot encoding
        tensor_labels = torch.zeros(len(labels),self.data_source.total_classes)
        # NOTE(review): .long() returns a converted copy whose result is
        # discarded, so this line is a no-op and tensor_labels stays float.
        tensor_labels.long()
        for idx in range(len(labels)):
            tensor_labels[idx,:] = self.hot_encode(labels[idx])
        return tensor_labels
    def hot_encode(self,label):
        """Turn an iterable of class indices into a float multi-hot vector."""
        label_tensor = torch.zeros(self.data_source.total_classes)
        idx = [int(n) for n in label]
        label_tensor[idx] =1
        return label_tensor
import sys
from typing_extensions import Final
#: True when the interpreter is Python 3.8 or newer; computed once at import
#: time so callers can cheaply gate 3.8-only behavior on it.
PY38: Final = sys.version_info >= (3, 8)
|
def generate_matlab_files(target_dataset, source_datasets, save_path, file_name):
    """Bundle one target dataset and several source datasets into a .mat file.

    Parameters
    ----------
    target_dataset : dict
        Single-entry mapping {name: (data, label, subject_id, label_name_map)}.
    source_datasets : dict
        Mapping of source name -> (data, label, subject_id, label_name_map).
    save_path : str
        Output directory; created if missing.
    file_name : str
        Prefix for the generated '<file_name>_transfer_learning.mat'.

    Side effects: writes the .mat bundle plus a JSON record
    ('target_source_data_record.json') of which datasets were used.
    """
    # Local imports keep the function self-contained (the original relied on
    # module-level `os`/`join` that are not visible in this file chunk).
    import json
    import os
    from os.path import join
    from scipy.io import savemat

    target_dataset_name = list(target_dataset.keys())[0]
    target_dataset_data = target_dataset[target_dataset_name]

    source_list = []
    for source_dataset_name, source_dataset_data in source_datasets.items():
        source_list.append({
            "source_domain_data": source_dataset_data[0],
            "source_domain_label": source_dataset_data[1],
            "source_label_name_map": source_dataset_data[3],
            "dataset_name": source_dataset_name,
            "subject_id": source_dataset_data[2],
        })

    matlab_data = {
        "source_domain": source_list,
        "target_domain": {
            "target_domain_data": target_dataset_data[0],
            "target_domain_label": target_dataset_data[1],
            "target_label_name_map": target_dataset_data[3],
            "dataset_name": target_dataset_name,
            "subject_id": target_dataset_data[2],
        },
    }

    if not os.path.isdir(save_path):
        os.makedirs(save_path)

    data_file = join(save_path, '{}_transfer_learning.mat'.format(file_name))
    # Record which datasets went into the bundle.
    # Bug fix: the record key was misspelled 'target_dataet'.
    record = {'target_dataset': target_dataset_name,
              'source_datasets': list(source_datasets.keys())}
    with open(join(save_path, 'target_source_data_record.json'), "w") as outfile:
        json.dump(record, outfile)

    savemat(data_file, matlab_data)
|
# Parallel lists describing 34 notable Atlantic hurricanes; index i of every
# list refers to the same storm.
# names of hurricanes
names = ['Cuba I', 'San Felipe II Okeechobee', 'Bahamas', 'Cuba II', 'CubaBrownsville', 'Tampico', 'Labor Day', 'New England', 'Carol', 'Janet', 'Carla', 'Hattie', 'Beulah', 'Camille', 'Edith', 'Anita', 'David', 'Allen', 'Gilbert', 'Hugo', 'Andrew', 'Mitch', 'Isabel', 'Ivan', 'Emily', 'Katrina', 'Rita', 'Wilma', 'Dean', 'Felix', 'Matthew', 'Irma', 'Maria', 'Michael']
# months of hurricanes
months = ['October', 'September', 'September', 'November', 'August', 'September', 'September', 'September', 'September', 'September', 'September', 'October', 'September', 'August', 'September', 'September', 'August', 'August', 'September', 'September', 'August', 'October', 'September', 'September', 'July', 'August', 'September', 'October', 'August', 'September', 'October', 'September', 'September', 'October']
# years of hurricanes
years = [1924, 1928, 1932, 1932, 1933, 1933, 1935, 1938, 1953, 1955, 1961, 1961, 1967, 1969, 1971, 1977, 1979, 1980, 1988, 1989, 1992, 1998, 2003, 2004, 2005, 2005, 2005, 2005, 2007, 2007, 2016, 2017, 2017, 2018]
# maximum sustained winds (mph) of hurricanes
max_sustained_winds = [165, 160, 160, 175, 160, 160, 185, 160, 160, 175, 175, 160, 160, 175, 160, 175, 175, 190, 185, 160, 175, 180, 165, 165, 160, 175, 180, 185, 175, 175, 165, 180, 175, 160]
# areas affected by each hurricane
areas_affected = [['Central America', 'Mexico', 'Cuba', 'Florida', 'The Bahamas'], ['Lesser Antilles', 'The Bahamas', 'United States East Coast', 'Atlantic Canada'], ['The Bahamas', 'Northeastern United States'], ['Lesser Antilles', 'Jamaica', 'Cayman Islands', 'Cuba', 'The Bahamas', 'Bermuda'], ['The Bahamas', 'Cuba', 'Florida', 'Texas', 'Tamaulipas'], ['Jamaica', 'Yucatn Peninsula'], ['The Bahamas', 'Florida', 'Georgia', 'The Carolinas', 'Virginia'], ['Southeastern United States', 'Northeastern United States', 'Southwestern Quebec'], ['Bermuda', 'New England', 'Atlantic Canada'], ['Lesser Antilles', 'Central America'], ['Texas', 'Louisiana', 'Midwestern United States'], ['Central America'], ['The Caribbean', 'Mexico', 'Texas'], ['Cuba', 'United States Gulf Coast'], ['The Caribbean', 'Central America', 'Mexico', 'United States Gulf Coast'], ['Mexico'], ['The Caribbean', 'United States East coast'], ['The Caribbean', 'Yucatn Peninsula', 'Mexico', 'South Texas'], ['Jamaica', 'Venezuela', 'Central America', 'Hispaniola', 'Mexico'], ['The Caribbean', 'United States East Coast'], ['The Bahamas', 'Florida', 'United States Gulf Coast'], ['Central America', 'Yucatn Peninsula', 'South Florida'], ['Greater Antilles', 'Bahamas', 'Eastern United States', 'Ontario'], ['The Caribbean', 'Venezuela', 'United States Gulf Coast'], ['Windward Islands', 'Jamaica', 'Mexico', 'Texas'], ['Bahamas', 'United States Gulf Coast'], ['Cuba', 'United States Gulf Coast'], ['Greater Antilles', 'Central America', 'Florida'], ['The Caribbean', 'Central America'], ['Nicaragua', 'Honduras'], ['Antilles', 'Venezuela', 'Colombia', 'United States East Coast', 'Atlantic Canada'], ['Cape Verde', 'The Caribbean', 'British Virgin Islands', 'U.S. Virgin Islands', 'Cuba', 'Florida'], ['Lesser Antilles', 'Virgin Islands', 'Puerto Rico', 'Dominican Republic', 'Turks and Caicos Islands'], ['Central America', 'United States Gulf Coast (especially Florida Panhandle)']]
# damages (USD($)) of hurricanes; 'Damages not recorded' is a sentinel value
damages = ['Damages not recorded', '100M', 'Damages not recorded', '40M', '27.9M', '5M', 'Damages not recorded', '306M', '2M', '65.8M', '326M', '60.3M', '208M', '1.42B', '25.4M', 'Damages not recorded', '1.54B', '1.24B', '7.1B', '10B', '26.5B', '6.2B', '5.37B', '23.3B', '1.01B', '125B', '12B', '29.4B', '1.76B', '720M', '15.1B', '64.8B', '91.6B', '25.1B']
# deaths for each hurricane
deaths = [90,4000,16,3103,179,184,408,682,5,1023,43,319,688,259,37,11,2068,269,318,107,65,19325,51,124,17,1836,125,87,45,133,603,138,3057,74]
# 1
# Update Recorded Damages
# Suffix multipliers used when converting damage strings such as '100M'.
conversion = {"M": 1000000,
              "B": 1000000000}
def convert_values(item):
    """Convert a damage string like '100M' or '1.42B' to an integer USD amount.

    The 'Damages not recorded' sentinel is passed through unchanged; any
    other unrecognized suffix yields None (implicit return), as before.
    """
    if item == "Damages not recorded":
        return "Damages not recorded"
    suffix = item[-1]
    if suffix == 'M':
        return int(float(item[:-1]) * 1000000)
    if suffix == 'B':
        return int(float(item[:-1]) * 1000000000)
# test function by updating damages: replace the raw strings in-place.
damages = list(map(convert_values, damages))
print(damages)
print("*****************************************************")
# 2
# Create a Table: one record per hurricane, keyed by its name.
hurricane_dictionary = {}
for index in range(len(names)):
    hurricane_dictionary[names[index]] = {'Name': names[index], 'Month':months[index], 'Year': years[index], 'Max Sustained Wind': max_sustained_winds[index], 'Areas Affected': areas_affected[index], 'Damage': damages[index], 'Deaths': deaths[index]}
print(hurricane_dictionary)
print("*****************************************************")
# Create and view the hurricanes dictionary
# NOTE(review): new_dictionary is never used afterwards.
new_dictionary = {}
def organize_by_year(hurricanes):
    """Group hurricane records into a dict keyed by year.

    Returns {year: [record, ...]} preserving each record's original order.
    """
    grouped = dict()
    for record in hurricanes.values():
        grouped.setdefault(record['Year'], []).append(record)
    return grouped
hurricanes_by_year = organize_by_year(hurricane_dictionary)
# NOTE(review): organize_by_year is computed twice; this print could simply
# reuse hurricanes_by_year.
print(organize_by_year(hurricane_dictionary))
# 3
# Organizing by Year
# create a new dictionary of hurricanes with year and key
print("*****************************************************")
# 4
# Counting Damaged Areas
def organize_areas_by_count(hurricanes):
    """Count, per affected area, how many hurricanes touched it.

    Returns {area_name: hit_count}.
    """
    counts = dict()
    for record in hurricanes.values():
        for area in record['Areas Affected']:
            counts[area] = counts.get(area, 0) + 1
    return counts
# Show how many hurricanes hit each area.
print(organize_areas_by_count(hurricane_dictionary))
# create dictionary of areas to store the number of hurricanes involved in
# 5
# Calculating Maximum Hurricane Count
def find_most_affected(hurricanes):
    """Return (area, count) for the area hit by the most hurricanes.

    Bug fix: the previous version returned the *first* entry of the counts
    dict (i.e. whichever area happened to be inserted first), not the one
    with the maximum count.  The counting is done inline so the function is
    self-contained; it mirrors organize_areas_by_count().
    """
    counts = {}
    for record in hurricanes.values():
        for area in record['Areas Affected']:
            counts[area] = counts.get(area, 0) + 1
    return max(counts.items(), key=lambda item: item[1])
# find most frequently affected area and the number of hurricanes involved in
print(find_most_affected(hurricane_dictionary))
print("*****************************************************")
# 6
# Calculating the Deadliest Hurricane
# NOTE(review): this re-prints the whole table; likely leftover debug output.
print(hurricane_dictionary)
def find_most_deaths(hurricanes):
    """Return {name: deaths} for the hurricane with the highest death toll.

    Records missing a 'Deaths' entry count as 0, as before.  Returns the
    original sentinel {'Deaths': 0} when no record has a positive toll.
    The old implementation threaded the running maximum through
    `int(list(highest_death.values())[0])`, which was fragile; this tracks
    the maximum in plain locals with identical results.
    """
    top_name = None
    top_deaths = 0
    for record in hurricanes.values():
        deaths = record.get('Deaths', 0)
        if deaths > top_deaths:
            top_name, top_deaths = record['Name'], deaths
    if top_name is None:
        return {'Deaths': 0}
    return {top_name: top_deaths}
# find highest mortality hurricane and the number of deaths
print("The deadliest hurricane and the number of deaths: " + str(find_most_deaths(hurricane_dictionary)))
# 7
# Rating Hurricanes by Mortality
print("*****************************************************")
def rate_by_mortality(hurricanes):
    """Bucket hurricane records by death toll.

    Buckets: 0 -> 0 deaths, 1 -> 1-100, 2 -> 101-500, 3 -> 501-1000,
    4 -> more than 1000.

    Bug fix: the old elif chain ended at `<= 10000`, so storms with more
    than 10,000 deaths (e.g. Mitch, 19,325) were silently dropped from
    every bucket.  The final branch is now a catch-all else.
    """
    rated = {0: [], 1: [], 2: [], 3: [], 4: []}
    for record in hurricanes.values():
        deaths = record['Deaths']
        if deaths == 0:
            rated[0].append(record)
        elif deaths <= 100:
            rated[1].append(record)
        elif deaths <= 500:
            rated[2].append(record)
        elif deaths <= 1000:
            rated[3].append(record)
        else:
            rated[4].append(record)
    return rated
# Show the mortality buckets.
print(rate_by_mortality(hurricane_dictionary))
# categorize hurricanes in new dictionary with mortality severity as key
print("*****************************************************")
# 8 Calculating Hurricane Maximum Damage
def find_most_damage(hurricanes):
    """Return {name: damage} for the costliest hurricane with recorded damages.

    Records whose damage is the 'Damages not recorded' sentinel are skipped.
    Returns the original sentinel {'Damage': 0} when no record has positive
    recorded damage.  The old version reused the misleading local name
    `highest_death` and threaded the maximum through
    `int(list(...)[0])`; this tracks it in plain locals, same results.
    """
    top_name = None
    top_damage = 0
    for record in hurricanes.values():
        damage = record['Damage']
        if damage == "Damages not recorded":
            continue
        if damage > top_damage:
            top_name, top_damage = record['Name'], damage
    if top_name is None:
        return {'Damage': 0}
    return {top_name: top_damage}
# find highest damage inducing hurricane and its total cost
print("The most damaging hurricane and its damages: " + str(find_most_damage(hurricane_dictionary)))
# 9
# Rating Hurricanes by Damage
# NOTE(review): damage_scale documents the bucket thresholds but is never
# read; rate_by_damages hard-codes the same numbers.
damage_scale = {0: 0,
                1: 100000000,
                2: 1000000000,
                3: 10000000000,
                4: 50000000000}
print("*****************************************************")
def rate_by_damages(hurricanes):
    """Bucket hurricane records by recorded damage cost.

    Buckets: 0 -> unrecorded or $0, 1 -> <= $100M, 2 -> <= $1B,
    3 -> <= $10B, 4 -> anything larger.

    Bug fix: the old elif chain ended at `<= 50000000000`, so storms
    costing more than $50B (Katrina $125B, Maria $91.6B, Irma $64.8B)
    were silently dropped from every bucket.  The final branch is now a
    catch-all else.
    """
    rated = {0: [], 1: [], 2: [], 3: [], 4: []}
    for record in hurricanes.values():
        damage = record['Damage']
        if damage == 'Damages not recorded' or damage == 0:
            rated[0].append(record)
        elif damage <= 100000000:
            rated[1].append(record)
        elif damage <= 1000000000:
            rated[2].append(record)
        elif damage <= 10000000000:
            rated[3].append(record)
        else:
            rated[4].append(record)
    return rated
# Show the damage buckets.
print(rate_by_damages(hurricane_dictionary))
# categorize hurricanes in new dictionary with damage severity as key
|
#!/usr/bin/env python3
"""
Day 7: Treachery of Whales
https://adventofcode.com/2021/day/7
"""
import math
from functools import partial
from typing import Callable, Union
# Type aliases shared by the functions below.
Data = list[int]  # sorted crab positions parsed from the puzzle input
Number = Union[int, float]
gr = (math.sqrt(5) + 1) / 2  # The golden ratio, used by the golden-section search
def parse_file(path: str) -> "Data":
    """Read one comma-separated line of ints from *path*, sorted ascending."""
    with open(path) as handle:
        first_line = handle.read().splitlines()[0]
    return sorted(int(field) for field in first_line.split(","))
def gss(f: Callable, a: "Number", b: "Number", lim: "Number") -> "Number":
    """
    Golden-section search.

    Narrow the interval [a, b] around an extremum of *f* until it is
    shorter than *lim*, then return the interval midpoint.
    https://en.wikipedia.org/wiki/Golden-section_search
    """
    phi = (math.sqrt(5) + 1) / 2  # golden ratio, inlined from the module constant
    lo, hi = a, b
    while abs(hi - lo) > lim:
        probe_lo = hi - (hi - lo) / phi
        probe_hi = lo + (hi - lo) / phi
        if f(probe_lo) < f(probe_hi):
            hi = probe_hi
        else:
            lo = probe_lo
    return (hi + lo) / 2
def linear_cost(point: "Number", offset: "Number") -> "Number":
    """Fuel for part 1: one unit per step of distance."""
    return abs(point - offset)
def exponential_cost(point: "Number", offset: "Number") -> "Number":
    """Fuel for part 2: 1 + 2 + ... + n over n steps (triangular number)."""
    distance = abs(point - offset)
    return (distance ** 2 + distance) / 2
def calculate_cost(func: Callable, data: "Data", pos: "Number") -> int:
    """
    Total (rounded) cost of moving every crab in *data* to *pos*,
    where *func* prices a single crab's move.
    """
    total = sum(func(crab, pos) for crab in data)
    return round(total)
def solve(func, data: "Data") -> int:
    """Minimise total fuel under cost function *func* over the crab positions."""
    # gss() wants a single-argument objective, so bind func and data here.
    objective = partial(calculate_cost, func, data)
    # lim=1 suffices: we only need the nearest integer, not the true minimum.
    best = gss(objective, a=min(data), b=max(data), lim=1)
    # Round to the nearest integer position per the task's definition.
    return calculate_cost(func, data, round(best))
def part_1(data: "Data"):
    """
    Part 1: find the alignment position minimising total fuel when every
    step costs one unit, and return that minimum fuel.
    """
    return solve(linear_cost, data)
def part_2(data: "Data"):
    """
    Part 2: same search, but each successive step costs one more unit
    than the last (triangular cost).
    """
    return solve(exponential_cost, data)
def main():
    """Parse the example input and print both answers."""
    positions = parse_file("inputs/example_07.txt")
    print("Part 1", part_1(positions))
    print("Part 2", part_2(positions))
if __name__ == "__main__":
    main()
|
from datetime import datetime
class CustomMiddleware(object):
    """Django middleware that stamps each request with the wall-clock time.

    Templates (and views) can then read ``request.current_time``.
    """
    def __init__(self, get_response):
        # get_response is the next callable in the middleware chain;
        # this runs once at server start-up.
        self.get_response = get_response
    def __call__(self, request):
        # Runs for every request, before the view and later middleware.
        request.current_time = datetime.now()
        return self.get_response(request)
|
from __future__ import division
#this module tests that sympy works with true division turned on
from sympy import Rational, Symbol, Float
def test_truediv():
    """With true division active, both int/int and Rational/int are nonzero."""
    assert 0 != 1/2
    assert 0 != Rational(1)/2
def dotest(s):
    """Apply binary callback *s* to every ordered pair from a mixed value bag.

    The bag mixes sympy objects (Rational, Float, symbols, a compound
    expression) with native int and float; always returns True so callers
    can simply assert on it.
    """
    x = Symbol("x")
    y = Symbol("y")
    values = [
        Rational(2),
        Float("1.3"),
        x,
        y,
        pow(x, y) * y,
        5,
        5.5,
    ]
    for left in values:
        for right in values:
            s(left, right)
    return True
def test_basic():
    """Smoke-test unary and binary arithmetic across every value pair."""
    def s(a, b):
        # Each statement only needs to evaluate without raising.
        r = a
        r = +a
        r = -a
        r = a + b
        r = a - b
        r = a * b
        r = a / b
        r = a ** b
    assert dotest(s)
def test_ibasic():
    """Smoke-test the in-place arithmetic operators across every value pair."""
    def s(a, b):
        # Reset r before each augmented assignment so every operator
        # sees the original left operand.
        r = a
        r += b
        r = a
        r -= b
        r = a
        r *= b
        r = a
        r /= b
    assert dotest(s)
|
import torch
from wavenet.model import build_wavenet
from wavenet.train import Trainer
from wavenet.utils import (
set_random_seed,
load_data,
split_data,
LJSpeechDataset
)
from config import set_params
def main():
    """Train a WaveNet on LJSpeech: set up params, data loaders, model, trainer."""
    # Parameters and reproducibility.
    params = set_params()
    set_random_seed(params.random_seed)
    params.device = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")
    if params.verbose:
        print('Using device', params.device)
    # Load the metadata and carve out a validation split.
    data = load_data(params.metadata_file)
    train_data, valid_data = split_data(data, params.valid_ratio)
    if params.verbose:
        print('Data loaded and split')
    # Identical loader settings for train and validation.
    loader_opts = dict(batch_size=params.batch_size,
                       num_workers=params.num_workers,
                       pin_memory=True)
    train_loader = torch.utils.data.DataLoader(
        LJSpeechDataset(labels=train_data, params=params), **loader_opts)
    valid_loader = torch.utils.data.DataLoader(
        LJSpeechDataset(labels=valid_data, params=params), **loader_opts)
    if params.verbose:
        print('Data loaders prepared')
    # Build the model, optionally resume from a checkpoint, and train.
    model = build_wavenet(params)
    trainer = Trainer(model, params)
    if params.load_model:
        trainer.load_checkpoint(params.model_checkpoint)
    trainer.train(train_loader, valid_loader)
if __name__ == '__main__':
    main()
|
# Smoke test for the lean Python bindings: import Lean's stdlib, check a
# couple of well-known declarations, then enumerate the environment.
from nose.tools import assert_equals, assert_not_equals, assert_false, assert_true
import lean
import os
from lang.expr import *
from lang.env import *
MY_PATH_TO_LEAN_STDLIB = os.environ['MY_PATH_TO_LEAN_STDLIB']
lean.initialize()
# NOTE(review): the meaning of the third import_modules argument (100000) is
# assumed to be a trust/limit setting — confirm against the binding's API.
env = lean.import_modules([MY_PATH_TO_LEAN_STDLIB], [lean.name("init")], 100000)
# `nat` should be a Type, and tactic.intro1 should have its documented type.
assert_equals(env.get(lean.name("nat")).get_type(), lean.mk_Type())
assert_equals(str(env.get(lean.name(lean.name("tactic"), "intro1")).get_type()), "tactic.{0} (expr bool.tt)")
# Collect every declaration in the environment via the visitor callback.
decls = []
add_to_list = lambda d: decls.append(d)
env.for_each_declaration(add_to_list)
# build theorem context
env_view = EnvView(env)
# TODO(danehuang): This is expensive -- is it necessary as a test?
# decls = env_view.get_decls()
# d_thm = env_view.thm_dict_of_decls(decls)
# for n, v in d_thm.items():
#     print(str(n))
#     mentioned_thms = gather_theorem(d_thm, v)
#     for thm in mentioned_thms:
#         print("  " + str(thm))
#     #print(unicode(n) + " -> " + to_expr_view(v).to_sexpr())
|
#!/usr/bin/python3
import sys
import time
import ctypes
fullscreen = True  # window-mode flag, toggled by the 'f' key handler
sys.path.append("./shared")  # local helper modules (sbmloader, ktxloader, ...)
from sbmloader import SBMObject # location of sbm file format loader
from ktxloader import KTXObject # location of ktx file format loader
from textoverlay import OVERLAY_
from shader import shader_load, link_from_shaders
from sbmath import m3dDegToRad, m3dRadToDeg, m3dTranslateMatrix44, m3dRotationMatrix44, \
m3dMultiply, m3dOrtho, m3dPerspective, rotation_matrix, translate, m3dScaleMatrix44, \
scale, m3dLookAt, normalize
try:
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.raw.GL.ARB.vertex_array_object import glGenVertexArrays, glBindVertexArray
except:
print ('''
ERROR: PyOpenGL not installed properly.
''')
sys.exit()
import numpy as np
from math import cos, sin
import glm
# Module-level GL state shared between Scene methods via `global`.
identityMatrix = [1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1]  # 4x4 identity; appears unused in this sample
myobject = SBMObject()   # .sbm mesh loader (unused in this sample)
ktxobject = KTXObject()  # .ktx texture loader
overlay = OVERLAY_()     # bitmap-font text overlay
texture = GLuint(0)      # HDR source texture, filled in Scene.__init__
program = GLuint(0)      # tone-mapping shader program
vao = GLuint(0)          # empty VAO (quad vertices live in the vertex shader)
exposure=1.0             # exposure factor, adjusted with the +/- keys
vs_source = '''
#version 420 core
void main(void)
{
const vec4 vertices[] = vec4[](vec4(-1.0, -1.0, 0.5, 1.0),
vec4( 1.0, -1.0, 0.5, 1.0),
vec4(-1.0, 1.0, 0.5, 1.0),
vec4( 1.0, 1.0, 0.5, 1.0));
gl_Position = vertices[gl_VertexID];
}
'''
fs_source = '''
#version 430 core
uniform sampler2D s;
uniform float exposure;
out vec4 color;
void main(void)
{
vec4 c = texture(s, gl_FragCoord.xy / vec2(512.0, 512.0));
c.xyz = vec3(1.0) - exp(-c.xyz * exposure);
color = c;
}
'''
def checkGLError():
    """Raise RuntimeError if the GL context has a pending error flag."""
    err = glGetError()
    if err == GL_NO_ERROR:
        return
    raise RuntimeError('gl error %s' % (err,))
class Scene:
    """GLUT scene that draws a full-screen quad through an HDR tone-mapping shader."""
    def __init__(self, width, height):
        """Compile/link the shaders, load the HDR KTX texture, and create an empty VAO."""
        global overlay
        global texture
        global program
        global vao
        self.width = width
        self.height = height
        overlay.init(80, 50)
        #// Generate a name for the texture
        # NOTE(review): ktx_load below returns its own texture name, so the
        # name generated here is discarded (leaked).
        glGenTextures(1, texture)
        #// Load texture from file
        texture = ktxobject.ktx_load("treelights_2k.ktx")
        #// Now bind it to the context using the GL_TEXTURE_2D binding point
        glBindTexture(GL_TEXTURE_2D, texture)
        program = glCreateProgram()
        fs = glCreateShader(GL_FRAGMENT_SHADER)
        glShaderSource(fs, fs_source)
        glCompileShader(fs)
        if not glGetShaderiv(fs, GL_COMPILE_STATUS):
            print( 'compile error:' )
            print( glGetShaderInfoLog(fs) )
        vs = glCreateShader(GL_VERTEX_SHADER)
        glShaderSource(vs, vs_source)
        glCompileShader(vs)
        if not glGetShaderiv(vs, GL_COMPILE_STATUS):
            print( 'compile error:' )
            print( glGetShaderInfoLog(vs) )
        glAttachShader(program, vs)
        glAttachShader(program, fs)
        glLinkProgram(program)
        if not glGetProgramiv(program, GL_LINK_STATUS):
            print( 'link error:' )
            print( glGetProgramInfoLog(program) )
        # The quad's vertices are hard-coded in the vertex shader, so the
        # VAO stays empty; core profile still requires one to be bound.
        glGenVertexArrays(1, vao)
        glBindVertexArray(vao)
    def display(self):
        """Render one frame: clear, draw the tone-mapped quad, then the text overlay."""
        global texture
        global program
        currentTime = time.time()
        green = [ 0.0, 0.25, 0.0, 1.0 ]
        glClearBufferfv(GL_COLOR, 0, green)
        glActiveTexture(GL_TEXTURE0)
        glBindTexture(GL_TEXTURE_2D, texture)
        glUseProgram(program)
        glViewport(0, 0, self.width, self.height)
        # NOTE(review): assumes the 'exposure' uniform resolves to location 0;
        # the shader does not declare an explicit location — confirm.
        glUniform1f(0, exposure)
        glDrawArrays(GL_TRIANGLE_STRIP, 0, 4)
        overlay.clear()
        buffer = ("Exposure = %2.2f (Numpad +/- to change)" % exposure)
        overlay.drawText(buffer, 0, 0)
        overlay.draw()
        checkGLError()
        glutSwapBuffers()
    def reshape(self, width, height):
        """Track the window size used by the glViewport call in display()."""
        self.width = width
        self.height = height
    def keyboard(self, key, x, y ):
        """Handle ESC (quit), f/F (fullscreen toggle) and +/- (exposure scaling)."""
        global fullscreen
        global exposure
        print ('key:' , key)
        if key == b'\x1b': # ESC
            sys.exit()
        elif key == b'f' or key == b'F': #fullscreen toggle
            if (fullscreen == True):
                glutReshapeWindow(512, 512)
                glutPositionWindow(int((1360/2)-(512/2)), int((768/2)-(512/2)))
                fullscreen = False
            else:
                glutFullScreen()
                fullscreen = True
        elif key == b'+':
            exposure *= 1.1
        elif key == b'-':
            exposure /= 1.1
    def init(self):
        """No-op; kept for interface symmetry with the other samples."""
        pass
    def timer(self, blah):
        """Redraw periodically when registered with glutTimerFunc (currently unused)."""
        glutPostRedisplay()
        # NOTE(review): int(1/60) is 0 in Python 3, so the timer would
        # re-arm with no delay; the sleep below provides the actual pacing.
        glutTimerFunc( int(1/60), self.timer, 0)
        time.sleep(1/60.0)
time.sleep(1/60.0)
# Boot GLUT, create the 512x512 window, wire the callbacks, and enter the loop.
if __name__ == '__main__':
    glutInit()
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    glutInitWindowSize(512, 512)
    w1 = glutCreateWindow('OpenGL SuperBible - HDR Exposure')
    # NOTE(review): glutInitWindowPosition is normally called before
    # glutCreateWindow; here it likely has no effect — confirm.
    glutInitWindowPosition(int((1360/2)-(512/2)), int((768/2)-(512/2)))
    fullscreen = False
    #glutFullScreen()
    scene = Scene(512,512)
    glutReshapeFunc(scene.reshape)
    glutDisplayFunc(scene.display)
    glutKeyboardFunc(scene.keyboard)
    # Idle-driven redraw (busy loop); the timer alternative is commented out.
    glutIdleFunc(scene.display)
    #glutTimerFunc( int(1/60), scene.timer, 0)
    scene.init()
    glutMainLoop()
|
import re
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.utils import timezone
import datetime
from authentication.models import Profile
from rest_framework import serializers
from rest_framework.fields import IntegerField
from api.validators import VEHICLE_NUMBER
from fms.models import QuoteVehicles, Requirement, RequirementVehicleQuote, RequirementQuote, REQ_STATUS
from fms.views_requirement import RDONLY_REQ_STATUS
from restapi.serializers.sme import BasicSmeSerializer
from sme.models import Sme
from api.utils import get_or_none, int_or_none
from supplier.models import Supplier
from utils.models import City, AahoOffice, VehicleCategory
class QuoteVehiclesSerializer(serializers.Serializer):
    """Serializer for QuoteVehicles rows (a vehicle number offered in a quote)."""
    id = IntegerField(read_only=True)
    vehicle_no = serializers.CharField(max_length=15, required=True, allow_blank=True, trim_whitespace=True)
    def create(self, validated_data):
        # create quote vehicles (idempotent: reuses an existing row with the same number)
        quote_vehicle, created = QuoteVehicles.objects.get_or_create(vehicle_no=validated_data['vehicle_no'])
        return quote_vehicle
    def update(self, instance, validated_data):
        """Update the vehicle number and bump the updated_on timestamp."""
        instance.vehicle_no = validated_data.get('vehicle_no', instance.vehicle_no)
        instance.updated_on = timezone.now()
        instance.save()
        return instance
    @staticmethod
    def check_vehicle_no_pattern(validated_data):
        """Return a status dict saying whether vehicle_no matches the plate pattern.

        NOTE(review): VEHICLE_NUMBER[1:-2] presumably strips regex anchors
        from the shared pattern constant — confirm against api.validators.
        """
        response = {'status': 'success', 'msg': 'Pattern Match'}
        vehicle_no = validated_data['vehicle_no']
        vehicle_number_pattern = re.compile(VEHICLE_NUMBER[1:-2], re.IGNORECASE)
        if isinstance(vehicle_no, str):
            if vehicle_number_pattern.match(vehicle_no):
                return response
            else:
                response = {'status': 'failure', 'msg': 'Pattern Does Not Match'}
                return response
        else:
            response = {'status': 'failure', 'msg': 'Vehicle No Is Not String'}
            return response
class RequirementVehicleQuoteSerializer(serializers.Serializer):
    """Serializer for the Requirement <-> QuoteVehicles link records."""
    quote_vehicle_id = IntegerField(required=True)
    requirement_id = IntegerField(required=True)
    @staticmethod
    def _resolve_related(validated_data):
        """Fetch the Requirement and QuoteVehicles rows, raising ValidationError if absent."""
        try:
            requirement = Requirement.objects.get(id=validated_data['requirement_id'])
        except Requirement.DoesNotExist:
            raise serializers.ValidationError("Requirement Does Not Exist")
        try:
            quote_vehicle = QuoteVehicles.objects.get(id=validated_data['quote_vehicle_id'])
        except QuoteVehicles.DoesNotExist:
            raise serializers.ValidationError("QuoteVehicles Does Not Exist")
        return requirement, quote_vehicle
    def create(self, validated_data):
        """Create (or reuse) the link row for the given requirement/vehicle pair."""
        requirement, quote_vehicle = self._resolve_related(validated_data)
        link, _ = RequirementVehicleQuote.objects.get_or_create(
            requirement=requirement, quote_vehicle=quote_vehicle)
        return link
    def update(self, instance, validated_data):
        """Re-point an existing link row and bump its updated_on timestamp."""
        requirement, quote_vehicle = self._resolve_related(validated_data)
        instance.requirement = requirement
        instance.quote_vehicle = quote_vehicle
        instance.updated_on = timezone.now()
        instance.save()
        return instance
class RequirementQuoteSerializer(serializers.Serializer):
    """Serializer for a supplier's rate quote against a requirement/vehicle pair."""
    id = IntegerField(read_only=True)
    supplier_id = IntegerField(required=True)
    supplier = serializers.SerializerMethodField()
    requirement_vehicle_quote_id = IntegerField(required=True)
    requirement_vehicle_quote = serializers.SerializerMethodField()
    rate = serializers.IntegerField(max_value=99999, required=True)
    status = serializers.CharField(max_length=15, allow_null=True, required=False)
    def create(self, validated_data):
        """Create a RequirementQuote after validating both foreign keys exist."""
        try:
            supplier = Supplier.objects.get(id=validated_data['supplier_id'])
        except Supplier.DoesNotExist:
            raise serializers.ValidationError("Supplier Does Not Exist")
        try:
            # Bug fix: the original called RequirementVehicleQuote(id=...),
            # which builds an unsaved instance without touching the database,
            # so DoesNotExist could never fire and invalid ids slipped
            # through validation.  Fetch the row instead.
            requirement_vehicle_quote = RequirementVehicleQuote.objects.get(
                id=validated_data['requirement_vehicle_quote_id'])
        except RequirementVehicleQuote.DoesNotExist:
            raise serializers.ValidationError("RequirementVehicleQuote Does Not Exist")
        requirement_quote = RequirementQuote.objects.create(supplier=supplier,
                                                            requirement_vehicle_quote=requirement_vehicle_quote,
                                                            rate=validated_data['rate'])
        return requirement_quote
    def update(self, instance, validated_data):
        """Re-point the quote at (possibly new) supplier/RVQ rows and update the rate."""
        try:
            supplier = Supplier.objects.get(id=validated_data['supplier_id'])
        except Supplier.DoesNotExist:
            raise serializers.ValidationError("Supplier Does Not Exist")
        try:
            # Same fix as in create(): fetch, don't instantiate.
            requirement_vehicle_quote = RequirementVehicleQuote.objects.get(
                id=validated_data['requirement_vehicle_quote_id'])
        except RequirementVehicleQuote.DoesNotExist:
            raise serializers.ValidationError("RequirementVehicleQuote Does Not Exist")
        instance.rate = validated_data.get('rate', instance.rate)
        instance.supplier = supplier
        instance.requirement_vehicle_quote = requirement_vehicle_quote
        instance.updated_on = timezone.now()
        instance.save()
        return instance
    def get_requirement_vehicle_quote(self, obj):
        """Representation helper: summarize the linked requirement/vehicle pair."""
        rvq = get_or_none(RequirementVehicleQuote, id=int_or_none(obj.requirement_vehicle_quote_id))
        if not rvq:
            raise serializers.ValidationError("RequirementVehicleQuote Does Not Exist")
        return {'requirement_id': rvq.requirement.id, 'quote_vehicle_id': rvq.quote_vehicle.id,
                'tonnage': rvq.requirement.tonnage, 'no_of_vehicles': rvq.requirement.no_of_vehicles}
    def get_supplier(self, obj):
        """Representation helper: supplier id plus the profile's name and phone."""
        supplier = get_or_none(Supplier, id=int_or_none(obj.supplier_id))
        if not supplier:
            raise serializers.ValidationError("Supplier Does Not Exist")
        profile = Profile.objects.get(user=supplier.user)
        return {'supplier_id': supplier.id, 'supplier_name': profile.name, 'supplier_phone': profile.phone}
class RequirementSerializer(serializers.Serializer):
    """Serializer for transport Requirements (inquiries).

    Validation of the raw payload is centralised in
    parse_requirement_fields(), which returns either a
    {'status': 'failure', 'msg': ...} dict or the resolved model objects.
    """
    id = serializers.IntegerField(read_only=True)
    client_id = serializers.IntegerField(required=True)
    # Shipment dates travel as 'YYYY-MM-DD' strings; parsed in
    # parse_requirement_fields().
    from_shipment_date = serializers.CharField(max_length=10, required=True, trim_whitespace=True)
    to_shipment_date = serializers.CharField(max_length=10, allow_null=True, trim_whitespace=True, required=False)
    from_city_id = serializers.IntegerField(required=True)
    from_city = serializers.SerializerMethodField()
    to_city_id = serializers.IntegerField(required=True)
    to_city = serializers.SerializerMethodField()
    aaho_office_id = serializers.IntegerField(required=True)
    aaho_office = serializers.SerializerMethodField()
    tonnage = serializers.DecimalField(allow_null=True, required=False, max_digits=6, decimal_places=2)
    no_of_vehicles = serializers.IntegerField(max_value=9999, allow_null=True, required=False)
    rate = serializers.IntegerField(max_value=99999, allow_null=True, required=False)
    material = serializers.CharField(max_length=35, allow_null=True)
    type_of_vehicle_id = serializers.IntegerField(max_value=99999, allow_null=True, required=False)
    type_of_vehicle = serializers.SerializerMethodField()
    req_status = serializers.CharField(max_length=35, trim_whitespace=True)
    remark = serializers.CharField(max_length=25, trim_whitespace=False, required=False)
    cancel_reason = serializers.CharField(max_length=75, trim_whitespace=True, required=False)
    client = serializers.SerializerMethodField()
    from_state = serializers.SerializerMethodField()
    to_state = serializers.SerializerMethodField()
    read_only = serializers.SerializerMethodField()
    quotes = serializers.SerializerMethodField()
    client_data = serializers.SerializerMethodField()

    def get_quotes(self, instance):
        """Number of supplier quotes attached to this requirement."""
        quotes = RequirementQuote.objects.filter(requirement_vehicle_quote__requirement__id=instance.id)
        return quotes.count()

    def get_client_data(self, instance):
        """Full client payload, or {} when the client link is missing/invalid."""
        if isinstance(instance, Requirement) and isinstance(instance.client, Sme):
            return BasicSmeSerializer(instance.client).data
        return {}

    def create(self, validated_data):
        """Validate the payload and create a Requirement from it."""
        objects = self.parse_requirement_fields(validated_data, True)
        if objects['status'] == 'failure':
            raise serializers.ValidationError({'status': 'failure', 'msg': objects['msg']})
        # 'status' is a validation marker, not a model field.
        objects.pop('status', None)
        try:
            req = Requirement.objects.create(**objects)
        except IntegrityError:
            raise serializers.ValidationError({'status': 'failure', 'msg': 'Inquiry could not be created'})
        return req

    def update(self, instance, validated_data):
        """Validate the payload and apply it to an existing Requirement.

        When the inquiry is still open, only the whitelisted fields are
        written (update_fields); otherwise a plain full save is done.
        """
        objects = self.parse_requirement_fields(validated_data, False)
        if objects['status'] == 'failure':
            raise serializers.ValidationError({'status': 'failure', 'msg': objects['msg']})
        instance.client = objects['client']
        instance.aaho_office = objects['aaho_office']
        instance.from_shipment_date = objects['from_shipment_date']
        instance.to_shipment_date = objects['to_shipment_date']
        instance.from_city = objects['from_city']
        instance.to_city = objects['to_city']
        instance.tonnage = objects['tonnage']
        instance.no_of_vehicles = objects['no_of_vehicles']
        instance.material = objects['material']
        instance.type_of_vehicle = objects['type_of_vehicle']
        instance.rate = objects['rate']
        instance.remark = objects['remark']
        instance.cancel_reason = objects['cancel_reason']
        instance.changed_by = User.objects.get(username=validated_data['user'].username)
        instance.updated_on = timezone.now()
        if instance.req_status and validated_data['req_status'] == 'open':
            instance.req_status = objects['req_status']
            # NOTE(review): 'material' appears twice in this list — harmless
            # but probably unintended duplication.
            instance.save(update_fields=['req_status', 'material', 'client', 'aaho_office', 'from_shipment_date',
                                         'to_shipment_date', 'from_city', 'to_city', 'tonnage', 'no_of_vehicles',
                                         'material', 'type_of_vehicle', 'rate', 'remark', 'cancel_reason', 'changed_by',
                                         'updated_on'])
        else:
            instance.req_status = objects['req_status']
            instance.save()
        return instance

    def get_client(self, obj):
        """Display name of the client (Sme)."""
        client = get_or_none(Sme, id=int_or_none(obj.client_id))
        if not client:
            raise serializers.ValidationError("Client Does Not Exist")
        return client.get_name()

    def get_from_city(self, obj):
        """Name of the origin city."""
        city = get_or_none(City, id=int_or_none(obj.from_city_id))
        if not city:
            raise serializers.ValidationError("From City Does Not Exist")
        return city.name

    def get_to_city(self, obj):
        """Name of the destination city."""
        city = get_or_none(City, id=int_or_none(obj.to_city_id))
        if not city:
            raise serializers.ValidationError("To City Does Not Exist")
        return city.name

    def get_aaho_office(self, obj):
        """Branch name of the handling Aaho office."""
        aaho_office = get_or_none(AahoOffice, id=int_or_none(obj.aaho_office_id))
        if not aaho_office:
            raise serializers.ValidationError("AahoOffice Does Not Exist")
        return aaho_office.branch_name

    def get_type_of_vehicle(self, obj):
        """Vehicle category name; empty string when no category is linked."""
        type_of_vehicle = get_or_none(VehicleCategory, id=int_or_none(obj.type_of_vehicle_id))
        if not type_of_vehicle:
            vehicle_name = ''
        else:
            # NOTE(review): name() is invoked as a method here while elsewhere
            # (e.g. Profile.name) it is an attribute — confirm VehicleCategory
            # really defines a name() callable.
            vehicle_name = type_of_vehicle.name()
        return vehicle_name

    def get_from_state(self, obj):
        """State name of the origin city ('' when the city has no state)."""
        city = get_or_none(City, id=int_or_none(obj.from_city_id))
        if not city:
            raise serializers.ValidationError("From City Does Not Exist")
        else:
            return city.state.name if city and city.state else ''

    def get_to_state(self, obj):
        """State name of the destination city ('' when the city has no state)."""
        city = get_or_none(City, id=int_or_none(obj.to_city_id))
        if not city:
            raise serializers.ValidationError("To City Does Not Exist")
        else:
            return city.state.name if city and city.state else ''

    def get_read_only(self, obj):
        """True when the inquiry's status makes it non-editable."""
        if obj.req_status in [x[0] for x in RDONLY_REQ_STATUS]:
            rdonly = True
        else:
            rdonly = False
        return rdonly

    def parse_requirement_fields(self, load, created):
        """Validate the raw payload and resolve all foreign keys.

        :param load: validated_data dict; must carry a 'user' entry
        :param created: True when called from create() — date checks against
            "today" apply only to new inquiries
        :return: dict with status='success' and the resolved model objects,
            or {'status': 'failure', 'msg': <reason>}
        """
        user = load['user']
        data = load
        from_shipment_date = data.get('from_shipment_date', None)
        to_shipment_date = data.get('to_shipment_date', None)
        try:
            present = datetime.datetime.now()
            from_dt = datetime.datetime.strptime(from_shipment_date, '%Y-%m-%d')
            if to_shipment_date:
                to_date = datetime.datetime.strptime(to_shipment_date, '%Y-%m-%d')
                if to_date < from_dt:
                    return {'status': 'failure', 'msg': 'To Date should be greater than From Date'}
                if to_date.date() < present.date() and created:
                    return {'status': 'failure', 'msg': 'To Date should be greater than or equal to Today'}
                if from_dt.date() < present.date() and created:
                    return {'status': 'failure', 'msg': 'From Date should be greater than or equal to Today'}
            elif from_dt.date() < present.date() and created:
                return {'status': 'failure', 'msg': 'From Date should be greater than or equal to Today'}
        except ValueError:
            return {'status': 'failure', 'msg': 'Incorrect date format, should be YYYY-MM-DD'}
        tonnage = data.get('tonnage', None)
        no_of_vehicles = data.get('no_of_vehicles', None)
        material = data.get('material', None)
        rate = data.get('rate', None)
        # At least one of tonnage / no_of_vehicles must be supplied.
        if not tonnage and not no_of_vehicles:
            return {'status': 'failure', 'msg': 'Enter Either Tonnage or No of Vehicles'}
        if data.get('from_city_id', None) == data.get('to_city_id', None):
            return {'status': 'failure', 'msg': 'From City and To City should be different'}
        client = get_or_none(Sme, id=int_or_none(data.get('client_id')))
        if not isinstance(client, Sme):
            return {'status': 'failure', 'msg': 'Incorrect client '}
        from_city = get_or_none(City, id=data.get('from_city_id', None))
        if not isinstance(from_city, City):
            return {'status': 'failure', 'msg': 'Incorrect from city '}
        to_city = get_or_none(City, id=data.get('to_city_id', None))
        if not isinstance(to_city, City):
            return {'status': 'failure', 'msg': 'Incorrect to city '}
        type_of_vehicle = get_or_none(VehicleCategory, id=int_or_none(data.get('type_of_vehicle_id')))
        aaho_office = get_or_none(AahoOffice, id=data.get('aaho_office_id', None))
        if not isinstance(aaho_office, AahoOffice):
            return {'status': 'failure', 'msg': 'Incorrect Aaho Office'}
        created_by = User.objects.get(username=user.username)
        # Normalise falsy optionals to None so the model stores NULL.
        if tonnage and not no_of_vehicles:
            no_of_vehicles = None
        if no_of_vehicles and not tonnage:
            tonnage = None
        if not rate:
            rate = None
        if not type_of_vehicle:
            type_of_vehicle = None
        else:
            if not isinstance(type_of_vehicle, VehicleCategory):
                return {'status': 'failure', 'msg': 'Incorrect vehicle type'}
        req_status = data.get('req_status', None)
        if req_status not in [x[0] for x in REQ_STATUS]:
            return {'status': 'failure', 'msg': 'Inquiry status is wrong'}
        # 'lapsed' is a system-assigned terminal state, never user-settable.
        if req_status == 'lapsed':
            return {'status': 'failure', 'msg': 'Inquiry status should be open/unverified'}
        remark = data.get('remark', None)
        if not remark:
            remark = None
        cancel_reason = data.get('cancel_reason', None)
        if not cancel_reason:
            cancel_reason = None
        objects = {'status': 'success', 'client': client, 'from_shipment_date': from_shipment_date,
                   'to_shipment_date': to_shipment_date, 'from_city': from_city, 'to_city': to_city,
                   'aaho_office': aaho_office, 'tonnage': tonnage,
                   'no_of_vehicles': no_of_vehicles, 'material': material, 'type_of_vehicle': type_of_vehicle,
                   'rate': rate, 'created_by': created_by, 'req_status': req_status, 'remark': remark,
                   'cancel_reason': cancel_reason}
        return objects
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Set
from flask import g
from flask_babel import lazy_gettext as _
from sqlalchemy import or_
from sqlalchemy.orm import Query
from sqlalchemy.sql.expression import cast
from sqlalchemy.sql.sqltypes import JSON
from superset import app, security_manager
from superset.models.core import Database
from superset.views.base import BaseFilter
def can_access_databases(
    view_menu_name: str,
) -> Set[str]:
    """Collect the names of all databases reachable through the current
    user's permissions of the given view-menu type."""
    database_names = set()
    for view_menu in security_manager.user_view_menu_names(view_menu_name):
        unpacked = security_manager.unpack_database_and_schema(view_menu)
        database_names.add(unpacked.database)
    return database_names
class DatabaseFilter(BaseFilter):  # pylint: disable=too-few-public-methods
    # TODO(bogdan): consider caching.
    def apply(self, query: Query, value: Any) -> Query:
        """Limit *query* to databases the current user may access, either
        directly (database_access) or via schema/datasource permissions."""
        # Users holding the all-databases permission see everything.
        if security_manager.can_access_all_databases():
            return query
        direct_perms = security_manager.user_view_menu_names("database_access")
        via_schema = can_access_databases("schema_access")
        via_datasource = can_access_databases("datasource_access")
        indirect_names = list(via_schema) + list(via_datasource)
        return query.filter(
            or_(
                self.model.perm.in_(direct_perms),
                self.model.database_name.in_(indirect_names),
            )
        )
class DatabaseUploadEnabledFilter(BaseFilter):  # pylint: disable=too-few-public-methods
    """
    Custom filter for the GET list that filters all databases based on allow_file_upload
    """

    name = _("Upload Enabled")
    arg_name = "upload_is_enabled"

    def apply(self, query: Query, value: Any) -> Query:
        """Restrict *query* to databases the user can upload files into."""
        filtered_query = query.filter(Database.allow_file_upload)
        datasource_access_databases = can_access_databases("datasource_access")
        if hasattr(g, "user"):
            allowed_schemas = [
                app.config["ALLOWED_USER_CSV_SCHEMA_FUNC"](db, g.user)
                for db in datasource_access_databases
            ]
            # Any per-user schema grants short-circuit the extra filtering.
            if len(allowed_schemas):
                return filtered_query
        return filtered_query.filter(
            or_(
                # BUG FIX: the original used `... is not None`, which Python
                # evaluates eagerly to True (identity test on the SQLAlchemy
                # expression object), making the whole or_() a constant TRUE.
                # isnot(None) emits the intended "IS NOT NULL" in SQL.
                cast(Database.extra, JSON)["schemas_allowed_for_file_upload"].isnot(
                    None
                ),
                cast(Database.extra, JSON)["schemas_allowed_for_file_upload"] != [],
            )
        )
|
# -*- coding: utf-8 -*-
"""Documentation about the Fish project module."""
import logging
logger = logging.getLogger(__name__)
# FIXME: put actual code here
def example():
    """Log and print a short greeting; placeholder for real functionality."""
    # Typo fix: "excecution" -> "execution" in the log message.
    logger.info('Providing information about the execution of the function.')
    print('Hello Fishy Things updated again 2')


if __name__ == "__main__":
    example()
from gilded_rose import Item, GildedRose
class TestGildedRose(object):
    """Characterization test for GildedRose.update_quality."""

    def test_foo(self):
        # Given
        items = [Item("foo", 0, 0)]
        gilded_rose = GildedRose(items)
        # When
        gilded_rose.update_quality()
        # Then
        # BUG FIX: update_quality never renames items, so the starter
        # assertion against "fixme" fails unconditionally; the name stays
        # exactly what it was constructed with.
        assert items[0].name == "foo"
|
"""
proxyUtil: Proxy auto-config for Python
===================================
Copyright 2018 Carson Lam
Copyright 2020-2021 Hiroki Fujii
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from proxyUtil.api import get_pac, collect_pac_urls, download_pac, PACSession, pac_context_for_url, virtualProxyEnviron
__version__ = '0.2.4'
__all__ = [
'get_pac', 'collect_pac_urls', 'download_pac', 'PACSession', 'pac_context_for_url', 'virtualProxyEnviron'
]
|
from .generator import Generator
class EconomicDispatch:
    """Container for an economic-dispatch problem: a set of generators
    plus a single system load."""

    def __init__(self):
        self.generators = dict()  # generator.id -> Generator
        # BUG FIX: the original assigned the *type* `float` instead of a
        # numeric value; start from a zero load.
        self.load = 0.0  # extend it to pd.Series to consider multiple steps

    def add_generator(self, generator: Generator):
        """Register *generator* keyed by its id (a duplicate id replaces)."""
        self.generators[generator.id] = generator

    def add_load(self, load: float):
        """Set the (single-step) system load."""
        self.load = load
|
import requests

# Look up a single title on OMDb; the commented URL below is a search query.
API_URL = "http://www.omdbapi.com/?t=hackers&apikey=6b48d4b6"
# API_URL = "http://www.omdbapi.com/?s=omen&apikey=6b48d4b6"

response = requests.get(API_URL)
movie = response.json()

for field, content in movie.items():
    print(field + ":", content)
|
import json
import re
import requests
from django.conf import settings
from cathie.exceptions import CatsAnswerCodeException
from cathie import authorization
def cats_check_status():
    """Placeholder for a CATS availability/status check; not implemented yet."""
    pass
@authorization.check_authorization_for_cats
def cats_submit_solution(source_text: str, problem_id: int, de_id: int, source=None):
    """Submit a solution to CATS.

    :return: (request id or None, decoded JSON answer)
    :raises CatsAnswerCodeException: on any non-200 HTTP response
    """
    # TODO: handle re-submission of the same solution
    url = f'{settings.CATS_URL}main.pl?f=api_submit_problem;json=1;'
    url += f'sid={authorization.cats_sid()}'
    data = {
        'de_id': de_id,
        'source_text': source_text,
        'problem_id': problem_id
    }
    r = requests.post(url, data=data)
    if r.status_code != 200:
        raise CatsAnswerCodeException(r)
    r_content = json.loads(r.content.decode('utf-8'))
    req_ids = None
    if r_content.get('href_run_details'):
        # BUG FIX: re.search may return None when href_run_details carries no
        # "rid=" parameter; the original crashed with AttributeError on
        # .group(). The pattern is all digits, so int() is always safe.
        match = re.search(r'(?<=rid=)\d+', r_content['href_run_details'])
        if match:
            req_ids = int(match.group())
    return req_ids, r_content
def cats_submit_problem():
    """Placeholder for submitting a problem definition to CATS; not implemented yet."""
    pass
@authorization.check_authorization_for_cats
def cats_check_solution_status(req_ids: int):
    """Query CATS for the state of a submitted request.

    :return: (verdict, full answer) when data is present, else None
    :raises CatsAnswerCodeException: on any non-200 HTTP response
    """
    url = (
        f'{settings.CATS_URL}main.pl?f=api_get_request_state;req_ids={req_ids};json=1;'
        f'sid={authorization.cats_sid()}'
    )
    response = requests.get(url)
    if response.status_code != 200:
        raise CatsAnswerCodeException(response)
    payload = response.json()
    if payload:
        return payload[0]['verdict'], payload
@authorization.check_authorization_for_cats
def cats_get_problems_from_contest(contest_id):
    """Return the list of problems belonging to a CATS contest.

    :raises CatsAnswerCodeException: on any non-200 HTTP response
    """
    url = (
        f'{settings.CATS_URL}?f=problems;json=1;cid={contest_id};'
        f'sid={authorization.cats_sid()}'
    )
    answer = requests.get(url)
    if answer.status_code != 200:
        raise CatsAnswerCodeException(answer)
    payload = json.loads(answer.content.decode('utf-8'))
    return payload['problems']
def cats_get_problem_description_by_url(description_url):
    """Fetch the raw problem statement page from CATS.

    :param description_url: relative URL, typically starting with "./"
    :raises CatsAnswerCodeException: on any non-200 HTTP response
    """
    # BUG FIX: lstrip("./") strips ANY leading run of '.' and '/' characters
    # (e.g. "..//x" -> "x"), not the literal "./" prefix; remove only the
    # two-character prefix when present.
    relative_url = description_url
    if relative_url.startswith('./'):
        relative_url = relative_url[2:]
    url = f'{settings.CATS_URL}{relative_url}'
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/89.0.4356.6 Safari/537.36'
    }
    request = requests.request(method='get', url=url, headers=headers)
    if request.status_code != 200:
        raise CatsAnswerCodeException(request)
    data = request.content.decode('utf-8')
    return data
# def cats_get_problem_by_id(cats_id, user):
# pass
|
import logging
from datawinners.search import update_submission_search_index, form_model_change_handler, entity_search_update, entity_form_model_change_handler
from mangrove.datastore.documents import SurveyResponseDocument, FormModelDocument, EntityFormModelDocument
from mangrove.datastore.entity import Entity
from mangrove.errors.MangroveException import FormModelDoesNotExistsException
def populate_submission_index(dbm, form_model_id=None):
    """(Re)build the search index for survey responses.

    :param dbm: database manager whose couch view is iterated
    :param form_model_id: when given, restrict indexing to that form model
    Orphaned submissions (their form model no longer exists) are counted
    and skipped rather than aborting the run.
    """
    start_key = [form_model_id] if form_model_id else []
    end_key = [form_model_id, {}] if form_model_id else [{}, {}]
    rows = dbm.database.iterview("surveyresponse/surveyresponse", 1000, reduce=False, include_docs=False, startkey=start_key, endkey=end_key)
    logger = logging.getLogger(dbm.database_name)
    ignored = 0
    for row in rows:
        try:
            survey_response = SurveyResponseDocument._wrap_row(row)
            update_submission_search_index(survey_response, dbm, refresh_index=False)
        except FormModelDoesNotExistsException as e:
            ignored += 1
            # BUG FIX: BaseException.message does not exist on Python 3;
            # str(e) is portable across 2 and 3.
            logger.warning(str(e))  # ignore orphaned submissions On changing form code!
    if ignored > 0:
        logger.warning("Few submissions are ignored %s" % ignored)
def populate_entity_index(dbm):
    """Re-index every entity reachable through the by_short_codes view."""
    entity_rows = dbm.database.iterview('by_short_codes/by_short_codes', 100, reduce=False, include_docs=True)
    for entity_row in entity_rows:
        wrapped = Entity.__document_class__.wrap(entity_row.get('doc'))
        entity_search_update(wrapped, dbm)
def create_all_indices(dbm):
    """Populate the entity index first, then the submission index."""
    populate_entity_index(dbm)
    populate_submission_index(dbm)
def create_all_mappings(dbm):
    """Create search mappings for every questionnaire form model,
    dispatching registration models to the entity-specific handler."""
    for row in dbm.load_all_rows_in_view('questionnaire'):
        form_value = row['value']
        if form_value['is_registration_model']:
            entity_form_model_change_handler(EntityFormModelDocument.wrap(form_value), dbm)
        else:
            form_model_change_handler(FormModelDocument.wrap(form_value), dbm)
|
import logging
class BaseACL(object):
    """Base class for access-control-list entries.

    An ACL may be attached to a role or to a single user; a user-level
    ACL can reference the role-level ACL it refines through ``parent``.
    """

    def __init__(self, **kwargs):
        super(BaseACL, self).__init__()
        self.role = kwargs.get("role")      # Role object, if role-scoped
        self.user = kwargs.get("user")      # User object, if user-scoped
        self.parent = kwargs.get("parent")  # enclosing ACL (role ACL for a user ACL)
        self._logger = logging.getLogger(self.__class__.__name__)

    @classmethod
    def from_dict(cls, item):
        """Build an ACL from a mapping of constructor keyword arguments."""
        assert isinstance(item, dict)
        return cls(**item)

    def allows_packet(self, pkt, src_user):
        """Decide whether *pkt* from *src_user* is permitted; subclasses must override."""
        raise NotImplementedError()
|
import celery_logger
import json
import os
import random
import services_component
import sys
import uuid
from NETlist_connector.subnetParser import NETlist
from celery import Task, Celery, group
from celery.signals import task_prerun, worker_ready
from cmsscan.scanner import scanner as cmsscanner
from configparser import SafeConfigParser, ConfigParser
from cve_connector.nvd_cve.toneo4j import move_cve_data_to_neo4j
from cve_connector.vendor_cve.implementation.main import add_vendor_cves
from criticality_estimator import CriticalityEstimator
from datetime import datetime, timedelta, timezone
from flowmon_m import flowmon_connector
from ipaddress import IPv4Address, IPv4Network
from neo4j.exceptions import TransientError
from neo4jclient.AbsClient import AbstractClient
from neo4jclient.CMSClient import CMSClient
from neo4jclient.NmapClient import NmapClient
from neo4jclient.OSClient import OSClient
from neo4jclient.RTIRClient import RTIRClient
from neo4jclient.RESTClient import RESTClient
from neo4jclient.ServicesClient import ServicesClient
from neo4jclient.VulnerabilityCompClient import VulnerabilityCompClient
from neo4jclient.WebCheckerClient import WebCheckerClient
from neo4jclient.Cleaner import Cleaner
from nmap_topology_scanner.scanner import scan, topology_scan_neo
from osrest import run
from rtir_connector import rtir
from sabu import JsonParsing
from shadowserver_module import Shadowserver
from shodan_module import Shodan
from shutil import copyfile
from time import time, sleep
from webchecker_component import Webchecker
# Create the app
# GLOBAL CONSTANTS & METHODS #
DEBUG = False
APP = Celery('CRUSOE')
APP.config_from_object('celeryconfig')
CONFIG_PATH = 'config/conf.ini'
logger = celery_logger.get_logger("CELERY", "/var/log/crusoe/celery.log")
def parse_config(section_name):
    """
    Parse config file
    :param section_name: name of config section
    :return: section of config with same name as dictionary
    """
    parser = ConfigParser()
    parser.read(CONFIG_PATH)
    return parser[section_name]
def edit_config(section, key, value):
    """
    Edit actual config
    :param section: name of config section
    :param key: key of section
    :param value: value to edit
    """
    # CONSISTENCY FIX: use ConfigParser like parse_config() does;
    # SafeConfigParser is only a deprecated alias of it.
    parser = ConfigParser()
    parser.read(CONFIG_PATH)
    parser.set(section, key, value)
    with open(CONFIG_PATH, 'w') as conf_file:
        parser.write(conf_file)
def realize_transaction(neo4jclient_function, args=None):
    """
    Realize transaction between neo4j and this program methods.

    Retries up to ten times on TransientError with a growing back-off.
    :param neo4jclient_function: function to be executed
    :param args: function argument
    :return: result of function
    :raises TransientError: when every attempt fails
    """
    for i in range(9, -1, -1):
        try:
            if args is None:
                return neo4jclient_function()
            return neo4jclient_function(args)
        except TransientError:
            # ROBUSTNESS FIX: the original fell off the loop and silently
            # returned None after the last attempt; surface the failure.
            if i == 0:
                raise
            logger.warning(f"Other transaction ongoing, waiting {0.5 * (10 - i)} second and trying again")
            logger.warning(f"Attempts left: {i}")
            sleep(0.5 * (10 - i))
# CONFIG #
class ComponentException(Exception):
    """Raised when a CRUSOE component fails irrecoverably."""
class BaseTask(Task):
    """
    Task base class

    Holds the parsed config sections used by every task; all sections are
    parsed once at import time, so a config change requires a worker
    restart to take effect.
    """
    crusoe_conf = parse_config('crusoe')
    flowmon_conf = parse_config('flowmon')
    os_conf = parse_config('os')
    service_conf = parse_config('service')
    sabu_conf = parse_config('sabu')
    nmap_topology_scanner_conf = parse_config('nmap-topology-scanner')
    rtir_conf = parse_config('rtir-connector')
    cve_connector_conf = parse_config('cve-connector')
    shadowserver_conf = parse_config('shadowserver')
    shodan_conf = parse_config('shodan')
    webchecker_conf = parse_config('webchecker')
    cms_conf = parse_config('cms')
    # the most used params
    neo4j_addr = parse_config('neo4j')['address']
    neo4j_passwd = parse_config('neo4j')['password']
    neo4j_import = parse_config('neo4j')['import']
    log_path = crusoe_conf['log_path']
    tmp_path = crusoe_conf['tmp']

    def now(self):
        """Return the current time and date as a datetime."""
        return datetime.now(self.timezone)

    def on_retry(self, exc, task_id, args, kwargs, einfo):
        """
        Retry handler
        """
        logger.warning(f"retried task {task_id}")

    def after_return(self, status, retval, task_id, args, kwargs, einfo):
        """
        Handler called after the task returns

        Successful runs are logged at debug level, everything else as a
        warning so failures stand out in the celery log.
        """
        if status == "SUCCESS":
            logger.debug(f"{task_id} ended with status :{status} and return {retval}")
        else:
            logger.warning(f"{task_id} ended with status :{status} and return {retval}")

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """
        Error handler
        """
        logger.error(f'{task_id} failed: {exc} with {einfo}')
@worker_ready.connect
def setup_DB(sender, **kwargs):
    """
    Initial setup

    Runs when a worker becomes ready; only the master node (hostname
    containing "crusoe") purges the queue and initializes the neo4j schema.
    """
    if "crusoe" in sender.hostname:
        logger.info("Master is running initial setup")
        # Clean up all waiting task
        APP.control.purge()
        # Setup initial structure of DB
        neo4j_pass = parse_config('neo4j')['password']
        neo4j_addr = parse_config('neo4j')['address']
        AbstractClient(bolt=neo4j_addr, password=neo4j_pass).init_db()
        # send waiting task to remote scan
        # APP.send_task("remote.scan_init", queue='remote')
    else:
        logger.info("Only master can create initial setup")
@task_prerun.connect
def per_task_setup(sender, task_id, task, args, kwargs, **_kwargs):
    """
    Before task start handler

    Celery signal hook: logs every task right before its body executes.
    """
    logger.info(f"started task {task.name}")
# TASKS #
@APP.task(bind=True, base=BaseTask)
def rtir_connector(self):
    """
    RTIR connector task

    Pulls tickets from RT/RTIR into a JSON file under the neo4j import
    directory, then loads that file into neo4j.
    """
    rtir_result = rtir.parse_rt(user=self.rtir_conf['user'],
                                password=self.rtir_conf['password'],
                                output=f"{self.neo4j_import}{self.rtir_conf['file']}",
                                logger=celery_logger.get_logger("RTIR", f"{self.log_path}rtir_connector.log"))
    neo4jclient = RTIRClient(bolt=self.neo4j_addr, password=self.neo4j_passwd)
    # Retried via realize_transaction to survive concurrent neo4j writes.
    realize_transaction(neo4jclient.create_rtir_part, f"{self.rtir_conf['file']}")
    return rtir_result
@APP.task(bind=True, base=BaseTask)
def scan_init(self):
    """Fan out topology and vertical scan tasks for every monitored /24 subnet."""
    # Selected third octets of the 147.251.0.0/16 range.
    third_octets = [*range(42), *range(55, 58), *range(59, 106), *range(107, 256)]
    for octet in third_octets:
        subnet = f"147.251.{octet}.0/24"
        APP.send_task("remote.topology_scan", queue='crusoe', kwargs = {"subnet" : subnet})
        APP.send_task("remote.vertical_scan", queue='crusoe', kwargs = {"subnet" : subnet})
@APP.task(bind=True, base=BaseTask, soft_time_limit=86400)
def topology_scan(self, subnet):
    """Topology-scan *subnet*, store the result in neo4j, re-enqueue itself."""
    # Re-enqueue first so the subnet keeps being scanned periodically.
    APP.send_task("remote.topology_scan", queue='crusoe', kwargs = {"subnet" : subnet})
    scan_result, _raw_result = topology_scan_neo([subnet])
    neo_client = NmapClient(bolt=self.neo4j_addr, password=self.neo4j_passwd)
    neo_client.create_topology(json.dumps(scan_result))
    return len(scan_result)
@APP.task(bind=True, base=BaseTask, soft_time_limit=86400)
def vertical_scan(self, subnet):
    """Version-scan *subnet* with nmap, store CPEs in neo4j, re-enqueue itself."""
    # Re-enqueue first so the subnet keeps being scanned periodically.
    APP.send_task("remote.vertical_scan", queue='crusoe', kwargs = {"subnet" : subnet})
    scan_result = scan([subnet], '-sV -T4 -F')
    neo_client = NmapClient(bolt=self.neo4j_addr, password=self.neo4j_passwd)
    neo_client.create_vertical_scan_cpe(json.dumps(scan_result))
    return len(scan_result)
@APP.task(bind=True, base=BaseTask, soft_time_limit=86400)
def shadowserver(self):
    """
    Shadowserver task

    Downloads vulnerability reports from Shadowserver into the neo4j
    import directory and loads them into neo4j.
    """
    stats = Shadowserver.process_vulnerabilities(self.tmp_path, self.shadowserver_conf['user'],
                                                 self.shadowserver_conf['password'], self.neo4j_import,
                                                 self.shadowserver_conf['json_name'],
                                                 celery_logger.get_logger("Shadowserver",
                                                                          f"{self.log_path}vulnerability_component_shadowserver.log"))
    vuln_client = VulnerabilityCompClient(bolt=self.neo4j_addr, password=self.neo4j_passwd)
    # Retried via realize_transaction to survive concurrent neo4j writes.
    realize_transaction(vuln_client.create_shadowserver_part, f"{self.shadowserver_conf['json_name']}")
    return stats
@APP.task(bind=True, base=BaseTask)
def shodan(self):
    """
    Shodan task

    Pulls vulnerability data for the configured subnets from Shodan and
    loads the resulting JSON into neo4j.
    """
    stats = Shodan.process_vulnerabilities(self.shodan_conf['config_file'],
                                           self.shodan_conf['api_key'],
                                           self.shodan_conf['subnets'],
                                           self.neo4j_import,
                                           self.shodan_conf['json_name'],
                                           celery_logger.get_logger("Shodan",
                                                                    f"{self.log_path}vulnerability_component_shodan.log"))
    vuln_client = VulnerabilityCompClient(bolt=self.neo4j_addr, password=self.neo4j_passwd)
    # NOTE(review): this reuses create_shadowserver_part for Shodan data —
    # presumably both feeds share one import shape; confirm this is intended
    # and not a copy-paste slip.
    realize_transaction(vuln_client.create_shadowserver_part,
                        f"{self.shodan_conf['json_name']}")
    return stats
@APP.task(bind=True, base=BaseTask)
def cleaner(self):
    """
    Neo4j cleaner task

    Prunes stale security events, domain resolutions, topology links and
    service/software relationships, and reports what was removed.
    """
    client = Cleaner(bolt=self.neo4j_addr, password=self.neo4j_passwd)
    deleted_sec_events = client.clean_security_event()
    deleted_resolves_to = client.clean_old_domains()
    deleted_connected_to = client.clean_topology()
    deleted_net_services = client.clean_network_services()
    deleted_soft_versions = client.clean_software_versions()
    return (
        f'Successfully deleted {deleted_sec_events} security event nodes, '
        f'{deleted_resolves_to} :RELATES_TO relationships, '
        f'{deleted_connected_to} :IS_CONNECTED_TO relationships, '
        f'{deleted_net_services + deleted_soft_versions} :ON relationships'
    )
@APP.task(bind=True, base=BaseTask)
def sabu(self):
    """
    Sabu task

    Parses SABU/Warden security-event JSON files from the configured
    directory and loads them into neo4j.
    """
    return JsonParsing.parse(self.sabu_conf['directory'],
                             self.neo4j_passwd,
                             self.sabu_conf['regex'],
                             self.sabu_conf['path'],
                             self.neo4j_import,
                             celery_logger.get_logger("SABU", f"{self.log_path}sabu.log"))
@APP.task(bind=True, base=BaseTask)
def netlist(self):
    """
    Netlist task

    Refreshes the subnet inventory in neo4j from the exported subnet file.
    """
    connector = NETlist(self.neo4j_passwd, f"/var/lib/neo4j/import/subnets",
                        celery_logger.get_logger("NETlist", f"{self.log_path}netlist.log"))
    return connector.update()
@APP.task(bind=True, base=BaseTask, soft_time_limit=14400, max_retries=5)
def nvd_CVEs(self):
    """
    Adds or updates all CVEs which were modified the current day

    Retries with exponential back-off (4**retries seconds) on failure.
    :return: result of move_cve_data_to_neo4j
    """
    cve_data = f"{self.tmp_path}{self.cve_connector_conf['tmp_cve_subdir']}"
    specified_time = (datetime.now() - timedelta(days=1)).isoformat()
    try:
        return move_cve_data_to_neo4j(specified_time, self.neo4j_passwd, cve_data,
                                      celery_logger.get_logger("CVE", f"{self.log_path}cve.log"))
    # BUG FIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # catch Exception only.
    except Exception:
        logger.error(f"Unexpected error: {sys.exc_info()[0]}")
        raise self.retry(countdown=4 ** self.request.retries)
@APP.task(bind=True, base=BaseTask, soft_time_limit=14400, max_retries=5)
def vendor_CVEs(self):
    """
    Adds or updates all vendor CVEs

    Retries with exponential back-off (4**retries seconds) on failure.
    """
    ms_directory = f"{self.tmp_path}{self.cve_connector_conf['tmp_ms_subdir']}"
    try:
        return add_vendor_cves(ms_directory, self.neo4j_passwd, celery_logger.get_logger("CVE", f"{self.log_path}cve.log"))
    # BUG FIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # catch Exception only.
    except Exception:
        logger.error(f"Unexpected error: {sys.exc_info()[0]}")
        raise self.retry(countdown=4 ** self.request.retries)
# FLOWS #
@APP.task(bind=True, base=BaseTask)
def OS_parse(self, args):
    """
    Parse OS for endpoint devices by outcoming flows

    :param args: chained result from the flowmon task; args[1] is the path
        to the downloaded flow file
    :return: component result plus parse/commit timing
    """
    t1 = time()
    result = run.parse(args[1],
                       f"{self.neo4j_import}{self.os_conf['file']}",
                       config=self.os_conf,
                       logger=celery_logger.get_logger("OS", f"{self.log_path}os_detection.log"))
    t2 = time()
    logger.info('Commit to database ...')
    neo4jclient = OSClient(bolt=self.neo4j_addr, password=self.neo4j_passwd)
    # Retried via realize_transaction to survive concurrent neo4j writes.
    realize_transaction(neo4jclient.upload_os_from_file, f"{self.os_conf['file']}")
    t3 = time()
    return f'{result} Measurement: python = {t2 - t1} neo = {t3 - t2}'
@APP.task(bind=True, base=BaseTask, soft_time_limit=600)
def service(self, args):
    """
    Parse services by outcoming flows

    :param args: chained result from the flowmon task; args[1] is the path
        to the downloaded flow file
    :return: component result plus parse/commit timing
    """
    t1 = time()
    result = services_component.run(args[1],
                                    f"{self.neo4j_import}{self.service_conf['output']}",
                                    config=self.service_conf,
                                    logger=celery_logger.get_logger("SERVICES", f"{self.log_path}services_component.log"))
    logger.info("Commiting services to database ...")
    t2 = time()
    neo4jclient = ServicesClient(bolt=self.neo4j_addr, password=self.neo4j_passwd)
    # Retried via realize_transaction to survive concurrent neo4j writes.
    realize_transaction(neo4jclient.create_service_component, f"{self.service_conf['output']}")
    t3 = time()
    return f'{result} Measurement: python = {t2 - t1} neo = {t3 - t2}'
@APP.task(bind=True, base=BaseTask, ignore_result=True)
def check_certs(self):
    """
    Webchecker task

    Checks TLS certificates of all known IP/domain pairs from neo4j,
    writes the issues to a JSON file and uploads them back to neo4j.
    """
    t1 = time()
    wc = Webchecker(self.webchecker_conf, celery_logger.get_logger("WEBCHECKER", f"{self.log_path}webchecker.log"))
    t2 = time()
    db = WebCheckerClient(bolt=self.neo4j_addr, password=self.neo4j_passwd)
    # Rows come back as one-element records holding IP and Domain fields.
    hosts = list(map(lambda x: (x[0]["IP"], x[0]["Domain"]), db.get_ip_and_domain_names()))
    t3 = time()
    result = wc.run_certs(hosts)
    with open(f"{self.neo4j_import}{self.webchecker_conf['cert_file']}", "w") as cert_file:
        json.dump(result, cert_file)
    t4 = time()
    db.upload_cert_errors(f"{self.webchecker_conf['cert_file']}")
    t5 = time()
    return f"{len(result['data'])} certificate issues found. Measurement: python = {(t2 - t1) + (t4 - t3)} neo = {(t3 - t2) + (t5 - t4)}"
@APP.task(bind=True, base=BaseTask, ignore_result=True)
def detect_domains(self, args):
    """Parse domains by incoming flows.

    :param args: chained result from the flowmon task; args[1] is the path
        to the downloaded flow file
    :return: number of detected domains plus parse/commit timing
    """
    t1 = time()
    # CONSISTENCY FIX: use the configured log directory (as check_certs does)
    # instead of a hard-coded "/var/log/crusoe/" path.
    wc = Webchecker(self.webchecker_conf, celery_logger.get_logger("WEBCHECKER", f"{self.log_path}webchecker.log"))
    result = wc.run_detect(args[1])
    # Renamed the handle (was misleadingly `cert_file`) — this is the
    # detected-domains export, not certificate data.
    with open(f"{self.neo4j_import}{self.webchecker_conf['domain_file']}", "w") as domain_file:
        json.dump(result, domain_file)
    t2 = time()
    db = WebCheckerClient(bolt=self.neo4j_addr, password=self.neo4j_passwd)
    db.upload_hostnames(f"{self.webchecker_conf['domain_file']}")
    t3 = time()
    return f"{len(result['data'])} domains detected. Measurement: python = {t2 - t1} neo = {t3 - t2}"
@APP.task(bind=True, base=BaseTask, ignore_result=True)
def compute_criticality(self):
    """
    Criticality estimator task

    Runs the graph-based criticality estimation over the neo4j data.
    """
    estimator = CriticalityEstimator(
        bolt=self.neo4j_addr,
        password=self.neo4j_passwd,
        logger=celery_logger.get_logger("CRITICALITY", f"{self.log_path}criticality.log"),
    )
    return estimator.run()
@APP.task(bind=True, max_retries=6, ignore_result=True, base=BaseTask)
def flowmon(self, param_prefix):
    """
    Download flows

    :param param_prefix: config-key prefix ("in_"/"out_") selecting the
        filter/format/paths used for this download
    :return: (summary string, local path of the downloaded flow file)
    :raises ComponentException: when retries are exhausted
    """
    try:
        path, counter = flowmon_connector.download_ssh(
            user=self.flowmon_conf['user'],
            password=self.crusoe_conf['passphrase'],
            key_path=self.crusoe_conf['key_path'],
            hostname=self.flowmon_conf['hostname'],
            nfdump_path=self.flowmon_conf['nfdump_path'],
            dir_param=self.flowmon_conf['dir_param'],
            collectors=json.loads(self.flowmon_conf['collectors']),
            aggregate=self.flowmon_conf[f'{param_prefix}aggregate'],
            flow_filter=self.flowmon_conf[f'{param_prefix}filter'],
            output_format=self.flowmon_conf[f'{param_prefix}format'],
            remote_file_path=self.flowmon_conf[f'{param_prefix}remote_file_path'],
            local_tmp_path=f"{self.flowmon_conf['tmp_dir']}{self.flowmon_conf[param_prefix + 'local_file_prefix']}",
            logger=celery_logger.get_logger("flowmon", f"{self.log_path}flowmon.log"))
    # BUG FIX: the original wrapped the empty-result check inside the try
    # under a bare `except:`, which caught and re-labelled the very
    # ComponentException/Retry it had just raised. The check is now outside
    # the try, and only genuine download errors hit the except branch.
    except Exception:
        logger.error(f"Unexpected error: {sys.exc_info()[0]}")
        if self.request.retries == self.max_retries:
            raise ComponentException(f"Flowmon: unexpected error: {sys.exc_info()[0]}")
        raise self.retry(countdown=2 ** self.request.retries)
    if counter == 0:
        logger.error("Missing flow data")
        if self.request.retries == self.max_retries:
            raise ComponentException("Flowmon: missing flow data")
        raise self.retry(countdown=2 ** self.request.retries)
    return f'Processed scan flag: {param_prefix[:-1]}, number of flows: {counter}', path
@APP.task(bind=True, base=BaseTask, ignore_result=True)
def flowmon_chain(self):
    """
    Download flows, parse os, services and domains.

    Launches two celery chains: the "out_" flow download feeds the OS and
    service parsers (run as a group), the "in_" flow download feeds domain
    detection. Afterwards prunes stale files from the flowmon tmp directory.
    :return: human-readable summary of the processed time window
    """
    # NOTE(review): presumably staggers this task against other scheduled
    # work -- confirm why exactly 5 seconds.
    sleep(5)
    out_group = group(OS_parse.s(), service.s())
    out_flow_chain = (flowmon.s(param_prefix="out_") | out_group)
    in_flow_chain = (flowmon.s(param_prefix="in_") | detect_domains.s())
    # Fire both chains asynchronously; this task does not wait for them.
    out_flow_chain()
    in_flow_chain()
    logger.info(f'remove {self.flowmon_conf["tmp_dir"]} directory')
    now = time()
    # Prune temp files: anything with '~' in the name or older than one hour.
    for f in os.listdir(os.path.join(self.flowmon_conf["tmp_dir"])):
        file_path = os.path.join(self.flowmon_conf["tmp_dir"], f)
        if "~" in file_path or os.stat(file_path).st_mtime < now - 3600:
            os.remove(file_path)
    # Report the 5-minute window ending now as the processed interval.
    start_time = datetime.now() - timedelta(minutes=5)
    end_time = start_time + timedelta(minutes=5)
    start_time = start_time.astimezone().isoformat()
    end_time = end_time.astimezone().isoformat()
    return f'Processing flows between: {start_time} and {end_time}, active components: domains, OS and services'
@APP.task(bind=True, base=BaseTask, ignore_result=True)
def cms_scan(self):
    """
    Scan known domains for CMS fingerprints with whatweb and import results.

    Hosts whose IP falls into one of the blacklisted 147.251.x.0/24
    networks are skipped.
    :return: summary message
    """
    # Excluded networks: 147.251.42-54, .58 and .106 (/24 each).
    blacklist = list(range(42, 55)) + [58, 106]
    blacklist = [IPv4Network(f"147.251.{x}.0/24") for x in blacklist]
    client = CMSClient(bolt=self.neo4j_addr, password=self.neo4j_passwd)
    res = client.get_ips_and_domain_names().value()
    stats = 0
    with open(self.cms_conf["tmp"], "w") as out:
        for host in res:
            # Generator instead of a materialized list inside any().
            if any(IPv4Address(host["IP"]) in net for net in blacklist):
                continue
            out.write(f"{host['Domain']}\n")
            stats += 1
    result = cmsscanner.run(whatweb_path=self.cms_conf["whatweb_path"],
                            hosts=self.cms_conf["tmp"],
                            extra_params=self.cms_conf["params"],
                            out_path=f"{self.neo4j_import}{self.cms_conf['json_name']}",
                            logger=celery_logger.get_logger("CMS", f"{self.log_path}cms.log"))
    if result:
        client.create_cms_component(f"{self.cms_conf['json_name']}")
        return f"Scanned {stats} domains"
    # Dropped the needless f-prefix (no placeholders); text unchanged.
    return "Empty result JSON, nothing to parse. Exiting..."
|
from .types import *
import msgpack
# Types that get a custom msgpack extension encoding. The index of a type in
# this list is its msgpack ExtType code, so the ORDER must stay stable across
# all peers exchanging messages.
xtypes = [tuple, set, PHASE0, PHASE1LOCK, PHASE2ACK, RELEASE3, BLSDECISION, BLSACCEPTABLE, BLSLOCK, BLSACK, BLSASK, BLSPUT]
# Reverse lookup: type -> extension code.
xmap = dict((k, i) for i, k in enumerate(xtypes))
def ext_pack(x):
    """msgpack `default` hook: encode registered types as ExtType payloads."""
    code = xmap.get(type(x))
    if code is None:
        # Not a registered type: let msgpack handle it natively.
        return x
    payload = msgpack.packb(list(x), default=ext_pack, strict_types=True)
    return msgpack.ExtType(code, payload)
def ext_unpack(code, data):
    """msgpack `ext_hook`: decode an ExtType payload produced by ext_pack.

    :param code: extension type code (index into xtypes)
    :param data: packed payload holding the object's fields as a list
    """
    fields = msgpack.unpackb(data, ext_hook=ext_unpack)
    xt = xtypes[code]
    # Identity comparison for type objects instead of `==`.
    if xt is tuple:
        return tuple(fields)
    if xt is set:
        return set(fields)
    # Named-tuple message types are rebuilt from their field list.
    # (Removed the unreachable trailing `return msgpack.ExtType(code, data)` --
    # every branch above already returns.)
    return xt._make(fields)
def pack(struct):
    """Serialize *struct* to msgpack bytes, encoding registered types via ext_pack."""
    return msgpack.packb(struct, default=ext_pack, strict_types=True)
def unpack(data):
    """Deserialize msgpack bytes produced by pack(), decoding ExtTypes via ext_unpack."""
    return msgpack.unpackb(data, ext_hook=ext_unpack)
def test_True():
    """Sanity check that the test harness itself runs."""
    assert True
def test_simplest():
    """Round-trip a single PHASE0 message through pack/unpack."""
    from .statemachine import dls_state_machine
    data0 = PHASE0(dls_state_machine.PHASE0, ("hello0",), 0, 0, None)
    bin0 = pack(data0)
    data1 = unpack(bin0)
    assert data0 == data1
def test_list():
    """Round-trip a list mixing a message, an empty tuple and an empty list."""
    from .statemachine import dls_state_machine
    p0 = PHASE0(dls_state_machine.PHASE0, ("hello0",), 0, 0, None)
    data0 = [ p0, (), [] ]
    bin0 = pack(data0)
    data1 = unpack(bin0)
    assert data0 == data1
def test_set():
    """Round-trip a list containing an int, a set and an empty dict."""
    data0 = [1, set([1, 2, 3 ]), {}]
    bin0 = pack(data0)
    data1 = unpack(bin0)
    assert data0 == data1
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 30 21:55:18 2018
@author: Prosimios
"""
import pickle as pkl
def save_obj(obj, name, folder ):
    """Pickle *obj* to <folder>/<name>.pkl using the highest pickle protocol."""
    path = folder + '/' + name + '.pkl'
    with open(path, 'wb') as handle:
        pkl.dump(obj, handle, pkl.HIGHEST_PROTOCOL)
def load_obj(name, folder ):
    """Load and return a pickled object from <folder>/<name>.pkl."""
    path = folder + '/' + name + '.pkl'
    with open(path, 'rb') as handle:
        return pkl.load(handle)
# Batch driver: run the cellular-growth CA once per growth-rate ratio and
# collect the resulting ratio curves.
# Growth-rate ratios passed to the simulation as its 4th CLI argument.
GRR = ['1:1','20:19','10:9','8:7','4:3','3:2','2:1']
all_sims = []
filename_path = 'D:/Dropbox/Dia dia/Sistemas complejos IIQ3763/Isaac/Semestral project/Codes/Cellular_Growth_CA/celular_growth_ca.py'
work_dir = 'D:/Dropbox/Dia dia/Sistemas complejos IIQ3763/Isaac/Semestral project/Codes/Cellular_Growth_CA'
for i in range(len(GRR)):
    # args: width height iterations growth-rate-ratio
    argums = '150 100 15000 ' + GRR[i]
    # NOTE(review): runfile() is provided by the Spyder/IPython console, not
    # by Python itself -- this script only runs inside that IDE.
    runfile(filename_path, args = argums , wdir= work_dir)
    # Each simulation run is expected to pickle its 'ratios' result into
    # the 'data' folder, which we reload here.
    ratio_values = load_obj('ratios', 'data' )
    all_sims.append(ratio_values)
save_obj(all_sims, 'all_ratios', 'data')
|
"""File to handle player related functions."""
from playx.utility import direct_to_play
from playx.cache import search_locally, update_URL_cache, search_URL
from playx.youtube import grab_link, dw, get_youtube_title
from playx.songfinder import search
from playx.logger import Logger
from playx.stringutils import is_song_url, url_type
from playx.soundcloud import get_track_info
from playx.playlist.ytrelated import get_data
from playx.playlist import playlistcache
from os.path import basename
# Setup logger
logger = Logger("player")
class URLPlayer:
    """
    Play a single song given its URL.

    Currently support for soundcloud and youtube URL's are added.
    Resolves the URL (or a pre-extracted songObj) to a stream URL,
    optionally checking the local cache first, then hands the stream to
    the system player via direct_to_play.
    """
    def __init__(
        self,
        URL=None,
        songObj=None,
        dont_cache_search=False,
        show_lyrics=False,
        no_cache=False,
    ):
        # Source URL of the song; may be filled in later by play_url().
        self.URL = URL
        # Resolved stream URL, or a local file path when the song is cached.
        self.stream_url = ""
        # Human-readable song title, filled in during extraction.
        self.title = ""
        # URL provider, e.g. "youtube", "ytmusic", "soundcloud" (see url_type).
        self.URL_type = url_type(self.URL) if self.URL is not None else None
        # Optional pre-extracted song object (e.g. from a playlist extractor).
        self.songObj = songObj
        # When True, skip searching the local cache before streaming.
        self.dont_cache_search = dont_cache_search
        self.show_lyrics = show_lyrics
        # When True, never download/cache the song locally.
        self.no_cache = no_cache
    def _dw(self):
        """
        Add the song to download (cache) unless caching is disabled.
        """
        if not self.no_cache:
            dw(self.title, self.stream_url, self.URL)
        else:
            logger.info("Caching is disabled")
    def _get_soundcloud_data(self):
        """
        Extract the title and stream URL for the soundcloud track.
        """
        self.title, self.stream_url = get_track_info(self.URL)
    def _get_youtube_data_url(self):
        """
        Search youtube and get the title and stream URL for self.URL.
        """
        # Need to put a check because in some cases the URL is already passed
        # by the playlist extractor.
        if self.title == "":
            self.title = get_youtube_title(self.URL)
        self.stream_url = grab_link(self.URL)
    def _extract_data(self):
        """
        Extract the song data according to the detected URL type.
        """
        if self.URL_type == "youtube" or self.URL_type == "ytmusic":
            self._get_youtube_data_url()
        elif self.URL_type == "soundcloud":
            self._get_soundcloud_data()
    def _extract_songObj(self):
        """
        Extract title/URL/stream data from the pre-built songObj.
        """
        if self.URL_type == "youtube":
            self.title = self.songObj.title
            self.URL = self.songObj.URL
            self._get_youtube_data_url()
        elif self.URL_type == "soundcloud":
            self.title = self.songObj.title
            # For soundcloud the songObj's URL is already streamable.
            self.stream_url = self.songObj.URL
    def _stream_from_url(self):
        """Stream the song using the url.
        Before searching the stream, get the title of the song.
        If local search is not forbidden, search it locally and prefer the
        cached file over the remote stream.
        """
        if self.songObj is None:
            self._extract_data()
        else:
            self._extract_songObj()
        logger.debug(self.title)
        # Now search the song locally
        if not self.dont_cache_search:
            match = search_locally(self.title)
            if match:
                # Update the URL cache. This is necessary for the old songs.
                update_URL_cache(self.title, self.URL)
                # Change the value to local path
                self.stream_url = match[1]
            else:
                # Not cached yet: schedule a download for next time.
                self._dw()
        else:
            logger.info("Searching locally disabled.")
        # Fallback: extraction above may have produced no stream URL.
        if self.stream_url == "":
            self._get_youtube_data_url()
        direct_to_play(self.stream_url, self.show_lyrics, self.title)
    def play_url(self, URL, songObj=None):
        """
        Play the song by using the URL.

        Returns the URL so the caller can e.g. queue related songs.
        """
        self.URL = URL
        # Make a search locally to see if the song is already cached.
        if not self.dont_cache_search:
            song_path = search_URL(self.URL)
            if song_path is not None:
                self.stream_url = song_path
                self.title = basename(song_path)
                direct_to_play(song_path, self.show_lyrics, self.title)
                return self.URL
        self.URL_type = url_type(self.URL)
        logger.debug("Detected URL type as: {}".format(self.URL_type))
        # Disable keywords if the url type is ytmusic
        if self.URL_type == "ytmusic":
            # NOTE(review): disable_kw is not initialised in URLPlayer.__init__;
            # it only exists via NamePlayer when used through the Player
            # subclass -- confirm standalone URLPlayer usage.
            self.disable_kw = True
        if songObj is not None:
            self.songObj = songObj
        self._stream_from_url()
        return self.URL
class NamePlayer:
    """
    Player to particularly play songs by name.

    Searches the local cache first, then youtube, and streams the result
    via direct_to_play.
    """
    def __init__(
        self,
        name=None,
        dont_cache_search=False,
        show_lyrics=False,
        no_cache=False,
        disable_kw=False,
    ):
        # Free-text song name to search for.
        self.name = name
        # Resolved source URL (filled by the youtube search).
        self.URL = None
        # When True, skip the local cache lookup.
        self.dont_cache_search = dont_cache_search
        # When True, never download/cache the song locally.
        self.no_cache = no_cache
        self.show_lyrics = show_lyrics
        self.title = ""
        # Stream URL or local file path once resolved.
        self.stream_url = ""
        # Passed through to the search to disable keyword filtering.
        self.disable_kw = disable_kw
    def _get_youtube_data_name(self):
        """
        Search youtube by name and populate title/URL/stream_url.

        Returns False when the song was not found (caller should skip it).
        """
        data = search(self.name, self.disable_kw)
        # Handle if the data returned is None
        # That probably means the song wasn't found on YT
        # and we need to skip playing that song.
        if data is None:
            return False
        self.title = data.title
        self.URL = data.url
        self.stream_url = grab_link(data.url)
        return True
    def _stream_from_name(self):
        """Start streaming the song.
        First search in the local cache.
        If no song is found in the cache, search in the youtube.
        """
        # Need to check if searching locally is forbidden
        if not self.dont_cache_search:
            match = search_locally(self.name)
            if match:
                self.title = match[0]
                self.stream_url = match[1]
            else:
                if not self._get_youtube_data_name():
                    return
                local_path = search_URL(self.URL)
                # Try to check if the URL is mapped locally.
                if local_path is not None:
                    logger.debug("Replacing the stream URL with the local.")
                    self.stream_url = local_path
                else:
                    # NOTE(review): _dw is defined on URLPlayer, not here --
                    # this only resolves through Player's MRO; a standalone
                    # NamePlayer would raise AttributeError.
                    self._dw()
                # Update the URL cache
                update_URL_cache(self.title, self.URL)
        else:
            self._get_youtube_data_name()
        direct_to_play(self.stream_url, self.show_lyrics, self.title)
    def play_name(self, name):
        """
        Start playing the song by name; returns the resolved URL (or None).
        """
        self.name = name
        self._stream_from_name()
        return self.URL
class Player(URLPlayer, NamePlayer):
    """
    Base class to play songs.
    Player will take different types of data,
    recognize them and play accordingly.
    Supported data types would be:
    Playlist
    URL
    Songname
    """
    def __init__(
        self,
        data,
        on_repeat,
        datatype=None,
        playlisttype=None,
        show_lyrics=False,
        dont_cache_search=False,
        no_cache=False,
        no_related=False,
        disable_kw=False,
    ):
        """
        data can be anything of the above supported
        types.
        If playlist then it is iterated over,
        if it is some other type then its simply
        sent to be played according to the player.
        datatype supports the following types:
            - playlist
            - song
            - URL

        on_repeat: 1 = play once, None = repeat "indefinitely" (5000 times),
        any other int = repeat that many times.
        """
        URLPlayer.__init__(
            self,
            show_lyrics=show_lyrics,
            dont_cache_search=dont_cache_search,
            no_cache=no_cache,
        )
        NamePlayer.__init__(
            self,
            show_lyrics=show_lyrics,
            dont_cache_search=dont_cache_search,
            no_cache=no_cache,
            disable_kw=disable_kw,
        )
        # Items to iterate over when the data is a playlist.
        self._iterable_list = []
        self.data = data
        self.datatype = datatype
        self.playlisttype = playlisttype
        # When True, do not queue related songs after playback.
        self.no_related = no_related
        self.on_repeat = on_repeat
        # Playlist providers this player knows how to iterate.
        self._playlist_names = [
            "spotify",
            "youtube",
            "soundcloud",
            "billboard",
            "jiosaavn",
            "gaana",
            "cached",
            "youtubemusic",
        ]
        self._datatypes = ["playlist", "song", "URL"]
        self.show_lyrics = show_lyrics
        self.dont_cache_search = dont_cache_search
        self.no_cache = no_cache
    def _determine_datatype(self):
        """Determine the datatype of the passed data."""
        if is_song_url(self.data):
            self.datatype = "URL"
        else:
            self.datatype = "song"
    def _play_related(self, url):
        """
        Play related songs (unless disabled via no_related).
        """
        if self.no_related:
            return
        # Check if URL is not path
        logger.debug(url)
        if url != "":
            related_songs = get_data(url)
        else:
            return
        if len(related_songs) != 0:
            logger.info("Playing related songs")
            for i in related_songs:
                self.play_name(i.search_query)
    def _check_type(self):
        """Check the type of the data and set self.datatype accordingly."""
        if self.playlisttype is not None:
            logger.debug(self.playlisttype)
            logger.hold()
            if self.playlisttype.lower() not in self._playlist_names:
                logger.critical("Passed playlist is not supported yet")
            else:
                self.datatype = "playlist"
                self._iterable_list = self.data
        elif self.datatype is not None:
            if self.datatype not in self._datatypes:
                logger.warning("Datatype of playlist not within supported ones.")
        else:
            # No hint given: sniff URL vs song name from the data itself.
            self._determine_datatype()
    def _get_repeat_times(self):
        """Return the number of times the song is supposed to repeat."""
        # The passed arg on_repeat is used to check that.
        # The arg passes 1 in case the --repeat flag is not passed
        # which means we simply need to loop for once.
        # The arg passes None in case the --repeat flag is passed but
        # without a value. In this case, we need to make sure the song goes
        # on an infinite loop. Though, in our case, we will make the loop run
        # for a really large value like 1000
        # The arg passes the number of times the loop is supposed the run in
        # case the value is passed by the user.
        if self.on_repeat == 1:
            return 1
        elif self.on_repeat is None:
            logger.info("Repeating indefinitely")
            return 5000
        else:
            logger.info(
                "Repeating {} {}".format(
                    self.on_repeat, "time" if self.on_repeat == 1 else "times"
                )
            )
            return self.on_repeat
    def play(self):
        """Play the data, honouring repeat count, then queue related songs."""
        self._check_type()
        # Stored the returned URL, useful for playing related songs.
        URL = None
        on_repeat_time = self._get_repeat_times()
        while on_repeat_time > 0:
            try:
                if self.datatype == "URL":
                    URL = self.play_url(self.data)
                elif self.datatype == "song":
                    URL = self.play_name(self.data)
                elif self.datatype == "playlist":
                    for i in self._iterable_list:
                        # For different playlists the player needs to act
                        # differently
                        if self.playlisttype == "soundcloud":
                            self.play_url(i.URL, i)
                        elif self.playlisttype == "youtube":
                            self.play_url(i.search_query, i)
                        else:
                            self.play_name(i.search_query)
                on_repeat_time -= 1
            except KeyboardInterrupt:
                # Ctrl-C ends the repeat loop instead of killing the process.
                on_repeat_time = -1
                logger.info("Exitting peacefully")
        if URL is not None:
            self._play_related(URL)
|
# JSON-schema + form layout + default model for the OpenStack instance
# blueprint configuration.
CONFIG = {
    'schema': {
        'type': 'object',
        'title': 'Comment',
        'description': 'Description',
        'required': [
            'name',
        ],
        'properties': {
            'name': {
                'type': 'string'
            },
            'description': {
                'type': 'string'
            },
            'flavor': {
                'type': 'string',
                'title': 'Flavor',
                # enum is populated at runtime with the available flavors.
                'enum': [
                ]
            },
            'image': {
                'type': 'string',
                'title': 'Image',
                # enum is populated at runtime with the available images.
                'enum': [
                ]
            },
            'maximum_lifetime': {
                'type': 'string',
                'title': 'Maximum life-time (days hours mins)',
                'default': '1h 0m',
                # Raw string: the previous plain literal relied on '\d'/'\s'
                # being passed through unchanged, which raises an
                # invalid-escape-sequence warning on modern Python.
                'pattern': r'^(\d+d\s?)?(\d{1,2}h\s?)?(\d{1,2}m\s?)?$',
                'validationMessage': 'Value should be in format [days]d [hours]h [minutes]m'
            },
            'maximum_instances_per_user': {
                'type': 'integer',
                'title': 'Maximum instances per user',
                'default': 1,
            },
            'preallocated_credits': {
                'type': 'boolean',
                'title': 'Pre-allocate credits for the instance from the user quota',
                'default': False,
            },
            'cost_multiplier': {
                'type': 'number',
                'title': 'Cost multiplier (default 1.0)',
                'default': 1.0,
            },
            'userdata': {
                'type': 'text',
                'title': 'Customization script for instance after it is launched',
                'default': '',
            },
            'firewall_rules': {
                'type': 'array',
                'title': 'Frontend firewall rules',
                'items': {
                    'type': 'string',
                    'title': 'Rules',
                }
            },
            'allow_update_client_connectivity': {
                'type': 'boolean',
                'title': "Allow user to request instance firewall to allow access to user's IP address",
                'default': False,
            },
            'needs_ssh_keys': {
                'type': 'boolean',
                'title': "Needs ssh-keys to access",
                'default': True,
            },
        }
    },
    # Order and grouping of the fields as rendered in the UI form.
    'form': [
        {
            'type': 'help',
            'helpvalue': '<h4>OpenStack Instance config</h4>'
        },
        'name',
        'description',
        'flavor',
        'image',
        'maximum_lifetime',
        'maximum_instances_per_user',
        'preallocated_credits',
        'cost_multiplier',
        {
            'key': 'userdata',
            'type': 'textarea',
            'title': 'Customization script for instance after it is launched'
        },
        'firewall_rules',
        'allow_update_client_connectivity',
    ],
    # Default model values for a newly created blueprint.
    'model': {
        'name': 'os-machine',
        'description': 'openstack blueprint',
        'flavor': 'mini',
        'image': 'Ubuntu-14.04',
        'firewall_rules': ['tcp 22 22 192.168.1.0/24'],
        'needs_ssh_keys': True,
    }
}
|
import pandas as pd
import matplotlib.pyplot as plt
# Load trace-element data from the supplementary sheet of the Smith et al.
# glass dataset (file must be in the working directory).
my_dataset1 = pd.read_excel('Smith_glass_post_NYT_data.xlsx', sheet_name='Supp_traces')
x = my_dataset1.Zr
y = my_dataset1.Th
# Side-by-side comparison of the same Zr vs Th data drawn with scatter()
# (left) and with plot() using marker-only style (right).
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.scatter(x, y, marker='s', color='#ff464a', edgecolor='#000000')
ax1.set_title("using scatter()")
ax1.set_xlabel("Zr [ppm]")
ax1.set_ylabel("Th [ppm]")
ax2 = fig.add_subplot(1, 2, 2)
# linestyle='' suppresses the connecting line so only markers are drawn.
ax2.plot(x, y, marker='s', linestyle='', color='#ff464a', markeredgecolor='#000000')
ax2.set_title("using plot()")
ax2.set_xlabel("Zr [ppm]")
ax2.set_ylabel("Th [ppm]")
fig.tight_layout()
|
import nextcord
from nextcord.ext import commands
from nextcord.ext.commands import has_permissions, MissingPermissions
import os
import datetime
import asyncio
class Moderation(commands.Cog, name="Moderation"):
    """Moderation commands: purge, mute/unmute, kick, ban/unban."""
    COG_EMOJI = "🛡️"

    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Report missing-permission errors back to the invoking user."""
        if isinstance(error, commands.MissingPermissions):
            # Fixed the missing space after the mention in the message.
            await ctx.send(f"{ctx.author.mention} Sorry, you do not have permission to do this! `Required Permission: Administrator`")
        # Removed leftover debug print of (type(ctx), type(error)).

    @commands.command(name='purge', pass_context=True)
    @commands.has_permissions(manage_messages=True, administrator=True)
    async def purge(self, ctx, amount=6):
        """purges messages in the channel (default: 6)"""
        await ctx.channel.purge(limit=amount)
        embed = nextcord.Embed(title=f"{amount} messages has been purged!", colour=nextcord.Colour.blue(), timestamp=datetime.datetime.utcnow())
        await ctx.reply(embed=embed)

    @commands.command(name='mute', pass_context=True)
    @commands.has_permissions(manage_messages=True, administrator=True)
    async def mute(self, ctx, member: nextcord.Member, *, reason=None):
        """Mute a user from the server"""
        guild = ctx.guild
        mutedRole = nextcord.utils.get(guild.roles, name="Muted")
        # Create the Muted role on first use and deny speaking/sending
        # in every channel.
        if not mutedRole:
            mutedRole = await guild.create_role(name="Muted")
            for channel in guild.channels:
                await channel.set_permissions(mutedRole, speak=False, send_messages=False, read_message_history=True, read_messages=False)
        embed = nextcord.Embed(title="Muted", description=f"{member.mention} was muted ", colour=nextcord.Colour.blue(), timestamp=datetime.datetime.utcnow())
        embed.add_field(name="Reason:", value=reason, inline=False)
        await ctx.reply(embed=embed)
        await member.add_roles(mutedRole, reason=reason)
        await member.send(f"You have been muted from: {guild.name} Reason: {reason}")

    @commands.command(name='unmute', pass_context=True)
    @commands.has_permissions(manage_messages=True, administrator=True)
    async def unmute(self, ctx, member: nextcord.Member):
        """Unmute a user from the server"""
        mutedRole = nextcord.utils.get(ctx.guild.roles, name="Muted")
        await member.remove_roles(mutedRole)
        await member.send(f"You have unmuted from: {ctx.guild.name}")
        embed = nextcord.Embed(title="Unmute", description=f"Unmuted {member.mention}", colour=nextcord.Colour.blue(), timestamp=datetime.datetime.utcnow())
        await ctx.reply(embed=embed)

    @commands.command(name='kick', pass_context=True)
    @commands.has_permissions(manage_messages=True, administrator=True)
    async def kick(self, ctx, member: nextcord.Member, reason="No Reason"):
        """kicks a user from the server"""
        if member is None:
            # Embed takes keyword-only arguments; the old positional call
            # `nextcord.Embed(f"...")` raised TypeError.
            embed = nextcord.Embed(description=f"{ctx.message.author}, Please enter a valid user!")
            await ctx.reply(embed=embed)
        else:
            guild = ctx.guild
            embed = nextcord.Embed(title="Kicked!", description=f"{member.mention} has been kicked!!", colour=nextcord.Colour.blue(), timestamp=datetime.datetime.utcnow())
            embed.add_field(name="Reason: ", value=reason, inline=False)
            await ctx.reply(embed=embed)
            await guild.kick(user=member)

    @commands.command(name='ban', pass_context=True)
    @commands.has_permissions(manage_messages=True, administrator=True)
    async def ban(self, ctx, member: nextcord.Member, reason="No Reason"):
        """Ban a user from the server"""
        if member is None:
            embed = nextcord.Embed(description=f"{ctx.message.author}, Please enter a valid user!")
            await ctx.reply(embed=embed)
        else:
            guild = ctx.guild
            embed = nextcord.Embed(title="Banned!", description=f"{member.mention} has been banned!", colour=nextcord.Colour.blue(), timestamp=datetime.datetime.utcnow())
            embed.add_field(name="Reason: ", value=reason, inline=False)
            await ctx.reply(embed=embed)
            await guild.ban(user=member)

    @commands.command(name='unban', pass_context=True)
    @commands.has_permissions(manage_messages=True, administrator=True)
    async def unban(self, ctx, user: nextcord.User):
        """Unban a user from the server"""
        if user is None:
            embed = nextcord.Embed(description=f"{ctx.message.author}, Please enter a valid user!")
            await ctx.reply(embed=embed)
        else:
            guild = ctx.guild
            embed = nextcord.Embed(title="Unbanned!", description=f"{user.display_name} has been unbanned!", colour=nextcord.Colour.blue(), timestamp=datetime.datetime.utcnow())
            await ctx.reply(embed=embed)
            await guild.unban(user=user)
def setup(bot: commands.Bot):
    """Entry point used by nextcord's extension loader to register this cog."""
    bot.add_cog(Moderation(bot))
|
'''
Created on 8 Jun 2013
@author: dsnowdon
'''
import httplib, mimetypes
import socket
def post_multipart(host, port, selector, fields, files):
    """
    Post fields and files to an http host as multipart/form-data.
    fields is a sequence of (name, value) elements for regular form
    fields.
    files is a sequence of (name, filename, value) elements for data to
    be uploaded as files
    Return the server's response page.

    Returns a (status, data) tuple: data is the body on HTTP 200, None
    otherwise; connection failures are reported as (404, None).
    NOTE: Python 2 code (httplib, print statement).
    """
    content_type, body = encode_multipart_formdata(fields, files)
    try:
        conn = httplib.HTTPConnection(host, port)
        conn.putrequest('POST', selector)
        conn.putheader('content-type', content_type)
        conn.putheader('content-length', str(len(body)))
        conn.endheaders()
        conn.send(body)
        response = conn.getresponse()
        data = response.read()
        status = response.status
        if 200 == status:
            return (status, data)
        else:
            # Non-200: report status without a body.
            return (status, None)
    except socket.error:
        # Network failure is mapped onto a synthetic 404.
        print "Connection error"
        return (404, None)
def encode_multipart_formdata(fields, files):
    """
    Build a multipart/form-data request body.

    fields is a sequence of (name, value) elements for regular form
    fields.
    files is a sequence of (name, filename, value) elements for data to
    be uploaded as files
    Return (content_type, body) ready for httplib.HTTP instance
    """
    BOUNDARY = '---------------------------13049614110900'
    CRLF = '\r\n'
    parts = []
    for (field_name, value) in fields:
        parts.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="{}"'.format(field_name),
            '',
            value,
        ])
    for (field_name, filename, value) in files:
        parts.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="{0}";filename="{1}"'.format(field_name, filename),
            'Content-Type: {}'.format(get_content_type(filename)),
            '',
            value,
        ])
    # Closing boundary plus trailing CRLF.
    parts.append('--' + BOUNDARY + '--')
    parts.append('')
    content_type = 'multipart/form-data; boundary={}'.format(BOUNDARY)
    return content_type, CRLF.join(parts)
def get_content_type(filename):
    """Guess the MIME type for *filename*, defaulting to application/octet-stream."""
    guessed = mimetypes.guess_type(filename)[0]
    if guessed:
        return guessed
    return 'application/octet-stream'
"""Hierarchy of abstract base classes, from _collections_abc.py."""
from pytype import utils
# class -> list of superclasses
# class -> list of superclasses
# Names without "object" in their ancestry are abstract mixins; everything
# else ultimately derives from object.
SUPERCLASSES = {
    # "mixins" (don't derive from object):
    "Hashable": [],
    "Iterable": [],
    "AsyncIterable": [],
    "Sized": [],
    "Callable": [],
    "Awaitable": [],
    "Iterator": ["Iterable"],
    "AsyncIterator": ["AsyncIterable"],
    "Coroutine": ["Awaitable"],
    # Classes (derive from object):
    "Container": ["object"],
    "Number": ["object"],
    "Complex": ["Number"],
    "Real": ["Complex"],
    "Rational": ["Real"],
    "Integral": ["Rational"],
    "Set": ["Sized", "Iterable", "Container"],
    "MutableSet": ["Set"],
    "Mapping": ["Sized", "Iterable", "Container"],
    "MappingView": ["Sized"],
    "KeysView": ["MappingView", "Set"],
    "ItemsView": ["MappingView", "Set"],
    "ValuesView": ["MappingView"],
    "MutableMapping": ["Mapping"],
    "Sequence": ["Sized", "Iterable", "Container"],
    "MutableSequence": ["Sequence"],
    "ByteString": ["Sequence"],
    # Builtin types:
    "set": ["MutableSet"],
    "frozenset": ["Set"],
    "dict": ["MutableMapping"],
    "tuple": ["Sequence"],
    "list": ["MutableSequence"],
    "complex": ["Complex"],
    "float": ["Real"],
    "int": ["Integral"],
    "bool": ["int"],
    "str": ["Sequence"],
    "basestring": ["Sequence"],
    "bytes": ["ByteString"],
    "range": ["Sequence"],
    "bytearray": ["ByteString", "MutableSequence"],
    "memoryview": ["Sequence"],
    # Types that can only be constructed indirectly:
    # (See EOL comments for the definition)
    "bytearray_iterator": ["Iterator"],  # type(iter(bytearray()))
    "dict_keys": ["KeysView"],  # type({}.keys()).
    "dict_items": ["ItemsView"],  # type({}.items()).
    "dict_values": ["ValuesView"],  # type({}.values())
    "dict_keyiterator": ["Iterator"],  # type(iter({}.keys()))
    "dict_valueiterator": ["Iterator"],  # type(iter({}.values()))
    "dict_itemiterator": ["Iterator"],  # type(iter({}.items()))
    "list_iterator": ["Iterator"],  # type(iter([]))
    "list_reverseiterator": ["Iterator"],  # type(iter(reversed([])))
    "range_iterator": ["Iterator"],  # type(iter(range(0)))
    "longrange_iterator": ["Iterator"],  # type(iter(range(1 << 1000)))
    "set_iterator": ["Iterator"],  # type(iter(set()))
    "tuple_iterator": ["Iterator"],  # type(iter(()))
    "str_iterator": ["Iterator"],  # type(iter("")).
    "zip_iterator": ["Iterator"],  # type(iter(zip())).
    "bytes_iterator": ["Iterator"],  # type(iter(b'')).
    "mappingproxy": ["Mapping"],  # type(type.__dict__)
    # NOTE(review): "Generator" / "AsyncGenerator" are referenced as bases
    # below but are not keys of this mapping themselves.
    "generator": ["Generator"],  # type((lambda: (yield))())
    "async_generator": ["AsyncGenerator"],  # type((lambda: (yield))())
    "coroutine": ["Coroutine"],
}
def GetSuperClasses():
    """Get a Python type hierarchy mapping.

    This generates a dictionary that can be used to look up the bases of
    a type in the abstract base class hierarchy.
    Returns:
      A dictionary mapping a type, as string, to a list of base types (also
      as strings). E.g. "float" -> ["Real"].
    """
    # Shallow copy so callers cannot mutate the module-level table.
    return dict(SUPERCLASSES)
def GetSubClasses():
    """Get a reverse Python type hierarchy mapping.

    This generates a dictionary that can be used to look up the (known)
    subclasses of a type in the abstract base class hierarchy.
    Returns:
      A dictionary mapping a type, as string, to a list of direct
      subclasses (also as strings).
      E.g. "Sized" -> ["Set", "Mapping", "MappingView", "Sequence"].
    """
    hierarchy = GetSuperClasses()
    return utils.invert_dict(hierarchy)
|
from . import abc, patterns, pool
from .abc import DeliveryMode
from .channel import Channel
from .connection import Connection, connect
from .exceptions import AMQPException, MessageProcessError
from .exchange import Exchange, ExchangeType
from .message import IncomingMessage, Message
from .queue import Queue
from .robust_channel import RobustChannel
from .robust_connection import RobustConnection, connect_robust
from .robust_exchange import RobustExchange
from .robust_queue import RobustQueue
from .version import (
__author__, __version__, author_info, package_info, package_license,
version_info,
)
# Public API of the package; kept sorted alphabetically.
__all__ = (
    "AMQPException",
    "Channel",
    "Connection",
    "DeliveryMode",
    "Exchange",
    "ExchangeType",
    "IncomingMessage",
    "Message",
    "MessageProcessError",
    "Queue",
    "RobustChannel",
    "RobustConnection",
    "RobustExchange",
    "RobustQueue",
    "__author__",
    "__version__",
    "abc",
    "author_info",
    "connect",
    "connect_robust",
    "package_info",
    "package_license",
    "patterns",
    "pool",
    "version_info",
)
|
import sys
import math
from Tkinter import *
from MemoryAdministrator import MemoryAdministrator
from stack import Stack
# Shared virtual-machine state for the quadruple interpreter.
memory = MemoryAdministrator()  # address -> value store
quads = []  # quadruple list: (opcode, operand1, operand2, target)
proc = dict()  # procedure directory
run = True  # main-loop flag; NOTE(review): nothing visible here sets it False
current_quad = 0  # instruction pointer into quads
fill = False  # whether shapes are drawn filled
penColor = '#000000000'  # current outline colour (hex string)
fillColor = '#000000000'  # current fill colour (hex string)
penWidth = 1  # current pen width in pixels
def actions():
    #function that runs while you haven't reached the end of the program
    #dispatches each quadruple to its handler through the `options` table;
    #NOTE(review): `options` and whatever clears `run` are defined outside
    #this view -- confirm the loop's termination condition
    while(run):
        #print "QUADS", quads[current_quad], " ", memory.getValue('40000')
        options[quads[current_quad][0]]()
def _binary_op(operation):
    #shared implementation for every binary quadruple: reads both operands
    #from memory, applies `operation`, stores the result at the target
    #address and advances the instruction pointer (the ten handlers below
    #previously duplicated this body verbatim)
    global current_quad
    quad = quads[current_quad]
    left = memory.getValue(quad[1])
    right = memory.getValue(quad[2])
    memory.writeValue(quad[3], operation(left, right))
    current_quad += 1

def add():
    #function that adds two numbers
    _binary_op(lambda a, b: a + b)

def substract():
    #function that substracts two numbers
    _binary_op(lambda a, b: a - b)

def multiply():
    #function that multiplies two numbers
    _binary_op(lambda a, b: a * b)

def divide():
    #function that divides two numbers (Python 2 `/` semantics preserved)
    _binary_op(lambda a, b: a / b)

def less_than():
    #function that compares if one number is less than another
    _binary_op(lambda a, b: a < b)

def more_than():
    #function that compares if one number is more than another
    _binary_op(lambda a, b: a > b)

def less_than_eq():
    #function that compares if one number is less than or equal to another
    _binary_op(lambda a, b: a <= b)

def more_than_eq():
    #function that compares if one number is more than or equal to another
    _binary_op(lambda a, b: a >= b)

def different_than():
    #function that compares if one number is different than another
    _binary_op(lambda a, b: a != b)

def equal_to():
    #function that compares if one number is equal to another
    _binary_op(lambda a, b: a == b)
def stringColor(red, green, blue):
    #gets the hexadecimal '#rrggbb' string for the color
    #each component is converted to int and clamped into [0, 255]; the
    #original only clamped the upper bound, so negative inputs produced
    #malformed strings such as '#-10000'
    components = []
    for value in (red, green, blue):
        value = int(value)
        value = max(0, min(value, 255))
        components.append(format(value, '02x'))
    return '#' + components[0] + components[1] + components[2]
def pencolor():
    #updates the global pen (outline) colour from the quadruple's operands
    global current_quad, penColor
    quad = quads[current_quad]
    penColor = stringColor(memory.getValue(quad[1]),
                           memory.getValue(quad[2]),
                           memory.getValue(quad[3]))
    current_quad += 1
def color():
    #changes the color of the fill
    # Sets the global fillColor used by the filled-shape handlers.
    # (The old debug comment said "PENCOLOR" -- that was a copy/paste
    # leftover from pencolor(); this handler sets fillColor.)
    global current_quad, fillColor
    red = memory.getValue(quads[current_quad][1])
    green = memory.getValue(quads[current_quad][2])
    blue = memory.getValue(quads[current_quad][3])
    fillColor = stringColor(red, green, blue)
    current_quad += 1
def backColor():
    """Set the canvas background colour from the quad's three RGB operands.

    Fixes: dropped the spurious `global penWidth` declaration (copy/paste
    from penwidth(); this handler never touches penWidth), and renamed the
    local `color` so it no longer shadows the sibling function color().
    """
    global current_quad
    red = memory.getValue(quads[current_quad][1])
    green = memory.getValue(quads[current_quad][2])
    blue = memory.getValue(quads[current_quad][3])
    background = stringColor(red, green, blue)
    w.configure(background=background)
    current_quad += 1
def penwidth():
    #changes the width of the pen
    # Sets the global stroke width (operand 1) used by all drawing handlers.
    global current_quad, penWidth
    penWidth = memory.getValue(quads[current_quad][1])
    current_quad += 1
def assign():
    """Copy the value at the source address (operand 1) into the
    destination address (operand 3)."""
    global current_quad
    quad = quads[current_quad]
    memory.writeValue(quad[3], memory.getValue(quad[1]))
    current_quad += 1
def rectangle():
    #creates a rectangle with the proper requirements, moves the pen to the lower right corner
    # Top-left corner is the current pen position (addresses 41000/41001);
    # operands 1 and 2 hold the width and height to add to it.
    global current_quad, fill, fillColor, penColor, penWidth
    x = memory.getValue('41000')
    y = memory.getValue('41001')
    x2 = x + memory.getValue(quads[current_quad][1])
    y2 = y + memory.getValue(quads[current_quad][2])
    if(fill):
        w.create_rectangle(x, y, x2, y2, fill=fillColor, outline=penColor, width=penWidth)
    else:
        w.create_rectangle(x, y, x2, y2, fill='', outline=penColor, width=penWidth )
    # pen moves to the rectangle's lower-right corner
    memory.writeValue('41000', x2)
    memory.writeValue('41001', y2)
    current_quad += 1
def triangle():
    #creates a triangle with the proper requirements
    # Consumes TWO quadruples: the first carries (x1, y1) in operands 1-2;
    # the second packs (x2, y2, x3, y3) into ALL four slots -- including
    # slot 0, which normally holds the operator. The pen ends at (x3, y3).
    # NOTE(review): reading slot 0 of the second quad as an address assumes
    # the compiler emits a coordinate-only continuation quad -- confirm.
    global current_quad, fill, fillColor, penColor, penWidth
    x = memory.getValue(quads[current_quad][1])
    y = memory.getValue(quads[current_quad][2])
    current_quad += 1
    x2 = memory.getValue(quads[current_quad][0])
    y2 = memory.getValue(quads[current_quad][1])
    x3 = memory.getValue(quads[current_quad][2])
    y3 = memory.getValue(quads[current_quad][3])
    if(fill):
        w.create_polygon(x, y, x2, y2, x3, y3, fill=fillColor, outline=penColor, width=penWidth )
    else:
        w.create_polygon(x, y, x2, y2, x3, y3, fill='', outline=penColor, width=penWidth )
    memory.writeValue('41000', x3)
    memory.writeValue('41001', y3)
    current_quad += 1
def circle():
    """Draw a circle of radius operand-1 centred on the pen position.

    The pen is left at the lower-right corner of the bounding box.
    """
    global current_quad, fill, fillColor, penColor, penWidth
    radius = memory.getValue(quads[current_quad][1])
    left = memory.getValue('41000') - radius
    top = memory.getValue('41001') - radius
    right = left + (radius * 2)
    bottom = top + (radius * 2)
    interior = fillColor if fill else ''
    w.create_oval(left, top, right, bottom, fill=interior, outline=penColor, width=penWidth)
    memory.writeValue('41000', right)
    memory.writeValue('41001', bottom)
    current_quad += 1
def arc():
    #creates an arc taking the current position as its center, moves the pen to the lower right corner
    # Operand 1 is the radius; the bounding box is centred on the pen.
    global current_quad, fill, fillColor, penColor, penWidth
    size = memory.getValue(quads[current_quad][1])
    x = memory.getValue('41000') - size
    y = memory.getValue('41001') - size
    x2 = x + (size * 2)
    y2 = y + (size * 2)
    # NOTE(review): only the unfilled branch passes extent=size and
    # style=ARC; the filled branch draws a default pie slice. Confirm the
    # asymmetry is intended.
    if(fill):
        w.create_arc(x, y, x2, y2, fill=fillColor, outline=penColor, width=penWidth)
    else:
        w.create_arc(x, y, x2, y2, fill='', outline=penColor, width=penWidth, extent=size, style=ARC)
    # pen ends at the right edge, vertically back on the centre line
    y2 = y2 - size
    memory.writeValue('41000', x2)
    memory.writeValue('41001', y2)
    current_quad += 1
def square():
    """Draw a square whose side is operand 1, anchored at the pen position.

    The pen is left at the square's lower-right corner.
    """
    global current_quad, fill, fillColor, penColor, penWidth
    left = memory.getValue('41000')
    top = memory.getValue('41001')
    right = left + memory.getValue(quads[current_quad][1])
    bottom = top + memory.getValue(quads[current_quad][1])
    interior = fillColor if fill else ''
    w.create_rectangle(left, top, right, bottom, fill=interior, outline=penColor, width=penWidth)
    memory.writeValue('41000', right)
    memory.writeValue('41001', bottom)
    current_quad += 1
def label():
    """Draw the string stored at consecutive addresses as a text label.

    Operand 1 is the base address of the characters; operands 2 and 3
    bound the (inclusive) length. The text is drawn at the pen position
    in the current pen colour.
    """
    global current_quad, penColor, penWidth
    quad = quads[current_quad]
    length = int(quad[3]) - int(quad[2])
    base = int(quad[1])
    word = ''
    for offset in range(length + 1):
        word += memory.getValue(str(base + offset))
    w.create_text(memory.getValue('41000'), memory.getValue('41001'), text=word, fill=penColor)
    current_quad += 1
def linestrip():
    """Draw a polyline starting at the pen position.

    Operand 1 is the base address of the coordinate list, operand 2 the
    number of stored values (x,y interleaved, i.e. ren/2 points).
    Fixes: local `help` shadowed the builtin; the manual while-loop is now
    an idiomatic stepped range.
    """
    global current_quad, penWidth, penColor
    base = int(quads[current_quad][1])
    count = int(quads[current_quad][2])
    points = [(memory.getValue('41000'), memory.getValue('41001'))]
    for off in range(0, count, 2):
        points.append((memory.getValue(str(base + off)),
                       memory.getValue(str(base + off + 1))))
    # NOTE(review): penWidth is declared global but never passed to
    # create_line, so the strip is drawn at default width -- confirm intent.
    w.create_line(points, fill=penColor)
    current_quad += 1
def polygon():
    """Draw a closed polygon starting at the pen position.

    Operand 1 is the base address of the coordinate list, operand 2 the
    number of stored values (x,y interleaved). Honours the global fill
    flag/colour. Fixes: local `help` shadowed the builtin; the manual
    while-loop is now an idiomatic stepped range.
    """
    global current_quad, penWidth, penColor
    base = int(quads[current_quad][1])
    count = int(quads[current_quad][2])
    points = [(memory.getValue('41000'), memory.getValue('41001'))]
    for off in range(0, count, 2):
        points.append((memory.getValue(str(base + off)),
                       memory.getValue(str(base + off + 1))))
    if(fill):
        w.create_polygon(points, fill=fillColor, outline=penColor, width=penWidth)
    else:
        w.create_polygon(points, fill='', outline=penColor, width=penWidth)
    current_quad += 1
def fill():
    #determines if the figure will have filling or not
    # Sets the global flag read by the drawing handlers: operand 3 == '1'
    # means subsequent shapes are filled with fillColor.
    #
    # NOTE(review): the global name `fill` is also this function's own
    # name; the first '209' quad rebinds it to a bool (the dispatch dict
    # keeps its own reference to the function, so dispatch still works).
    # Before that first quad, drawing code sees the function object, which
    # is truthy, so shapes are filled by default -- confirm that is intended.
    global current_quad, fill
    if(quads[current_quad][3] == '1'):
        fill = True
    else:
        fill = False
    current_quad += 1
def get_x_and_y(angle, hypotenus):
    """Return [dx, dy] for a line of length `hypotenus` at `angle` degrees.

    Canvas y grows downward, so quadrants I and II negate the y component.
    Bug fix: the fourth quadrant (270-360 degrees) previously passed raw
    degrees to math.sin/math.cos instead of converting with math.radians.
    Angles outside [0, 360] fall through and return None, matching the
    original behaviour.
    """
    rad = math.radians
    if angle < 45:
        return [hypotenus * math.cos(rad(angle)), -(hypotenus * math.sin(rad(angle)))]
    elif angle < 90:
        return [hypotenus * math.sin(rad(angle)), -(hypotenus * math.cos(rad(angle)))]
    elif angle == 90:
        return [0, -1 * hypotenus]
    angle -= 90
    if angle < 45:
        return [-(hypotenus * math.sin(rad(angle))), -(hypotenus * math.cos(rad(angle)))]
    elif angle < 90:
        return [-(hypotenus * math.cos(rad(angle))), -(hypotenus * math.sin(rad(angle)))]
    elif angle == 90:
        return [-1 * hypotenus, 0]
    angle -= 90
    if angle < 45:
        return [-(hypotenus * math.cos(rad(angle))), hypotenus * math.sin(rad(angle))]
    elif angle < 90:
        return [-(hypotenus * math.sin(rad(angle))), hypotenus * math.cos(rad(angle))]
    elif angle == 90:
        return [0, hypotenus]
    angle -= 90
    if angle < 45:
        # fixed: was math.sin(angle) / math.cos(angle) on raw degrees
        return [hypotenus * math.sin(rad(angle)), hypotenus * math.cos(rad(angle))]
    elif angle < 90:
        return [hypotenus * math.cos(rad(angle)), hypotenus * math.sin(rad(angle))]
    elif angle == 90:
        return [hypotenus, 0]
def penpos():
    #changes the pen position
    # NOTE(review): operand 1 is written to 41001 (y) and operand 2 to
    # 41000 (x) -- i.e. the quad carries (y, x) in that order. Confirm
    # this matches what the compiler emits.
    global current_quad
    memory.writeValue('41001', memory.getValue(quads[current_quad][1]))
    memory.writeValue('41000', memory.getValue(quads[current_quad][2]))
    current_quad += 1
def penX():
    """Set the pen's x coordinate (address 41000) from operand 1."""
    global current_quad
    new_x = memory.getValue(quads[current_quad][1])
    memory.writeValue('41000', new_x)
    current_quad += 1
def penY():
    """Set the pen's y coordinate (address 41001) from operand 1."""
    global current_quad
    new_y = memory.getValue(quads[current_quad][1])
    memory.writeValue('41001', new_y)
    current_quad += 1
def penUp():
    """Move the pen up: subtract operand 1 from y (address 41001)."""
    global current_quad
    delta = memory.getValue(quads[current_quad][1])
    memory.writeValue('41001', memory.getValue('41001') - delta)
    current_quad += 1
def penDown():
    """Move the pen down: add operand 1 to y (address 41001)."""
    global current_quad
    delta = memory.getValue(quads[current_quad][1])
    memory.writeValue('41001', memory.getValue('41001') + delta)
    current_quad += 1
def penLeft():
    """Move the pen left: subtract operand 1 from x (address 41000)."""
    global current_quad
    delta = memory.getValue(quads[current_quad][1])
    memory.writeValue('41000', memory.getValue('41000') - delta)
    current_quad += 1
def penRight():
    """Move the pen right: add operand 1 to x (address 41000)."""
    global current_quad
    delta = memory.getValue(quads[current_quad][1])
    memory.writeValue('41000', memory.getValue('41000') + delta)
    current_quad += 1
def move():
    #creates a line, has to calculate the end position of the line.
    # Draws from the pen position along `angle` degrees for `hyp` pixels,
    # then moves the pen to the line's endpoint.
    global current_quad, penColor
    x = memory.getValue('41000')
    y = memory.getValue('41001')
    hyp = memory.getValue(quads[current_quad][1])
    angle = memory.getValue(quads[current_quad][2])
    pos = get_x_and_y(angle, hyp)
    w.create_line(x, y,(x+pos[0]), (y+pos[1]), fill=penColor)
    memory.writeValue('41000', (x+pos[0]))
    memory.writeValue('41001', (y+pos[1]))
    current_quad += 1
def goto():
    """Unconditional jump: set current_quad to the target quad index
    stored in operand 3."""
    global current_quad
    target = quads[current_quad][3]
    current_quad = int(target)
def goto_false():
    #if the condition is false changes the current_quad
    # Conditional jump: operand 1 holds the condition's address; when it
    # is falsy, jump to the quad index in operand 3, otherwise fall through.
    global current_quad
    temp = memory.getValue(quads[current_quad][1])
    if(temp):
        current_quad += 1
    else:
        current_quad = int(quads[current_quad][3])
def era():
    #creates the memory necesary for the function that is being called.
    # Operand 3 names the procedure; `proc` maps it to its seven memory
    # segment sizes, used to allocate a fresh activation record.
    global current_quad
    temp = proc[quads[current_quad][3]]
    memory.setFunction(temp[0], temp[1], temp[2], temp[3], temp[4], temp[5], temp[6])
    current_quad += 1
def param():
    #function that assigns the values that are being sent to the function, has to check if its a pointer to make the proper write call.
    # An address whose SECOND character is '7' denotes a pointer parameter,
    # which is written with writePointValue; plain values go through
    # writeValueS into the callee's parameter address.
    global current_quad
    if quads[current_quad][3][1] == '7':
        #pointer
        memory.writePointValue(quads[current_quad][3], quads[current_quad][1], 1)
    else:
        memory.writeValueS(quads[current_quad][3], memory.getValue(quads[current_quad][1]))
    current_quad += 1
def goSub():
    #saves the current quad, position it will return to and changes the current scope and current quad.
    global current_quad
    memory.changeScope()
    # push the return address (the quad after this one) before jumping
    memory.actionPointer_push(current_quad+1)
    current_quad = int(quads[current_quad][3])
def dim():
    #arrays when you want to access a value form an array check if the square is not out of bounds.
    # Bounds check for array indexing: operand 2 holds the address of the
    # runtime index value, operand 1 the literal upper bound. Aborts the
    # interpreter on violation.
    global current_quad
    value = memory.getValue(quads[current_quad][2])
    dim = int(quads[current_quad][1])
    # NOTE(review): `value > dim` (not >=) treats the bound as inclusive;
    # confirm against how the compiler emits DIM quads.
    if value < 0 or value > dim:
        print "Out of bounds.", value, " ", dim
        sys.exit(0)
    current_quad += 1
def dimC():
    #matrix when you want to access a value form an matrix check if the square is not out of bounds.
    # Same as dim() but the index (operand 2) is a literal, not an address.
    global current_quad
    value = int(quads[current_quad][2])
    dim = int(quads[current_quad][1])
    if value < 0 or value > dim:
        print "Out of bounds.", value, " ", dim
        sys.exit(0)
    current_quad += 1
def pointerDir():
    #retrieve the direction of the value you want to access, if its a pointer you have to retrieve its real base direction and add the position you want. store the direction to the pointer.
    global current_quad
    # If operand 1 is itself a pointer (second char '7'), resolve it to
    # its real base address first; otherwise it is a literal base address.
    if quads[current_quad][1][1] == '7':
        vDir = memory.getValuePointer(quads[current_quad][1])
    else:
        vDir = int(quads[current_quad][1])
    # add the runtime offset (operand 2) and store the resulting address
    # into the destination pointer (operand 3)
    vPoint = memory.getValue(quads[current_quad][2])
    vDir = vDir + vPoint
    memory.writePointValue(quads[current_quad][3], str(vDir), 0)
    current_quad += 1
def pointerDirC():
    """Compute base+offset (both literal ints in operands 1 and 2) and
    store the resulting address into the pointer at operand 3."""
    global current_quad
    quad = quads[current_quad]
    address = int(quad[1]) + int(quad[2])
    memory.writePointValue(quad[3], str(address), 0)
    current_quad += 1
def endPro():
    """Return from a procedure: restore the saved quad index, then free
    the procedure's activation record."""
    global current_quad
    return_to = int(memory.actionPointer_pop())
    current_quad = return_to
    memory.delete_function()
def endProg():
    #you have reached the end of the program.
    # Clears the global run flag so the interpreter main loop stops.
    global run
    run = False
# Dispatch table: maps a quadruple's operator field to its handler.
options = { '+' : add,
            '-' : substract,
            '*' : multiply,
            '/' : divide,
            '<' : less_than,
            '>' : more_than,
            '<=' : less_than_eq,
            '>=' : more_than_eq,
            '<>' : different_than,
            '==' : equal_to,
            'GOTO': goto,
            'GOTOF' : goto_false,
            'ENDPROG': endProg,
            '101': assign,
            '201' : rectangle,
            '202' : triangle,
            '203' : circle,
            '204' : square,
            '205' : polygon,
            '206' : linestrip,
            '207' : arc,
            '208' : label,
            '209' : fill,
            '301' : pencolor,
            '302' : color,
            '303' : backColor,
            '304' : penwidth,
            '307' : penpos,
            '308' : penX,
            '309' : penY,
            '305' : move,
            '310' : penUp,
            '311' : penDown,
            '312' : penLeft,
            '313' : penRight,
            'ERA' : era,
            'PARAMETRO' : param,
            # NOTE(review): 'RETURN' shares the param handler -- presumably
            # return values are copied like parameters; confirm.
            'GOSUB' : goSub,
            'RETURN' : param,
            'DIM' : dim,
            'DIMC' : dimC,
            'DIR' : pointerDir,
            'DIRC' : pointerDirC,
            'ENDPROC' : endPro}
if(len(sys.argv) > 1):
    if sys.argv[1] == "-f":
        print sys.argv[2]
        f = open(sys.argv[2], "r")
        # `count` tracks which '%%'-separated section of the object file is
        # being parsed: 0.5 = segment-size line, 1 = constants, 2 = memory
        # layout (main/globals/procedures), 3 = quadruples.
        count = 0.5
        s = f.readlines()
        string = ""
        for line in s:
            line = line.strip()
            if(line == '%%'):
                count += 1
            else:
                if(count <= 1):
                    if(count == 1):
                        # constant line: "<value> <address>"
                        info = line.split(' ')
                        memory.writeValue(info[1], info[0])
                    else:
                        # very first line: sizes of the constant segments
                        count += 0.5
                        info = line.split(' ')
                        memory.constSize(int(info[0]), int(info[1]), int(info[2]))
                if(count == 2):
                    # memory layout for main, globals and each procedure
                    info = line.split(' ')
                    if(info[0] == 'main'):
                        memory.setMainMem(info[1], info[2], info[3], info[4], info[5], info[6], info[7])
                    elif(info[0] == 'globals'):
                        memory.setGlobalMem(info[1], info[2], info[3], info[4], info[5], info[6], info[7])
                    else:
                        proc[info[0]] = [info[1], info[2], info[3], info[4], info[5], info[6], info[7]]
                if(count == 3):
                    # quadruple: "<op> <arg1> <arg2> <result>"
                    info = line.split(' ')
                    sp = [info[0], info[1], info[2], info[3]]
                    quads.append(sp)
                    # `string` is accumulated but never used afterwards
                    string += line
        root = Tk()
        print "const int float", len(quads)
        memory.constPrint()
        w = Canvas(root, width=600, height=480)
        w.configure(background='white')
        w.pack()
        # run the quad interpreter loop (actions() is defined earlier in
        # this file) and dump the final memory state for debugging
        actions()
        print "const int float"
        memory.constPrint()
        print "main"
        memory.printMain()
        print "global"
        memory.printGl()
        mainloop()
    else:
        print "):"
|
import sys
"""
def get_input():
'''Prompt user to input message to display.'''
message = input("What would you like the cow to say?")
return message
"""
def split_message(message, lines):
    """Given a message to display and the number of lines to display on,
    return a list of the substrings from the message to display
    for each line.

    Param: message(str)
           lines(int)
    Return: sublines(list)

    Bug fix: the read position used to reset to 0 for every line, so each
    line repeated the same prefix of the message. The position now
    persists across lines so the message flows through all of them.
    Also guards against an empty message (message.split() == []).
    """
    sublines = list()
    message_words = message.split()
    # The first word (plus a trailing space) acts as the lookahead width
    # that decides when a line is full; once the message is exhausted it
    # shrinks to a single space so the line pads out to full width.
    word_to_add = (message_words[0] + " ") if message_words else " "
    index = 0  # position in message; persists across lines
    for _ in range(lines):
        speech_line = "< "
        while (len(speech_line) + len(word_to_add)) <= 23:
            if index < len(message):
                speech_line += message[index]
                index += 1
            else:
                speech_line += " "
                word_to_add = " "
        speech_line += " >\n"
        sublines.append(speech_line)
    return sublines
def form_message(sublines, message_divider):
    """Form the message portion of the speech bubble.

    Param: sublines(list): the text to go on each line of the message
           message_divider(str): horizontal rule placed above and below
    Return: bubble(str): message formatted to sit inside a bubble
    """
    return "".join([message_divider, *sublines, message_divider])
def draw_speech(message):
    """Given a message, draw the speech bubble for the cow to use.

    Param: message(str)
    Return: speech_bubble(str)
    """
    message_divider = " ----------------------- \n"
    limit_line_length = len(message_divider)
    # number of text lines needed inside the bubble
    line_count = (len(message) // limit_line_length) + 1
    sublines = split_message(message, line_count)
    return form_message(sublines, message_divider)
def draw_cow(speech_bubble):
    """Draw the ASCII cow speaking the given bubble.

    Param: speech_bubble(str)
    Return: cow_image(str)
    """
    # bubble first, then the cow art line by line
    art = [speech_bubble, "\t\\ ^ ^\n", "\t\\ _ _ \n"]
    return "".join(art)
def cowsay(message):
    """Implements the cowsay program.

    Param: message: iterable of command-line words forming the message
    Return: None (prints the cow to stdout)
    """
    text = " ".join(message)
    print(draw_cow(draw_speech(text)))
if __name__ == "__main__":
    # every CLI word after the script name becomes part of the message
    message = sys.argv[1:]
    cowsay(message)
|
from syzscope.interface.s2e import S2EInterface
# Hard-coded local paths for this S2E test environment.
s2e_path = '/home/xzou017/projects/KOOBE-test/s2e'
# vmlinux (with symbols) of the guest kernel image under test
kernel_path = '/home/xzou017/projects/KOOBE-test/s2e/images/debian-9.2.1-x86_64-0e2adab6/guestfs/vmlinux'
# local syzkaller checkout used by the S2E interface
syz_path = '/home/xzou017/projects/SyzbotAnalyzer/tools/gopath/src/github.com/google/syzkaller'
# S2E project directory that receives the generated avoid list
s2e_project_path = '/home/xzou017/projects/KOOBE-test/s2e/projects/2389bfc'
def init_s2e_inst(s2e_path, kernel_path, syz_path):
    """Build and return an S2E interface bound to the given paths."""
    return S2EInterface(s2e_path, kernel_path, syz_path)
def getAvoidingPC_test(inst, func_list):
    """Resolve avoiding PCs for the given kernel functions and print them.

    Returns the mapping from function name to PCs, or None when no S2E
    interface instance is available. (Fix: compare to None with `is`,
    not `==`; return None explicitly instead of falling off the end.)
    """
    if inst is None:
        return None
    res = inst.getAvoidingPC(func_list)
    for func in res:
        print(func, res[func])
    return res
def generateAvoidList_test(inst, avoid, s2e_project_path):
    # Thin wrapper: write the avoid-PC mapping into the S2E project dir.
    inst.generateAvoidList(avoid, s2e_project_path)
if __name__ == '__main__':
    inst = init_s2e_inst(s2e_path, kernel_path, syz_path)
    # Kernel helpers whose PCs should be avoided during symbolic execution
    # (locking, freeing and other noise functions).
    func_list = [
        'refcount_dec_and_mutex_lock',
        'mutex_unlock',
        '_raw_spin_lock_irqsave',
        '_raw_spin_unlock_irqrestore',
        'kfree_call_rcu',
        'kfree',
        'mutex_lock',
        '_raw_spin_lock',
        '_raw_spin_trylock',
        'kfree_skb',
        'get_order',
        '_raw_read_lock',
        '_raw_spin_lock_bh',
        '_raw_spin_unlock_bh',
        'rht_key_hashfn',
    ]
    res = getAvoidingPC_test(inst, func_list)
    generateAvoidList_test(inst, res, s2e_project_path)
|
'''
imputation for cmmva and cva on mnist
'''
import os
import sys
import time
import math
import numpy as np
import theano
import theano.tensor as T
import theano.tensor.shared_randomstreams
from util import datapy, color, paramgraphics
from optimization import optimizer
from layer import FullyConnected, nonlinearity
from layer import GaussianHidden, NoParamsBernoulliVisiable,Pegasos
from layer import ConvMaxPool, UnpoolConvNon
def c_6layer_mnist_imputation(seed=0, ctype='cva',
    pertub_type=3, pertub_prob=6, pertub_prob1=14, visualization_times=20,
    denoise_times=200, predir=None, n_batch=144,
    dataset='mnist.pkl.gz', batch_size=500):
    """
    Missing data imputation on MNIST with a 6-layer conv VAE ('cva') or
    conv max-margin VAE ('cmmva').

    Loads pre-perturbed data, rebuilds the recognition/generation networks,
    restores pretrained parameters from `predir`, then iteratively denoises
    the perturbed test set `denoise_times` times, saving images, MSE logs
    and extracted features along the way. (Python 2 / Theano code.)

    NOTE(review): requires a pretrained model -- exits if `predir` is None
    or `ctype` is not 'cva'/'cmmva'.
    """
    #cp->cd->cpd->cd->c
    # per-layer kernel counts and dropout flags for the recognition net
    nkerns=[32, 32, 64, 64, 64]
    drops=[0, 0, 0, 0, 0, 1]
    #skerns=[5, 3, 3, 3, 3]
    #pools=[2, 1, 1, 2, 1]
    #modes=['same']*5
    n_hidden=[500, 50]
    drop_inverses=[1,]
    # 28->12->12->5->5/5*5*64->500->50->500->5*5*64/5->5->12->12->28
    if dataset=='mnist.pkl.gz':
        dim_input=(28, 28)
        colorImg=False
    # results directory is keyed by model type, perturbation settings and time
    logdir = 'results/imputation/'+ctype+'/mnist/'+ctype+'_6layer_mnist_'+str(pertub_type)+'_'+str(pertub_prob)+'_'+str(pertub_prob1)+'_'+str(denoise_times)+'_'
    logdir += str(int(time.time()))+'/'
    if not os.path.exists(logdir): os.makedirs(logdir)
    print predir
    with open(logdir+'hook.txt', 'a') as f:
        print >>f, predir
    # perturbed test data plus the mask of perturbed pixels
    train_set_x, test_set_x, test_set_x_pertub, pertub_label, pertub_number = datapy.load_pertub_data(dirs='data_imputation/', pertub_type=pertub_type, pertub_prob=pertub_prob,pertub_prob1=pertub_prob1)
    datasets = datapy.load_data_gpu(dataset, have_matrix=True)
    # only the validation inputs are needed from the standard split
    _, _, _ = datasets[0]
    valid_set_x, _, _ = datasets[1]
    _, _, _ = datasets[2]
    # compute number of minibatches for training, validation and testing
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'
    # allocate symbolic variables for the data
    index = T.lscalar()    # index to a [mini]batch
    x = T.matrix('x')
    x_pertub = T.matrix('x_pertub')    # the data is presented as rasterized images
    p_label = T.matrix('p_label')
    random_z = T.matrix('random_z')
    drop = T.iscalar('drop')
    drop_inverse = T.iscalar('drop_inverse')
    activation = nonlinearity.relu
    rng = np.random.RandomState(seed)
    rng_share = theano.tensor.shared_randomstreams.RandomStreams(0)
    input_x = x_pertub.reshape((batch_size, 1, 28, 28))
    # ---- recognition (encoder) network: 5 conv blocks + MLP ----
    recg_layer = []
    cnn_output = []
    #1
    recg_layer.append(ConvMaxPool.ConvMaxPool(
            rng,
            image_shape=(batch_size, 1, 28, 28),
            filter_shape=(nkerns[0], 1, 5, 5),
            poolsize=(2, 2),
            border_mode='valid',
            activation=activation
        ))
    if drops[0]==1:
        cnn_output.append(recg_layer[-1].drop_output(input=input_x, drop=drop, rng=rng_share))
    else:
        cnn_output.append(recg_layer[-1].output(input=input_x))
    #2
    recg_layer.append(ConvMaxPool.ConvMaxPool(
            rng,
            image_shape=(batch_size, nkerns[0], 12, 12),
            filter_shape=(nkerns[1], nkerns[0], 3, 3),
            poolsize=(1, 1),
            border_mode='same',
            activation=activation
        ))
    if drops[1]==1:
        cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share))
    else:
        cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
    #3
    recg_layer.append(ConvMaxPool.ConvMaxPool(
            rng,
            image_shape=(batch_size, nkerns[1], 12, 12),
            filter_shape=(nkerns[2], nkerns[1], 3, 3),
            poolsize=(2, 2),
            border_mode='valid',
            activation=activation
        ))
    if drops[2]==1:
        cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share))
    else:
        cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
    #4
    recg_layer.append(ConvMaxPool.ConvMaxPool(
            rng,
            image_shape=(batch_size, nkerns[2], 5, 5),
            filter_shape=(nkerns[3], nkerns[2], 3, 3),
            poolsize=(1, 1),
            border_mode='same',
            activation=activation
        ))
    if drops[3]==1:
        cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share))
    else:
        cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
    #5
    recg_layer.append(ConvMaxPool.ConvMaxPool(
            rng,
            image_shape=(batch_size, nkerns[3], 5, 5),
            filter_shape=(nkerns[4], nkerns[3], 3, 3),
            poolsize=(1, 1),
            border_mode='same',
            activation=activation
        ))
    if drops[4]==1:
        cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share))
    else:
        cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
    mlp_input_x = cnn_output[-1].flatten(2)
    activations = []
    #1
    recg_layer.append(FullyConnected.FullyConnected(
            rng=rng,
            n_in= 5 * 5 * nkerns[-1],
            n_out=n_hidden[0],
            activation=activation
        ))
    if drops[-1]==1:
        activations.append(recg_layer[-1].drop_output(input=mlp_input_x, drop=drop, rng=rng_share))
    else:
        activations.append(recg_layer[-1].output(input=mlp_input_x))
    #stochastic layer
    # Gaussian latent code z (dimension n_hidden[1])
    recg_layer.append(GaussianHidden.GaussianHidden(
            rng=rng,
            input=activations[-1],
            n_in=n_hidden[0],
            n_out = n_hidden[1],
            activation=None
        ))
    z = recg_layer[-1].sample_z(rng_share)
    # ---- generation (decoder) network: MLP + 5 unpool-conv blocks ----
    # `z_output` decodes the inferred z; `random_z_output` decodes
    # externally supplied codes for random generation.
    gene_layer = []
    z_output = []
    random_z_output = []
    #1
    gene_layer.append(FullyConnected.FullyConnected(
            rng=rng,
            n_in=n_hidden[1],
            n_out = n_hidden[0],
            activation=activation
        ))
    z_output.append(gene_layer[-1].output(input=z))
    random_z_output.append(gene_layer[-1].output(input=random_z))
    #2
    gene_layer.append(FullyConnected.FullyConnected(
            rng=rng,
            n_in=n_hidden[0],
            n_out = 5*5*nkerns[-1],
            activation=activation
        ))
    if drop_inverses[0]==1:
        z_output.append(gene_layer[-1].drop_output(input=z_output[-1], drop=drop_inverse, rng=rng_share))
        random_z_output.append(gene_layer[-1].drop_output(input=random_z_output[-1], drop=drop_inverse, rng=rng_share))
    else:
        z_output.append(gene_layer[-1].output(input=z_output[-1]))
        random_z_output.append(gene_layer[-1].output(input=random_z_output[-1]))
    input_z = z_output[-1].reshape((batch_size, nkerns[-1], 5, 5))
    input_random_z = random_z_output[-1].reshape((n_batch, nkerns[-1], 5, 5))
    #1
    gene_layer.append(UnpoolConvNon.UnpoolConvNon(
            rng,
            image_shape=(batch_size, nkerns[-1], 5, 5),
            filter_shape=(nkerns[-2], nkerns[-1], 3, 3),
            poolsize=(1, 1),
            border_mode='same',
            activation=activation
        ))
    z_output.append(gene_layer[-1].output(input=input_z))
    random_z_output.append(gene_layer[-1].output_random_generation(input=input_random_z, n_batch=n_batch))
    #2
    gene_layer.append(UnpoolConvNon.UnpoolConvNon(
            rng,
            image_shape=(batch_size, nkerns[-2], 5, 5),
            filter_shape=(nkerns[-3], nkerns[-2], 3, 3),
            poolsize=(2, 2),
            border_mode='full',
            activation=activation
        ))
    z_output.append(gene_layer[-1].output(input=z_output[-1]))
    random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
    #3
    gene_layer.append(UnpoolConvNon.UnpoolConvNon(
            rng,
            image_shape=(batch_size, nkerns[-3], 12, 12),
            filter_shape=(nkerns[-4], nkerns[-3], 3, 3),
            poolsize=(1, 1),
            border_mode='same',
            activation=activation
        ))
    z_output.append(gene_layer[-1].output(input=z_output[-1]))
    random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
    #4
    gene_layer.append(UnpoolConvNon.UnpoolConvNon(
            rng,
            image_shape=(batch_size, nkerns[-4], 12, 12),
            filter_shape=(nkerns[-5], nkerns[-4], 3, 3),
            poolsize=(1, 1),
            border_mode='same',
            activation=activation
        ))
    z_output.append(gene_layer[-1].output(input=z_output[-1]))
    random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
    #5 stochastic layer
    # for the last layer, the nonliearity should be sigmoid to achieve mean of Bernoulli
    gene_layer.append(UnpoolConvNon.UnpoolConvNon(
            rng,
            image_shape=(batch_size, nkerns[-5], 12, 12),
            filter_shape=(1, nkerns[-5], 5, 5),
            poolsize=(2, 2),
            border_mode='full',
            activation=nonlinearity.sigmoid
        ))
    z_output.append(gene_layer[-1].output(input=z_output[-1]))
    random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
    gene_layer.append(NoParamsBernoulliVisiable.NoParamsBernoulliVisiable(
            #rng=rng,
            #mean=z_output[-1],
            #data=input_x,
        ))
    logpx = gene_layer[-1].logpx(mean=z_output[-1], data=input_x)
    # 4-D tensor of random generation
    random_x_mean = random_z_output[-1]
    random_x = gene_layer[-1].sample_x(rng_share, random_x_mean)
    # keep the original pixels where the mask says "not perturbed" and the
    # reconstruction elsewhere; MSE is measured over perturbed pixels only
    x_denoised = z_output[-1].flatten(2)
    x_denoised = p_label*x+(1-p_label)*x_denoised
    mse = ((x - x_denoised)**2).sum() / pertub_number
    params=[]
    for g in gene_layer:
        params+=g.params
    for r in recg_layer:
        params+=r.params
    # feature-extraction functions (dropout disabled via drop=0)
    train_activations = theano.function(
        inputs=[index],
        outputs=T.concatenate(activations, axis=1),
        givens={
            x_pertub: train_set_x[index * batch_size: (index + 1) * batch_size],
            drop: np.cast['int32'](0)
        }
    )
    valid_activations = theano.function(
        inputs=[index],
        outputs=T.concatenate(activations, axis=1),
        givens={
            x_pertub: valid_set_x[index * batch_size: (index + 1) * batch_size],
            drop: np.cast['int32'](0)
        }
    )
    test_activations = theano.function(
        inputs=[x_pertub],
        outputs=T.concatenate(activations, axis=1),
        givens={
            drop: np.cast['int32'](0)
        }
    )
    # one denoising step: reconstruct a batch and return (denoised, mse)
    imputation_model = theano.function(
        inputs=[index, x_pertub],
        outputs=[x_denoised, mse],
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            p_label:pertub_label[index * batch_size: (index + 1) * batch_size],
            drop: np.cast['int32'](0),
            drop_inverse: np.cast['int32'](0)
        }
    )
    ##################
    # Pretrain MODEL #
    ##################
    model_epoch = 600
    if os.environ.has_key('model_epoch'):
        model_epoch = int(os.environ['model_epoch'])
    if predir is not None:
        color.printBlue('... setting parameters')
        color.printBlue(predir)
        if model_epoch == -1:
            pre_train = np.load(predir+'best-model.npz')
        else:
            pre_train = np.load(predir+'model-'+str(model_epoch)+'.npz')
        pre_train = pre_train['model']
        if ctype == 'cva':
            for (para, pre) in zip(params, pre_train):
                para.set_value(pre)
        elif ctype == 'cmmva':
            # cmmva checkpoints carry two extra (classifier) arrays at the end
            for (para, pre) in zip(params, pre_train[:-2]):
                para.set_value(pre)
        else:
            exit()
    else:
        exit()
    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    epoch = 0
    n_visualization = 100
    # output holds: original, perturbed, then the first `visualization_times`
    # denoising iterations for the first n_visualization digits
    output = np.ones((n_visualization, visualization_times+2, 784))
    output[:,0,:] = test_set_x.get_value()[:n_visualization,:]
    output[:,1,:] = test_set_x_pertub.get_value()[:n_visualization,:]
    image = paramgraphics.mat_to_img(output[:,0,:].T, dim_input, colorImg=colorImg)
    image.save(logdir+'data.png', 'PNG')
    image = paramgraphics.mat_to_img(output[:,1,:].T, dim_input, colorImg=colorImg)
    image.save(logdir+'data_pertub.png', 'PNG')
    tmp = test_set_x_pertub.get_value()
    # iterative denoising: feed the previous iteration's output back in
    while epoch < denoise_times:
        epoch = epoch + 1
        this_mse=0
        for i in xrange(n_test_batches):
            d, m = imputation_model(i, tmp[i * batch_size: (i + 1) * batch_size])
            tmp[i * batch_size: (i + 1) * batch_size] = np.asarray(d)
            this_mse+=m
        if epoch<=visualization_times:
            output[:,epoch+1,:] = tmp[:n_visualization,:]
        print epoch, this_mse
        with open(logdir+'hook.txt', 'a') as f:
            print >>f, epoch, this_mse
        image = paramgraphics.mat_to_img(tmp[:n_visualization,:].T, dim_input, colorImg=colorImg)
        image.save(logdir+'procedure-'+str(epoch)+'.png', 'PNG')
        np.savez(logdir+'procedure-'+str(epoch), tmp=tmp)
    image = paramgraphics.mat_to_img((output.reshape(-1,784)).T, dim_input, colorImg=colorImg, tile_shape=(n_visualization,22))
    image.save(logdir+'output.png', 'PNG')
    np.savez(logdir+'output', output=output)
    # save original train features and denoise test features
    for i in xrange(n_train_batches):
        if i == 0:
            train_features = np.asarray(train_activations(i))
        else:
            train_features = np.vstack((train_features, np.asarray(train_activations(i))))
    for i in xrange(n_valid_batches):
        if i == 0:
            valid_features = np.asarray(valid_activations(i))
        else:
            valid_features = np.vstack((valid_features, np.asarray(valid_activations(i))))
    for i in xrange(n_test_batches):
        if i == 0:
            test_features = np.asarray(test_activations(tmp[i * batch_size: (i + 1) * batch_size]))
        else:
            test_features = np.vstack((test_features, np.asarray(test_activations(tmp[i * batch_size: (i + 1) * batch_size]))))
    np.save(logdir+'train_features', train_features)
    np.save(logdir+'valid_features', valid_features)
    np.save(logdir+'test_features', test_features)
if __name__ == '__main__':
    # CLI: ctype pertub_type pertub_prob pertub_prob1 denoise_times predir
    ctype = sys.argv[1]
    pertub_type = int(sys.argv[2])
    pertub_prob = float(sys.argv[3])
    pertub_prob1 = float(sys.argv[4])
    denoise_times = int(sys.argv[5])
    predir = sys.argv[6]
    c_6layer_mnist_imputation(ctype=ctype, denoise_times=denoise_times,
        pertub_type=pertub_type, pertub_prob=pertub_prob, pertub_prob1=pertub_prob1, predir=predir)
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
import datetime
import random
import time
from neutron_lib import constants
from neutron_lib import context as ncontext
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import timeutils
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron._i18n import _
from neutron.agent.common import utils as agent_utils
from neutron.common import constants as n_const
from neutron.common import utils
from neutron.db import agents_db
from neutron.db.availability_zone import network as network_az
from neutron.db.models import agent as agent_model
from neutron.db import models_v2
from neutron.db.network_dhcp_agent_binding import models as ndab_model
from neutron.extensions import agent as ext_agent
from neutron.extensions import dhcpagentscheduler
from neutron.extensions import wrs_net
from neutron import worker as neutron_worker
LOG = logging.getLogger(__name__)
# Configuration options controlling how networks are scheduled to DHCP
# agents; registered globally below so any code importing this module
# sees them on cfg.CONF.
AGENTS_SCHEDULER_OPTS = [
    cfg.StrOpt('network_scheduler_driver',
               default='neutron.scheduler.'
                       'dhcp_agent_scheduler.WeightScheduler',
               help=_('Driver to use for scheduling network to DHCP agent')),
    cfg.IntOpt('network_reschedule_threshold',
               default=1,
               help=_('Threshold that when current network distribution has '
                      'one DHCP agent with this many more networks than '
                      'another DHCP agent, then rescheduling is needed')),
    cfg.BoolOpt('network_auto_schedule', default=True,
                help=_('Allow auto scheduling networks to DHCP agent.')),
    cfg.BoolOpt('allow_automatic_dhcp_failover', default=True,
                help=_('Automatically remove networks from offline DHCP '
                       'agents.')),
    cfg.IntOpt('dhcp_agents_per_network', default=1,
               help=_('Number of DHCP agents scheduled to host a tenant '
                      'network. If this number is greater than 1, the '
                      'scheduler automatically assigns multiple DHCP agents '
                      'for a given tenant network, providing high '
                      'availability for DHCP service.')),
    cfg.BoolOpt('enable_services_on_agents_with_admin_state_down',
                default=False,
                help=_('Enable services on an agent with admin_state_up '
                       'False. If this option is False, when admin_state_up '
                       'of an agent is turned False, services on it will be '
                       'disabled. Agents with admin_state_up False are not '
                       'selected for automatic scheduling regardless of this '
                       'option. But manual scheduling to such agents is '
                       'available if this option is True.')),
]
cfg.CONF.register_opts(AGENTS_SCHEDULER_OPTS)
class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
    """Common class for agent scheduler mixins."""
    # agent notifiers to handle agent update operations;
    # should be updated by plugins;
    # maps an agent type constant to the plugin's notifier object (or None
    # when no plugin has registered one)
    agent_notifiers = {
        constants.AGENT_TYPE_DHCP: None,
        constants.AGENT_TYPE_L3: None,
        constants.AGENT_TYPE_LOADBALANCER: None,
    }
@staticmethod
def is_eligible_agent(active, agent):
if active is None:
# filtering by activeness is disabled, all agents are eligible
return True
else:
# note(rpodolyaka): original behaviour is saved here: if active
# filter is set, only agents which are 'up'
# (i.e. have a recent heartbeat timestamp)
# are eligible, even if active is False
return not agent_utils.is_agent_down(
agent['heartbeat_timestamp'])
    def update_agent(self, context, id, agent):
        """Update an agent record, notifying the plugin-specific notifier
        when the admin state changed.
        """
        # Fetch the pre-update record first so the old admin_state_up can
        # be compared against the requested one below.
        original_agent = self.get_agent(context, id)
        result = super(AgentSchedulerDbMixin, self).update_agent(
            context, id, agent)
        agent_data = agent['agent']
        agent_notifier = self.agent_notifiers.get(original_agent['agent_type'])
        if (agent_notifier and
            'admin_state_up' in agent_data and
            original_agent['admin_state_up'] != agent_data['admin_state_up']):
            agent_notifier.agent_updated(context,
                                         agent_data['admin_state_up'],
                                         original_agent['host'])
        return result
def add_agent_status_check_worker(self, function):
# TODO(enikanorov): make interval configurable rather than computed
interval = max(cfg.CONF.agent_down_time // 2, 1)
# add random initial delay to allow agents to check in after the
# neutron server first starts. random to offset multiple servers
initial_delay = random.randint(interval, interval * 2)
check_worker = neutron_worker.PeriodicWorker(function, interval,
initial_delay)
self.add_worker(check_worker)
def agent_dead_limit_seconds(self):
return cfg.CONF.agent_down_time * 2
    def wait_down_agents(self, agent_type, agent_dead_limit):
        """Gives chance for agents to send a heartbeat."""
        # check for an abrupt clock change since last check. if a change is
        # detected, sleep for a while to let the agents check in.
        tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
                                              timeutils.utcnow())
        if tdelta.total_seconds() > cfg.CONF.agent_down_time:
            LOG.warning("Time since last %s agent reschedule check has "
                        "exceeded the interval between checks. Waiting "
                        "before check to allow agents to send a heartbeat "
                        "in case there was a clock adjustment.",
                        agent_type)
            time.sleep(agent_dead_limit)
        # Remember when this check ran so the next call can detect jumps.
        self._clock_jump_canary = timeutils.utcnow()
def get_cutoff_time(self, agent_dead_limit):
cutoff = timeutils.utcnow() - datetime.timedelta(
seconds=agent_dead_limit)
return cutoff
    def reschedule_resources_from_down_agents(self, agent_type,
                                              get_down_bindings,
                                              agent_id_attr,
                                              resource_id_attr,
                                              resource_name,
                                              reschedule_resource,
                                              rescheduling_failed):
        """Reschedule resources from down neutron agents
        if admin state is up.

        :param agent_type: human-readable agent type used in log messages.
        :param get_down_bindings: callable(context, dead_limit) returning
            bindings whose agent has not reported recently.
        :param agent_id_attr: binding attribute holding the agent id.
        :param resource_id_attr: binding attribute holding the resource id.
        :param resource_name: resource label used in log messages.
        :param reschedule_resource: callable(context, resource_id).
        :param rescheduling_failed: exception type raised when rescheduling
            a single resource fails.
        """
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents(agent_type, agent_dead_limit)
        context = ncontext.get_admin_context()
        try:
            down_bindings = get_down_bindings(context, agent_dead_limit)
            agents_back_online = set()
            for binding in down_bindings:
                binding_agent_id = getattr(binding, agent_id_attr)
                binding_resource_id = getattr(binding, resource_id_attr)
                if binding_agent_id in agents_back_online:
                    continue
                else:
                    # we need new context to make sure we use different DB
                    # transaction - otherwise we may fetch same agent record
                    # each time due to REPEATABLE_READ isolation level
                    context = ncontext.get_admin_context()
                    agent = self._get_agent(context, binding_agent_id)
                    if agent.is_active:
                        agents_back_online.add(binding_agent_id)
                        continue
                LOG.warning(
                    "Rescheduling %(resource_name)s %(resource)s from agent "
                    "%(agent)s because the agent did not report to the server "
                    "in the last %(dead_time)s seconds.",
                    {'resource_name': resource_name,
                     'resource': binding_resource_id,
                     'agent': binding_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    reschedule_resource(context, binding_resource_id)
                except (rescheduling_failed, oslo_messaging.RemoteError):
                    # Catch individual rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception("Failed to reschedule %(resource_name)s "
                                  "%(resource)s",
                                  {'resource_name': resource_name,
                                   'resource': binding_resource_id})
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception("Exception encountered during %(resource_name)s "
                          "rescheduling.",
                          {'resource_name': resource_name})
class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
                                .DhcpAgentSchedulerPluginBase,
                                AgentSchedulerDbMixin):
    """Mixin class to add DHCP agent scheduler extension to db_base_plugin_v2.
    """
    # Scheduler driver instance; assigned by the plugin at load time.
    network_scheduler = None
    def add_periodic_dhcp_agent_status_check(self):
        """Install the periodic job removing networks from down agents."""
        if not cfg.CONF.allow_automatic_dhcp_failover:
            LOG.info("Skipping periodic DHCP agent status check because "
                     "automatic network rescheduling is disabled.")
            return
        self.add_agent_status_check_worker(
            self.remove_networks_from_down_agents
        )
    def is_eligible_agent(self, context, active, agent):
        # eligible agent is active or starting up
        # (short-circuit keeps the DB query inside agent_starting_up from
        # running for agents that already pass the base check)
        return (AgentSchedulerDbMixin.is_eligible_agent(active, agent) or
                self.agent_starting_up(context, agent))
    def agent_starting_up(self, context, agent):
        """Check if agent was just started.

        Method returns True if agent is in its 'starting up' period.
        Return value depends on amount of networks assigned to the agent.
        It doesn't look at latest heartbeat timestamp as it is assumed
        that this method is called for agents that are considered dead.
        """
        agent_dead_limit = datetime.timedelta(
            seconds=self.agent_dead_limit_seconds())
        network_count = (context.session.query(ndab_model.
                         NetworkDhcpAgentBinding).
                         filter_by(dhcp_agent_id=agent['id']).count())
        # amount of networks assigned to agent affects the amount of time we
        # give it to start up. Tests show that it's more or less safe to
        # assume that DHCP agent processes each network in less than 2
        # seconds. So, give it this additional time for each of the networks.
        additional_time = datetime.timedelta(seconds=2 * network_count)
        LOG.debug("Checking if agent starts up and giving it additional %s",
                  additional_time)
        agent_expected_up = (agent['started_at'] + agent_dead_limit +
                             additional_time)
        return agent_expected_up > timeutils.utcnow()
    def _schedule_network(self, context, network_id, dhcp_notifier):
        """Schedule one unhosted network and notify the chosen agents."""
        LOG.info("Scheduling unhosted network %s", network_id)
        try:
            # TODO(enikanorov): have to issue redundant db query
            # to satisfy scheduling interface
            network = self.get_network(context, network_id)
            agents = self.schedule_network(context, network)
            if not agents:
                LOG.info("Failed to schedule network %s, "
                         "no eligible agents or it might be "
                         "already scheduled by another server",
                         network_id)
                return
            if not dhcp_notifier:
                return
            for agent in agents:
                LOG.info("Adding network %(net)s to agent "
                         "%(agent)s on host %(host)s",
                         {'net': network_id,
                          'agent': agent.id,
                          'host': agent.host})
                dhcp_notifier.network_added_to_agent(
                    context, network_id, agent.host)
        except Exception:
            # catching any exception during scheduling
            # so if _schedule_network is invoked in the loop it could
            # continue in any case
            LOG.exception("Failed to schedule network %s", network_id)
    def _filter_bindings(self, context, bindings):
        """Skip bindings for which the agent is dead, but starting up."""
        # to save few db calls: store already checked agents in dict
        # id -> is_agent_starting_up
        checked_agents = {}
        for binding in bindings:
            try:
                agent_id = binding.dhcp_agent['id']
                if agent_id not in checked_agents:
                    if self.agent_starting_up(context, binding.dhcp_agent):
                        # When agent starts and it has many networks to process
                        # it may fail to send state reports in defined interval
                        # The server will consider it dead and try to remove
                        # networks from it.
                        checked_agents[agent_id] = True
                        LOG.debug("Agent %s is starting up, skipping",
                                  agent_id)
                    else:
                        checked_agents[agent_id] = False
                if not checked_agents[agent_id]:
                    yield binding
            except exc.ObjectDeletedError:
                # we're not within a transaction, so object can be lost
                # because underlying row is removed, just ignore this issue
                LOG.debug("binding was removed concurrently, skipping it")
def remove_networks_from_down_agents(self):
"""Remove networks from down DHCP agents if admin state is up.
Reschedule them if configured so.
"""
agent_dead_limit = self.agent_dead_limit_seconds()
self.wait_down_agents('DHCP', agent_dead_limit)
cutoff = self.get_cutoff_time(agent_dead_limit)
context = ncontext.get_admin_context()
try:
down_bindings = (
context.session.query(ndab_model.NetworkDhcpAgentBinding).
join(agent_model.Agent).
filter(agent_model.Agent.heartbeat_timestamp < cutoff,
agent_model.Agent.admin_state_up))
dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
dead_bindings = [b for b in
self._filter_bindings(context, down_bindings)]
agents = self.get_agents_db(
context, {'agent_type': [constants.AGENT_TYPE_DHCP]})
if not agents:
# No agents configured so nothing to do.
return
active_agents = [agent for agent in agents if
self.is_eligible_agent(context, True, agent)]
if not active_agents:
LOG.warning("No DHCP agents available, "
"skipping rescheduling")
return
for binding in dead_bindings:
LOG.warning("Removing network %(network)s from agent "
"%(agent)s because the agent did not report "
"to the server in the last %(dead_time)s "
"seconds.",
{'network': binding.network_id,
'agent': binding.dhcp_agent_id,
'dead_time': agent_dead_limit})
# save binding object to avoid ObjectDeletedError
# in case binding is concurrently deleted from the DB
saved_binding = {'net': binding.network_id,
'agent': binding.dhcp_agent_id}
try:
# do not notify agent if it considered dead
# so when it is restarted it won't see network delete
# notifications on its queue
self.remove_network_from_dhcp_agent(context,
binding.dhcp_agent_id,
binding.network_id,
notify=False)
except dhcpagentscheduler.NetworkNotHostedByDhcpAgent:
# measures against concurrent operation
LOG.debug("Network %(net)s already removed from DHCP "
"agent %(agent)s",
saved_binding)
# still continue and allow concurrent scheduling attempt
except Exception:
LOG.exception("Unexpected exception occurred while "
"removing network %(net)s from agent "
"%(agent)s",
saved_binding)
if cfg.CONF.network_auto_schedule:
self._schedule_network(
context, saved_binding['net'], dhcp_notifier)
except Exception:
# we want to be thorough and catch whatever is raised
# to avoid loop abortion
LOG.exception("Exception encountered during network "
"rescheduling")
def get_dhcp_agents_hosting_networks(
self, context, network_ids, active=None, admin_state_up=None,
hosts=None):
if not network_ids:
return []
query = context.session.query(ndab_model.NetworkDhcpAgentBinding)
query = query.options(orm.contains_eager(
ndab_model.NetworkDhcpAgentBinding.dhcp_agent))
query = query.join(ndab_model.NetworkDhcpAgentBinding.dhcp_agent)
if network_ids:
query = query.filter(
ndab_model.NetworkDhcpAgentBinding.network_id.in_(network_ids))
if hosts:
query = query.filter(agent_model.Agent.host.in_(hosts))
if admin_state_up is not None:
query = query.filter(agent_model.Agent.admin_state_up ==
admin_state_up)
return [binding.dhcp_agent
for binding in query
if self.is_eligible_agent(context, active,
binding.dhcp_agent)]
    def add_network_to_dhcp_agent(self, context, id, network_id):
        """Manually bind a network to a DHCP agent (admin API)."""
        # Raises if the network does not exist.
        self._get_network(context, network_id)
        with context.session.begin(subtransactions=True):
            agent_db = self._get_agent(context, id)
            if (agent_db['agent_type'] != constants.AGENT_TYPE_DHCP or
                not services_available(agent_db['admin_state_up'])):
                raise dhcpagentscheduler.InvalidDHCPAgent(id=id)
            dhcp_agents = self.get_dhcp_agents_hosting_networks(
                context, [network_id])
            for dhcp_agent in dhcp_agents:
                if id == dhcp_agent.id:
                    raise dhcpagentscheduler.NetworkHostedByDHCPAgent(
                        network_id=network_id, agent_id=id)
            binding = ndab_model.NetworkDhcpAgentBinding()
            binding.dhcp_agent_id = id
            binding.network_id = network_id
            context.session.add(binding)
        # Notify outside the transaction.
        dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
        if dhcp_notifier:
            dhcp_notifier.network_added_to_agent(
                context, network_id, agent_db.host)
    def remove_network_from_dhcp_agent(self, context, id, network_id,
                                       notify=True):
        """Unbind *network_id* from DHCP agent *id*.

        Reserves the agent's DHCP port so the IP is reused on a subsequent
        add, deletes the binding, and optionally notifies the agent.
        """
        agent = self._get_agent(context, id)
        try:
            query = context.session.query(ndab_model.NetworkDhcpAgentBinding)
            binding = query.filter(
                ndab_model.NetworkDhcpAgentBinding.network_id == network_id,
                ndab_model.NetworkDhcpAgentBinding.dhcp_agent_id == id).one()
        except exc.NoResultFound:
            raise dhcpagentscheduler.NetworkNotHostedByDhcpAgent(
                network_id=network_id, agent_id=id)
        # reserve the port, so the ip is reused on a subsequent add
        device_id = utils.get_dhcp_agent_device_id(network_id,
                                                   agent['host'])
        filters = dict(network_id=[network_id],
                       device_owner=[constants.DEVICE_OWNER_DHCP])
        ports = self.get_ports(context, filters=filters)
        # NOTE(kevinbenton): there should only ever be one port per
        # DHCP agent per network so we don't have to worry about one
        # update_port passing and another failing
        for port in ports:
            if port['device_id'].startswith(device_id):
                port['device_id'] = n_const.DEVICE_ID_RESERVED_DHCP_PORT
                try:
                    self.update_port(context, port['id'], dict(port=port))
                except n_exc.PortNotFound:
                    LOG.warning("Port %(port)s deleted concurrently "
                                "by agent",
                                {'port': port['id']})
        with context.session.begin():
            context.session.delete(binding)
        LOG.warning("Unbinding network %(network)s from agent %(agent)s",
                    {'network': network_id, 'agent': id})
        if not notify:
            return
        dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
        if dhcp_notifier:
            dhcp_notifier.network_removed_from_agent(
                context, network_id, agent['host'])
def list_networks_on_dhcp_agent(self, context, id):
query = context.session.query(
ndab_model.NetworkDhcpAgentBinding.network_id)
query = query.filter(
ndab_model.NetworkDhcpAgentBinding.dhcp_agent_id == id)
net_ids = [item[0] for item in query]
if net_ids:
return {'networks':
self.get_networks(context, filters={'id': net_ids})}
else:
# Exception will be thrown if the requested agent does not exist.
self._get_agent(context, id)
return {'networks': []}
def list_active_networks_on_active_dhcp_agent(self, context, host):
try:
agent = self._get_agent_by_type_and_host(
context, constants.AGENT_TYPE_DHCP, host)
except ext_agent.AgentNotFoundByTypeHost:
LOG.debug("DHCP Agent not found on host %s", host)
return []
if not services_available(agent.admin_state_up):
return []
query = context.session.query(
ndab_model.NetworkDhcpAgentBinding.network_id)
query = query.filter(
ndab_model.NetworkDhcpAgentBinding.dhcp_agent_id == agent.id)
net_ids = [item[0] for item in query]
if net_ids:
return self.get_networks(
context,
filters={'id': net_ids, 'admin_state_up': [True]}
)
else:
return []
def list_dhcp_agents_hosting_network(self, context, network_id):
dhcp_agents = self.get_dhcp_agents_hosting_networks(
context, [network_id])
agent_ids = [dhcp_agent.id for dhcp_agent in dhcp_agents]
if agent_ids:
return {
'agents': self.get_agents(context, filters={'id': agent_ids})}
else:
return {'agents': []}
    @utils.synchronized('schedule-networks', external=True)
    def schedule_network(self, context, created_network):
        """Pick DHCP agent(s) for *created_network* via the configured
        scheduler; no-op (returns None) when no scheduler is configured.
        """
        # Cross-process lock: only one server schedules at a time.
        if self.network_scheduler:
            return self.network_scheduler.schedule(
                self, context, created_network)
    @utils.synchronized('auto-schedule-networks', external=True)
    def auto_schedule_networks(self, context, host):
        """Schedule unhosted networks to the DHCP agent on *host*."""
        if self.network_scheduler:
            self.network_scheduler.auto_schedule_networks(self, context, host)
def _relocate_network(self, context, agent_id, network):
LOG.debug("relocating network {}".format(network['id']))
try:
self.remove_network_from_dhcp_agent(context,
agent_id,
network['id'])
except dhcpagentscheduler.NetworkNotHostedByDhcpAgent:
# measures against concurrent operation
LOG.warning("Network %(net)s already removed from DHCP "
"agent %(agent)s",
{"net": network['id'],
"agent": agent_id})
return []
agents = self.schedule_network(context, network)
dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
if not agents:
LOG.warning(("Relocation of network {} has failed").format(
network['id']))
return []
elif dhcp_notifier:
for agent in agents:
dhcp_notifier.network_added_to_agent(
context, network['id'], agent['host'])
return agents
def relocate_networks(self, context, agent):
"""Remove networks from given agent and attempt to reschedule to a
different agent. This function assumes that it whatever condition led
to needing to relocate the networks away from the agent will also
prevent it from rescheduling to that same agent; therefore all
agent/host state changes must be persisted to the database before
invoking this function.
"""
agent_id = agent['id']
result = self.list_networks_on_dhcp_agent(context, agent_id)
networks = result.get('networks')
device_id = utils.get_dhcp_agent_device_id("%", agent['host'])
with context.session.begin():
# Reserve all the DHCP ports for networks on this agent,
# so that the ips are reused on subsequent adds.
query = context.session.query(models_v2.Port)
query = query.filter(
models_v2.Port.device_id.like(device_id)
)
query.update({'device_id': n_const.DEVICE_ID_RESERVED_DHCP_PORT},
synchronize_session=False)
# Delete all the dhcp network bindings for this agent.
query = context.session.query(ndab_model.NetworkDhcpAgentBinding)
query = query.filter(
ndab_model.NetworkDhcpAgentBinding.dhcp_agent_id == agent_id
)
query.delete(synchronize_session=False)
# Iterate through networks on the agent, notifying the guest that each
# network is removed, and then reschedule the network to a new agent.
dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
for network in networks:
network_id = network['id']
if dhcp_notifier:
dhcp_notifier.network_removed_from_agent(
context, network_id, agent['host'])
new_agents = self.schedule_network(context, network)
if not new_agents:
LOG.warning(("Relocation of network {} has failed").format(
network_id))
continue
elif dhcp_notifier:
for new_agent in new_agents:
dhcp_notifier.network_added_to_agent(
context, network_id, new_agent['host'])
def _can_dhcp_agent_host_network(self, context, agent, network_id):
"""Return true if the agent specified can host the network.
:returns: True if given DHCP agent can host the given network id
"""
if not self.is_host_available(context, agent['host']):
return False
candidate_hosts = self.filter_hosts_with_network_access(
context, network_id, [agent['host']])
return bool(candidate_hosts)
def _count_net_vlan_segments(self, networks):
count = 0
for network in networks:
count += network['dhcp_vlan_segments']
return count
    def redistribute_networks(self, context,
                              _meets_network_rescheduling_threshold):
        """Redistribute to a more optimal network distribution

        :param _meets_network_rescheduling_threshold: predicate taking
            (busiest_segment_count, candidate_segment_count) and deciding
            whether moving one network is worthwhile.
        """
        # Don't reschedule if more than one DHCP agent per DHCP server
        if cfg.CONF.dhcp_agents_per_network > 1:
            LOG.warning("DHCP agent redistribution disabled because "
                        "dhcp_agents_per_network is greater than 1")
            return
        start_time = time.time()
        filters = {'agent_type': [constants.AGENT_TYPE_DHCP]}
        agents = self.get_agents(context, filters)
        networks_on_agents = []
        rescheduled_networks = []
        network_vlans = {}
        # Count vlan segments on networks
        subnet_filters = {"enable_dhcp": [True]}
        for subnet in self.get_subnets(context, filters=subnet_filters):
            network_id = subnet['network_id']
            vlan_id = subnet.get(wrs_net.VLAN, 0)
            if network_id not in network_vlans:
                network_vlans[network_id] = set()
            network_vlans[network_id].add(vlan_id)
        # Create a list of tuples (agent_id, [network_id_0, ..., network_id_n])
        for agent in agents:
            result = self.list_networks_on_dhcp_agent(context, agent['id'])
            for network in result['networks']:
                network_id = network['id']
                vlan_segments = len(network_vlans.get(network_id, []))
                network['dhcp_vlan_segments'] = vlan_segments
            networks_on_agents.append((agent, result))
        db_completion_time = time.time()
        found_match = None
        # Loop through agents to try redistributing on all but the last agent
        while len(networks_on_agents) > 1 or found_match:
            # Sort by number of networks during first run,
            # and re-sort the list if any networks are relocated
            networks_on_agents.sort(
                key=(lambda x: self._count_net_vlan_segments(x[1]['networks']))
            )
            # If arriving here either during first run, or after going through
            # without any relocations, then pop the agent with most networks,
            # and try redistributing its networks.
            if not found_match:
                busiest_agent_networks = networks_on_agents.pop()
            found_match = None
            networks_on_busiest_agent = self._count_net_vlan_segments(
                busiest_agent_networks[1]['networks']
            )
            # Iterate through list of DHCP agents sorted in ascending order
            # by the number of networks they are hosting
            for agent_networks in networks_on_agents:
                minimum_networks = self._count_net_vlan_segments(
                    agent_networks[1]['networks']
                )
                # Stop trying to reschedule from the busiest agent, if there is
                # no possibility to reschedule to the agent of the current
                # iteration. Because the list of agents is sorted by number
                # of networks, if the agent of the current iteration doesn't
                # meet the rescheduling threshold from the busiest agent, then
                # no agent will.
                if not _meets_network_rescheduling_threshold(
                        networks_on_busiest_agent, minimum_networks):
                    break
                # Sort based on number of vlan segments, so that network with
                # most vlan segments is rescheduled first.
                busiest_agent_networks[1]['networks'].sort(
                    key=(lambda x: self._count_net_vlan_segments([x])),
                    reverse=True
                )
                # Loop through networks on busiest agent, and see if it can be
                # rescheduled to the agent of the current iteration. This is
                # only to check if rescheduling is possible; if it can be
                # rescheduled, then it will still use the default scheduler to
                # schedule it.
                for network in busiest_agent_networks[1]['networks']:
                    # Reschedule network at most once per run. This will
                    # minimize the downtime of the network's DHCP service,
                    # as well as linearly bound the number of relocations.
                    if network['id'] in rescheduled_networks:
                        continue
                    # In the case of multiple vlans on network, check that it
                    # still meets the rescheduling threshold
                    vlan_subnets = self._count_net_vlan_segments([network])
                    if not _meets_network_rescheduling_threshold(
                            networks_on_busiest_agent - vlan_subnets + 1,
                            minimum_networks):
                        continue
                    if self._can_dhcp_agent_host_network(context,
                                                         agent_networks[0],
                                                         network['id']):
                        rescheduled_networks.append(network['id'])
                        new_agents = self._relocate_network(
                            context, busiest_agent_networks[0]['id'], network
                        )
                        # Record the moved network under whichever tracked
                        # agent actually received it.
                        for new_agent in new_agents:
                            for networks_on_agent in networks_on_agents:
                                agent = networks_on_agent[0]
                                if agent['host'] == new_agent['host']:
                                    networks_on_agent[1]['networks'].append(
                                        network
                                    )
                        found_match = network
                        break
                if found_match:
                    busiest_agent_networks[1]['networks'].remove(found_match)
                    break
        end_time = time.time()
        LOG.warning("redistribute_networks took %(total_time)d seconds to "
                    "relocate %(count)d networks including "
                    "%(db_access_time)d seconds accessing DB",
                    {'total_time': (end_time - start_time),
                     'count': len(rescheduled_networks),
                     'db_access_time': (db_completion_time - start_time)})
class AZDhcpAgentSchedulerDbMixin(DhcpAgentSchedulerDbMixin,
                                  network_az.NetworkAvailabilityZoneMixin):
    """Mixin class to add availability_zone supported DHCP agent scheduler."""
    def get_network_availability_zones(self, network):
        """Return the distinct availability zones of the hosting agents."""
        return list({agent.availability_zone
                     for agent in network.dhcp_agents})
# helper functions for readability.
def services_available(admin_state_up):
    """Return whether services may run given an agent's admin state."""
    if not cfg.CONF.enable_services_on_agents_with_admin_state_down:
        return admin_state_up
    # Services are available regardless admin_state_up
    return True
def get_admin_state_up_filter():
    """Return the admin_state_up filter value honouring the config knob."""
    # None disables admin_state_up filtering entirely; True restricts
    # queries to admin-up agents.
    return (None
            if cfg.CONF.enable_services_on_agents_with_admin_state_down
            else True)
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/103_FCN.ipynb (unless otherwise specified).
__all__ = ['FCN']
# Cell
from ..imports import *
from .layers import *
# Cell
class FCN(Module):
    """Fully Convolutional Network head for sequence classification.

    Three same-padded Conv1d blocks (ReLU activation) followed by global
    average pooling and a linear classification layer.

    Args:
        c_in: number of input channels/variables.
        c_out: number of output classes.
        layers: channel widths of the three convolutional blocks.
        kss: kernel sizes of the three convolutional blocks.
    """
    # NOTE: tuple defaults replace the original mutable list defaults
    # (mutable default arguments are shared across calls); indexing
    # behaviour is unchanged.
    def __init__(self, c_in, c_out, layers=(128, 256, 128), kss=(7, 5, 3)):
        self.conv1 = Conv1d(c_in, layers[0], kss[0], padding='same', act_fn='relu')
        self.conv2 = Conv1d(layers[0], layers[1], kss[1], padding='same', act_fn='relu')
        self.conv3 = Conv1d(layers[1], layers[2], kss[2], padding='same', act_fn='relu')
        self.gap = nn.AdaptiveAvgPool1d(1)
        self.squeeze = Squeeze(-1)
        self.fc = nn.Linear(layers[-1], c_out)
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.squeeze(self.gap(x))
        return self.fc(x)
# This script generates an STL file with an engraved ring.
# To use the script the following fonts are included:
# http://www.dafont.com/pacifico.font
# http://www.dafont.com/hearts-for-3d-fx.font
# Run the script as a parameter to the FreeCAD version 0.15
#
# The MIT License (MIT)
#
# Copyright (c) 2016
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import os
import sys
import Draft
import Mesh
def sides(width, radius, thickness):
    """Return (cathetus_top, cathetus_side, hypotenuse) of the right
    triangle spanned by half the ring width and the band centreline."""
    top = (thickness / 2.0) + radius
    side = width / 2.0
    hyp = math.sqrt(math.pow(side, 2) + math.pow(top, 2))
    return (top, side, hyp)
def remove(thing):
    """Delete *thing* from the active document (looked up by Label)."""
    App.activeDocument().removeObject(thing.Label)
def clear():
    """Remove every object from the active document."""
    document = App.activeDocument()
    for obj in document.Objects:
        remove(obj)
def find(name):
    """Return the document object with the given internal name."""
    doc = App.activeDocument()
    return doc.getObject(name)
def rename(thing, name):
    """Set both the internal object name and the visible label."""
    for attribute in ('ObjectName', 'Label'):
        setattr(thing, attribute, name)
def union(shapes):
    """Fuse *shapes* into a single Part::MultiFuse object."""
    doc = App.activeDocument()
    fused = doc.addObject('Part::MultiFuse')
    fused.Shapes = shapes
    doc.recompute()
    return fused
def intersection(shapes):
    """Intersect *shapes* into a single Part::MultiCommon object."""
    doc = App.activeDocument()
    common = doc.addObject('Part::MultiCommon')
    common.Shapes = shapes
    doc.recompute()
    return common
def subtraction(thing1, thing2):
    """Return a Part::Cut object: *thing1* minus *thing2*."""
    doc = App.activeDocument()
    cut = doc.addObject('Part::Cut')
    cut.Base = thing1
    cut.Tool = thing2
    doc.recompute()
    return cut
def sphere(radius):
    """Create a Part::Sphere of the given radius at the origin."""
    doc = App.activeDocument()
    ball = doc.addObject('Part::Sphere')
    ball.Radius = radius
    doc.recompute()
    return ball
def cylinder(radius, height):
    """Create a Part::Cylinder centred on the origin, axis along Y."""
    doc = App.activeDocument()
    solid = doc.addObject('Part::Cylinder')
    solid.Radius = radius
    solid.Height = height
    # Rotate the axis from Z to Y and shift by half the height so the
    # cylinder straddles the origin.
    solid.Placement = App.Placement(App.Vector(0, height / 2.0, 0),
                                    App.Rotation(App.Vector(1, 0, 0), 90),
                                    App.Vector(0, 0, 0))
    doc.recompute()
    return solid
def torus(radius, thickness, offset):
    """Create a Part::Torus of ring radius *radius* and tube diameter
    *thickness*, shifted along Y by *offset* and rotated 90deg about X."""
    doc = App.activeDocument()
    tube_radius = thickness / 2.0
    ring = doc.addObject('Part::Torus')
    ring.Radius1 = radius
    ring.Radius2 = tube_radius
    ring.Placement = App.Placement(App.Vector(0, offset, 0),
                                   App.Rotation(App.Vector(1, 0, 0), 90),
                                   App.Vector(0, 0, 0))
    doc.recompute()
    return ring
def hollowed(outer, inner):
    """Return a spherical shell: sphere(outer) minus sphere(inner)."""
    shell = sphere(outer)
    core = sphere(inner)
    return subtraction(shell, core)
def strutted(radius, thickness, clearance):
    """Two tori (struts) separated by *clearance*, fused together."""
    centreline = radius + (thickness / 2.0)
    upper = torus(centreline, thickness, clearance / 2.0)
    lower = torus(centreline, thickness, -clearance / 2.0)
    return union([upper, lower])
def ringed(width, radius, thickness):
    """Build the solid ring band: a hollow sphere shell trimmed to the
    requested band width by a cylinder."""
    (cathetus_top, cathetus_side, hypotenuse) = sides(width, radius, thickness)
    # The opening angle of the band (acos(side / hyp)) is implied by the
    # trim cylinder, so the previously computed-but-unused 'angle' local
    # was removed.
    outer = hypotenuse + thickness / 2.0
    inner = hypotenuse - thickness / 2.0
    hollow = hollowed(outer, inner)
    trim = cylinder(2.0 * outer, width)
    return intersection([hollow, trim])
def texted(text, font, size, offset, scale, thickness):
    """Create an extruded, X-scaled and horizontally centred text solid.

    The flat ShapeString and the intermediate extrusion are temporary and
    removed once the scaled clone exists; the order of the recompute and
    remove calls matters in FreeCAD, so keep it unchanged.
    """
    doc = App.activeDocument()
    translation = App.Vector(0, offset, -thickness / 2.0)
    rotation = App.Rotation(App.Vector(0, 0, 1), 0)
    center = App.Vector(0, 0, 0)
    flat = Draft.makeShapeString(String=text, FontFile=font, Size=size)
    flat.Placement = App.Placement(translation, rotation, center)
    extruded = doc.addObject('Part::Extrusion')
    extruded.Base = flat
    extruded.Dir = (0, 0, thickness)
    extruded.Solid = (True)
    scaled = Draft.clone(extruded)
    scaled.Scale = (scale, 1.00, 1.00)
    doc.recompute()
    remove(flat)
    remove(extruded)
    # Re-centre horizontally using the actual bounding-box width.
    translation = App.Vector(-scaled.Shape.BoundBox.XLength / 2.0, 0, 0)
    scaled.Placement = App.Placement(translation, rotation, center)
    doc.recompute()
    return scaled
def wrapped(text, font, size, offset, scale, thickness, radius, angle):
    """Create one extruded letter per character of *text* and place the
    letters around a circle of the given radius, starting at *angle*
    degrees; returns the list of letter objects."""
    letters = []
    circumference = 2.0 * math.pi * radius
    factor = circumference / 360.0
    for letter in text:
        if ' ' == letter:
            # A space produces no geometry: just advance along the circle.
            # (Previously the raw string reached the placement code and
            # letters list, which cannot work.)
            angle += 2.0 * size
            continue
        letter = texted(letter, font, size, offset, scale, thickness)
        delta = 0.8 * letter.Shape.BoundBox.XLength / factor
        rotation = App.Placement(App.Vector(0, 0, 0), App.Rotation(App.Vector(0, 1, 0), angle))
        translation = App.Placement(App.Vector(0, 0, radius), App.Rotation())
        letter.Placement = rotation.multiply(translation.multiply(letter.Placement))
        letters.append(letter)
        angle += delta
    return letters
def export(thing, detail, stl):
    """Tessellate *thing* at the given detail and write it to *stl*."""
    doc = App.activeDocument()
    feature = doc.addObject('Mesh::Feature')
    feature.Mesh = Mesh.Mesh(thing.Shape.tessellate(detail))
    Mesh.export([feature], stl)
def go(engraving, radius, thickness, clearance, angle):
    """Assemble the engraved ring and export it next to the script.

    Parameters: engraving text, ring radius, band thickness, strut
    clearance and the start angle (degrees) of the engraving.
    """
    # Fonts are expected to sit beside the script handed to FreeCAD
    # (sys.argv[1] is the script path when run as `freecad <script>`).
    path = os.path.dirname(sys.argv[1])
    pacifico = path + '/Pacifico.ttf'
    hearts = path + '/hearts.ttf'
    RING = 'Ring'
    App.newDocument(RING)
    App.setActiveDocument(RING)
    App.ActiveDocument = App.getDocument(RING)
    Gui.ActiveDocument = Gui.getDocument(RING)
    stamp = []
    stamp.extend(wrapped(engraving, pacifico, 6.2, -1.2, 1.7, 3 * thickness, radius, angle))
    stamp.extend(wrapped('&', pacifico, 3.9, -1.2, 1.7, 3 * thickness, radius, 180))
    stamp.extend(wrapped('l', hearts, 0.9, -1, 1.2, 3 * thickness, radius, +150))
    stamp.extend(wrapped('l', hearts, 0.9, -1, 1.2, 3 * thickness, radius, -150))
    stamp = union(stamp)
    # NOTE(review): parameter order is ringed(width, radius, thickness);
    # 'clearance' is passed as the band width here -- looks intentional
    # but worth confirming.
    blank = ringed(clearance, radius + 0.1, thickness - 0.2)
    # 'engraving' (the text argument) is re-bound to geometry here.
    engraving = intersection([blank, stamp])
    struts = strutted(radius, thickness, clearance)
    ring = union([struts, engraving])
    export(ring, 0.01, path + '/ring.stl')
# Build the ring: text 'love', radius 8.5, band thickness 1.0, strut
# clearance 2.8, engraving start angle -35 degrees (units presumably
# millimetres, FreeCAD's default -- confirm).
go('love', 8.5, 1.0, 2.8, -35.0)
from pathlib import Path
from typing import Callable, Dict, Any
import pandas as pd
from rich.console import Console
from .._job_utils import (
create_alignment_job_directory_structure,
write_single_tilt_series_alignment_output
)
from ... import utils
def align_single_tilt_series(
        tilt_series_id: str,
        tilt_series_df: pd.DataFrame,
        tilt_image_df: pd.DataFrame,
        alignment_function: Callable,
        alignment_function_kwargs: Dict[str, Any],
        output_directory: Path,
):
    """Align a single tilt-series in IMOD using RELION tilt-series metadata.

    Parameters
    ----------
    tilt_series_id: 'rlnTomoName' in RELION tilt-series metadata.
    tilt_series_df: master file for tilt-series metadata.
    tilt_image_df: file containing information for images in a single tilt-series.
    alignment_function: alignment function from yet_another_imod_wrapper.
    alignment_function_kwargs: keyword arguments specific to the alignment function.
    output_directory: directory in which results will be stored.
    """
    console = Console(record=True)

    # Create output directory structure
    stack_directory, external_directory, metadata_directory = \
        create_alignment_job_directory_structure(output_directory)
    imod_directory = external_directory / tilt_series_id
    imod_directory.mkdir(parents=True, exist_ok=True)

    # Establish filenames
    tilt_series_filename = f'{tilt_series_id}.mrc'
    tilt_image_metadata_filename = f'{tilt_series_id}.star'

    # Order is important in IMOD, sort by tilt angle
    tilt_image_df = tilt_image_df.sort_values(by='rlnTomoNominalStageTiltAngle', ascending=True)

    # Create tilt-series stack and align using IMOD
    # implicit assumption - one tilt-axis angle per tilt-series
    console.log('Creating tilt series stack')
    utils.image.stack_image_files(
        image_files=tilt_image_df['rlnMicrographName'],
        output_image_file=stack_directory / tilt_series_filename,
    )
    console.log('Running IMOD alignment')
    alignment_function(
        tilt_series_file=stack_directory / tilt_series_filename,
        tilt_angles=tilt_image_df['rlnTomoNominalStageTiltAngle'],
        pixel_size=tilt_series_df['rlnTomoTiltSeriesPixelSize'],
        # BUG FIX: sort_values keeps the original index, so label-based
        # indexing with [0] could raise KeyError (if label 0 was filtered
        # out) or return a row other than the first; .iloc[0] always takes
        # the first row of the sorted frame.
        nominal_rotation_angle=tilt_image_df['rlnTomoNominalTiltAxisAngle'].iloc[0],
        output_directory=imod_directory,
        **alignment_function_kwargs,
    )
    console.log('Writing STAR file for aligned tilt-series')
    output_xf_file = imod_directory / f'{tilt_series_id}.xf'
    # Only write alignment metadata when IMOD actually produced a
    # transform (.xf) file — a failed alignment is silently skipped here.
    if output_xf_file.exists():
        write_single_tilt_series_alignment_output(
            tilt_image_df=tilt_image_df,
            tilt_series_id=tilt_series_id,
            pixel_size=tilt_series_df['rlnTomoTiltSeriesPixelSize'],
            alignment_directory=imod_directory,
            output_star_file=metadata_directory / tilt_image_metadata_filename,
        )
|
import numpy as np
import flox
from . import parameterized
N = 1000
class Combine:
    """ASV benchmark base: measure flox's dask combine step.

    Subclasses must implement setup() to populate self.kwargs and the
    x_chunk_cohorts / x_chunk_mapreduce fixtures.
    """
    def setup(self, *args, **kwargs):
        raise NotImplementedError
    @parameterized("kind", ("cohorts", "mapreduce"))
    def time_combine(self, kind):
        # Wall-clock benchmark (asv `time_` prefix).
        flox.core._npg_combine(
            getattr(self, f"x_chunk_{kind}"),
            **self.kwargs,
            keepdims=True,
            engine="numpy",
        )
    @parameterized("kind", ("cohorts", "mapreduce"))
    def peakmem_combine(self, kind):
        # Peak-memory benchmark (asv `peakmem_` prefix); same call as above.
        flox.core._npg_combine(
            getattr(self, f"x_chunk_{kind}"),
            **self.kwargs,
            keepdims=True,
            engine="numpy",
        )
class Combine1d(Combine):
    """
    Time the combine step for dask reductions,
    this is for reducing along a single dimension
    """
    def setup(self, *args, **kwargs):
        def construct_member(groups):
            # One intermediate chunk: group labels plus per-group partial
            # results (two arrays here, matching a mean aggregation's
            # sum/count intermediates — TODO confirm shapes vs flox).
            return {
                "groups": groups,
                "intermediates": [
                    np.ones((40, 120, 120, 4), dtype=float),
                    np.ones((40, 120, 120, 4), dtype=int),
                ],
            }
        # motivated by real dask graphs: mapreduce sees chunks with
        # distinct group labels, cohorts sees repeated identical labels.
        self.x_chunk_mapreduce = [
            construct_member(groups)
            for groups in [
                np.array((1, 2, 3, 4)),
                np.array((5, 6, 7, 8)),
                np.array((9, 10, 11, 12)),
            ]
            * 2
        ]
        self.x_chunk_cohorts = [construct_member(groups) for groups in [np.array((1, 2, 3, 4))] * 4]
        self.kwargs = {"agg": flox.aggregations.mean, "axis": (3,), "group_ndim": 1}
|
#Class to create binary tree
class binaryTree():
    """Simple binary tree node holding a key and two child subtrees.

    insertLeftChild/insertRightChild insert a new node directly below
    this one, pushing any existing subtree down one level under the
    newly inserted node.
    """
    def __init__(self, root):
        self.key = root          # payload stored at this node
        self.leftChild = None    # binaryTree or None
        self.rightChild = None   # binaryTree or None
    def insertLeftChild(self, new_node):
        """Insert new_node as left child; old left subtree becomes its left child."""
        if self.leftChild is None:
            self.leftChild = binaryTree(new_node)
        else:
            # BUG FIX: the original aliased the existing child to itself
            # (t = self.leftChild; t.leftChild = self.leftChild), which
            # discarded new_node and created a self-referencing cycle.
            t = binaryTree(new_node)
            t.leftChild = self.leftChild
            self.leftChild = t
    def insertRightChild(self, new_node):
        """Insert new_node as right child; old right subtree becomes its right child."""
        if self.rightChild is None:
            self.rightChild = binaryTree(new_node)
        else:
            # Same fix as insertLeftChild, mirrored for the right side.
            t = binaryTree(new_node)
            t.rightChild = self.rightChild
            self.rightChild = t
    def getRoot(self):
        """Return this node's key."""
        return self.key
    def setRoot(self, value):
        """Replace this node's key."""
        self.key = value
    def getLeftChild(self):
        """Return the left subtree (binaryTree or None)."""
        return self.leftChild
    def getRightChild(self):
        """Return the right subtree (binaryTree or None)."""
        return self.rightChild
#Testing
# Smoke test: build a small tree and show that each insert pushes the
# previous child down one level.
r = binaryTree('a')
print(r.getRoot())                   # 'a'
print(r.getLeftChild())              # None (no children yet)
r.insertLeftChild('b')
print(r.getLeftChild())              # binaryTree instance
print(r.getLeftChild().getRoot())    # 'b'
r.insertRightChild('c')
print(r.getRightChild())             # binaryTree instance
print(r.getRightChild().getRoot())   # 'c'
r.getRightChild().setRoot('hello')
print(r.getRightChild().getRoot())   # 'hello'
import re
from simhash import Simhash
def get_features(s):
    """Normalize *s* and return its width-6 character shingles.

    Lowercases the string, strips every non-word character, then slides
    a 6-character window over the result. At least one shingle is always
    returned, even when the normalized string is shorter than 6 chars.
    """
    width = 6
    normalized = re.sub(r'[^\w]+', '', s.lower())
    shingle_count = max(len(normalized) - width + 1, 1)
    return [normalized[start:start + width] for start in range(shingle_count)]
def _report_pair(str1, str2):
    """Print the simhash of each string and their Hamming distance."""
    h1 = Simhash(str1)
    h2 = Simhash(str2)
    print("hash for " + str1 + " : ")
    # BUG FIX: the original first section printed the literal string
    # "h1.value" instead of the hash value itself.
    print(h1.value)
    print("hash for " + str2 + " : ")
    print(h2.value)
    print("distance:")
    print(h1.distance(h2))
    print("\n")


# Same demo pairs as before: near-identical sentences, single chars,
# single digits, and long near-identical digit strings.
_report_pair('ćHow are you? I am fine. Thanks1.', 'ćHow are you? I am fine. Thanks2.')
_report_pair('a', 'b')
_report_pair('1', '2')
_report_pair('111111111111111', '111111111111112')
|
"""
Released under BSD 3-Clause License,
Copyright (c) 2021 Cerebras Systems Inc.
All rights reserved.
"""
import logging
import os
from typing import List, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch_rev_structs import (
RecomputeSilo,
RevResidualBlock,
RevResidualSilo,
RevSequential,
RevSilo,
RevSpatialDownsample,
)
from .bn_helper import BatchNorm2d, relu_inplace
logger = logging.getLogger(__name__)
ALIGN_CORNERS = True  # module-global; overwritten from config in FRBiFPN_OCR.__init__
# Modeling
# Shared default kwargs for every BatchNorm layer built in this file.
# NOTE(review): this dict is used as a (mutable) default argument all over
# the file — callers must never mutate it in place.
norm_kwargs_defaults = {
    'eps': 1e-3,
    'momentum': 0.1,
    'affine': True,
    'track_running_stats': True,
}
def Norm2d(
    num_features: int, norm_kwargs: dict = norm_kwargs_defaults,
):
    """Build the project's 2-D normalization layer.

    Thin wrapper over bn_helper.BatchNorm2d so the norm implementation
    can be swapped in one place.
    """
    # NOTE(review): norm_kwargs defaults to a shared module-level dict;
    # treat it as read-only.
    return BatchNorm2d(num_features, **norm_kwargs)
class ModuleHelper:
    """Small factory helpers shared by the OCR head modules."""

    @staticmethod
    def NormReLU(num_features):
        # Norm followed by ReLU, using the file-wide BatchNorm defaults.
        norm = Norm2d(num_features, norm_kwargs=norm_kwargs_defaults)
        act = nn.ReLU(inplace=relu_inplace)
        return nn.Sequential(norm, act)

    @staticmethod
    def Norm2d(num_features):
        # Bare norm layer; resolves to the module-level Norm2d factory.
        return Norm2d(num_features, norm_kwargs=norm_kwargs_defaults)
class SpatialGather_Module(nn.Module):
    """
    Aggregate the context features according to the initial
    predicted probability distribution.
    Employ the soft-weighted method to aggregate the context.
    """
    def __init__(self, cls_num=0, scale=1):
        super(SpatialGather_Module, self).__init__()
        self.cls_num = cls_num  # number of classes k (informational; unused in forward)
        self.scale = scale      # temperature applied to probs before softmax
    def forward(self, feats, probs):
        # feats: (batch, c, h, w) pixel features
        # probs: (batch, k, h, w) per-class scores/logits
        batch_size, c, h, w = (
            probs.size(0),
            probs.size(1),
            probs.size(2),
            probs.size(3),
        )
        probs = probs.view(batch_size, c, -1)
        feats = feats.view(batch_size, feats.size(1), -1)
        feats = feats.permute(0, 2, 1)  # batch x hw x c
        probs = F.softmax(self.scale * probs, dim=2)  # batch x k x hw
        # (b,k,hw) @ (b,hw,c) -> (b,k,c); permute+unsqueeze yields the
        # per-class context tensor.
        ocr_context = (
            torch.matmul(probs, feats).permute(0, 2, 1).unsqueeze(3)
        )  # batch x c x k x 1
        return ocr_context
class _ObjectAttentionBlock(nn.Module):
    '''
    The basic implementation for object context block
    Input:
        N X C X H X W
    Parameters:
        in_channels : the dimension of the input feature map
        key_channels : the dimension after the key/query transform
        scale : choose the scale to downsample the input feature maps
            (save memory cost)
    Return:
        N X C X H X W
    '''
    def __init__(self, in_channels, key_channels, scale=1):
        super(_ObjectAttentionBlock, self).__init__()
        self.scale = scale
        self.in_channels = in_channels
        self.key_channels = key_channels
        # Used only when scale > 1 to downsample x before attention.
        self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
        # Query transform applied to the pixel features (two 1x1 conv+norm+ReLU).
        self.f_pixel = nn.Sequential(
            nn.Conv2d(
                in_channels=self.in_channels,
                out_channels=self.key_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            ModuleHelper.NormReLU(self.key_channels),
            nn.Conv2d(
                in_channels=self.key_channels,
                out_channels=self.key_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            ModuleHelper.NormReLU(self.key_channels),
        )
        # Key transform applied to the object (proxy) features.
        self.f_object = nn.Sequential(
            nn.Conv2d(
                in_channels=self.in_channels,
                out_channels=self.key_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            ModuleHelper.NormReLU(self.key_channels),
            nn.Conv2d(
                in_channels=self.key_channels,
                out_channels=self.key_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            ModuleHelper.NormReLU(self.key_channels),
        )
        # Value transform applied to the object (proxy) features.
        self.f_down = nn.Sequential(
            nn.Conv2d(
                in_channels=self.in_channels,
                out_channels=self.key_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            ModuleHelper.NormReLU(self.key_channels),
        )
        # Output projection back from key_channels to in_channels.
        self.f_up = nn.Sequential(
            nn.Conv2d(
                in_channels=self.key_channels,
                out_channels=self.in_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            ModuleHelper.NormReLU(self.in_channels),
        )
    def forward(self, x, proxy):
        # x: pixel features (N, C, H, W); proxy: object context features
        # (presumably the SpatialGather_Module output — TODO confirm).
        batch_size, h, w = x.size(0), x.size(2), x.size(3)
        if self.scale > 1:
            x = self.pool(x)
        # Queries come from pixels; keys/values from the proxy features.
        query = self.f_pixel(x).view(batch_size, self.key_channels, -1)
        query = query.permute(0, 2, 1)
        key = self.f_object(proxy).view(batch_size, self.key_channels, -1)
        value = self.f_down(proxy).view(batch_size, self.key_channels, -1)
        value = value.permute(0, 2, 1)
        # Scaled dot-product attention over the proxy positions.
        sim_map = torch.matmul(query, key)
        sim_map = (self.key_channels ** -0.5) * sim_map
        sim_map = F.softmax(sim_map, dim=-1)
        # add bg context ...
        context = torch.matmul(sim_map, value)
        context = context.permute(0, 2, 1).contiguous()
        context = context.view(batch_size, self.key_channels, *x.size()[2:])
        context = self.f_up(context)
        if self.scale > 1:
            # Restore the pre-pooling spatial resolution.
            context = F.interpolate(
                input=context,
                size=(h, w),
                mode='bilinear',
                align_corners=ALIGN_CORNERS,
            )
        return context
class ObjectAttentionBlock2D(_ObjectAttentionBlock):
    """2-D alias of _ObjectAttentionBlock, kept as the public entry point."""

    def __init__(self, in_channels, key_channels, scale=1):
        super().__init__(in_channels, key_channels, scale)
class SpatialOCR_Module(nn.Module):
    """
    Implementation of the OCR module:
    We aggregate the global object representation to update the
    representation for each pixel.
    """
    def __init__(
        self, in_channels, key_channels, out_channels, scale=1, dropout=0.1,
    ):
        super(SpatialOCR_Module, self).__init__()
        self.object_context_block = ObjectAttentionBlock2D(
            in_channels, key_channels, scale
        )
        # Fusion input is [context, feats] concatenated along channels.
        _in_channels = 2 * in_channels
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(
                _in_channels, out_channels, kernel_size=1, padding=0, bias=False
            ),
            ModuleHelper.NormReLU(out_channels),
            nn.Dropout2d(dropout),
        )
    def forward(self, feats, proxy_feats):
        # Attend pixels to the per-class proxy features, then fuse the
        # attended context with the original features via a 1x1 conv.
        context = self.object_context_block(feats, proxy_feats)
        return self.conv_bn_dropout(torch.cat([context, feats], 1))
class ReZero(nn.Module):
    """Learnable scalar gate, initialized to zero (ReZero residual trick)."""

    def __init__(self):
        super().__init__()
        # Single broadcastable scale factor; starting at 0 makes the gated
        # branch contribute nothing at initialization.
        self.rezero = nn.Parameter(torch.zeros((1, 1, 1, 1)))

    def forward(self, inputs):
        return inputs * self.rezero
class Conv2d(nn.Module):
    """nn.Conv2d wrapper with an optional ReZero gate on the output.

    Submodule names (``conv``, ``rezero``) match the original so
    state_dict checkpoints stay compatible.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        padding: int = 0,
        dilation: int = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',
        rezero: bool = False,
    ):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            padding_mode=padding_mode,
        )
        # Optional learnable zero-initialized output gate.
        self.rezero = ReZero() if rezero else None

    def extra_repr(self):
        return f"rezero:{self.rezero is not None}"

    def forward(self, inputs):
        out = self.conv(inputs)
        if self.rezero is None:
            return out
        return self.rezero(out)
class Mult(nn.Module):
    """Elementwise product as an explicit module (keeps the op named in the graph)."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        product = x * y
        return product
class SqueezeExcite(nn.Module):
    """Squeeze-and-Excitation channel gating (MobileNetV3-style, hard activations)."""
    def __init__(self, in_channels, se_ch):
        super().__init__()
        # squeeze (global pool) -> bottleneck to se_ch -> expand back ->
        # per-channel gate in [0, 1] via Hardsigmoid.
        self.se = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            Conv2d(in_channels=in_channels, out_channels=se_ch, kernel_size=1),
            nn.Hardswish(),
            Conv2d(in_channels=se_ch, out_channels=in_channels, kernel_size=1),
            nn.Hardsigmoid(),
        )
        self.mult = Mult()
    def forward(self, x):
        # Scale the input by its channel attention weights.
        return self.mult(self.se(x), x)
class ConvNormAct(nn.Module):
    """Conv -> Norm -> Hardswish block with optional ReZero / zero-init tricks.

    rezero + mv_rezero=False gates the conv output inside Conv2d;
    rezero + mv_rezero=True gates the whole block output instead.
    zero_init zeroes the norm weight so the block starts as identity-like
    (mutually exclusive with rezero).
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        padding: int = None,
        groups: int = 1,
        conv_bias: bool = None,
        rezero: bool = False,
        mv_rezero: bool = False,
        act: bool = True,
        norm: bool = True,
        norm_kwargs: dict = norm_kwargs_defaults,
        zero_init: bool = False,
    ):
        super().__init__()
        self.zero_init = zero_init
        if zero_init:
            assert not rezero
        # Default to "same"-style padding for odd kernel sizes.
        if padding is None:
            padding = (kernel_size - 1) // 2
        # Bias is redundant when followed by an affine norm.
        if conv_bias is None:
            conv_bias = not norm
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=conv_bias,
            rezero=rezero and not mv_rezero,
        )
        self.norm = None
        if norm:
            self.norm = Norm2d(
                num_features=out_channels, norm_kwargs=norm_kwargs
            )
            if zero_init:
                # Zero the norm scale so the block output starts at zero.
                nn.init.zeros_(self.norm.weight)
        self.act = nn.Hardswish() if act else None
        self.rezero = None
        if rezero and mv_rezero:
            # Gate after norm+act instead of directly after the conv.
            self.rezero = ReZero()
    def forward(self, x):
        x = self.conv(x)
        if self.norm is not None:
            x = self.norm(x)
        if self.act is not None:
            x = self.act(x)
        if self.rezero is not None:
            x = self.rezero(x)
        return x
class NormActConv(nn.Module):
    """Pre-activation variant of ConvNormAct: Norm -> Hardswish -> Conv.

    Keeps the same signature as ConvNormAct, but mv_rezero/zero_init are
    not supported here (asserted off); rezero is forwarded into Conv2d.
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        padding: int = None,
        groups: int = 1,
        conv_bias: bool = None,
        rezero: bool = False,
        mv_rezero: bool = False,
        act: bool = True,
        norm: bool = True,
        norm_kwargs: dict = norm_kwargs_defaults,
        zero_init: bool = False,
    ):
        super().__init__()
        assert not mv_rezero
        assert not zero_init
        self.norm = None
        if norm:
            # Pre-activation: norm operates on the *input* channels.
            self.norm = Norm2d(
                num_features=in_channels, norm_kwargs=norm_kwargs
            )
        self.act = nn.Hardswish() if act else None
        if padding is None:
            padding = (kernel_size - 1) // 2
        if conv_bias is None:
            conv_bias = not norm
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=conv_bias,
            rezero=rezero,
        )
    def forward(self, x):
        if self.norm is not None:
            x = self.norm(x)
        if self.act is not None:
            x = self.act(x)
        return self.conv(x)
class MBConvBlock(nn.Module):
    """Mobile Inverted Residual Bottleneck Block.
    References:
        [1] https://arxiv.org/abs/1704.04861 (MobileNet v1)
        [2] https://arxiv.org/abs/1801.04381 (MobileNet v2)
        [3] https://arxiv.org/abs/1905.02244 (MobileNet v3)
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        expand_ratio: int = 2,
        stride: int = 1,
        se_ratio: float = 0.125,
        rezero: bool = True,
        mv_rezero: bool = False,
        id_skip: bool = False,
        norm: bool = True,
        norm_kwargs: dict = norm_kwargs_defaults,
        ext_norm: bool = False,
        zero_init: bool = False,
    ):
        super().__init__()
        # skip connection
        # The residual add is only valid when shapes match exactly.
        if id_skip:
            assert stride == 1
            assert in_channels == out_channels
        self.id_skip = id_skip
        # Expanded (bottleneck-inverted) channel count.
        int_ch = int(in_channels * expand_ratio)
        self.ext_norm = None
        if ext_norm:
            # Optional normalization of the raw block input.
            self.ext_norm = Norm2d(
                num_features=in_channels, norm_kwargs=norm_kwargs
            )
        self.exp_conv = None
        if expand_ratio != 1:
            # 1x1 expansion conv (skipped when no expansion is requested).
            self.exp_conv = ConvNormAct(
                in_channels=in_channels,
                out_channels=int_ch,
                kernel_size=1,
                norm=norm,
                norm_kwargs=norm_kwargs,
            )
        # Depthwise convolution
        self.dw_conv = ConvNormAct(
            in_channels=int_ch,
            out_channels=int_ch,
            kernel_size=kernel_size,
            stride=stride,
            groups=int_ch,
            norm=norm,
            norm_kwargs=norm_kwargs,
        )
        # Squeeze and Excitation layer, if desired
        self.se = None
        if 0 < se_ratio <= 1:
            # SE bottleneck width is computed from the *input* channels.
            self.se = SqueezeExcite(int_ch, max(1, int(in_channels * se_ratio)))
        # Pointwise convolution
        # Linear (no activation) 1x1 projection back to out_channels;
        # rezero/zero_init make the projection start near zero.
        self.project_conv = ConvNormAct(
            in_channels=int_ch,
            out_channels=out_channels,
            kernel_size=1,
            rezero=rezero,
            mv_rezero=mv_rezero,
            act=False,
            norm=norm,
            norm_kwargs=norm_kwargs,
            zero_init=zero_init,
        )
    def forward(self, inputs):
        """
        MBConvBlock's fwd function.
        Args:
            inputs (tensor): Input tensor.
        Returns:
            Output of this block after processing.
        """
        # Expansion and Depthwise Convolution
        x = inputs
        if self.ext_norm is not None:
            x = self.ext_norm(x)
        if self.exp_conv is not None:
            x = self.exp_conv(x)
        x = self.dw_conv(x)
        # Squeeze and Excitation
        if self.se is not None:
            x = self.se(x)
        # Pointwise Convolution
        x = self.project_conv(x)
        # Skip connection and drop connect
        if self.id_skip:
            # Skip connection
            return x + inputs
        return x
class FastGlobalAvgPool2d(nn.Module):
    """Global average pool over the spatial dims via a flattened mean."""

    def __init__(self, flatten=False):
        super().__init__()
        # When True, return (N, C) instead of (N, C, 1, 1).
        self.flatten = flatten

    def forward(self, x):
        n, c = x.shape[:2]
        pooled = x.view(n, c, -1).mean(dim=2)
        return pooled if self.flatten else pooled.view(n, c, 1, 1)
class HeadAdapter(nn.Module):
    """
    Implements a classification head to be attached to a FPN
    """
    def __init__(
        self,
        in_ch: List[int] = [48, 64, 80, 160],
        head_ch: List[int] = [48, 64, 128, 320],
        expand_ratios: List[int] = [1, 2, 2, 2],
        se_ratios: List[float] = [0.25, 0.25, 0.125, 0.125],
        dwkp: List[int] = [2, 2, 2, 2],
        dwk: List[int] = [3, 3, 3, 3],
        norm: bool = True,
        norm_kwargs: dict = norm_kwargs_defaults,
        ext_norm: bool = False,
    ):
        # NOTE(review): list defaults are shared mutable objects — safe
        # only as long as nothing mutates them in place.
        super().__init__()
        assert len(head_ch) == len(in_ch)
        # One MBConv transform per pyramid level, mapping in_ch -> head_ch.
        self.xforms = nn.ModuleList()
        for idx, (_in_ch, _head_ch) in enumerate(zip(in_ch, head_ch)):
            self.xforms.append(
                MBConvBlock(
                    in_channels=_in_ch,
                    out_channels=_head_ch,
                    # Effective kernel = base kernel + per-level pad growth.
                    kernel_size=dwk[idx] + dwkp[idx],
                    expand_ratio=expand_ratios[idx],
                    stride=1,
                    se_ratio=se_ratios[idx],
                    rezero=False,
                    id_skip=False,
                    norm=norm,
                    norm_kwargs=norm_kwargs,
                    ext_norm=ext_norm,
                )
            )
    def forward(self, x):
        # run xforms
        # x: one feature map per pyramid level, zipped with its transform.
        return [xform(x_i) for x_i, xform in zip(x, self.xforms)]
class FRBiFPN_OCR(nn.Module):
    """
    Implements Fully Reversible BiFeaturePyramidNetwork (FR-BiFPN) for
    Segmentation training
    """
    def __init__(self, config):
        super().__init__()
        # The OCR blocks read this module-level flag at forward time.
        global ALIGN_CORNERS
        ALIGN_CORNERS = config.MODEL.ALIGN_CORNERS
        extra = config.MODEL.EXTRA
        channels = extra["channels"]
        dwkp = extra["dwkp"]
        dwk = extra["dwk"]
        stem_downsampling = extra["stem_downsampling"]
        fpn_height = extra["fpn_height"]
        num_ext_layers = extra["num_ext_layers"]
        inv_blk_rep = extra["inv_blk_rep"]
        expand_ratios = extra["expand_ratios"]
        se_ratios = extra["se_ratios"]
        rezero = extra["rezero"]
        mv_rezero = extra["mv_rezero"]
        disable_rev = extra["disable_rev"]
        disable_recomp = extra["disable_recomp"]
        space2depthstem = extra["space2depthstem"]
        head_ch = extra["head_ch"]
        head_dwkp = extra["head_dwkp"]
        head_dwk = extra["head_dwk"]
        head_expand_ratios = extra["head_expand_ratios"]
        head_se_ratios = extra["head_se_ratios"]
        norm = extra["norm"]
        norm_kwargs = norm_kwargs_defaults
        ext_norm = extra["ext_norm"]
        zero_init = extra["zero_init"]
        scale = extra["scale"]
        # scale up the models
        fr_bifpn_c_sizes = {
            #: (d, w_m, res, dout)
            0: (2, 1, 224, 0.25),
            1: (2, 4 / 3, 256, 0.25),
            2: (2, 2, 256, 0.3),
            3: (3, 8 / 3, 288, 0.3),
            4: (4, 4, 320, 0.4),
            5: (4, 16 / 3, 352, 0.4),
            6: (5, 20 / 3, 352, 0.5),
        }
        ext_len, width_mult, _, _ = fr_bifpn_c_sizes[scale]
        # Round scaled widths down to the nearest multiple of 16.
        scaled_channels = [int(width_mult * ch) // 16 * 16 for ch in channels]
        scaled_head_ch = [int(width_mult * ch) // 16 * 16 for ch in head_ch]
        channels = scaled_channels
        num_ext_layers = ext_len
        head_ch = scaled_head_ch
        # A single channel count is expanded into a doubling pyramid.
        if isinstance(channels, list) and len(channels) == 1:
            channels = channels[0]
        if isinstance(channels, int):
            self.in_channels, int_ch = channels, channels
            channels *= 4 ** stem_downsampling
            channels = [int(2 ** p * channels) for p in range(fpn_height)]
        else:
            int_ch = channels[0] // (4 ** stem_downsampling)
            self.in_channels = int_ch
        assert int_ch >= 3
        if isinstance(head_ch, list) and len(head_ch) == 1:
            head_ch = head_ch[0]
        if isinstance(head_ch, int):
            head_ch = [int(2 ** p * head_ch) for p in range(fpn_height)]
        # Left-pad the per-level ratios to fpn_height entries.
        while len(expand_ratios) < fpn_height:
            expand_ratios = [expand_ratios[0]] + expand_ratios
        while len(se_ratios) < fpn_height:
            se_ratios = [se_ratios[0]] + se_ratios
        assert (
            len(head_ch)
            == len(channels)
            == len(expand_ratios)
            == len(se_ratios)
        )
        if isinstance(inv_blk_rep, list) and len(inv_blk_rep) == 1:
            inv_blk_rep = inv_blk_rep[0]
        if isinstance(inv_blk_rep, int):
            inv_blk_rep = (num_ext_layers + len(channels) - 1) * [inv_blk_rep]
        assert (num_ext_layers + len(channels) - 1) == len(inv_blk_rep)
        self.zero_init = zero_init
        if zero_init:
            assert not rezero
        # Backbone: a reversible stack built incrementally below.
        self.rev_stack = RevSequential(
            [], disable_rev=disable_rev, disable_recomp=disable_recomp
        )
        # --- stem ---
        if space2depthstem:
            # Space-to-depth stem: one reversible downsample step.
            if stem_downsampling:
                self.rev_stack.append(
                    RevSpatialDownsample(
                        2 ** stem_downsampling, disable_rev=disable_rev
                    )
                )
                int_ch *= 4 ** stem_downsampling
        else:
            # Conv stem: a reversible residual conv block, with up to two
            # interleaved 2x spatial downsamples.
            blk_args = {}
            blk_args["out_channels"] = int_ch // 2
            blk_args["in_channels"] = int_ch - blk_args["out_channels"]
            blk_args["kernel_size"] = 3
            blk_args["stride"] = 1
            blk_args["padding"] = 1
            blk_args["rezero"] = rezero
            blk_args["mv_rezero"] = mv_rezero
            blk_args["zero_init"] = zero_init
            blk_args["norm"] = norm
            blk_args["norm_kwargs"] = norm_kwargs
            # g operates on the complementary channel split of f.
            g_blk_args = blk_args.copy()
            g_blk_args["in_channels"] = blk_args["out_channels"]
            g_blk_args["out_channels"] = blk_args["in_channels"]
            self.rev_stack.append(
                RevResidualBlock(
                    f_transform=ConvNormAct(**blk_args),
                    g_transform=ConvNormAct(**g_blk_args),
                    disable_rev=disable_rev,
                )
            )
            if stem_downsampling:
                self.rev_stack.append(
                    RevSpatialDownsample(2, disable_rev=disable_rev)
                )
                int_ch *= 4
                blk_args["out_channels"] = int_ch // 2
                blk_args["in_channels"] = int_ch - blk_args["out_channels"]
                blk_args["padding"] = None
                blk_args["norm"] = norm
                blk_args["norm_kwargs"] = norm_kwargs
                g_blk_args = blk_args.copy()
                g_blk_args["in_channels"] = blk_args["out_channels"]
                g_blk_args["out_channels"] = blk_args["in_channels"]
                self.rev_stack.append(
                    RevResidualBlock(
                        f_transform=ConvNormAct(**blk_args),
                        g_transform=ConvNormAct(**g_blk_args),
                        disable_rev=disable_rev,
                    )
                )
            if stem_downsampling > 1:
                self.rev_stack.append(
                    RevSpatialDownsample(2, disable_rev=disable_rev)
                )
                int_ch *= 4
        # create FR-BiFPN ie multi-resolution feature pyramid
        mk_layer_args = {}
        mk_layer_args["norm"] = norm
        mk_layer_args["norm_kwargs"] = norm_kwargs
        mk_layer_args["rezero"] = rezero
        mk_layer_args["mv_rezero"] = mv_rezero
        mk_layer_args["zero_init"] = zero_init
        mk_layer_args["disable_rev"] = disable_rev
        mk_layer_args["expand_ratios"] = expand_ratios
        mk_layer_args["se_ratios"] = se_ratios
        mk_layer_args["dwkp"] = dwkp
        mk_layer_args["dwk"] = dwk
        # Grow the pyramid one level at a time (each call adds new_ch).
        for idx, new_ch in enumerate(channels[1:]):
            mk_layer_args["channels"] = channels[: (idx + 1)]
            mk_layer_args["new_ch"] = new_ch
            mk_layer_args["inv_blk_rep"] = inv_blk_rep[idx]
            self.add_rev_make_layer(**mk_layer_args)
        # extend FR-BiFPN ie multi-resolution feature pyramid
        mk_layer_args["channels"] = channels
        mk_layer_args["new_ch"] = 0
        for mk_layer_args["inv_blk_rep"] in inv_blk_rep[len(channels) - 1 :]:
            self.add_rev_make_layer(**mk_layer_args)
        # make non-rev head adapter which uses rev-ckpt (ie recomputation)
        if head_ch:
            assert len(channels) == len(head_ch)
            head_adapt = HeadAdapter(
                in_ch=channels,
                head_ch=head_ch,
                expand_ratios=head_expand_ratios,
                se_ratios=head_se_ratios,
                dwkp=head_dwkp,
                dwk=head_dwk,
                norm=norm,
                norm_kwargs=norm_kwargs,
                ext_norm=ext_norm,
            )
            # The head runs under recomputation instead of reversibility.
            self.rev_stack.recomp_op = RecomputeSilo(
                recomp_transforms=list(head_adapt.xforms),
                disable_recomp=disable_recomp,
            )
            last_inp_channels = int(np.sum(head_ch))
        # segementation defaults
        ocr_mid_channels = config.MODEL.OCR.MID_CHANNELS
        ocr_key_channels = config.MODEL.OCR.KEY_CHANNELS
        num_classes = config.DATASET.NUM_CLASSES
        # OCR segmentation head over the concatenated pyramid features.
        self.conv3x3_ocr = nn.Sequential(
            nn.Conv2d(
                last_inp_channels,
                ocr_mid_channels,
                kernel_size=3,
                stride=1,
                padding=1,
            ),
            ModuleHelper.NormReLU(ocr_mid_channels),
        )
        self.ocr_gather_head = SpatialGather_Module(num_classes)
        self.ocr_distri_head = SpatialOCR_Module(
            in_channels=ocr_mid_channels,
            key_channels=ocr_key_channels,
            out_channels=ocr_mid_channels,
            scale=1,
            dropout=0.05,
        )
        self.cls_head = nn.Conv2d(
            ocr_mid_channels,
            num_classes,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True,
        )
        # Auxiliary segmentation head (also feeds the OCR gather step).
        self.aux_head = nn.Sequential(
            nn.Conv2d(
                last_inp_channels,
                last_inp_channels,
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            ModuleHelper.NormReLU(last_inp_channels),
            nn.Conv2d(
                last_inp_channels,
                num_classes,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=True,
            ),
        )
        self._initialize()
    def init_pretrained_weights(self, pretrained=''):
        """Load weights from *pretrained* (path), keeping only matching keys."""
        if os.path.isfile(pretrained):
            pretrained_dict = torch.load(pretrained)
            logger.info('=> loading pretrained model {}'.format(pretrained))
            model_dict = self.state_dict()
            pretrained_dict = {
                k: v
                for k, v in pretrained_dict.items()
                if k in model_dict.keys()
            }
            model_dict.update(pretrained_dict)
            self.load_state_dict(model_dict, strict=False)
    def add_rev_make_layer(
        self,
        channels: Union[int, List[int]] = [48, 64, 80],
        dwkp: List[int] = [0, 0, 0, 2],
        dwk: List[int] = [3, 5, 5, 3],
        new_ch: int = 160,
        inv_blk_rep: Union[int, List[int]] = 2,
        expand_ratios: List[int] = [1, 1, 2, 2],
        se_ratios: List[float] = [0.25, 0.25, 0.125, 0.0],
        rezero: bool = True,
        mv_rezero: bool = False,
        zero_init: bool = False,
        disable_rev: bool = False,
        upscale_mode: str = 'bilinear',
        norm: bool = True,
        norm_kwargs: dict = norm_kwargs_defaults,
    ):
        """Append one pyramid stage: per-level MBConv silos, then (when
        new_ch > 0) a cross-resolution fuse layer that adds a new level.

        NOTE(review): when new_ch > 0, *channels* is mutated in place
        (``channels += [new_ch]``); __init__ passes slices so its own
        list is not affected — confirm before calling externally.
        """
        # make RevSilo
        blk_args = {}
        blk_args["rezero"] = rezero
        blk_args["mv_rezero"] = mv_rezero
        blk_args["zero_init"] = zero_init
        blk_args["norm"] = norm
        blk_args["norm_kwargs"] = norm_kwargs
        for idx in range(inv_blk_rep):
            xforms = []
            # One reversible MBConv residual block per pyramid level.
            for ch, expr, ser, _dwk, _dwkp in zip(
                channels, expand_ratios, se_ratios, dwk, dwkp
            ):
                blk_args["kernel_size"] = _dwk + _dwkp
                blk_args["out_channels"] = ch // 2
                blk_args["in_channels"] = ch - blk_args["out_channels"]
                blk_args["expand_ratio"] = expr
                blk_args["se_ratio"] = ser
                g_blk_args = blk_args.copy()
                g_blk_args["in_channels"] = blk_args["out_channels"]
                g_blk_args["out_channels"] = blk_args["in_channels"]
                xforms += [
                    RevResidualBlock(
                        f_transform=MBConvBlock(**blk_args),
                        g_transform=MBConvBlock(**g_blk_args),
                        disable_rev=disable_rev,
                    )
                ]
            self.rev_stack.append(RevSilo(xforms, disable_rev=disable_rev,))
        # create / add Fuse layer using RevResidualSilo
        if new_ch:
            channels += [new_ch]
            # f: top-down strided paths; g: bottom-up upsampled paths.
            f_xforms, g_xforms = [], []
            blk_args["stride"] = 2
            for in_height, in_ch in enumerate(channels):
                f_xforms += [[]]
                g_xforms += [[]]
                for out_height, out_ch in enumerate(channels):
                    h_diff = out_height - in_height
                    if h_diff > 0:
                        # Downsample path: strided MBConv to a lower level.
                        blk_args["in_channels"] = channels[in_height]
                        blk_args["out_channels"] = channels[out_height]
                        blk_args["expand_ratio"] = expand_ratios[in_height]
                        blk_args["se_ratio"] = se_ratios[in_height]
                        blk_args["rezero"] = rezero
                        blk_args["mv_rezero"] = mv_rezero
                        blk_args["zero_init"] = zero_init
                        blk_args["stride"] = 2 ** h_diff
                        blk_args["kernel_size"] = (
                            2 * blk_args["stride"] - 1 + dwkp[in_height]
                        )
                        f_xforms[in_height] += [MBConvBlock(**blk_args)]
                    elif h_diff < 0:
                        # Upsample path: MBConv followed by interpolation.
                        blk_args["stride"] = 1
                        blk_args["kernel_size"] = 3 + dwkp[in_height]
                        blk_args["in_channels"] = in_ch
                        blk_args["out_channels"] = out_ch
                        blk_args["expand_ratio"] = expand_ratios[out_height]
                        blk_args["se_ratio"] = se_ratios[out_height]
                        blk_args["rezero"] = rezero
                        blk_args["mv_rezero"] = mv_rezero
                        blk_args["zero_init"] = zero_init
                        xform = [
                            MBConvBlock(**blk_args),
                            nn.Upsample(
                                scale_factor=2 ** (-h_diff), mode=upscale_mode,
                            ),
                        ]
                        g_xforms[in_height] += [nn.Sequential(*xform)]
            self.rev_stack.append(
                RevResidualSilo(f_xforms, g_xforms, disable_rev=disable_rev,)
            )
    def _initialize(self):
        """Kaiming-init convs, unit-init norms, then re-zero the ReZero
        gates and any zero_init norm weights (which the generic init
        would otherwise overwrite)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.zeros_(m.bias.data)
            elif isinstance(m, nn.BatchNorm2d):
                if m.weight is not None:
                    nn.init.ones_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
        # rezero
        for m in self.modules():
            if isinstance(m, ReZero):
                nn.init.zeros_(m.rezero)
        # zero_init
        for m in self.modules():
            if (
                isinstance(m, ConvNormAct)
                and hasattr(m, 'zero_init')
                and m.zero_init
            ):
                nn.init.zeros_(m.norm.weight)
    def _stack_x(self, x):
        """Tile x along channels up to self.in_channels.

        Assumes x has 3 channels (in_channels // 3 full copies plus a
        partial copy for the remainder) — TODO confirm against callers.
        """
        mc = self.in_channels % 3
        c_stack = (self.in_channels // 3) * [x]
        c_stack += [x[:, :mc]]
        return torch.cat(c_stack, dim=1)
    def _rev_stack_fwd(self, x):
        # Since networks is fully reversible, graph is not built unless x
        # requires grad
        if isinstance(x, torch.Tensor):
            x = self._stack_x(x)
            x.requires_grad = True
        else:
            x = [self._stack_x(_x) for _x in x]
            for _x in x:
                _x.requires_grad = True
        return self.rev_stack(x)
    def forward(self, x):
        """Return [aux_logits, ocr_logits], both at the finest pyramid resolution."""
        x = self._rev_stack_fwd(x)
        # Upsample levels 1-3 to level 0's resolution and concatenate.
        x0_h, x0_w = x[0].size(2), x[0].size(3)
        x1 = F.interpolate(
            x[1],
            size=(x0_h, x0_w),
            mode='bilinear',
            align_corners=ALIGN_CORNERS,
        )
        x2 = F.interpolate(
            x[2],
            size=(x0_h, x0_w),
            mode='bilinear',
            align_corners=ALIGN_CORNERS,
        )
        x3 = F.interpolate(
            x[3],
            size=(x0_h, x0_w),
            mode='bilinear',
            align_corners=ALIGN_CORNERS,
        )
        feats = torch.cat([x[0], x1, x2, x3], 1)
        # ocr
        out_aux_seg = []
        out_aux = self.aux_head(feats)
        feats = self.conv3x3_ocr(feats)
        context = self.ocr_gather_head(feats, out_aux)
        feats = self.ocr_distri_head(feats, context)
        out = self.cls_head(feats)
        out_aux_seg.append(out_aux)
        out_aux_seg.append(out)
        return out_aux_seg
def get_seg_model(cfg, **kwargs):
    """Build an FRBiFPN_OCR from *cfg* and load pretrained weights if present."""
    net = FRBiFPN_OCR(cfg, **kwargs)
    net.init_pretrained_weights(cfg.MODEL.PRETRAINED)
    return net
|
# -*- coding: utf-8 -*-
from scrapy import Spider, Request
from ..items import ChannelItem, RoomItem
import json
class ZhanqiSpider(Spider):
    """Crawl zhanqi.tv: one ChannelItem per game category, then paginate
    each category's live-room listing into RoomItems."""
    name = 'zhanqi'
    allowed_domains = ['zhanqi.tv']
    start_urls = [
        'https://www.zhanqi.tv/api/static/game.lists/300-1.json'
    ]
    custom_settings = {
        'SITE': {
            'code': 'zhanqi',
            'name': '战旗',
            'description': '战旗直播_高清流畅的游戏直播平台',
            'url': 'https://www.zhanqi.tv',
            'image': 'https://static.zhanqi.tv/assets/web/static/i/index/skin/logo.png',
            'show_seq': 6
        }
    }
    def parse(self, response):
        """Yield a ChannelItem per game, then queue page-1 room requests."""
        room_query_list = []
        for cjson in json.loads(response.text)['data']['games']:
            yield ChannelItem({
                'office_id': cjson['id'],
                'short': cjson['gameKey'],
                'name': cjson['name'],
                'image': cjson['spic'],
                'url': response.urljoin(cjson['url'])
            })
            # Per-game room-list endpoint; the inner {} is filled with the
            # page number later.
            url = 'https://www.zhanqi.tv/api/static/game.lives/{}/110-{{}}.json'.format(cjson['id'])
            room_query_list.append({'url': url, 'channel': cjson['gameKey'], 'page': 1})
        for room_query in room_query_list:
            # Query state travels via request meta for pagination.
            yield Request(room_query['url'].format(str(room_query['page'])), callback=self.parse_room_list,
                          meta=room_query)
    def parse_room_list(self, response):
        """Yield RoomItems for one page; follow to the next page while non-empty."""
        room_list = json.loads(response.text)['data']['rooms']
        # The API reportedly returns a non-list when there are no rooms —
        # TODO confirm; the isinstance guard protects against that.
        if isinstance(room_list, list):
            for rjson in room_list:
                yield RoomItem({
                    'office_id': rjson['id'],
                    'name': rjson['title'],
                    'image': rjson['bpic'],
                    'url': response.urljoin(rjson['url']),
                    'online': int(rjson['online']) if rjson['online'].isdigit() else 0,
                    'host': rjson['nickname'],
                    'channel': response.meta['channel']
                })
            if len(room_list) > 0:
                # Non-empty page: request the next page with bumped counter.
                next_meta = dict(response.meta, page=response.meta['page'] + 1)
                yield Request(next_meta['url'].format(str(next_meta['page'])), callback=self.parse_room_list,
                              meta=next_meta)
|
import discord
from discord.ext import commands
from src.orm.models import Header
class ErrorEmbed(discord.Embed):
    """
    Embed with standard structure to show a simple error message
    ...
    Attributes
    ----------
    header :Header
        Header obj with info related to the error
    ctx: commands.Context
        obj with info related to discord server that triggers the error
    Methods
    -------
    main(self)
        Embed object with info related to an error triggered by an exception
    """
    def __init__(self, header: Header, ctx: commands.Context):
        # BUG FIX: discord.Embed.__init__ was never called, so the
        # inherited embed state on this instance was left uninitialized.
        super().__init__()
        self.__h = header
        self.__ctx = ctx
    def main(self):
        """ Retrieve embed with message related to an error triggered by an Exception
        Parameters
        ----------
        Returns
        -------
        Embed
            retrieve embed with error info
        """
        # NOTE(review): this builds and returns a fresh Embed rather than
        # populating self — callers must use the return value.
        embed = discord.Embed(color=discord.Color.red())
        if self.__h.type in ['disabled_command', 'private_message_only', 'no_private_message']:
            # Message templates only need the command name.
            embed.add_field(name=self.__h.name, value=self.__h.translation.format(command=self.__ctx.command), inline=True)
        elif self.__h.type == 'missing_required_argument':
            # Template additionally needs the invoked alias and prefix.
            embed.add_field(name=self.__h.name, value=self.__h.translation.format(params=self.__ctx.invoked_with,
                            prefix=self.__ctx.prefix, command=self.__ctx.command), inline=True)
        else:
            embed.add_field(name=self.__h.name, value=self.__h.translation, inline=True)
        return embed
|
from __future__ import absolute_import, division, print_function
import pytest
bokeh = pytest.importorskip('bokeh')
from odo.backends.bokeh import convert, pd, ColumnDataSource
import pandas.util.testing as tm
# Fixture: column order matches the original ['balance', 'name'] layout.
df = pd.DataFrame({'balance': [100, 200, 300],
                   'name': ['Alice', 'Bob', 'Charlie']})


def test_convert_dataframe_to_cds():
    """Round-trip a DataFrame through a bokeh ColumnDataSource and back."""
    cds = convert(ColumnDataSource, df)
    assert list(cds.data['balance']) == [100, 200, 300]
    assert list(cds.data['name']) == ['Alice', 'Bob', 'Charlie']
    roundtripped = convert(pd.DataFrame, cds)
    assert isinstance(roundtripped, pd.DataFrame)
    tm.assert_frame_equal(df, roundtripped)
|
from django.forms.widgets import *
class BootstrapItaliaDateWidget(DateInput):
    """DateInput rendered with the Bootstrap Italia date template."""
    # The pass-through __init__ was redundant (it only forwarded *args/**kwargs
    # to the parent), so it is removed; behaviour is unchanged.
    template_name = 'widgets/date.html'
class BootstrapItaliaRadioWidget(RadioSelect):
    """RadioSelect rendered with the Bootstrap Italia radio template."""
    # Redundant forwarding __init__ removed; inherited constructor is identical.
    template_name = 'widgets/radio.html'
class BootstrapItaliaSelectWidget(Select):
    """Select rendered with the Bootstrap Italia select template."""
    # Redundant forwarding __init__ removed; inherited constructor is identical.
    template_name = 'widgets/select.html'
|
# -*- encoding:utf-8 -*-
def impar(n):
    """Return the n-th odd number (1-indexed): impar(1) == 1, impar(2) == 3, ..."""
    # PEP 8: use `def` instead of assigning a lambda to a name.
    return 2 * n - 1


header = """
Demostrar que es cierto:
1 + 3 + 5 + ... + (2*n)-1 = n ^ 2
Luego con este programa se busca probar dicha afirmacion.
"""
def suma_impares(n):
    """Return the sum of the first n odd numbers (mathematically n ** 2)."""
    # sum() over a generator replaces the manual accumulator loop;
    # 2*i - 1 is the i-th odd number (same formula as `impar` above).
    return sum(2 * i - 1 for i in range(1, n + 1))
def main():
    """Interactive demo: compare the sum of the first n odd numbers with n**2."""
    print(header)
    n = int(input('Numero: '))
    total = suma_impares(n)
    esperado = n ** 2
    print('Suma de los ', n, ' primeros impares = ', total)
    print('Cuadrado del numero: ', esperado)
    mensaje = ('Son iguales, luego se cumple la afirmacion'
               if total == esperado
               else 'No son iguales, luego no se cumple la afirmacion')
    print(mensaje)


if __name__ == '__main__':
    main()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/activation_fun_deriv_torch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="B4W2erKbxzFe"
# # Plot some neural net activation functions and their derivatives
# Based on sec 4.1 of
# http://d2l.ai/chapter_multilayer-perceptrons/mlp.html
#
# + id="CjpOJet7x_Us"
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(seed=1)
# + id="QY5z78In9lC8"
# !mkdir figures
# + id="W-FG3Ep72GB9"
# #!wget https://raw.githubusercontent.com/d2l-ai/d2l-en/master/d2l/torch.py -q -O d2l.py
#import d2l
# + id="XNJacITv2Smd"
import torch
from torch import nn
from torch.nn import functional as F
# + id="i5tLv0SvxoOW"
# Grid of inputs; requires_grad so autograd can produce derivatives below.
x = torch.arange(-4.0, 4.0, 0.1, requires_grad=True)
fns = [torch.sigmoid,
       #torch.relu,
       torch.nn.LeakyReLU(negative_slope=0.1),
       torch.nn.ELU(alpha=1.0),
       torch.nn.SiLU(),
       torch.nn.GELU()]
names = ['sigmoid',
         #'relu',
         'leaky-relu',
         'elu',
         'swish',
         'gelu']
# evaluate functions and their gradients on a grid of points
xs = x.detach()
fdict = {}  # activation values per function name
gdict = {}  # elementwise derivative values per function name
for i in range(len(fns)):
    fn = fns[i]
    name = names[i]
    y = fn(x)
    fdict[name] = y.detach()  # vector of fun
    # backward with a ones vector sums per-element gradients, which for an
    # elementwise function gives f'(x) at each grid point
    y.backward(torch.ones_like(x), retain_graph=True)  # returns gradient at each point
    gdict[name] = torch.clone(x.grad)  # gradient wrt x(i)
    x.grad.data.zero_()  # clear out old gradient for next iteration
# + colab={"base_uri": "https://localhost:8080/", "height": 577} id="VyLzcYPbQNaD" outputId="9c172ce3-ff8c-4dcf-8905-5f38909362f1"
# Plot the functions
styles = ['r-', 'g--', 'b-.', 'm:', 'k-']
ax = plt.subplot()
for i, name in enumerate(names):
    lab = f'{name}'
    ax.plot(xs, fdict[name], styles[i], label=lab)
ax.set_ylim(-0.5,2)
ax.legend()
plt.title('Activation function')
plt.tight_layout()
plt.savefig(f'figures/activation-funs.pdf', dpi=300)
plt.show()
# Second figure: the derivatives computed above.
ax = plt.subplot()
for i, name in enumerate(names):
    lab = f'{name}'
    ax.plot(xs, gdict[name], styles[i], label=lab)
ax.set_ylim(-0.5,1.5)
ax.legend()
plt.title('Gradient of activation function')
plt.tight_layout()
plt.savefig(f'figures/activation-funs-grad.pdf', dpi=300)
plt.show()
# + id="RkUeVBiW-aZ4" colab={"base_uri": "https://localhost:8080/"} outputId="1903298c-58f0-4e3e-c194-cdb32e346210"
# !ls figures
# + id="pGy1QBVrAfZK"
|
import numpy as np
def collapse(probas, output_type = 'bin'):
    """
    Stochastically collapse probabilities into discrete unit states.

    Parameters
    ----------
    probas: An array of probabilities (entries may be negative).
    output_type: If 'ter' (ternary), output can be -1, 0 and 1; with the
        default 'bin', negative-probability entries collapse to 0.

    Returns
    -------
    b: An array of discrete states, whose dimensionality is identical to probas.
    """
    p = np.abs(probas)
    # Bernoulli draw per entry: 1 with probability |p|.
    # Fix: np.int was removed in NumPy 1.24; the builtin int is equivalent.
    b = (p >= np.random.rand(*p.shape)).astype(int)
    return np.where(probas >= 0, b, {'bin': 0, 'ter': -b}[output_type])
def duoramp(x, low = None, high = None):
    """
    Clamp the entries of x into the interval [low, high].

    Parameters
    ----------
    x : Anything that can be transformed into a numpy array
    low : A number, optional
        The lowest value an entry in x can be. The default is None.
    high : A number, optional
        The highest value an entry in x can be. The default is None.

    Returns
    -------
    y : A numpy array (always a fresh copy)
        The value of each element is between low and high.
    """
    # Bug fix: np.asarray returns a *view* when x is already an ndarray, so
    # the clamping below used to mutate the caller's array in place.
    y = np.array(x)
    if low is not None:
        y[y < low] = low
    if high is not None:
        y[y > high] = high
    return y
def logistic(x, temperature = 1.0):
    """
    Numerically stable logistic (sigmoid) function.

    Parameters
    ----------
    x: A numeric array.
    temperature: A non-negative number controlling the slope of the function.

    Returns
    -------
    y: The value of the function, which is often used as a probability.

    Notes
    -----
    The function is numerically stable for very big/small values.
    """
    _x = np.asarray(x)
    if temperature == 0:  # The logistic function is reduced to a step function.
        y = np.zeros(_x.shape)
        y[_x > 0] = 1.0
        y[_x == 0] = 0.5
    else:
        norx = _x / temperature
        mask_p = norx > 0
        mask_n = norx < 0
        # Bug fix: entries with norx == 0 were left at 1.0 by the previous
        # np.ones_like initialisation, but logistic(0) is 0.5.
        y = np.full_like(norx, 0.5, dtype=float)
        # positive x gives small exp(-x): 1 < denominator < 2
        y[mask_p] = 1 / (1 + np.exp(-norx[mask_p]))
        # negative x gives small exp(x) = z: 1 < denominator < 2
        z = np.exp(norx[mask_n])
        y[mask_n] = z / (1 + z)
    return y
def softmax(x, temperature = 1.0):
    """
    Row-wise softmax with temperature.

    Parameters
    ----------
    x: A two-dimensional numeric array; each row is a distribution of activations.
    temperature: A non-negative number controlling the slope of the function.
        Temperature 0 puts all mass uniformly on each row's maxima.

    Returns
    -------
    P: The value of the function, which is often used as a probability.
       Each row adds up to 1.

    Notes
    -----
    The function is numerically stable for very big/small values.
    """
    # Normalise rows so the biggest value is 0; this stabilises np.exp.
    # np.asarray also accepts plain nested lists (backward compatible).
    norx = np.asarray(x, dtype=float)
    norx = norx - np.amax(norx, axis=1, keepdims=True)
    if temperature == 0:
        # Degenerate case: uniform over the (normalised) maxima of each row.
        hits = (norx == 0)
        P = hits / np.count_nonzero(hits, axis=1).reshape(-1, 1)
    else:
        # Vectorised over rows (replaces the previous per-row Python loop).
        e = np.exp(norx / temperature)
        P = e / np.sum(e, axis=1, keepdims=True)
    return P
def entropy(P, base = None):
    """
    Shannon entropy of each row of P.

    Parameters
    ----------
    P: A two-dimensional numeric array; each row is a probability distribution
       (rows are re-normalised to sum to 1 before the computation).
    base: The logarithmic base used for the entropy; defaults to e.

    Returns
    -------
    H: The entropy of each distribution in P.
    """
    probs = P / np.sum(P, axis=1).reshape(len(P), 1)
    # p*log(p) -> 0 as p -> 0; substituting 1 makes log() return exactly 0.
    probs[probs == 0] = 1
    scale = 1 if base is None else np.log(base)
    logs = np.log(probs) / scale
    return -np.sum(probs * logs, axis=1)
def kl_divergence(p, q, base = np.e):
    """Kullback-Leibler divergence D(p || q) in the given logarithmic base.

    Entries where p == 0 contribute 0 by convention. Unlike the previous
    np.where formulation, the log term is only evaluated on the support of
    p, so no divide-by-zero / 0*inf runtime warnings are emitted.
    """
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    support = p != 0
    terms = p[support] * np.log(p[support] / q[support])
    return np.sum(terms) / np.log(base)
from .books import Books
from .https import convert_to_https |
# Exercise script: read T test cases of "a b" from stdin and print the
# integer division a // b, or the exception's message when it fails.
for _ in range(int(input())):
    a,b = input().split()
    try:
        print(int(a) // int(b))
    except Exception as e:
        # Broad catch is deliberate: the expected output is the error text
        # for both ZeroDivisionError and ValueError (non-numeric input).
        print('Error Code: ' + str(e))
from __future__ import annotations
import abc
import operator
from functools import reduce, wraps
from typing import Callable, Sequence, Any
from typing import Tuple
from storage import V
from storage.api import Predicate, E, Var
def force_var(func):
    """Decorator: coerce a plain (non-Var) argument into a Const before
    delegating to the wrapped binary method."""
    @wraps(func)
    def wrapper(self, value):
        coerced = value if isinstance(value, (Var,)) else Const(value)
        return func(self, coerced)
    return wrapper
class BaseVar(Var[E, V]):
    """Base class implementing the operator-overloading DSL for vars.

    Comparison operators build Predicate objects instead of booleans, and
    arithmetic operators build deferred ReduceOperator expressions; plain
    (non-Var) operands are wrapped into Const by @force_var.

    NOTE(review): overriding __eq__ without defining __hash__ makes
    instances unhashable, and __eq__/__ne__ returning expression objects
    means ordinary equality tests on vars yield truthy predicates —
    presumably intentional for this DSL, but worth confirming.
    """
    @force_var
    def __eq__(self, other: Var) -> Predicate[Any]:
        return EqualComparison(self, other)
    @force_var
    def __ne__(self, other):
        return NotEqualComparison(self, other)
    @force_var
    def __gt__(self, other: Var) -> Predicate[Any]:
        return Comparison(self, other, operator.gt)
    @force_var
    def __ge__(self, other: Var) -> Predicate[Any]:
        return Comparison(self, other, operator.ge)
    @force_var
    def __lt__(self, other: Var) -> Predicate[Any]:
        return Comparison(self, other, operator.lt)
    @force_var
    def __le__(self, other: Var) -> Predicate[Any]:
        return Comparison(self, other, operator.le)
    @force_var
    def __contains__(self, item: Var) -> Predicate[Any]:
        # NOTE(review): Python coerces `x in var` to bool, so this predicate
        # is only useful when invoked explicitly — confirm intended usage.
        return Comparison(self, item, operator.contains)
    @force_var
    def __mul__(self, other: Var) -> Var:
        return ReduceOperator((self, other,), operator.mul)
    @force_var
    def __add__(self, other: Var) -> Var:
        return ReduceOperator((self, other,), operator.add)
    @force_var
    def __sub__(self, other: Var) -> Var:
        return ReduceOperator((self, other,), operator.sub)
    @force_var
    def __truediv__(self, other: Var) -> Var:
        return ReduceOperator((self, other,), operator.truediv)
    @force_var
    def __pow__(self, power, modulo=None) -> Var:
        # `modulo` is accepted for protocol compatibility but ignored.
        return ReduceOperator((self, power,), operator.pow)
    def __invert__(self) -> Var:
        return NotOperator(self)
    @force_var
    def __and__(self, other: Var) -> Var:
        return AndOperator((self, other,))
    @force_var
    def __or__(self, other: Var) -> Var:
        return OrOperator((self, other,))
    def cast(self, cast_fn: Callable):
        # Wrap this var so its evaluated value is converted by cast_fn.
        return CastOperator(cast_fn, self)
    @abc.abstractmethod
    def __call__(self, item: E) -> V:
        # Subclasses evaluate themselves against a concrete item.
        raise NotImplementedError()
    def optimize(self) -> Var[E]:
        """Return an equivalent, possibly simplified var (default: self)."""
        return self
    def equals(self, other: Var) -> bool:
        """Structural equality of two vars (contrast with __eq__, which
        builds a predicate).

        NOTE(review): zip() stops at the shorter __dict__, and attribute
        values that are themselves vars compare via the overridden
        __eq__/__ne__ operators, so this check can misjudge structurally
        different vars — confirm against optimize()'s expectations.
        """
        if self.__class__ != other.__class__:
            return False
        for self_item, other_item in zip(self.__dict__.items(), other.__dict__.items()):
            if self_item != other_item:
                return False
        return True
class Comparison(BaseVar):
    """Binary predicate applying an arbitrary operator to two vars' values."""
    def __init__(self, var_a: Var, var_b: Var, op: Callable[[Any, Any], bool]):
        self.var_a = var_a
        self.var_b = var_b
        self.op = op
    def __call__(self, item: E) -> bool:
        left = self.var_a(item)
        right = self.var_b(item)
        return self.op(left, right)
    def __repr__(self):
        return f'("{self.op.__name__}", {self.var_a}, {self.var_b})'
class EqualComparison(BaseVar[Any, bool]):
    """Predicate that is true when both vars evaluate to equal values."""
    def __init__(self, var_a: Var, var_b: Var):
        self.var_a = var_a
        self.var_b = var_b
    def __call__(self, item: E) -> bool:
        return self.var_a(item) == self.var_b(item)
    def optimize(self) -> Var[E]:
        left = self.var_a.optimize()
        right = self.var_b.optimize()
        # Comparing a var with a structurally identical var is always true.
        if left.equals(right):
            return Vars.const(True)
        return EqualComparison(left, right)
    def __repr__(self):
        return f'("eq", {self.var_a}, {self.var_b})'
class NotEqualComparison(BaseVar[Any, bool]):
    """Predicate that is true when the two vars evaluate to different values."""
    def __init__(self, var_a: Var, var_b: Var):
        self.var_a = var_a
        self.var_b = var_b
    def __call__(self, item: E) -> bool:
        return self.var_a(item) != self.var_b(item)
    def optimize(self) -> Var[E]:
        left = self.var_a.optimize()
        right = self.var_b.optimize()
        # A var always equals itself, so the inequality is constantly false.
        if left.equals(right):
            return Vars.const(False)
        return NotEqualComparison(left, right)
    def __repr__(self):
        return f'("ne", {self.var_a}, {self.var_b})'
class CastOperator(BaseVar[E, V]):
    """Var that applies a conversion function to another var's value."""
    def __init__(self, cast_fn: Callable[[Any], V], inner_var: Var[E, Any]):
        self.cast_fn = cast_fn
        self.inner_var = inner_var
    def __call__(self, item: E) -> V:
        raw = self.inner_var(item)
        return self.cast_fn(raw)
    def cast(self, cast_fn: Callable):
        # Re-casting with the very same function is a no-op.
        return self if self.cast_fn == cast_fn else CastOperator(cast_fn, self)
class NotOperator(BaseVar[E, V]):
    """Logical negation of an inner var."""
    def __init__(self, inner_var: Var[E, V]):
        self.inner_var = inner_var
    def __invert__(self):
        # Double negation cancels: ~~v is v.
        return self.inner_var
    def __call__(self, item: E) -> V:
        return not self.inner_var(item)
    def optimize(self) -> Var[E]:
        inner_var = self.inner_var.optimize()
        if Const.is_const(inner_var):
            return Const(not inner_var.const)
        # Fix: previously returned `self`, silently discarding the optimized
        # inner var; rebuild the operator so the simplification is kept.
        return NotOperator(inner_var)
    def __repr__(self):
        # Added for consistency with the other operators' repr format.
        return f'("not",{self.inner_var!r})'
class OrOperator(BaseVar[E, Any]):
    """Short-circuiting OR over a tuple of vars; yields the first truthy
    evaluated value, or False when none is truthy."""
    def __init__(self, inner_vars: Tuple[Var[E, Any], ...]):
        self.inner_vars = inner_vars
    @force_var
    def __or__(self, other: Var):
        return OrOperator(self.inner_vars + (other,))
    def __call__(self, item: E) -> Any:
        for candidate in self.inner_vars:
            result = candidate(item)
            if result:
                return result
        return False
    def optimize(self) -> Var[E]:
        optimized = []
        for candidate in self.inner_vars:
            reduced = candidate.optimize()
            # A constant-true operand makes the whole disjunction constant.
            if Const.is_true(reduced):
                return reduced
            optimized.append(reduced)
        return OrOperator(tuple(optimized))
    def __repr__(self):
        return f'("or",{",".join(map(repr, self.inner_vars))})'
class AndOperator(BaseVar[E, Any]):
    """Short-circuiting AND over a tuple of vars."""
    def __init__(self, inner_vars: Tuple[Var[E, Any], ...]):
        self.inner_vars = inner_vars
    @force_var
    def __and__(self, other: Var):
        # Consistency fix: OrOperator.__or__ coerces plain values through
        # @force_var; without it a raw (non-Var) operand was stored
        # unwrapped and would crash when the predicate is evaluated.
        return AndOperator(self.inner_vars + (other,))
    def __call__(self, item: E) -> bool:
        for var in self.inner_vars:
            if not var(item):
                return False
        return True
    def optimize(self) -> Var[E]:
        """Optimize operands; a constant-false operand decides the result."""
        inner_vars = []
        for var in self.inner_vars:
            optimized_var = var.optimize()
            if Const.is_false(optimized_var):
                return optimized_var
            inner_vars.append(optimized_var)
        return AndOperator(tuple(inner_vars))
    def __repr__(self):
        return f'("and",{",".join(map(repr, self.inner_vars))})'
class ReduceOperator(BaseVar[Any, Any]):
    """Left-fold of a binary operator over the values of the inner vars."""
    def __init__(self, inner_vars: Tuple[Any, ...], op: Callable[[Any, Any], Any]):
        self.inner_vars = inner_vars
        self.op = op
    def __repr__(self):
        return f'("{self.op.__name__}",{",".join(map(repr, self.inner_vars))})'
    def _extend(self, other: Var, op: Callable[[Any, Any], Any]) -> Var:
        # Chains of the same operator are flattened into one operand tuple
        # (a * b * c); a different operator nests a new node instead.
        # (Deduplicates the five previously copy-pasted dunder bodies.)
        if self.op == op:
            return ReduceOperator(self.inner_vars + (other,), op)
        return ReduceOperator((self, other,), op)
    @force_var
    def __mul__(self, other: Var) -> Var:
        return self._extend(other, operator.mul)
    @force_var
    def __add__(self, other: Var) -> Var:
        return self._extend(other, operator.add)
    @force_var
    def __sub__(self, other: Var) -> Var:
        return self._extend(other, operator.sub)
    @force_var
    def __truediv__(self, other: Var) -> Var:
        return self._extend(other, operator.truediv)
    @force_var
    def __pow__(self, power, modulo=None) -> Var:
        return self._extend(power, operator.pow)
    def __call__(self, item: E) -> V:
        return reduce(self.op, [var(item) for var in self.inner_vars])
class Const(BaseVar[Any, V]):
    """Var wrapping a literal value; operations between two Consts are
    constant-folded eagerly instead of building deferred expressions."""
    @classmethod
    def is_const(cls, instance):
        return isinstance(instance, (cls,))
    @classmethod
    def is_true(cls, instance):
        # True only for a Const holding a truthy value.
        if cls.is_const(instance):
            return instance.const
        return False
    @classmethod
    def is_false(cls, instance):
        # True only for a Const holding a falsy value.
        if cls.is_const(instance):
            return not instance.const
        return False
    def __init__(self, const: V):
        self.const = const
    def __invert__(self) -> Var:
        return Const(not self.const)
    def __repr__(self):
        return f'{repr(self.const)}'
    def _combine(self, other: Var, op: Callable[[Any, Any], Any]) -> Var:
        # Constant-fold when both operands are Const; otherwise defer.
        # (Deduplicates the previously copy-pasted arithmetic dunders.)
        if isinstance(other, (Const,)):
            return Const(op(self.const, other.const))
        return ReduceOperator((self, other,), op)
    @force_var
    def __and__(self, other: Var) -> Var:
        if isinstance(other, (Const,)):
            return Const(self.const & other.const)
        return AndOperator((self, other,))
    @force_var
    def __or__(self, other: Var) -> Var:
        if isinstance(other, (Const,)):
            return Const(self.const | other.const)
        return OrOperator((self, other,))
    @force_var
    def __mul__(self, other: Var) -> Var:
        return self._combine(other, operator.mul)
    @force_var
    def __add__(self, other: Var) -> Var:
        return self._combine(other, operator.add)
    @force_var
    def __sub__(self, other: Var) -> Var:
        return self._combine(other, operator.sub)
    @force_var
    def __truediv__(self, other: Var) -> Var:
        return self._combine(other, operator.truediv)
    @force_var
    def __pow__(self, power, modulo=None) -> Var:
        return self._combine(power, operator.pow)
    def cast(self, cast_fn: Callable):
        # Casting to a type the constant already has is a no-op; other casts
        # are applied eagerly since the value is known now.
        if isinstance(cast_fn, (type,)) and isinstance(self.const, cast_fn):
            return self
        return Const(cast_fn(self.const))
    def __call__(self, item: Any = None) -> V:
        return self.const
class Func(BaseVar):
    """Var that delegates evaluation to an arbitrary callable."""
    def __init__(self, func: Callable[[E], bool]):
        self.func = func
    def __call__(self, item: E) -> bool:
        result = self.func(item)
        return result
    @classmethod
    def from_lambda(cls, func: Callable[[E], bool]) -> Predicate[E]:
        """Alternate constructor kept for API symmetry."""
        return cls(func)
class Keys(BaseVar):
    """Var that drills into nested mappings along a sequence of keys,
    yielding None as soon as any level is missing."""
    def __init__(self, keys: Sequence[str] = ()):
        self.keys = keys
    def __call__(self, item):
        current = item
        for key in self.keys:
            if current is None:
                return None
            current = current.get(key, None)
        return current
    def __repr__(self) -> str:
        return f'${{{".".join(self.keys)}}}'
class Vars:
    """Factory helpers for building Var instances."""
    @staticmethod
    def key(key: str) -> Var[Any, Any]:
        """Var reading a single top-level key."""
        return Keys((key,))
    @staticmethod
    def keys(keys: Sequence[str]) -> Var[Any, Any]:
        """Var reading a nested path of keys."""
        return Keys(keys)
    @staticmethod
    def const(value: E) -> Var[Any, E]:
        """Var wrapping a literal value."""
        return Const(value)
|
"""
Music features Enrichment and Preprocessing
"""
__author__ = "Marcel Kurovski"
__copyright__ = "Marcel Kurovski"
__license__ = "mit"
import logging
import os
import numpy as np
import pandas as pd
_logger = logging.getLogger(__name__)
# Audio features that arrive on arbitrary scales; generate_playlist rescales
# them manually (key -> [0, 1], loudness/tempo -> min-max scaling).
unscaled_attributes = ["key", "loudness", "tempo"]
# Audio features already in [0, 1] and used as-is.
scaled_attributes = [
    "mode",
    "danceability",
    "energy",
    "speechiness",
    "acousticness",
    "instrumentalness",
    "liveness",
    "valence",
]
def generate_playlist(cfg: dict):
    """Build a recommended playlist from augmented liked-songs features.

    Reads liked_songs_augmented.csv from cfg["interim_storage_folder"],
    builds a play-count-weighted "taste" vector over the audio features,
    ranks never-listened songs by cosine similarity to it, and writes the
    top cfg["top_k_songs"] rows to recommended_playlist.csv.
    """
    filepath = os.path.join(cfg["interim_storage_folder"], "liked_songs_augmented.csv")
    _logger.info(f"Loading augmented raw features from {filepath} ...")
    liked_songs = pd.read_csv(filepath)
    # Songs never played (count == 0) are the recommendation candidates.
    not_listened = (liked_songs["count"] == 0).values
    # log1p damps the influence of very frequently played songs.
    weights = np.log1p(liked_songs["count"]).values.reshape(-1, 1)
    feature_cols = scaled_attributes + unscaled_attributes
    features = liked_songs[feature_cols].copy()
    # scale the unscaled features
    # assuming at least ordinal scale
    # NOTE(review): assumes "key" uses the 0-11 pitch-class encoding — confirm.
    features["key"] = features["key"] / 11
    # TODO: improve beyond min-max-scaling
    # NOTE(review): the double negation reduces to (x - min) / (max - min);
    # consider the direct form for readability.
    features["loudness"] = -(features["loudness"] - features["loudness"].min()) / (
        features["loudness"].min() - features["loudness"].max()
    )
    features["tempo"] = (features["tempo"] - features["tempo"].min()) / (
        features["tempo"].max() - features["tempo"].min()
    )
    features = features.values
    # create the play-count-weighted average "user profile" vector
    user = (features * weights).sum(axis=0) / weights.sum().reshape(1, -1)
    # cosine similarity between every song and the user profile
    similarities = (features * user).sum(axis=1) / (
        np.sqrt(np.sum(np.square(user))) * np.sqrt(np.sum(np.square(features), axis=1))
    )
    order = np.argsort(similarities)[::-1]
    # keep only unheard songs, preserving similarity order
    # NOTE(review): `val in ndarray` is an O(n) scan per element; a set of
    # np.where(not_listened)[0] would make this loop linear overall.
    order = [val for val in order if val in np.where(not_listened)[0]]
    if len(order) < cfg["top_k_songs"]:
        _logger.warning(
            f"Only {len(order)} songs remain, "
            f"requested {cfg['top_k_songs']} will not be fulfilled."
        )
    new_playlist = liked_songs.loc[order[: cfg["top_k_songs"]]]
    output_path = os.path.join(
        cfg["interim_storage_folder"], "recommended_playlist.csv"
    )
    _logger.info(f"Saving Playlist to {output_path} ...")
    new_playlist.to_csv(output_path, index=False)
|
# -*- coding: utf-8 -*-
# Simulation tuning constants. The *_SPEED rates are scaled by SERVER_TICK
# at the bottom of this module so they apply per tick.
SERVER_TICK = 0.05  # seconds between world updates
WORLD_SIZE = (100, 100)  # width, height
HUNGER_SPEED = 0.005
ILLNESS_SPEED = 0.05
HEALING_SPEED = 0.02
HUNGER_RESTORED_BY_EATING = 0.1
MAX_FOOD_ON_CELL = 10
MAX_GROW_FOOD_SPEED = 0.1
SEND_USER_PERSPECTIVE_RATE = 1
# NOTE(review): plaintext credentials — presumably local-dev defaults only.
DATABASE = {
    'user': 'postgres',
    'database': 'postgres',
    'host': '127.0.0.1',
    'password': 'postgres'
}
DEBUG = False
if DEBUG:
    # Coarser tick and faster dynamics for local debugging.
    SERVER_TICK = 1
    HUNGER_SPEED = 0.1
    ILLNESS_SPEED = 0.1
    HEALING_SPEED = 0.05
    MAX_GROW_FOOD_SPEED = 1
# Adjust tick-related settings
# NOTE(review): this scaling runs after the DEBUG overrides, so the DEBUG
# values are also multiplied by the (debug) SERVER_TICK — confirm intended.
HUNGER_SPEED *= SERVER_TICK
ILLNESS_SPEED *= SERVER_TICK
HEALING_SPEED *= SERVER_TICK
MAX_GROW_FOOD_SPEED *= SERVER_TICK
|
# -*- encoding: utf-8 -*-
"""
Created by eniocc at 11/10/2020
"""
from py_dss_interface.models.Topology.TopologyI import TopologyI
from py_dss_interface.models.Topology.TopologyS import TopologyS
from py_dss_interface.models.Topology.TopologyV import TopologyV
class Topology(TopologyI, TopologyV, TopologyS):
    """
    This interface implements the Topology (ITopology) interface of OpenDSS by declaring 3 procedures for accessing
    the different properties included in this interface: TopologyI, TopologyV, TopologyS.

    The class body is intentionally empty: all behaviour is inherited from
    the three mixin base classes listed above.
    """
    pass
|
import json, os, random
def main():
    """Randomly assign hackers that have a team into groups of 2-3 per
    hackathon, then write the groupings to teams.json.

    Reads hackers.json located next to this script.
    """
    base_dir = os.path.dirname(os.path.realpath(__file__))
    # Bug fix: both files were previously left open for the process lifetime.
    with open(os.path.join(base_dir, "hackers.json"), "r") as fh:
        hackers = json.loads(fh.read())
    soho_teams = [[]]
    fdbh_teams = [[]]
    medh_teams = [[]]
    for hacker in hackers["hackers"]:
        if hacker["has_team"] == 'true':
            if hacker["hackathon"] == "Suns Out Hacks Out":
                hackathon_list = soho_teams
            elif hacker["hackathon"] == "First Day Back Hacks":
                hackathon_list = fdbh_teams
            else:
                hackathon_list = medh_teams
            for team in hackathon_list:
                # Cap each team at a random size of 2 or 3.
                if len(team) < random.randint(2, 3):
                    team.append(hacker)
                    break
            else:
                # Bug fix: when every team was full the hacker was dropped
                # (an empty team was appended without them); start the new
                # team with the hacker instead.
                hackathon_list.append([hacker])
    # Bug fix: str(list).replace(...) returned new strings that were
    # discarded, so the output contained single quotes and was not valid
    # JSON; serialize properly with the json module.
    with open(os.path.join(base_dir, "teams.json"), 'w') as out:
        json.dump({
            "SOHO teams": soho_teams,
            "FDBH teams": fdbh_teams,
            "MedH teams": medh_teams,
        }, out)


if __name__ == "__main__":
    main()
# Generated by Django 3.0.2 on 2020-01-11 13:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Team model.

    Migrations are applied in sequence by Django; avoid editing by hand.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                # Uploaded logos are sharded into dated directories.
                ('logoUri', models.ImageField(upload_to='team/logo_uri/%Y/%m/%d')),
                ('club_state', models.CharField(blank=True, max_length=250)),
            ],
        ),
    ]
|
# Generated by Django 2.2.5 on 2019-10-30 10:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: change Film.acteurs to a TextField with a
    French help text ("one actor per line"). Avoid editing by hand.
    """
    dependencies = [
        ('main_hub', '0033_film'),
    ]
    operations = [
        migrations.AlterField(
            model_name='film',
            name='acteurs',
            field=models.TextField(help_text='Un acteur par ligne'),
        ),
    ]
|
# Copyright 2020 The TensorStore Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supports definining bazel repos for third-party dependencies.
The `TENSORSTORE_SYSTEM_LIBS` environment variable may be used to
specify that system libraries should be used in place of bundled
libraries. It should be set to a comma-separated list of the repo
names, e.g. `TENSORSTORE_SYSTEM_LIBS=net_zlib,se_curl` to use
system-provided zlib and libcurl.
"""
load(
"@bazel_tools//tools/build_defs/repo:utils.bzl",
"patch",
"update_attrs",
"workspace_and_buildfile",
)
load(
"@com_google_tensorstore//third_party:python/python_configure.bzl",
"get_numpy_include_rule",
"get_python_bin",
"python_env_vars",
)
SYSTEM_LIBS_ENVVAR = "TENSORSTORE_SYSTEM_LIBS"
SYSTEM_PYTHON_LIBS_ENVVAR = "TENSORSTORE_SYSTEM_PYTHON_LIBS"
# Checks if we should use the system lib instead of the bundled one
def use_system_lib(ctx, name, env_var = SYSTEM_LIBS_ENVVAR):
    """Returns True if `name` appears in the comma-separated `env_var` list."""
    requested = ctx.os.environ.get(env_var, "")
    return name in [entry.strip() for entry in requested.split(",")]
def _third_party_http_archive_impl(ctx):
    """Implementation of the `third_party_http_archive` repository rule.

    When the repo is listed in TENSORSTORE_SYSTEM_LIBS, generates a BUILD
    file from `system_build_file` (and requires it to be set); otherwise
    downloads, extracts and patches the bundled archive.
    """
    use_syslib = use_system_lib(ctx, ctx.attr.name)
    if use_syslib:
        if ctx.attr.system_build_file == None:
            fail(("{name} was specified in {envvar}, but no " +
                  "system_build_file was specified in the repository " +
                  "rule for {name}.").format(
                name = ctx.attr.name,
                envvar = SYSTEM_LIBS_ENVVAR,
            ))
        ctx.template(
            path = "BUILD.bazel",
            template = ctx.attr.system_build_file,
        )
    else:
        if not ctx.attr.urls:
            fail("urls must be specified")
        if ctx.attr.build_file and ctx.attr.build_file_content:
            fail("Only one of build_file and build_file_content can be provided.")
        download_info = ctx.download_and_extract(
            url = ctx.attr.urls,
            output = "",
            sha256 = ctx.attr.sha256,
            type = ctx.attr.type,
            stripPrefix = ctx.attr.strip_prefix,
            canonical_id = ctx.attr.canonical_id,
        )
        patch(ctx)
        workspace_and_buildfile(ctx)
        # Return the attrs with the resolved sha256 so Bazel can suggest a
        # reproducible rule definition when the checksum was omitted.
        return update_attrs(
            ctx.attr,
            _third_party_http_archive_attrs.keys(),
            {"sha256": download_info.sha256},
        )
# Attribute schema for third_party_http_archive; mirrors the attrs of the
# built-in http_archive plus `system_build_file` for system-library mode.
_third_party_http_archive_attrs = {
    "urls": attr.string_list(),
    "sha256": attr.string(),
    "canonical_id": attr.string(),
    "strip_prefix": attr.string(),
    "type": attr.string(),
    "patches": attr.label_list(
        default = [],
    ),
    "patch_tool": attr.string(
        default = "",
    ),
    "patch_args": attr.string_list(
        default = ["-p0"],
    ),
    "patch_cmds": attr.string_list(
        default = [],
    ),
    "patch_cmds_win": attr.string_list(
        default = [],
    ),
    "build_file": attr.label(
        allow_single_file = True,
    ),
    "build_file_content": attr.string(),
    "workspace_file": attr.label(),
    "workspace_file_content": attr.string(),
    # BUILD file used instead of the bundled one when the repo is listed in
    # TENSORSTORE_SYSTEM_LIBS.
    "system_build_file": attr.label(
        allow_single_file = True,
    ),
}
# Repository rule: like http_archive, but can be overridden to a
# system-provided library via the TENSORSTORE_SYSTEM_LIBS env var
# (declared in `environ` so changes invalidate the repository).
third_party_http_archive = repository_rule(
    implementation = _third_party_http_archive_impl,
    attrs = _third_party_http_archive_attrs,
    environ = [
        SYSTEM_LIBS_ENVVAR,
    ],
)
def _third_party_python_package_impl(ctx):
    """Implementation of the `third_party_python_package` repository rule.

    When the target is listed in TENSORSTORE_SYSTEM_PYTHON_LIBS, emits a
    stub py_library that resolves against the system interpreter; otherwise
    pip-installs the pinned requirement into the repository and wraps it in
    a py_library. numpy additionally exposes its C headers as ":headers".
    """
    use_syslib = use_system_lib(ctx, ctx.attr.target, SYSTEM_PYTHON_LIBS_ENVVAR)
    is_numpy = ctx.attr.target == "numpy"
    build_file_content = ""
    if is_numpy:
        build_file_content = """
load("@com_google_tensorstore//:utils.bzl", "cc_library_with_strip_include_prefix")
"""
    if use_syslib:
        # Empty py_library: the package is importable from the system env.
        build_file_content += """
py_library(
    name = """ + repr(ctx.attr.target) + """,
    visibility = ["//visibility:public"],
)
"""
        if is_numpy:
            # Headers are copied out of the system numpy installation.
            build_file_content += """
cc_library_with_strip_include_prefix(
    name = "headers",
    hdrs = [":numpy_include"],
    strip_include_prefix = "numpy_include",
    visibility = ["//visibility:public"],
)
""" + get_numpy_include_rule(ctx, get_python_bin(ctx))
    else:
        # Vendored mode: pip-install the pinned requirement into this repo.
        result = ctx.execute([
            get_python_bin(ctx),
            "-m",
            "pip",
            "install",
            "--no-deps",
            ctx.attr.requirement,
            "-t",
            ".",
        ])
        if result.return_code != 0:
            fail("Failed to install Python package: %s\n%s%s" % (
                ctx.attr.requirement,
                result.stderr,
                result.stdout,
            ))
        build_file_content += """
py_library(
    name = """ + repr(ctx.attr.target) + """,
    srcs = glob(["**/*.py"]),
    data = glob(["**/*"], exclude=["**/*.py", "**/* *", "BUILD.bazel", "WORKSPACE"]),
    imports = ["."],
    deps = """ + repr(ctx.attr.deps) + """,
    visibility = ["//visibility:public"],
)
"""
        if is_numpy:
            build_file_content += """
cc_library_with_strip_include_prefix(
    name = "headers",
    hdrs = glob(["numpy/core/include/**/*.h"]),
    strip_include_prefix = "numpy/core/include",
    visibility = ["//visibility:public"],
)
"""
    ctx.file(
        "BUILD.bazel",
        executable = False,
        content = build_file_content,
    )
# Attribute schema for third_party_python_package: the pip requirement
# specifier, the Bazel target name, and its Bazel-level deps.
_third_party_python_package_attrs = {
    "requirement": attr.string(),
    "target": attr.string(),
    "deps": attr.string_list(),
}

# Repository rule wrapping a pip package; re-evaluated when the system-libs
# env var or the Python configuration environment variables change.
third_party_python_package = repository_rule(
    implementation = _third_party_python_package_impl,
    attrs = _third_party_python_package_attrs,
    environ = [
        SYSTEM_PYTHON_LIBS_ENVVAR,
    ] + python_env_vars,
)
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from __future__ import unicode_literals
__author__ = "d01 <Florian Jung>"
__email__ = "jungflor@gmail.com"
__copyright__ = "Copyright (C) 2015-16, Florian JUNG"
__license__ = "MIT"
__version__ = "0.1.4"
__date__ = "2016-04-18"
# Created: 2015-09-20 05:30
# Prefer setuptools; fall back to distutils on very old environments.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
import sys
import os

# Shortcut: `python setup.py build` produces all distribution formats.
if sys.argv[-1] == "build":
    os.system("python setup.py clean sdist bdist bdist_egg bdist_wheel")
def get_version():
    """
    Parse the version information from the init file

    Returns
    -------
    str
        the version string found in paps/__init__.py

    Raises
    ------
    RuntimeError
        if no __version__ assignment is found
    """
    import os
    import re
    version_file = os.path.join("paps", "__init__.py")
    vsre = r"^__version__ = ['\"]([^'\"]*)['\"]"
    # Fix: the file handle was previously opened and never closed; the
    # context manager closes it deterministically.
    with open(version_file, 'rt') as handle:
        for line in handle:
            mo = re.search(vsre, line, re.M)
            if mo:
                return mo.group(1)
    raise RuntimeError("Unable to find version string in {}".format(version_file))
def get_file(path):
    """Return the entire text content of the file at *path*."""
    with open(path, "r") as handle:
        content = handle.read()
    return content
version = get_version()
readme = get_file("README.rst")
history = get_file("HISTORY.rst")
# Fix: the requirements file handle was leaked and the trailing newline
# produced an empty requirement entry; close the file and drop blank lines.
with open("requirements.txt", "r") as f:
    requirements = [line for line in f.read().split("\n") if line.strip()]

setup(
    name="paps",
    version=version,
    description="Pi/Python-based Audience Participation System",
    long_description=readme,
    author=__author__,
    author_email=__email__,
    url="https://github.com/the01/python-paps",
    packages=[
        "paps",
        "paps.si",
        "paps.si.app",
        "paps.crowd"
    ],
    install_requires=requirements,
    license=__license__,
    keywords="audience participation",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7"
    ],
    tests_require=["pytest", "mock"]
)
|
#!/usr/bin/env python
u"""
sph_spline.py
Written by Tyler Sutterley (01/2022)
Interpolates a sparse grid over a sphere using spherical surface splines in
tension following Wessel and Becker (2008)
Adapted from P. Wessel, SOEST, U of Hawaii, April 2008
Uses Generalized Legendre Function algorithm from Spanier and Oldman
"An Atlas of Functions", 1987
CALLING SEQUENCE:
output = sph_spline(lon, lat, data, longitude, latitude, tension=0)
INPUTS:
lon: input longitude
lat: input latitude
data: input data
longitude: output longitude
latitude: output latitude
OUTPUTS:
output: interpolated data
OPTIONS:
tension: tension to use in interpolation (greater than 0)
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
scipy: Scientific Tools for Python (https://docs.scipy.org/doc/)
cython: C-extensions for Python (http://cython.org/)
REFERENCES:
Wessel, P. and J. M. Becker, 2008, Interpolation using a generalized
Green's function for a spherical surface spline in tension,
Geophysical Journal International, doi:10.1111/j.1365-246X.2008.03829.x
UPDATE HISTORY:
Updated 01/2022: added function docstrings
Updated 09/2017: using rcond=-1 in numpy least-squares algorithms
Updated 08/2016: using cythonized version of generalized Legendre function
treat case for no tension but x is equal to 1 within machine precision
Written 08/2016
"""
import numpy as np
import scipy.special
from spatial_interpolators.PvQv_C import PvQv_C
def sph_spline(lon, lat, data, longitude, latitude, tension=0.):
    """
    Interpolates a sparse grid over a sphere using spherical
    surface splines in tension

    Arguments
    ---------
    lon: input longitude
    lat: input latitude
    data: input data
    longitude: output longitude
    latitude: output latitude

    Keyword arguments
    -----------------
    tension: tension to use in interpolation (greater than or equal to 0)

    Returns
    -------
    output: interpolated data grid
    """
    #-- remove singleton dimensions
    lon = np.squeeze(lon)
    lat = np.squeeze(lat)
    data = np.squeeze(data)
    longitude = np.squeeze(longitude)
    latitude = np.squeeze(latitude)
    #-- size of new matrix
    if (np.ndim(longitude) > 1):
        nlon,nlat = np.shape(longitude)
    #-- Check to make sure sizes of input arguments are correct and consistent
    if (len(data) != len(lon)) | (len(data) != len(lat)):
        raise Exception('Length of Longitude, Latitude, and Data must be equal')
    if (np.shape(longitude) != np.shape(latitude)):
        raise Exception('Size of output Longitude and Latitude must be equal')
    if (tension < 0):
        #-- bug fix: the message previously said "greater than 0" although
        #-- zero (no tension) is accepted by this check
        raise ValueError('TENSION must be greater than or equal to 0')
    #-- convert input lat and lon into cartesian X,Y,Z over unit sphere
    phi = np.pi*lon/180.0
    th = np.pi*(90.0 - lat)/180.0
    xs = np.sin(th)*np.cos(phi)
    ys = np.sin(th)*np.sin(phi)
    zs = np.cos(th)
    #-- convert output longitude and latitude into cartesian X,Y,Z
    PHI = np.pi*longitude.flatten()/180.0
    THETA = np.pi*(90.0 - latitude.flatten())/180.0
    XI = np.sin(THETA)*np.cos(PHI)
    YI = np.sin(THETA)*np.sin(PHI)
    ZI = np.cos(THETA)
    sz = len(longitude.flatten())
    #-- Find and remove mean from data
    data_mean = data.mean()
    data_range = data.max() - data.min()
    #-- Normalize data
    data_norm = (data - data_mean) / data_range
    #-- compute linear system (Rd holds dot products, i.e. cos of the
    #-- angular distance between input points)
    N = len(data)
    GG = np.zeros((N,N))
    for i in range(N):
        Rd = np.dot(np.transpose([xs,ys,zs]), np.array([xs[i],ys[i],zs[i]]))
        #-- remove singleton dimensions and calculate spherical surface splines
        GG[i,:] = SSST(Rd, P=tension)
    #-- Compute model m for normalized data
    m = np.linalg.lstsq(GG,data_norm,rcond=-1)[0]
    #-- calculate output interpolated array (or matrix)
    output = np.zeros((sz))
    for j in range(sz):
        Re = np.dot(np.transpose([xs,ys,zs]), np.array([XI[j],YI[j],ZI[j]]))
        #-- remove singleton dimensions and calculate spherical surface splines
        gg = SSST(Re, P=tension)
        output[j] = data_mean + data_range*np.dot(gg, m)
    #-- reshape output to original dimensions and return
    if (np.ndim(longitude) > 1):
        output = output.reshape(nlon,nlat)
    return output
#-- SSST: Spherical Surface Spline in Tension
#-- Returns the Green's function for a spherical surface spline in tension,
#-- following Wessel and Becker [2008].
#-- If p == 0 or not given then use minimum curvature solution with dilogarithm
def SSST(x, P=0):
    """Green's function for a spherical surface spline in tension,
    following Wessel and Becker [2008].

    Arguments:
        x: numpy array of cos(angular distance) values in [-1, 1]
        P: tension parameter; 0 (default) selects the minimum-curvature
           solution expressed via the dilogarithm (Spence's function)
    Returns:
        numpy array of Green's function values (real part only)
    """
    #-- floating point machine precision
    #-- (np.float was removed in NumPy 1.24; np.float64 is the equivalent)
    eps = np.finfo(np.float64).eps
    if (P == 0):
        #-- use dilogarithm (Spence's function) if using splines without tension
        y = np.zeros_like(x)
        if np.any(np.abs(x) < (1.0 - eps)):
            k, = np.nonzero(np.abs(x) < (1.0 - eps))
            y[k] = scipy.special.spence(0.5 - 0.5*x[k])
        #-- Deal with special cases x == +/- 1
        if np.any(((x + eps) >= 1.0) | ((x - eps) <= -1.0)):
            k, = np.nonzero(((x + eps) >= 1.0) | ((x - eps) <= -1.0))
            y[k] = scipy.special.spence(0.5 - 0.5*np.sign(x[k]))
    else:
        #-- if in tension
        #-- calculate tension parameter (may be complex; scimath.sqrt allows it)
        v = (-1.0 + np.lib.scimath.sqrt(1.0 - 4.0*P**2))/2.0
        #-- Initialize output array with the (possibly complex) dtype of v
        y = np.zeros_like(x, dtype=v.dtype)
        A = np.pi/np.sin(v*np.pi)
        #-- Where the Legendre-function (Pv) solution works
        if np.any(np.abs(x) < (1.0 - eps)):
            k, = np.nonzero(np.abs(x) < (1.0 - eps))
            y[k] = A*Pv(-x[k],v) - np.log(1.0 - x[k])
        #-- Approximations where x is close to -1 or 1 using values from
        #-- "An Atlas of Functions" by Spanier and Oldham, 1987 (590)
        #-- Deal with special case x == -1
        if np.any((x - eps) <= -1.0):
            k, = np.nonzero((x - eps) <= -1.0)
            y[k] = A - np.log(2.0)
        #-- Deal with special case x == +1
        if np.any((x + eps) >= 1.0):
            k, = np.nonzero((x + eps) >= 1.0)
            y[k] = np.pi*(1.0/np.tan(v*np.pi)) + 2.0*(np.euler_gamma +
                scipy.special.psi(1.0+v)) - np.log(2.0)
        #-- use only the real part (remove insignificant imaginary noise)
        y = np.real(y)
    return y
#-- Calculate Legendre function of the first kind for arbitrary degree v
def Pv(x,v):
    """Legendre function of the first kind of arbitrary degree *v*,
    evaluated element-wise over the array *x*."""
    out = np.zeros_like(x, dtype=v.dtype)
    for idx, xi in enumerate(x):
        if (xi == -1):
            # Pv diverges at x == -1
            out[idx] = np.inf
        else:
            #-- compiled Cython version of PvQv (PvQv_C.so from PvQv_C.pyx);
            #-- the pure-Python PvQv below is the equivalent fallback
            p, _q, _k = PvQv_C(xi, v)
            out[idx] = p
    return out
#-- Calculate generalized Legendre functions of arbitrary degree v
#-- Based on recipe in "An Atlas of Functions" by Spanier and Oldham, 1987 (589)
#-- Pv is the Legendre function of the first kind
#-- Qv is the Legendre function of the second kind
def PvQv(x, v):
    """Generalized Legendre functions of arbitrary degree *v* at scalar *x*.

    Based on the recipe in "An Atlas of Functions" by Spanier and Oldham,
    1987 (589).  Returns (P, Q, iterations) where P is the Legendre function
    of the first kind, Q of the second kind, and iterations counts the extra
    refinement passes of the series.
    """
    # NOTE(review): 'iter' shadows the builtin of the same name; left as-is.
    iter = 0
    #-- closed-form values at the singular endpoints
    if (x == -1):
        P = -np.inf
        Q = -np.inf
    elif (x == +1):
        P = 1.0
        Q = np.inf
    else:
        #-- set a and R to 1
        a = 1.0
        R = 1.0
        #-- K bounds the minimum number of series terms taken below
        K = 4.0*np.sqrt(np.abs(v - v**2))
        #-- map degrees at negative integers onto the equivalent -1-v degree
        if ((np.abs(1 + v) + np.floor(1 + v.real)) == 0):
            a = 1.0e99
            v = -1.0 - v
        #-- s and c = sin and cos of (pi*v/2.0)
        s = np.sin(0.5*np.pi*v)
        c = np.cos(0.5*np.pi*v)
        w = (0.5 + v)**2
        #-- raise the degree in steps of 2 until v > 6, folding the
        #-- recurrence ratio into R (improves the asymptotic expansion)
        while (v.real <= 6.0):
            v += 2
            R = R*(v - 1.0)/v
        #-- calculate X and g and update R (asymptotic correction terms)
        X = 1.0 / (4.0 + 4.0*v)
        g = 1.0 + 5*X*(1.0 - 3.0*X*(0.35 + 6.1*X))
        R = R*(1.0 - X*(1.0 - g*X/2))/np.sqrt(8.0*X)
        #-- set g and u to 2.0*x (odd series accumulator and its term)
        g = 2.0*x
        u = 2.0*x
        #-- set f and t to 1 (even series accumulator and its term)
        f = 1.0
        t = 1.0
        #-- set k to 1/2 (half-integer term index)
        k = 0.5
        #-- convergence scale factor; grows as |x| approaches 1
        #-- NOTE(review): the 1e8 constant appears to be an empirical
        #-- stopping tolerance -- confirm against the Atlas recipe
        X = 1.0 + (1e8/(1.0 - x**2))
        #-- first update of the even-series term t
        t = t*x**2 * (k**2.0 - w)/((k + 1.0)**2 - 0.25)
        #-- add 1 to k
        k += 1.0
        #-- add t to f
        f += t
        #-- first update of the odd-series term u
        u = u*x**2 * (k**2 - w)/((k + 1)**2 - 0.25)
        #-- add 1 to k
        k += 1.0
        #-- add u to g
        g += u
        #-- keep accumulating terms while k < K or the scaled term |X*t|
        #-- still dominates the accumulated sum |f|
        while ((k < K) | (np.abs(X*t) > np.abs(f))):
            iter += 1
            t = t*x**2 * (k**2.0 - w) / ((k + 1.0)**2 - 0.25)
            k += 1.0
            f += t
            u = u*x**2 * (k**2.0 - w) / ((k + 1.0)**2 - 0.25)
            k += 1.0
            g += u
        #-- tail corrections for the truncated series
        f += (x**2*t/(1.0 - x**2))
        g += (x**2*u/(1.0 - x**2))
        #-- combine even/odd sums into the generalized Legendre functions
        P = ((s*g*R) + (c*f/R))/np.sqrt(np.pi)
        Q = a*np.sqrt(np.pi)*((c*g*R) - (s*f/R))/2.0
    #-- return P, Q and number of iterations
    return (P, Q, iter)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, print_function
import unittest
from . import mocks
class OAuth1ProviderTest(unittest.TestCase):
    """Exercises MockOAuth1Provider against the canned mock OAuth1 responses."""

    def _provider(self, **kwargs):
        """Build a mock provider from the shared test credentials plus extras."""
        params = dict(mocks.OAUTH1_CREDENTIALS)
        params.update(kwargs)
        return mocks.MockOAuth1Provider(**params)

    def test_name(self):
        p = self._provider()
        self.assertEqual('mockoauth1provider', p.name)
        self.assertEqual('Mock OAuth1 Provider', p.verbose_name)
        self.assertEqual('Mock OAuth1 Provider', str(p))

    def test_get_token(self):
        p = self._provider(token=mocks.OAUTH1_VALID_TOKEN_DICT)
        self.assertDictEqual(mocks.OAUTH1_VALID_TOKEN_DICT, p.get_token())

    @mocks.patch_responses(mocks.OAUTH1_REQUEST_TOKEN_RESPONSE_1)
    def test_get_request_token(self, responses):
        token = self._provider().get_request_token('https://example.org/callback')
        self.assertEqual(mocks.OAUTH1_REQUEST_TOKEN, token)

    def test_get_authorization_url(self):
        url = self._provider().get_authorization_url(mocks.OAUTH1_REQUEST_TOKEN)
        self.assertEqual(mocks.OAUTH1_GET_AUTHORIZATION_URL_EXPECTED_RESULT, url)

    @mocks.patch_responses(mocks.OAUTH1_ACCESS_TOKEN_RESPONSE_1)
    def test_get_access_token(self, responses):
        token = self._provider().get_access_token(
            'https://example.org/callback?oauth_verifier=verifier&oauth_token=token',
            mocks.OAUTH1_REQUEST_TOKEN)
        self.assertEqual(mocks.OAUTH1_ACCESS_TOKEN, token)

    @mocks.patch_responses(mocks.OAUTH1_REQUEST_RESPONSE)
    def test_request(self, responses):
        p = self._provider(token=mocks.OAUTH1_VALID_TOKEN_DICT)
        response = p.request('https://example.org/profile')
        self.assertDictEqual(mocks.OAUTH1_REQUEST_EXPECTED_RESULT, response.json())

    @mocks.patch_responses(mocks.OAUTH1_REQUEST_RESPONSE)
    def test_get_profile(self, responses):
        p = self._provider(token=mocks.OAUTH1_ACCESS_TOKEN)
        self.assertDictEqual(mocks.OAUTH1_GET_PROFILE_EXPECTED_RESULT, p.get_profile())

    @mocks.patch_responses(mocks.OAUTH1_REQUEST_TOKEN_RESPONSE_1, mocks.OAUTH1_ACCESS_TOKEN_RESPONSE_1)
    def test_oauth1_dance(self, responses):
        dance = self._provider().dance({}, 'https://example.org/callback')
        authorization_url = dance.get_authorization_url()
        self.assertEqual(mocks.OAUTH1_GET_AUTHORIZATION_URL_EXPECTED_RESULT, authorization_url)
        token = dance.get_access_token('https://example.org/callback?oauth_verifier=verifier&oauth_token=token')
        self.assertDictEqual(mocks.OAUTH1_ACCESS_TOKEN, token)
|
'''
Q. Given a linkedlist head , find whether the linkedlist has a cycle or not.
Hint: Similar to finding the middle node of a list with two pointers (slow/fast).
'''
def hasCycle(self, head: ListNode) -> bool:
    """Return True if the linked list starting at *head* contains a cycle.

    Floyd's tortoise-and-hare: the slow pointer moves one step per
    iteration, the fast pointer two; they can only meet again if the
    list loops back on itself.  O(n) time, O(1) space.
    """
    # Empty list cannot have a cycle (use 'is None', not '== None').
    if head is None:
        return False
    slow_node = fast_node = head
    while fast_node.next is not None and fast_node.next.next is not None:
        fast_node = fast_node.next.next
        slow_node = slow_node.next
        if fast_node == slow_node:
            return True
    # fast pointer reached the end of the list: no cycle
    return False
from .document import Document
class SocialDocument(Document):
    """Object representation of a supporting social document.

    Social documents are normally URLs to social media profiles that help
    verify the user's identity.
    https://docs.synapsepay.com/docs/user-resources#section-social-document-types
    """

    @classmethod
    def create(cls, base_document, type=None, value=None):
        """Add a SocialDocument to the BaseDocument and return it.

        Args:
            type (str): https://docs.synapsepay.com/docs/user-resources#section-social-document-types
            value (str): url to social media profile, for example

        Returns:
            SocialDocument: a new SocialDocument instance
        """
        payload = cls.payload_for_create(type, value)
        updated = base_document.update(social_documents=[payload])
        # pick the freshly-created document of the requested type off the
        # updated base document
        matches = [doc for doc in updated.social_documents
                   if doc.type == type]
        return matches[0]
|
"""
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao
Graphics layer for a Breakout game built on the campy library: creates the
window, paddle, ball, brick wall and score label, and exposes collision and
velocity helpers for the game loop.
"""
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.gui.events.mouse import onmouseclicked, onmousemoved
import random
BRICK_SPACING = 5      # Space between bricks (in pixels). This space is used for horizontal and vertical spacing.
BRICK_WIDTH = 40       # Width of a brick (in pixels).
BRICK_HEIGHT = 15      # Height of a brick (in pixels).
BRICK_ROWS = 10        # Number of rows of bricks.
BRICK_COLS = 10        # Number of columns of bricks.
BRICK_OFFSET = 50      # Vertical offset of the topmost brick from the window top (in pixels).
BALL_RADIUS = 10       # Radius of the ball (in pixels).
PADDLE_WIDTH = 75      # Width of the paddle (in pixels).
PADDLE_HEIGHT = 15     # Height of the paddle (in pixels).
PADDLE_OFFSET = 50     # Vertical offset of the paddle from the window bottom (in pixels).
INITIAL_Y_SPEED = 7    # Initial vertical speed for the ball.
MAX_X_SPEED = 5        # Maximum initial horizontal speed for the ball.
class BreakoutGraphics:
    """Graphics layer for a Breakout game: window, paddle, ball, bricks, score.

    The paddle follows the mouse horizontally (mouse_move); a click starts
    the game (on_set).  The game loop polls get_speed(), hit_paddle() and
    hit_block() and uses rebound_x()/rebound_y() to bounce the ball.
    """

    def __init__(self, ball_radius = BALL_RADIUS, paddle_width = PADDLE_WIDTH,
                 paddle_height = PADDLE_HEIGHT, paddle_offset = PADDLE_OFFSET,
                 brick_rows = BRICK_ROWS, brick_cols = BRICK_COLS,
                 brick_width = BRICK_WIDTH, brick_height = BRICK_HEIGHT,
                 brick_offset = BRICK_OFFSET, brick_spacing = BRICK_SPACING,
                 title='Breakout'):
        # Create a graphical window, with some extra space
        window_width = brick_rows * (brick_width + brick_spacing) - brick_spacing
        window_height = brick_offset + 3 * (brick_cols * (brick_height + brick_spacing) - brick_spacing)
        self.window = GWindow(width=window_width, height=window_height, title=title)
        # Create a paddle
        self.paddle = GRect(paddle_width, paddle_height)
        self.paddle.filled = True
        self.window.add(self.paddle, (window_width-paddle_width)/2, window_height-paddle_offset-paddle_height)
        # Center a filled ball in the graphical window
        self.ball = GOval(ball_radius*2, ball_radius*2)
        self.ball.filled = True
        self.window.add(self.ball, window_width/2-ball_radius, window_height/2-ball_radius)
        # Default initial velocity for the ball
        self.__dx, self.__dy = self.v_set()
        # Initialize our mouse listeners
        self.__game_start = False  # whether the game has been started by a click
        onmousemoved(self.mouse_move)
        onmouseclicked(self.on_set)
        # Draw bricks
        self._brick_offset = brick_offset
        self._brick_width = brick_width
        self._brick_height = brick_height
        self._brick_spacing = brick_spacing
        self.brick_rows = brick_rows
        self.brick_cols = brick_cols
        self.__build_brick_col()
        # Create score board
        self.score = 0
        self.score_board = GLabel(f"Score : {self.score}")
        self.score_board.font = "Comic Sans MS-15"
        self.window.add(self.score_board, 3, self.score_board.height+15)

    @staticmethod
    def v_set():
        """Pick an initial velocity: random horizontal speed/sign, fixed vertical.

        :return: (dx, dy), int
        """
        vx = random.randint(1, MAX_X_SPEED)
        if random.random() > 0.5:
            vx *= -1
        vy = INITIAL_Y_SPEED
        return vx, vy

    def get_speed(self):
        """Return the current velocity to the caller.

        :return: (dx, dy), int
        """
        return self.__dx, self.__dy

    def game_start(self):
        """Report whether the game has been started (by a mouse click)."""
        return self.__game_start

    def mouse_move(self, mouse):
        # Center the paddle on the mouse x position, clamped to the window edges.
        position = mouse.x - self.paddle.width / 2
        if position <= 0:
            self.paddle.x = 0
        elif position >= self.window.width-self.paddle.width:
            self.paddle.x = self.window.width-self.paddle.width
        else:
            self.paddle.x = position

    def on_set(self, mouse):
        self.__game_start = True  # mark the game as started

    def __build_brick_col(self):
        """Build all brick rows, top to bottom."""
        for i in range(0, self.brick_cols):
            color = i * 255 // (self.brick_cols + 1)  # grayscale value for this row
            self.__build_brick_row(self._brick_offset + i * (self._brick_spacing + self._brick_height),
                                   color)

    def __build_brick_row(self, build_height, gray_scale):
        """Build one horizontal row of bricks.

        build_height: int, y coordinate at which the row is placed
        gray_scale: int, RGB gray value for the row
        """
        for i in range(0, self.brick_rows):
            a_brick = self.__build_a_brick(gray_scale)
            self.window.add(a_brick, i*(self._brick_width+self._brick_spacing), build_height)

    def __build_a_brick(self, gray_scale):
        """Create one filled brick.

        gray_scale: int, RGB gray value
        :return: obj, the constructed brick
        """
        brick = GRect(self._brick_width, self._brick_height)
        brick.filled = True
        brick.color = (gray_scale, gray_scale, gray_scale)
        brick.fill_color = (gray_scale, gray_scale, gray_scale)
        return brick

    def reset_ball_position(self):
        """Move the ball back to the window center and re-randomise its speed."""
        self.ball.x = (self.window.width-self.ball.width)/2
        self.ball.y = (self.window.height-self.ball.height)/2
        self.__dx, self.__dy = self.v_set()

    def hit_paddle(self):
        """Return True when either bottom corner of the ball touches the paddle.

        :return: boolean
        """
        maybe_paddle = self.window.get_object_at(self.ball.x, self.ball.y+self.ball.height)
        if maybe_paddle is not None and maybe_paddle == self.paddle:
            return True
        else:
            maybe_paddle = self.window.get_object_at(self.ball.x + self.ball.width, self.ball.y + self.ball.height)
            if maybe_paddle is not None and maybe_paddle == self.paddle:
                return True
            else:
                return False

    def rebound_x(self):
        """Flip the sign of dx."""
        self.__dx *= -1

    def rebound_y(self):
        """Flip the sign of dy."""
        self.__dy *= -1

    def hit_block(self):
        """Detect a brick hit at any ball corner, rebound, and remove the brick.

        :return: boolean
        """
        # Probe the four corners of the ball for a brick (skip paddle/score label)
        maybe_block = self.window.get_object_at(self.ball.x, self.ball.y)
        if maybe_block is None or maybe_block == self.paddle or maybe_block == self.score_board:
            maybe_block = self.window.get_object_at(self.ball.x+self.ball.width, self.ball.y)
            if maybe_block is None or maybe_block == self.paddle or maybe_block == self.score_board:
                maybe_block = self.window.get_object_at(self.ball.x, self.ball.y+self.ball.height)
                if maybe_block is None or maybe_block == self.paddle or maybe_block == self.score_board:
                    maybe_block = self.window.get_object_at(self.ball.x+self.ball.width, self.ball.y + self.ball.height)
                    if maybe_block is None or maybe_block == self.paddle or maybe_block == self.score_board:
                        return False
        # Determine which face of the brick was struck, then rebound accordingly
        ball_on_top = maybe_block.y - self.ball.y > self.ball.height-INITIAL_Y_SPEED and maybe_block.y - self.ball.y <= self.ball.height
        ball_under_buttom = self.ball.y-maybe_block.y > self._brick_height-INITIAL_Y_SPEED and self.ball.y-maybe_block.y <= self._brick_height
        ball_is_left = maybe_block.x - self.ball.x > self.ball.width-abs(self.__dx) and maybe_block.x - self.ball.x <= self.ball.width
        ball_is_right = self.ball.x-maybe_block.x > self._brick_width-abs(self.__dx) and self.ball.x-maybe_block.x <= self._brick_width
        if ball_on_top or ball_under_buttom:
            self.rebound_y()
        if ball_is_left or ball_is_right:  # could be elif: most hits are on the top/bottom face
            self.rebound_x()
        self.window.remove(maybe_block)
        self.score += 10  # points for destroying a brick
        return True
|
import yaml
import json
from jinja2 import Template
if __name__ == "__main__":
    # Render the static site: fill template.html with the values from
    # config.yaml and write the result to index.html.
    with open("config.yaml", "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    with open("template.html", 'r') as f:
        template = Template(f.read())
    with open("index.html", 'w') as f:
        f.write(template.render(**config))
# Theme/skin definition module: metadata, colour settings and Base64-encoded
# button/banner images consumed by the application's GUI loader.
# Skin info and colours
theme_name = ""
theme_author = ""
theme_version = ""
theme_bio = ""  # A long bio will get cut off, keep it simple.
window_theme = "Black"
button_colour = "black"
attacks_theme = {"background": "Black", "button_colour": ('white', 'firebrick4')}
banner_size = (600, 100)        # (width, height) in pixels
banner_padding = ((45, 10), 0)
# Command Line colours
menu1 = "red"
menu2 = "yellow"
# Button/Banner Images (Base64) -- empty bytes means "no image" for that button
rtb_icon = b''
rtb_banner = b''
button_joiner = b''
button_leaver = b''
button_group_leaver = b''
button_token_checker = b''
button_checker_v2 = b''
button_message_spammer = b''
button_ascii_spammer = b''
button_mass_mentioner = b''
button_role_mass_mentioner = b''
button_vc_spammer = b''
button_dm_spammer = b''
button_friend_bomber = b''
button_group_spammer = b''
button_image_spammer = b''
button_status_changer = b''
button_nickname_changer = b''
button_embed_spammer = b''
button_avatar_changer = b''
button_server_cleaner = b''
button_hypesquad_changer = b''
button_reaction_adder = b''
button_plugins = b''
import pandas as pd
def read_kinoeva(input_file):
    """Read a whitespace-delimited Kinovea export into a pandas DataFrame.

    Arguments:
        input_file - path to the text file
    Returns:
        DataFrame with numeric columns 'X' and 'Y', an 'Original_timestamp'
        column holding the raw h:mm:ss:hundredths strings, and a Timedelta
        index parsed from those timestamps.
    """
    # delim_whitespace is deprecated; sep=r'\s+' is the supported equivalent
    df = pd.read_csv(input_file, sep=r'\s+', index_col=0)
    # drop last column
    df.drop(columns=['export'], inplace=True)
    # replace column names
    df.columns = ['X', 'Y']
    # remove first (units/header) row
    df.drop(['#T'], inplace=True)
    # convert data to numeric
    df = df.apply(pd.to_numeric, errors='coerce', axis=1)
    # rename index
    df.index.name = 'T'
    # save the original timestamp as another column
    df['Original_timestamp'] = df.index
    # time units are h:mm:ss:hundredths -> rewrite the last ':' as '.' so
    # pandas can parse it; regex=True is required since pandas 2.0, where
    # str.replace defaults to literal (non-regex) matching
    df.index = pd.to_timedelta(
        df.index.str.replace(r'(0:.*):(\d+)', r'0\1.\2', regex=True),
        errors='coerce')
    return df
"""
File: maskify_dataset.py
Usage: Given a dataset that contains bounding box annotations and positive and negative
histograms, we can derive the segmentation, or shape, of the object of interest
and record those segmentation values in a COCO formatted JSON data structure
"""
__author__ = "Brent Redmon"
__copyright__ = "Copyright 2019, Texas State University"
__credits__ = ["Brent Redmon", "Nicholas Warren"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = ["Brent Redmon", "Nicholas Warren"]
__email__ = "btr26@txstate.edu"
__status__ = "Production"
from PIL import Image
from sklearn.cluster import MeanShift, estimate_bandwidth
from skimage.filters import gaussian
from skimage.measure import label
import matplotlib.pyplot as plt
import numpy as np
from math import floor
from scipy.ndimage.morphology import binary_dilation, binary_fill_holes
import json
from pycocotools import mask
from skimage import measure
import os
from progress.bar import IncrementalBar
def print_report(reports, config):
    """Print a per-subset processing summary for the whole dataset.

    Arguments:
        - reports: list of subset_report dicts (name, annotation_directory,
          processed_images, total_images)
        - config: the maskify config dict (only "dataset" is read)
    """
    print("Printing reports for each set processed:\n\n")
    # fixed: every .format() call below was missing its {} placeholder, so
    # the values were silently discarded
    print("Dataset: {}".format(config["dataset"]))
    for subset_report in reports:
        print("Subset: {}".format(subset_report["name"]))
        print("Annotation file save directory: {}".format(subset_report["annotation_directory"]))
        print("Processed {} images out of {}\n".format(subset_report["processed_images"], subset_report["total_images"]))
def maskify(im,
            crop_box,
            threshold,
            positive_histogram,
            negative_histogram
            ):
    """ For each annotation, create a COCO formatted segmentation

    Arguments:
        - im: The input image (PIL Image)
        - crop_box: Tuple of coordinates isolating object of interest
        - threshold: Percentage value representing likelihood of a pixel
          belonging to the positive histogram class, or 'auto' to use the
          median probability
        - positive_histogram: Histogram representing pixels pertaining to an object
        - negative_histogram: Histogram representing non-example pixels pertaining to
          an object
    Return: Array of COCO styled segmentation annotations

    NOTE: the original signature listed 'threshold' twice, which is a
    SyntaxError; the duplicate has been removed.
    """
    # Get the size of the image before cropping (PIL .size is (width, height))
    original_rows, original_cols = im.size
    # Crop the image around the bounding box
    im = im.crop(crop_box)
    # Load pixel RGB data
    pix = im.load()
    # Get row and cols of cropped image
    cols, rows = im.size
    # Convert cropped image to numpy array
    im = np.array(im)
    # Get the height and width of the cropped image
    rows = np.shape(im)[0]
    cols = np.shape(im)[1]
    # Get histogram bins
    histogram_bins = np.shape(positive_histogram)[0]
    # Factor mapping a 0-255 channel value to a histogram bin index
    factor = 256 / histogram_bins
    # Probability map, initialised to zeros
    result = np.zeros((rows, cols))
    # Probability of the positive class given RGB and the two histograms
    for row in range(rows):
        for col in range(cols):
            # Get each RGB value
            red = float(pix[col, row][0])
            green = float(pix[col, row][1])
            blue = float(pix[col, row][2])
            # Index into the histograms based on RGB value and bin factor
            red_index = floor(red / factor)
            green_index = floor(green / factor)
            blue_index = floor(blue / factor)
            # Get positive and negative values from histograms
            positive = positive_histogram[red_index, green_index, blue_index]
            # fixed: was assigned to 'negative_value' but read as 'negative'
            negative = negative_histogram[red_index, green_index, blue_index]
            total = positive + negative
            # fixed: 'total is not 0' tested identity, not equality
            if total != 0:
                # fixed: 'water_value' was undefined; the positive count is meant
                result[row, col] = positive / total
    # fixed: "threshold is 'auto'" tested identity on a string; use ==
    threshold = np.median(result) if threshold == 'auto' else threshold
    # The intuition here is that if our threshold is equal to the median value
    # of the resulting array, then there will be a largest connected component.
    # Any other value, and we're risking the possibility of no largest
    # connected component existing, which we must account for.
    if threshold != np.median(result):
        result_backup = np.copy(result)
    # Binarise: pixels BELOW the threshold become 1 (note the inversion)
    for row in range(rows):
        for col in range(cols):
            if result[row, col] < threshold:
                result[row, col] = 1
            else:
                result[row, col] = 0
    # Retry with the median if everything thresholded to 0
    # (i.e. no largest connected component)
    if np.sum(result) == 0:
        result = result_backup
        for row in range(rows):
            for col in range(cols):
                if result[row, col] < np.median(result):
                    result[row, col] = 1
                else:
                    result[row, col] = 0
    # Get the largest connected component
    labels = label(result)
    assert(labels.max() != 0)  # assume at least 1 CC
    largestCC = labels == np.argmax(np.bincount(labels.flat)[1:])+1
    # Fill holes in the mask
    largestCC = binary_fill_holes(largestCC)
    # Dilate to expand the mask
    largestCC = binary_dilation(largestCC, iterations=4)
    plt.imshow(largestCC)
    # Zeros array the same size as the original image before cropping
    image_with_mask = np.zeros((original_cols, original_rows))
    # Overlay binary mask onto zeros array
    image_with_mask[crop_box[1]:crop_box[1] + rows, crop_box[0]:crop_box[0] + cols] = largestCC
    """ Convert the binary mask to COCO JSON format. Code referenced from:
        - https://github.com/cocodataset/cocoapi/issues/131#issuecomment-371250565
    """
    image_with_mask = np.array(image_with_mask, dtype=np.uint8)
    fortran_ground_truth_binary_mask = np.asfortranarray(image_with_mask)
    # NOTE(review): area/bbox are computed but never returned or used
    encoded_ground_truth = mask.encode(fortran_ground_truth_binary_mask)
    ground_truth_area = mask.area(encoded_ground_truth)
    ground_truth_bounding_box = mask.toBbox(encoded_ground_truth)
    contours = measure.find_contours(image_with_mask, 0.5)
    segmentations = []
    for contour in contours:
        # find_contours yields (row, col); COCO wants (x, y)
        contour = np.flip(contour, axis=1)
        segmentation = contour.ravel().tolist()
        segmentations.append(segmentation)
    return segmentations
if __name__ == "__main__":
    # Report data structure to display at the end
    report = list()
    # Load the config file
    maskify_config = json.load(open("configs/maskify_config.json"))
    dataset = maskify_config["dataset"]
    # Threshold and histograms are the same for every annotation and subset;
    # load them once instead of on every loop iteration.
    threshold = maskify_config["threshold"]
    positive_histogram = np.load(maskify_config["positive_histogram"])
    negative_histogram = np.load(maskify_config["negative_histogram"])
    """ A subset in this case is typically the following:
        - Test set
        - Train set
        - Validation set
    """
    for subset in maskify_config["subsets"]:
        subset_report = {
            "name": subset,  # fixed: the missing comma here was a SyntaxError
            "annotation_directory": "",
            "processed_images": 0,
            "total_images": ""
        }
        # fixed: annotations were always loaded from the hard-coded "train"
        # subset even while iterating over every subset
        coco_json = json.load(open("datasets/{}/{}/annotations.json".format(dataset, subset)))
        # Initialize progress bar
        bar = IncrementalBar("Processing {} images".format(subset), max=len(coco_json["annotations"]))
        # Log the number of images being processed
        subset_report["total_images"] = len(coco_json["annotations"])
        print("\n\n")
        for annotation in coco_json["annotations"]:
            # Find the picture related to the annotation
            for image in coco_json["images"]:
                if annotation["image_id"] == image["id"]:
                    image_filename = image["file_name"]
                    break
            # Open image
            this_image = Image.open("datasets/{}/{}/images/{}".format(dataset, subset, image_filename))
            # Crop boundary from the box-style segmentation corners
            crop_box = (annotation["segmentation"][0][0], annotation["segmentation"][0][1], annotation["segmentation"][0][4], annotation["segmentation"][0][5])
            try:
                # fixed: was called with undefined names RGB_Water_Histogram /
                # RGB_Non_Water_Histogram instead of the loaded histograms
                annotation["segmentation"] = maskify(this_image, crop_box, threshold, positive_histogram, negative_histogram)
                subset_report["processed_images"] = subset_report["processed_images"] + 1
            except Exception as e:
                print("Could not process training image: {} -- {}".format(image_filename, e))
            bar.next()
        bar.finish()
        print("Saving {} annotations...".format(subset))
        # Set the save directory in the "annotation_directory" entry of subset_report
        subset_report["annotation_directory"] = "datasets/{}/{}/annotations_maskified.json".format(dataset, subset)
        with open(subset_report["annotation_directory"], "w") as outfile:
            json.dump(coco_json, outfile)
        # Append the report to the master report list
        report.append(subset_report)
    print_report(report, maskify_config)
## A SIMPLE SCIENTIFIC CALCULATOR, LEVERING THE MATH CLASS,
## WITH UNLIMITED DEFINABLE VARIABLES, A RUNNING LOG OF ENTRIES
import math
from stack import Stack
class TraceStack(Stack):
    """Bounded stack of trace messages: once full, the oldest entry is
    discarded to make room for each new one."""

    def __init__(self):
        self.limit = 1000
        self.stack = []

    def set_limit(self, limit):
        """Change the maximum number of retained entries."""
        self.limit = limit

    def add(self, value):
        """Append *value*, evicting the oldest entry when at capacity."""
        if len(self.stack) >= self.limit:
            self.stack.pop(0)
        self.stack.append(value)
class Register:
    """Stores and accesses named variables alongside read-only math constants."""

    def __init__(self):
        self.variables = {}
        # Constants take precedence over nothing but can never be overwritten.
        self.constants = {'pi': math.pi,
                          'e': math.e,
                          'tau': math.tau,
                          'inf': math.inf,
                          'nan': math.nan}

    def get(self, name):
        """Return the value bound to *name* (variables first, then
        constants), or None if the name is unknown."""
        if name in self.variables:
            return self.variables[name]
        return self.constants.get(name)

    def set(self, name, value):
        """Bind *name* to *value* unless it names a constant."""
        if name not in self.constants:
            self.variables[name] = value

    def contains(self, name):
        """True when *name* is a known variable or constant."""
        return name in self.variables or name in self.constants
class Program:
    """Interpreter for a small line-numbered language (IS/GOTO/WHILE/
    ENDWHILE/IFSKIP/PRINT/PRINTLINE/END plus '=' assignments).

    Scripts are loaded with load(), WHILE blocks matched with find_blocks(),
    and executed with run(); break_off caps total iterations (0 = unlimited).
    """

    def __init__(self, break_off=10000):
        self.program_lines = []    # (command, value1, value2) tuples in load order
        self.line_directory = {}   # script line number -> index into program_lines
        self.program_blocks = {}   # ENDWHILE index -> matching WHILE index
        self.reverse_blocks = {}   # WHILE index -> matching ENDWHILE index
        self.variables = Register()
        self.calculator = Calculator(register=self.variables)
        self.logic = Logic(oracle=self)
        self.trace_stack = TraceStack()
        self.break_off = break_off
        self.ccalc = self.calculator.calculate

    def contains(self, x):
        return x in self.variables.variables

    def get(self, x):
        """ Evaluates expressions that are either true or false"""
        COMPTERMS = ['==', '>=', '<=', '!=', '>', '<', ]

        def contains_comp(x):
            """True if x contains any of the COMP terms"""
            for comp in COMPTERMS:
                if comp in x:
                    return True
            return False

        def comp_split(phrase):
            """ Splits the phrase into a list of terms to be compared"""
            level = 0
            phrase = list(phrase)
            for index, x in enumerate(phrase):
                # fixed: compared the literal string 'x' (never '(' or ')')
                # instead of the character variable x, so parenthesis depth
                # was never tracked
                if x == '(':
                    level += 1
                elif x == ')':
                    level -= 1
                if level == 0:
                    found = False
                    for comp in COMPTERMS:
                        if len(comp) == 2 and x == comp[0] and phrase[index+1] == comp[1]:
                            phrase[index] = '#'+comp[0]
                            phrase[index+1] = comp[1]+'#'
                            found = True
                        elif not found and len(comp) == 1 and x == comp:
                            phrase[index] = '#'+x+'#'
            phrase = ''.join(phrase).split('#')
            newphrase = []
            for x in phrase:
                if x in COMPTERMS:
                    newphrase.append(x)
                else:
                    newphrase.append(self.calculator.calculate(x))
            return newphrase

        def evaluate_comp_list(phrase):
            def compare(a, R, b):
                if R == '==':
                    return a == b
                elif R == '!=':
                    return a != b
                elif R == '>':
                    return a > b
                elif R == '<':
                    return a < b
                elif R == '>=':
                    return a >= b
                elif R == '<=':
                    return a <= b

            def get_triads(phrase):
                # overlapping (value, op, value) windows give chained
                # comparison semantics: a < b < c -> (a,<,b), (b,<,c)
                triads = []
                for count, x in enumerate(phrase):
                    if count % 2 == 0 and count+2 < len(phrase):
                        triads.append((phrase[count], phrase[count+1], phrase[count+2]))
                return triads

            for x in get_triads(phrase):
                if not compare(x[0], x[1], x[2]):
                    return False
            return True

        if x in self.variables.variables:
            val = self.variables.variables[x]
            return val
        else:
            if contains_comp(x):
                phrase = comp_split(x)
                if contains_comp(phrase):
                    return evaluate_comp_list(phrase)
                else:
                    phrase = [self.logic.interpret(self.logic.parse(x)) for x in phrase]
                    for phr in phrase:
                        if not phr:
                            return False
                    return True
            elif x in ['True', 'False', 'bTrue', 'bFalse']:
                return {'True': True,
                        'False': False,
                        'bTrue': True,
                        'bFalse': False}[x]
            else:
                x = self.calculator.calculate(x)
                if not x:
                    return False
                return True

    def read_line(self, x):
        """Parse one script line into (line_no, command, value1, value2).

        Up to four top-level spaces (outside parentheses and quotes) act as
        field separators; runs of spaces collapse.  Blank lines return
        (False, False, False, False).
        """
        x = x.strip()
        if not x:
            return False, False, False, False
        x = list(x)
        lastspace = False
        spacecount = 0
        p_level = 0
        q_level = 0
        first_q = ''
        for index, char in enumerate(x):
            if char == '(':
                p_level += 1
            if char == ')':
                p_level -= 1
            # fixed: these quote checks were sequential 'if's, so an opening
            # quote immediately satisfied the closing check in the same
            # iteration and q_level never changed; 'elif' pairs them properly
            if char == "'" and not first_q:
                q_level += 1
                first_q = char
            elif char == "'" and first_q == "'":
                q_level -= 1
                first_q = ''
            if char == '"' and not first_q:
                q_level += 1
                first_q = char
            elif char == '"' and first_q == '"':
                q_level -= 1
                first_q = ''
            if not lastspace and spacecount < 4 and char == ' ' and p_level == 0 and q_level == 0:
                lastspace = True
                spacecount += 1
                x[index] = '<#SPACING#>'
            elif lastspace and char == ' ':
                pass
            elif char != ' ':
                lastspace = False
        x = ''.join(x)
        x = x.split('<#SPACING#>') + [None, None, None]
        return int(x[0]), x[1], x[2], x[3]

    def load(self, script):
        """Parse script text into program_lines and line_directory."""
        def sis(x):
            # strip strings, pass everything else (None) through
            if isinstance(x, str):
                return x.strip()
            return x
        for line in script.split('\n'):
            print(line)
            if line.strip() and line.strip()[0] == '#':
                # fixed: 'pass' fell through and read_line then crashed on
                # int('#...'); comment lines must be skipped
                continue
            line_no, command, value1, value2 = self.read_line(line)
            if line_no:
                index = len(self.program_lines)
                self.line_directory[line_no] = index
                self.program_lines.append((command,
                                           sis(value1),
                                           sis(value2)))

    def find_blocks(self):
        """Match WHILE/ENDWHILE pairs into program_blocks / reverse_blocks."""
        indexes = Stack()
        for counter, line in enumerate(self.program_lines):
            if line[0] == 'WHILE':
                indexes.add(counter)
            elif line[0] == 'ENDWHILE':
                x = indexes.pop()
                self.program_blocks[counter] = x
                self.reverse_blocks[x] = counter

    def run(self):
        """Execute the loaded program until END, fall-through, or break_off."""
        def xprint(x, end=''):
            # floats display as ints when possible; inf/nan fall back to repr
            if isinstance(x, float):
                try:
                    print(int(x), end=end)
                except (ValueError, OverflowError):
                    print(x, end=end)
            else:
                print(x, end=end)
        self.trace_stack = TraceStack()
        line_counter = 0
        iteration_counter = 0
        while (self.break_off == 0 or iteration_counter < self.break_off) and line_counter < len(self.program_lines):
            iteration_counter += 1
            line = self.program_lines[line_counter]
            if line:
                self.trace_stack.add('; '.join([str(x) for x in line if x]))
            line_counter += 1
            command, value1, value2 = line[0], line[1], line[2]
            if value1 is not None:
                if value2 is None:
                    terms = 1
                else:
                    terms = 2
            else:
                terms = 0
            if value1 == '=':
                # 'x = expr' arrives split across the three fields; rejoin it
                command = command + value1 + value2
            if '=' in command:
                subject = command.split('=')[0]
                predicate = self.calculator.calculate(command.split('=')[1])
                self.variables.set(subject, predicate)
            elif command in ['IS',
                             'GOTO',
                             'WHILE',
                             'ENDWHILE',
                             'PRINT',
                             'IFSKIP',
                             'PRINTLINE',
                             'END']:  # fixed: END was missing, making its handler unreachable
                def debracket(x):
                    # strip one matching pair of surrounding quotes
                    if isinstance(x, str):
                        if x[0]+x[-1] in ['""', "''"]:
                            return x[1:-1]
                    return x
                if command == 'IS' and terms == 2:
                    self.variables.set(value1, self.calculator.calculate(value2))
                    self.trace_stack.add(str(line_counter)+' IS:'+value1)
                elif command == 'GOTO':
                    line_counter = self.line_directory[int(value1)]
                    self.trace_stack.add(str(line_counter)+' GOTO:'+value1+'/'+str(line_counter))
                elif command == 'WHILE':
                    if not self.get(value1):
                        line_counter = self.reverse_blocks[line_counter-1]+1
                        self.trace_stack.add(str(line_counter)+' TERMINATES WHILE')
                        self.trace_stack.add(str(line_counter)+' GOTO '+str(line_counter))
                    else:
                        # fixed: was logged unconditionally, even on termination
                        self.trace_stack.add(str(line_counter)+' WHILE CONTINUES')
                elif command == 'ENDWHILE':
                    line_counter = self.program_blocks[line_counter-1]
                    self.trace_stack.add(str(line_counter)+' WHILE RETURN')
                    self.trace_stack.add(str(line_counter)+' GOTO '+str(line_counter))
                elif command == 'IFSKIP':
                    if self.get(value1):
                        if value2 is None:
                            value2 = 1
                        else:
                            value2 = self.calculator.calculate(int(value2))
                        line_counter += value2
                        self.trace_stack.add(str(line_counter)+' IFSKIP TRUE SKIPPING'+str(value2))
                    else:
                        # fixed: FALSE was logged even when the condition was true
                        self.trace_stack.add(str(line_counter)+' IFSKIP FALSE')
                elif command == 'PRINTLINE':
                    xprint(debracket(self.calculator.calculate(value1)), end='\n')
                    self.trace_stack.add(str(line_counter)+' PRINTLINE ')
                elif command == 'PRINT':
                    xprint(debracket(self.calculator.calculate(value1)), end='')
                    self.trace_stack.add(str(line_counter)+' PRINT ')
                elif command == 'END':
                    self.trace_stack.add(str(line_counter)+' PROGRAM TERMINATED')
                    break

    def interpret(self, x=None, break_off=10000):
        """Reset all state, then load, block-scan and run *x* as a script."""
        self.__init__(break_off=break_off)
        self.load(x)
        self.find_blocks()
        self.run()
class Logic:
    """Evaluates propositional-logic expressions over '&', '|', '>' and '#'
    connectives; *oracle* (typically a Program) resolves atom truth values."""
    def __init__ (self,oracle=None):
        # oracle: object consulted for variable truth values (may be None)
        self.oracle = oracle
def contains (self,phrase,chars):
"""Returns TRUE if <phrase> contains ANY one of <chars>"""
for x in chars:
if x in phrase:
return True
return False
def bracketed (self,phrase):
"""Returns TRUE if <phrase> is encompassed by a left bracket and a right bracket
at the same hierarchical level"""
level = 0
left_point = None
right_point = None
for count,char in enumerate(phrase):
if char == '(':
if level == 0:
left_point = count
level+=1
if char == ')':
level-=1
if level == 0:
right_point = count
if not (left_point is None) and (not right_point is None) and left_point == 0 and right_point == len(phrase)-1:
return True
return False
def split_into_phrases (self, phrase):
"""Inputs a string and returns a list containing elemenets, split according to parsing rules.
IF the list is of elements to be combined with AND, then no header.
If the list if of elements to be combined with OR, then '@' at the head of the list.
"""
if not self.contains(phrase,'()'):
#For a phrase without parantheses
if '|' in phrase:
return ['@']+[x for x in phrase.split('|')]
elif '&' in phrase:
return [x for x in phrase.split('&')]
#If the phrase contains parantheses.
phrase = list (phrase)
#convert string into a list of chars
level = 0
found = False # if one of the operators is found in the phrase
for operator in ['#','>','|','&']:
level = 0 # reset level
if not found:
for x,char in enumerate(phrase):
if char == '(':
level += 1
if char == ')':
level -=1
# level indicates level within hierarchy established by parantheses
if level == 0 and x+1 < len(phrase) and phrase[x+1] == operator:
phrase[x+1] = '<<'+operator+'>>'
found = True
break
if '<<&>>' in phrase:
# For AND
phrases = ''.join(phrase).split('<<&>>')
elif '<<|>>' in phrase:
# For OR
phrases = ['@']+''.join(phrase).split('<<|>>')
elif '<<>>>' in phrase:
# For INFERENCE
premise = ''.join(phrase).split('<<>>>')[0]
conclusion = ''.join(phrase).split('<<>>>')[1]
phrases = ['@','~'+premise,conclusion]
# A => B translated as ~A OR B
elif '<<#>>' in phrase:
# FOR EQUIVALENCY
premise = ''.join(phrase).split('<<#>>')[0]
conclusion = ''.join(phrase).split('<<#>>')[1]
phrase1 = '~'+'('+premise+'&'+'~'+conclusion+')'
phrase2 = '~'+'('+conclusion+'&'+'~'+premise+')'
phrases = [phrase1,phrase2]
# A<>B translated as (~A or B) & (~B or A)
return [x for x in phrases]
def all_is_P (self,phrase,predicate_function=None):
"""Returns TRUE if <predicate_function> is TRUE of
every element in <phrase>"""
returnvalue = True
for x in phrase:
if not predicate_function(x):
returnvalue = False
return returnvalue
def is_simple (self, phrase):
"""Returns TRUE if <phrase> is a simple name, i.e. a variable"""
return not self.contains(phrase,'()&|>#')
def is_bool (self, phrase):
"""Returns TRUE if <phrase> is boolean."""
return isinstance(phrase,bool)
def and_sum (self, phrase):
"""Returns TRUE iff every element in <phrase> is TRUE"""
for x in phrase:
if not x:
return False
return True
def or_sum (self, phrase):
"""Returns TRUE iff one element in <phrase> is TRUE"""
for x in phrase:
if x:
return True
return False
def heading_count(self, phrase,char='~'):
"""Returns the number of negating prefixes in <phrase> and the <phrase> shorn of prefixes."""
count = 0
for x in phrase:
if x != char:
break
count+=1
return count,phrase[count:]
def parse (self, phrase):
"""The primary recursive parsing function"""
if isinstance(phrase,str):
#If the phrase is a string
if self.is_simple(phrase):
#EXITS the recursion
if phrase[0:2] == '~~':
return phrase[2:]
#Eliminates negations that cancel each other
return phrase
elif self.bracketed(phrase):
#Eliminate top-level parantheses
return self.parse(phrase[1:-1])
elif phrase[0] == '~':
#If the phrase begins with a negating prefix...
negations,phrase = self.heading_count(phrase)
if self.bracketed(phrase):
#If the negated phrase is bracketed
if negations % 2 == 1:
subphrase = self.split_into_phrases(phrase[1:-1])
if subphrase[0] != '@':
#De Morgan's Law
return self.parse(['@']+['~'+x for x in subphrase])
else:
#De Morgan's Law
return self.parse(['~'+x for x in subphrase[1:]])
else:
return self.parse(phrase[1:-1])
return self.parse(self.split_into_phrases((negations%2)*'~'+phrase))
else:
return self.parse(self.split_into_phrases(phrase))
# IF the phrase is a list
if self.all_is_P(phrase,predicate_function=self.is_simple):
#If every terms of the phrase list is simple...
#This prepares for EXIT from recursion
return [self.parse(x) for x in phrase]
return self.parse([self.parse(x) for x in phrase])
def no_or_clauses (self,phrase):
""" Returns TRUE if <phrase> contains no OR lists."""
for x in phrase:
if isinstance(x,list) and x[0] == '@':
return False
return True
def multiply (self,phrase):
"""Recursive function to combine AND or OR lists.
The PRODUCT of OR lists is used to generate the TRUTH TABLE.
"""
if not isinstance(phrase,list):
return phrase
if self.no_or_clauses(phrase):
# IF there are only AND lists at the top level
return [self.multiply(x) for x in phrase]
else:
# For a combination of AND and OR lists
and_clauses = []
or_clauses = []
for x in phrase:
# DIVIDES into AND and OR lists
if isinstance(x,list) and x[0]=='@':
or_clauses.append(x)
else:
and_clauses.append(x)
multiplied_phrases = [and_clauses]
for x in or_clauses:
# Produces the product of two OR lists.
# [A,B][C,D] = [[A,C],[A,D],[B,C],[B,D]]
new_phrases = []
for y in x[1:]:
for z in list(multiplied_phrases):
if not isinstance(z,list):
new_phrases.append([z,y])
else:
new_phrases.append(z+[y])
multiplied_phrases = [self.multiply(x) for x in new_phrases]
return extract_lists(multiplied_phrases)
def interpret (self,phrase):
"""Recursive function interpreting LIST of AND and OR lists containing BOOLEAN values to
yield a BOOLEAN value.
<universe> is the dictionary representing the true facts with reference to which the
value of <phrase> will be calculated."""
if phrase is None:
return phrase
if isinstance(phrase,str):
if phrase=='@':
return '@'
negations,phrase = self.heading_count(phrase)
if not self.oracle.contains(phrase):
# IF the truth value of phrase not defined in universe.
return None
if negations % 2 == 0:
if self.oracle.contains(phrase):
# If no negative prefix, return value of phrase in universe.
return self.oracle.get(phrase)
else:
if self.oracle.contains(phrase):
# If negative prefix...
return not self.oracle.get(phrase)
if isinstance(phrase,bool):
return phrase
elif self.all_is_P(phrase,predicate_function=self.is_bool) or (phrase[0]=='@' and all_is_P(phrase[1:],predicate_function=self.is_bool)):
# If an AND or OR LIST, return TRUE or FALSE for the list.
if phrase[0]=='@':
return self.or_sum(phrase[1:])
else:
return self.and_sum(phrase)
phrase = [x for x in phrase if not (x is None)]
#Eliminate null elements.
if not phrase:
return None
return self.interpret([self.interpret(x,oracle=oracle) for x in phrase])
#Recursively calls function.
class ListType:
    """Minimal list wrapper exposing the verb-named helpers used by the calculator."""
    def __init__ (self,x=None):
        # Copy the source iterable so outside mutation cannot leak in.
        self.list = [] if x is None else list(x)
    def appends(self,x):
        """Append x and hand back the underlying list."""
        self.list.append(x)
        return self.list
    def contains(self,x):
        """Membership test against the wrapped list."""
        return x in self.list
    def slices(self,x,y):
        """Return a new ListType over the half-open slice [x:y]."""
        return ListType(self.list[x:y])
    def fetches(self,x):
        """Positional item access."""
        return self.list[x]
    def __str__ (self):
        return '[' + ', '.join(str(item) for item in self.list) + ']'
    def __add__ (self,other):
        return ListType(self.list + other.list)
    def __len__ (self):
        return len(self.list)
class Calculator:
    """Expression calculator.

    Holds the operator list, a table of named functions (mostly from math,
    each entry (callable, min arity, max arity)), a variable Register, and
    the SCRIPT help text shown at console start-up.
    """
    def __init__(self,register = None):
        def debracket (x):
            # Strip one pair of matching quotes from a quoted literal.
            if x[0]+x[-1] in ['""',"''"]:
                return x[1:-1]
            # BUGFIX: previously fell through and returned None for
            # unquoted input; sibling debracket (in Program) returns x.
            return x
        def gcd (x,y):
            return math.gcd(int(x),int(y))
        def flinput (x):
            # Prompt for a float; the prompt text may be quoted.
            return float(input(debracket(x)))
        def sinput (x):
            # Prompt for a string; result is re-quoted for the parser.
            return '"'+str(input(debracket(x)))+'"'
        self.operations = ['+','-','*','/','^','%']
        # basic operators in order of evaluation
        # functions imported from math; each value is (callable, min arity, max arity)
        self.functions = {'fact':(math.factorial,1,1),
                          'abs':(math.fabs,1,1),
                          'floor':(math.floor,1,1),
                          # BUGFIX: 'fmod' pointed at the 1-argument math.floor,
                          # so fmod(x,y) always raised TypeError.
                          'fmod':(math.fmod,2,2),
                          'frexp':(math.frexp,1,1),
                          'gcd':(gcd,2,2),
                          'remainder':(math.remainder,2,2),
                          'trunc':(math.trunc,1,1),
                          'exp':(math.exp,1,1),
                          'expm1':(math.expm1,1,1),
                          'logn':(math.log,1,1),
                          'logx':(math.log,2,2),
                          'log1p':(math.log1p,1,1),
                          'log2':(math.log2,1,1),
                          'log10':(math.log10,1,1),
                          'pow':(math.pow,2,2),
                          'sum':(math.fsum,1,10000),
                          'acos':(math.acos,1,1),
                          'asin':(math.asin,1,1),
                          'atan':(math.atan,1,1),
                          'atan2':(math.atan2,2,2),
                          'cos':(math.cos,1,1),
                          'hypot':(math.hypot,2,10000),
                          'sin':(math.sin,1,1),
                          'tan':(math.tan,1,1),
                          'degrees':(math.degrees,1,1),
                          'radians':(math.radians,1,1),
                          'acosh':(math.acosh,1,1),
                          'asinh':(math.asinh,1,1),
                          'atanh':(math.atanh,1,1),
                          'cosh':(math.cosh,1,1),
                          'sinh':(math.sinh,1,1),
                          'tanh':(math.tanh,1,1),
                          'erf':(math.erf,1,1),
                          'erfc':(math.erfc,1,1),
                          'gamma':(math.gamma,1,1),
                          'lgamma':(math.lgamma,1,1),
                          'neg':(lambda x:-x,1,1),
                          'inputstring':(sinput,1,1),
                          'inputfloat':(flinput,1,1),
                          'list':(lambda x:ListType(x),1,100000000),
                          'contains':(lambda x,y:x.contains(y),2,2),
                          'notcontains':(lambda x,y:not x.contains(y),2,2),
                          'slice':(lambda x,y,z:x.slices(int(y),int(z)),3,3),
                          'fetch':(lambda x,y:x.fetches(int(y)),2,2),
                          'append':(lambda x,y:x.appends(y),2,2),
                          'len':(lambda x:len(x),1,1),
                          'not':(lambda x:not x,1,1)}
        if register is None:
            self.current_register = Register()
        else:
            self.current_register = register
        # Initiate register for variables
        self.SCRIPT =""" SLOTHcalc
PROGRAMMABLE CALCULATOR
Slow
Ludic
Obscolete
Technology
Has its place
by
Anthony Curtis Adler
OPERATORS = +,/,-,*,%(mod),^(power), ()
RELATIONS == != <= >= < >
FUNCTIONS abs,floor,fmod,frexp,gcd,remainder,
trunc,exp,expml,logn,logx,log1p,log2,log10,
power,sum,acos,asin,atan,atan2,cos,hypot,
sin,tan,degrees,radians,acost,asinh,atanh,cosh,
sing,tanh,erf,erfc,gamma,lgamma,neg
CONSTANTS pi, e, tau, inf, nan, True, False,
bTrue, bFalse (When entered as a string)
NEWCALC:name TO OPEN UP A NEW CALCULATOR
NEWPROGRAM:name TO WRITE A NEW PROGRAM
(The name 'main' is reserved for the default calculator)
IN THE CALCULATOR MODE:
Any alphanumeric phrase can serve as a variable.
To return an entry from the log, type @line@.
ALL to show the log
IN THE PROGRAMMABLE MODE:
SLOTH is basically a very basic basic.
COMMANDS INCLUDE:
PRINT expression
PRINTLINE expression
GOTO line#
WHILE conditionalexpression
ENDWHILE
IFSKIP conditionalexpression /If TRUE, then skip the next line
END
IS /ALTERNATIVE FORM
/OF VARIABLE ASSIGNMENT
/YOU can also use '='
SPECIAL FUNCTIONS
inputfloat('PROMPT')
inputstring('PROMPT')
ENVIRONMENT COMMANDS
line numbers can be entered explicitly or implicity
RUN:iterations to run a program for the first time.
/0 iterations for NO LIMIT
RERUN to run again without reinterpreting
CLEAR to clear program
ALL to show program
CALC:name to return to calc mode
TRACE to show the trace stack
DELETE:line to delete a line
EXAMPLES
1) HELLO WORLD
10 PRINT 'HELLO WORLD
2) CALCULATE FIBONACCI NUMBERS
10 PRINTLINE 'FIBONACCI'
20 LIMIT = inputfloat('LIMIT?')
30 OLD=0
40 NEW=1
50 COUNTER = 1
60 WHILE COUNTER<LIMIT
70 NEWER=OLD+NEW
80 OLD=NEW
90 NEW=NEWER
100 COUNTER=COUNTER+1
110 PRINT 'VALUE='
120 PRINTLINE NEW
130 ENDWHILE
140 PRINTLINE 'LIMIT'
150 PRINT LIMIT
MIT License
Copyright (c) 2020 Anthony Curtis Adler
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def calculate (self,phrase):
"""Core routine for parsing and evaluating phrase"""
def bracketed (phrase,bracketing='()'):
"""Returns TRUE if <phrase> is encompassed by a left bracket and a right bracket
at the same hierarchical level"""
level = 0
left_point = None
right_point = None
for count,char in enumerate(phrase):
if char == bracketing[0]:
if level == 0:
left_point = count
level+=1
if char == bracketing[1]:
level-=1
if level == 0:
right_point = count
if not (left_point is None) and (not right_point is None) and left_point == 0 and right_point == len(phrase)-1:
return True
return False
def quoted (phrase):
level = 0
foundchar = ''
left_point = None
right_point = None
for count,char in enumerate(phrase):
if char in ['"',"'"] and level == 0:
foundchar = char
left_point = count
level += 1
elif level == 1 and char == foundchar:
right_point = count
level += 1
if not (left_point is None) and (not right_point is None) and left_point == 0 and right_point == len(phrase)-1:
return True
return False
def is_function(phrase):
"""Tests to see if a phrase begins with a predefined function,
in which case it returns information about the iarity of function"""
for x in self.functions.keys():
if len(x) < len(phrase) and phrase[0:len(x)] == x:
if bracketed(phrase[len(x):]):
if self.functions[x][1]-1 <= phrase.count(',') <= self.functions[x][2]-1:
return x, self.functions[x][0], self.functions[x][2], phrase[len(x):]
else:
return False,False,False,False
def all_simple (phrase):
"""Tests if a phrase is a simple string representing an expression, rather than an operation"""
for x in phrase:
if (x not in self.operations and not (isinstance(x,(int,type(ListType()),float,bool) or (isinstance(x,str) and quoted(x)))) or self.current_register.contains(x)):
return False
return True
def parse (phrase):
COMPTERMS = ['==','>=','<=','!=','>','<',]
def contains_comp (x):
"""True is x contains any of the COMP Terms"""
for comp in COMPTERMS:
if comp in x:
return True
return False
"""Parses and analzes the phrase"""
if phrase in ['bTrue','bFalse','EmptyList']:
return {'bTrue':True,
'bFalse':False,
'EmptyList':ListType()}[phrase]
if isinstance(phrase,str):
if quoted(phrase):
return phrase
else:
try:
return float(phrase)
except:
pass
# If the phrase is a string
phrase = phrase.strip()
func_name, func, iarity, func_phrase = is_function(phrase)
# tests is it is function; otherwise the values are false.
if func_name:
if iarity == 1:
# If the function accepts one value
return func(parse(func_phrase))
if iarity == 2:
# Two values
func_phrase = func_phrase[1:-1]
term1,term2 = func_phrase.split(',')[0],func_phrase.split(',')[1]
return func(parse(term1),parse(term2))
if iarity == 3:
func_phrase = func_phrase[1:-1]
term1,term2, term3 = func_phrase.split(',')[0],func_phrase.split(',')[1],func_phrase.split(',')[2]
return func(parse(term1),parse(term2),parse(term3))
if iarity >3:
# A list of values
func_phrase = func_phrase[1:-1]
return func([parse(x) for x in func_phrase.split(',')])
elif phrase and phrase[0] == '-' and bracketed(phrase[1:]):
# Translates negative sign (as opposed to operators) into corresponding function
return -parse(phrase[2:-1])
elif bracketed(phrase):
# removes top-level bracket
phrase = phrase[1:-1]
return parse(phrase)
elif phrase in self.operations:
return phrase
elif self.current_register.contains(phrase):
# for variables and constants
return self.current_register.get(phrase)
elif contains_comp(phrase) and '(' not in phrase and ')' not in phrase:
return calc.computer.get(phrase)
elif phrase and phrase[0]=='@' and phrase[-1]=='@':
# to retrieve values from the log
index = int(parse(phrase[1:-1]))
if 0<= index <= len(self.lines):
return self.lines[index][0]
else:
phrase = list(phrase)
#phrase is converted to a list to allowing indexical assignments
operation_sequence = []
level = 0
inquotes = False
quote_form = ''
for counter, x in enumerate(phrase):
# Search for operators that are not enclosed in parantheses
if not inquotes and x in ['"',"'"]:
inquotes = True
quote_form = x
elif inquotes and x == quote_form:
if counter < len(phrase)-1:
if phrase[counter+1] in ['+']:
phrase[counter+1] = '#+#'
if x == '(':
level +=1
if x == ')':
level -=1
if level == 0:
if counter<len(phrase)-1:
if phrase[counter+1] in self.operations:
# If an operator is found, surround it with pound signs
phrase[counter+1] = '#'+phrase[counter+1]+'#'
if phrase[counter+2] in self.operations:
phrase[counter+2] = '~'
# For a minus sign that is not an operator
phrase = ''.join(phrase).replace('~','-').split('#')
# Split the phrase into expressions linked by operators
newphrase = []
for x in phrase:
# a clumsy way to distinction between numerical values, and string operators
try:
newphrase.append(float(x))
except:
newphrase.append(x)
phrase = newphrase
return parse(phrase)
if isinstance(phrase,list):
# If the phrase has already been parsed into a list
if len(phrase) == 1:
return (parse(phrase[0]))
if all_simple(phrase):
# If every value in the phrase list has been reduced to
# a numerical value or an operator
for operation in self.operations:
#In order to preserve the correct order of operations,
#the operations are analyzed in succession
while operation in phrase:
#This repeat as long as the operation is in the phrase,
#since with each pass it only "reduced"
#expression/operator/expression triplet
newlist = [] # For the result of each pass through the list.
lastvalue = None
counter = 0
stop = False
while counter < len(phrase) and not stop:
if counter < len(phrase)-2:
a = phrase[counter]
op = phrase[counter+1]
b = phrase[counter+2]
#take a triplet of values from the list
if op == operation:
# if an operator is found, reduced the triplet, and
# then add the reduced value, together with the rest
# of the list to the
if operation == '*':
c = a*b
elif operation == '+':
if isinstance(a,str) and isinstance(b,str):
c = a[0:-1]+b[1:]
else:
c = a+b
elif operation == '/':
c = a/b
elif operation == '^':
c = a**b
elif operation == '%':
c = a % b
elif operation == '-':
c = a - b
newlist.append(c)
newlist += phrase[counter+3:]
stop = True
else:
newlist.append(a)
else:
# otherwise, just add the text value to the new list
newlist.append(phrase[counter])
counter +=1
phrase = newlist
else:
# if the list is not yet simple, return a new list after parsing each element.
phrase = [parse(x) for x in phrase]
return parse(phrase)
if isinstance(phrase,(int,float,type(ListType()),bool)):
# if a numerical value, stop the recursion
return phrase
return parse(phrase)
def show_line (self,counter,x):
return str(counter)+':'+str(x[1])+(20-len(str(x[1])))*' '+'|'+x[0]
def show_all (self):
#Shows all the lines in the log
for counter, x in enumerate(self.lines):
print(self.show_line(counter,x))
def clear (self):
self.lines = [('',0)]
self.counter = 0
def delete(self,x):
if '-' not in x:
x = int(x)
if 0 <= x < len(self.lines):
indexes = [x]
else:
x_from, x_to = int(x.split('-')[0]),int(x.split('-')[1])
if 0 <= x_from < x_to <= len(self.lines):
indexes = range(x_from,x_to+1)
if indexes:
for ind in indexes:
print('DELETED/',self.show_line(ind,self.lines[ind]))
self.lines[ind] = None
self.lines = [x for x in self.lines if x]
def new_calc(self,x):
self.scriptname = x
if x not in self.scripts:
self.scripts[x] = (0,[('',0)])
self.counter = self.scripts[x][0]
self.lines = self.scripts[x][1]
self.show_counter = 0
self.programming = False
def new_program(self,x):
self.scriptname = x
if x not in self.scripts:
self.scripts[x] = (0,[('',0)])
self.counter = self.scripts[x][0]
self.lines = self.scripts[x][1]
self.show_counter = 0
self.programming = True
self.entered_lines = {int(x.split(']')) for x in self.lines if x[0].split('[')[0].isnumeric()}
class Programmable(Calculator):
    """Interactive REPL wrapping Calculator with a line-numbered SLOTH
    program editor (programming mode) and a plain calculator mode."""
    def console (self):
        # The console operating the calculator
        self.computer = Program()
        # zero-argument commands (calculator mode)
        self.commands = {'ALL':self.show_all,
                         'CLEAR':self.clear}
        # one-argument commands, invoked as NAME:ARG
        self.one_commands = {'DELETE':self.delete,
                             'NEWCALC':self.new_calc,
                             'NEWPROGRAM':self.new_program}
        self.programming = False
        self.show_counter = 0
        self.scripts = {}
        self.script = ''
        self.scripts['main'] = (0,[('',0)])
        self.scriptname = 'main'
        self.lines = self.scripts[self.scriptname][1]
        self.counter = self.scripts[self.scriptname][0]
        print(self.SCRIPT)
        self.entered_lines = set()
        while True:
            if self.programming:
                newnumber = 0
                def get_line (line_no):
                    # Map a SLOTH line number to its index in self.lines.
                    for counter, x in enumerate(self.lines):
                        if x[0].split(' ')[0].isnumeric() and int(x[0].split(' ')[0]) == line_no:
                            return counter
                    return self.counter
                query = input(self.scriptname+'[')
                if query.strip().split(':')[0] == 'RUN':
                    # RUN[:iterations] -- interpret the program from scratch
                    if ':' in query and query.strip().split(':')[1].isnumeric():
                        break_off = int(query.strip().split(':')[1])
                    else:
                        break_off = 10000
                    script = '\n'.join(x[0] for x in self.lines)
                    self.computer.interpret(script,break_off=break_off)
                    print()
                elif query.strip() == 'RERUN':
                    # re-run without re-interpreting
                    if self.computer:
                        self.computer.run()
                    print()
                elif query.strip() == 'ALL':
                    self.script = '\n'.join(x[0] for x in self.lines)
                    print(self.script)
                    print()
                elif (':' in query.strip()
                      and query.strip().split(':')[0]=='DELETE'):
                    # DELETE:N removes the program line numbered N
                    if int(query.strip().split(':')[1]) in self.entered_lines:
                        if query.strip().split(':')[1].isnumeric():
                            to_delete = get_line (int(query.strip().split(':')[1]))
                            self.lines.pop(to_delete)
                            self.entered_lines.remove(int(query.strip().split(':')[1]))
                elif query.strip() == 'TRACE':
                    print('\n'.join(self.computer.trace_stack.stack))
                elif query.strip() == 'CLEAR':
                    self.lines = [('',0)]
                    self.entered_lines = set()
                    self.counter = 0
                elif len(query.strip())>3 and query.strip()[0:5] == 'CALC':
                    # CALC[:name] -- back to calculator mode
                    if ':' in query.strip():
                        self.new_calc(query.strip().split(':')[1])
                    else:
                        self.new_calc('main')
                else:
                    # otherwise treat the input as a program line
                    if query and not query.split(' ')[0].isnumeric() and query[0] != '@':
                        # implicit numbering: next multiple of ten
                        if self.entered_lines:
                            newnumber= int(max(self.entered_lines)/10)*10+10
                        else:
                            newnumber = 10
                        query=str(newnumber)+' '+query
                    if ' ' in query and query.split(' ')[0].isnumeric():
                        command_line,command = int(query.split(' ')[0]),' '.join(query.split(' ')[1:])
                        print(command_line,'/',command)
                        if command_line in self.entered_lines:
                            # overwrite an existing line
                            self.counter = get_line (command_line)
                            self.lines[self.counter] = (query,0)
                        elif self.entered_lines and command_line < min(self.entered_lines):
                            # prepend before the lowest-numbered line
                            self.lines = [(query,0)]+self.lines
                        elif self.entered_lines and command_line < max(self.entered_lines):
                            # insert between existing lines
                            less_than = sorted([x for x in self.entered_lines if x<command_line])
                            if less_than:
                                left_bound= len(less_than)
                                self.lines = self.lines[0:left_bound+1] + [(query,0)] + self.lines[left_bound+1:]
                        else:
                            # append at the end
                            self.entered_lines.add(command_line)
                            self.counter += 1
                            self.lines.append((query,0))
                        print(str(self.counter)+'|'+' '+query)
                    elif query and query[0]=='@':
                        # @expression -- evaluate immediately without storing
                        query=query[1:]
                        try:
                            print(self.calculate(query))
                        except:
                            print('INVALID INPUT')
                    if newnumber:
                        self.entered_lines.add(newnumber)
            else:
                query = input(self.scriptname+'@')
                if '---' in query:
                    query = query.replace('---','-')
                    #eliminate redundant minus signs
                if query in self.commands:
                    self.commands[query]()
                    #for system commands
                elif query.split(':')[0] in self.one_commands:
                    if ':' in query:
                        self.one_commands[query.split(':')[0]](query.split(':')[1])
                elif query == 'QUIT':
                    #to quit
                    break
                elif query[0:5]=='GOTO:':
                    # jump the log cursor to an absolute index
                    line = int(query.split(':')[1])
                    if 0 <= line <len(self.lines):
                        self.counter = line
                    else:
                        print('INVALID INPUT')
                else:
                    if query:
                        self.counter +=1
                        if query[0] in self.operations:
                            query = str(self.lines[self.counter-1][1])+query
                            # if no initial value, perform operation on previous value
                        if ('=' in query and '==' not in query) or ('==' in query and '=' in query.split('==')[0]):
                            # To define a variable (=subject)
                            subject, predicate = query.split('=')[0],'='.join(query.split('=')[1:])
                            subject = subject.strip()
                        else:
                            # If not variable = subject
                            predicate = query
                            subject = ''
                        try:
                            value = self.calculate(predicate)
                        except:
                            try:
                                value = self.computer.get(predicate)
                                # To evaluate comparative formulations
                            except:
                                value = 'ERROR'
                        if subject:
                            # if a variable has been given, define its value
                            if value != 'ERROR':
                                # to make sure that an ERROR message is not recorded as a value
                                self.current_register.set(subject,value)
                        else:
                            # If the counter is not yet at the end of the log
                            if self.counter < len(self.lines):
                                self.lines[self.counter] = (query,value)
                            else:
                                # Otherwise just append query and result-value to log
                                self.lines.append((query,value))
                        print(' '+subject+' '*(24-len(subject))+'|',predicate,'=',value)
                    else:
                        # empty input pages through the log one entry at a time
                        if self.show_counter < len(self.lines)-1:
                            self.show_counter+=1
                            print (self.show_counter,':',
                                   str(self.lines[self.show_counter][1])
                                   +(20-len(str(self.lines[self.show_counter][1])))*' ',
                                   '|',self.lines[self.show_counter][0])
                        if self.show_counter == len(self.lines)-1:
                            self.show_counter = 0
                    # keep the cursor clamped to the end of the log
                    if self.counter > len(self.lines)-1:
                        self.counter = len(self.lines)-1
if __name__ == '__main__':
    # Launch the interactive console. NOTE(review): Calculator.calculate
    # references the global name 'calc' for comparison queries, so this
    # variable must keep the name 'calc'.
    calc = Programmable(register=Register())
    calc.console()
|
# -*- coding: utf-8 -*-
"""
Tools for drawing Python class inherit and MRO graphs with graphviz.
Copyright (c) 2017-2018 Red Liu <lli_njupt@163.com>
Released under the MIT licence.
"""
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from enum import Enum
from graphviz import Digraph
import inspect
'''
Every node has a unique type in a map graph and every type defined a
different style like colour and shape.
root node: Always the map's root; can be any kind of object
cls: class
obj: instance
func: function
str: string
tuple: tuple
dict: dictionary
list: list
number: int float bool and complex
clsfunc: class function
method: object method
abstract: abstract
generator: generator
traceback: traceback
descriptor: method and data descriptor
other: anything not included in the list above
'''
# One enum member per node category drawn in the map graphs.
NodeType = Enum('NodeType', ['root', 'cls', 'obj', 'func', 'str', 'number',
                             'tuple', 'list', 'dict', 'clsfunc', 'method',
                             'abstract', 'generator', 'traceback',
                             'descriptor', 'other'])
def isnumber(obj):
    '''Return true if obj is int, float, bool or complex'''
    # A single isinstance with a tuple replaces the chained or-tests
    # (bool is an int subclass but is listed explicitly for clarity).
    return isinstance(obj, (int, float, bool, complex))
def isdescriptor(obj):
    '''True for method descriptors and data descriptors (e.g. properties).'''
    return inspect.ismethoddescriptor(obj) or inspect.isdatadescriptor(obj)
def isother(obj):
    '''Fallback category: true only when no specific predicate in
    type_isdict matches and obj is neither a builtin nor a module.'''
    for category, predicate in type_isdict.items():
        if category == NodeType.other:
            continue
        if predicate(obj):
            return False
    if inspect.isbuiltin(obj) or inspect.ismodule(obj):
        return False
    return True
# Predicate table: maps each NodeType to the tester deciding membership
# in that category. NodeType.root has no entry (it is assigned, not detected).
type_isdict = { NodeType.cls : inspect.isclass,
                #NodeType.obj has function below with different parameters
                NodeType.func : inspect.isfunction,
                NodeType.str : lambda obj: isinstance(obj, str),
                NodeType.number : isnumber,
                NodeType.tuple : lambda obj: isinstance(obj, tuple),
                NodeType.list : lambda obj: isinstance(obj, list),
                NodeType.dict : lambda obj: isinstance(obj, dict),
                # class functions share the plain-function test
                NodeType.clsfunc : inspect.isfunction,
                NodeType.method : inspect.ismethod,
                NodeType.abstract : inspect.isabstract,
                NodeType.generator : inspect.isgenerator,
                NodeType.traceback : inspect.istraceback,
                NodeType.descriptor: isdescriptor,
                NodeType.other: isother,
                }
class ObjMap():
    """Walks an object's members to build graphviz node/edge descriptions."""
    def __init__(self, obj):
        """Record the root object plus its module name, display name and type string."""
        self.root_node = obj
        # Best effort: resolve the module the root object belongs to.
        try:
            self.root_module = inspect.getmodule(obj).__name__
        except:
            self.root_module = ""
        # Best effort: resolve a printable name for the root object itself.
        try:
            self.root_node_name = obj.__name__
        except:
            # Fall back on repr text like '<sample.A object at 0xb70f418c>'.
            text = str(obj)
            if text.startswith('<'):
                text = text.split()[0][1:] + ".instance"
            self.root_node_name = text
        # Derive a short type string, e.g. 1 -> 'int'; report 'type' as 'class'.
        kind = str(type(obj)).split("'")[1]
        self.root_type = "class" if kind == "type" else kind
def isin_root_module(self, clsinfo):
''' clsinfo is in the same module as this class '''
''' if root node don't have module info, be true arbitrary'''
if len(self.root_module) == 0:
return True
try:
if clsinfo.__module__ != self.root_module:
return False
except:
pass
return True
def __objs_predicate(self, obj, predicate):
''' generate all nodes with predicate '''
objs = []
for name,type in inspect.getmembers(obj, predicate):
if(self.isin_root_module(getattr(obj, name))):
objs.append(getattr(obj, name))
return objs
def __obj_nodes_predicate(self, obj, predicate):
''' generate all nodes with predicate '''
nodes = []
for name,type in inspect.getmembers(obj, predicate):
if(self.isin_root_module(getattr(obj, name))):
nodes.append(name)
return nodes
def __obj_edges_predicate(self, obj, predicate):
''' generate all edges from obj to nodes with predicate '''
nodes = self.__obj_nodes_predicate(obj, predicate)
if len(nodes) == 0:
return []
edges = []
for i in nodes:
edges.append([obj.__name__, i])
return edges
def root_nodes(self, obj):
''' create root node, a node is as ['namestr'] '''
name = self.root_node_name
if len(self.root_module) and not inspect.ismodule(obj):
name = self.root_module + '.' + self.root_node_name
return [name]
    def class_nodes(self, obj):
        # Names of class-valued members (restricted to the root module).
        nodes = self.__obj_nodes_predicate(obj, type_isdict[NodeType.cls])
        return nodes
    def func_nodes(self, obj):
        # Names of plain-function members.
        return self.__obj_nodes_predicate(obj, type_isdict[NodeType.func])
    def tuple_nodes(self, obj):
        # Names of tuple-valued members.
        return self.__obj_nodes_predicate(obj, type_isdict[NodeType.tuple])
    def list_nodes(self, obj):
        # Names of list-valued members.
        return self.__obj_nodes_predicate(obj, type_isdict[NodeType.list])
    def dict_nodes(self, obj):
        # Names of dict-valued members.
        return self.__obj_nodes_predicate(obj, type_isdict[NodeType.dict])
    def str_nodes(self, obj):
        # Names of string-valued members.
        return self.__obj_nodes_predicate(obj, type_isdict[NodeType.str])
    def number_nodes(self, obj):
        # Names of numeric members (int/float/bool/complex).
        return self.__obj_nodes_predicate(obj, type_isdict[NodeType.number])
    def abstract_nodes(self, obj):
        # Names of abstract-class members.
        return self.__obj_nodes_predicate(obj, type_isdict[NodeType.abstract])
    def generator_nodes(self, obj):
        # Names of generator members.
        return self.__obj_nodes_predicate(obj, type_isdict[NodeType.generator])
    def traceback_nodes(self, obj):
        # Names of traceback members.
        return self.__obj_nodes_predicate(obj, type_isdict[NodeType.traceback])
@staticmethod
def isprivate_name(name):
return name.startswith("__")
def descriptor_nodes(self, obj):
nodes = []
for name,type in inspect.getmembers(obj, type_isdict[NodeType.descriptor]):
if not self.isprivate_name(name):
nodes.append(name)
return nodes
@staticmethod
def __iscls_instance(obj, classes, name):
var = getattr(obj, name)
for i in classes:
if isinstance(var, i):
return True
return False
    def obj_classes(self, obj):
        # Class objects (the values, not names) found on obj within the root module.
        return self.__objs_predicate(obj, type_isdict[NodeType.cls])
    def obj_nodes(self, obj):
        # Names of non-private members that are INSTANCES of classes defined on obj.
        nodes = []
        for name,typ in inspect.getmembers(obj):
            if self.isprivate_name(name):
                continue
            if self.__iscls_instance(obj, self.obj_classes(obj), name):
                nodes.append(name)
        return nodes
    def other_nodes(self, obj):
        # Names matching the fallback 'other' category, excluding instances of
        # classes defined on obj (those are reported by obj_nodes instead).
        nodes = []
        for name,typ in inspect.getmembers(obj, type_isdict[NodeType.other]):
            if name.startswith('__'):
                continue
            ''' exclude objs '''
            if not self.__iscls_instance(obj, self.obj_classes(obj), name):
                nodes.append(name)
        return nodes
# trim all methods if name same as the cls
def objmethod_nodes(self, obj):
nodes = []
for name,type in inspect.getmembers(obj, type_isdict[NodeType.method]):
nodes.append(name)
return nodes
def objmethod_filter_nodes(self, obj):
clsnodes = []
nodes = []
for name,type in inspect.getmembers(obj.__class__, type_isdict[NodeType.func]):
clsnodes.append(name)
for name,type in inspect.getmembers(obj, type_isdict[NodeType.method]):
if name not in clsnodes:
nodes.append(name)
return nodes
''' html table node styles '''
method_style = {"title" : "methods", "align" : "left", "color" : "#bebada",
"get_nodes" : objmethod_nodes}
function_style = {"title" : "functions", "align" : "left", "color" : "#bebada",
"get_nodes" : func_nodes}
incls_method_style = method_style.copy()
incls_method_style['get_nodes'] = objmethod_filter_nodes
hnode_styles = {
# * means this style will be updated dynamically
NodeType.root: {"title" : "*", "align" : "center", "color" : "#8dd3c7",
"get_nodes" : root_nodes},
NodeType.cls: {"title" : "classes", "align" : "left", "color" : "SandyBrown",
"get_nodes" : class_nodes,
NodeType.clsfunc: {"title" : "functions", "align" : "left",
"color" : "#bebada",
"get_nodes" : func_nodes},
NodeType.descriptor:{"title" : "descriptors", "align" : "left",
"color" : "YellowGreen",
"get_nodes" : descriptor_nodes},
},
NodeType.obj: {"title" : "instances", "align" : "left", "color" : "SandyBrown",
"get_nodes" : obj_nodes,
NodeType.method: incls_method_style,
NodeType.func : function_style,
},
NodeType.method: method_style,
NodeType.func: function_style,
NodeType.str: {"title" : "strings", "align" : "left", "color" : "Gainsboro",
"get_nodes" : str_nodes},
NodeType.number: {"title" : "numbers", "align" : "left", "color" : "Gainsboro",
"get_nodes" : number_nodes},
NodeType.tuple: {"title" : "tuples", "align" : "left", "color" : "BurlyWood",
"get_nodes" : tuple_nodes},
NodeType.list: {"title" : "lists", "align" : "left", "color" : "BurlyWood",
"get_nodes" : list_nodes},
NodeType.dict: {"title" : "dicts", "align" : "left", "color" : "BurlyWood",
"get_nodes" : dict_nodes},
NodeType.abstract: {"title" : "abstracts", "align" : "left", "color" : "Salmon",
"get_nodes" : abstract_nodes},
NodeType.generator:{"title" : "generators", "align" : "left", "color" : "Salmon",
"get_nodes" : generator_nodes},
NodeType.traceback:{"title" : "tracebacks", "align" : "left", "color" : "red",
"get_nodes" : traceback_nodes},
NodeType.other: {"title" : "others", "align" : "left", "color" : "Gainsboro",
"get_nodes" : other_nodes},
}
    # 'root.' prefix avoid attrnames conflict, so the name is for node
    def root_type_title_update(self, obj):
        # Retitle the root style as "root.<root_type>" before drawing.
        # (obj is accepted but unused — kept for the call-site signature.)
        self.hnode_styles[NodeType.root]['title'] = '.'.join(["root", self.root_type])
# remove it while show it in table
@staticmethod
def root_type_title_remove_prefix(title):
if title.startswith("root."):
return title[title.find('.') + 1:]
return title
    @classmethod
    def label_htab_create(cls, nodes, title, align="center", color="SandyBrown"):
        """Build a Graphviz HTML-like <table> label: one colored title row
        followed by one row per entry in *nodes*.  Each node row declares a
        port named after the node so edges can attach to it."""
        tab_header = '''<<table border="0" cellborder="1" cellspacing="0">\n'''
        tab_tail = "</table>>\n"
        # Titles may carry the synthetic "root." prefix; drop it for display.
        title = cls.root_type_title_remove_prefix(title)
        th = '\t<tr><td bgcolor="%s" style="rounded"><b><i>%s</i></b></td></tr>\n'\
                % (color, title)
        trs = ""
        for i in nodes:
            trs += '''\t<tr><td port="%s" align="%s">%s</td></tr>\n''' % (i, align,i)
        return tab_header + th + trs + tab_tail
def dot_add_htab_node(self, dot, obj, nodetype, style=None, title=None):
if style == None:
style = self.hnode_styles[nodetype]
get_nodes_func = style['get_nodes']
if not get_nodes_func:
print("can't get nodes function for", nodetype)
return False
if nodetype == NodeType.root: # update the root's title
self.root_type_title_update(obj)
if title == None:
title = style['title']
nodes = get_nodes_func(self, obj)
if len(nodes):
lab = self.label_htab_create(nodes, title, style['align'],
style['color'])
dot.node(title, label=lab, shape="plaintext")
for i in NodeType:
if not style.__contains__(i):
continue
for node in nodes:
subtitle = node + "." + style[i]['title']
if self.dot_add_htab_node(dot, getattr(obj, node), i,
style[i], subtitle):
dot.edge(':'.join([title, node]), subtitle,
color=style[i]['color'])
return True
return False
    def dot_add_obj_nodes(self, dot, obj):
        """Draw every styled table node for *obj*, connect the non-root
        tables to the root table, then add dashed class->instance edges."""
        handled_nodes = []
        for i in self.hnode_styles:
            if self.dot_add_htab_node(dot, obj, i):
                handled_nodes.append(i)
        for i in handled_nodes:
            if i == NodeType.root:
                continue
            dot.edge(self.hnode_styles[NodeType.root]['title'],
                    self.hnode_styles[i]['title'],
                    color=self.hnode_styles[i]['color'])
        # Local helper: [class_name, instance_name] pairs for each instance
        # member of inobj that is an instance of one of its class members.
        # NOTE(review): reconstructed as a nested function — the 'return'
        # below only parses inside a function body; confirm the original
        # indentation against version control.
        def clsobj_relation_edges(self, inobj):
            edges = []
            objnodes = self.obj_nodes(inobj)
            clsnodes = self.class_nodes(inobj)
            for obj in objnodes:
                for cls in clsnodes:
                    if isinstance(getattr(inobj, obj), getattr(inobj, cls)):
                        edges.append([cls, obj])
            return edges
        # at last add instances and cls relationship
        edges = clsobj_relation_edges(self, obj)
        if not len(edges):
            return
        dot.attr('edge', style="dashed", color=self.hnode_styles[NodeType.obj]['color'])
        objtitle = self.hnode_styles[NodeType.obj]['title']
        clstitle = self.hnode_styles[NodeType.cls]['title']
        for i in edges:
            dot.edge(':'.join([clstitle, i[0]]), ':'.join([objtitle, i[1]]))
    # Rank directions: "TB", "LR", "BT", "RL"
    # splines: "spline", "ortho", "polyline", "curved", "line"
    # reference to' https://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:splines'
    # 'polyline' is better when there're many lines between nodes
    def objmap_create(self, filename="obj.gv", format="png", rankdir="TB", splines="spline"):
        """Render the object map rooted at self.root_node: writes *filename*
        (Graphviz source) and a rendered image in *format*."""
        dot = Digraph('structs', node_attr={'shape': 'record'})
        dot.attr(rankdir=rankdir)
        dot.attr(splines=splines)
        dot.attr(compound='true')
        #dot.attr(concentrate='true')
        self.dot_add_obj_nodes(dot, self.root_node)
        dot.render(filename, format=format, view=False)
        dot.save()
class StackMap():
    """Render a Python call stack as a Graphviz HTML-table image."""

    @staticmethod
    def label_stacktab_create(stack, align="left", color="SandyBrown"):
        """Build an HTML-like <table> label from inspect.stack()-style
        entries.  Columns: running row number, source file, line number,
        function name, and the entry's own index field."""
        header_cell = '<td bgcolor="%s"><b><i>%s</i></b></td>'
        row_cell = '<td align="%s">%s</td>'
        columns = ("no", "file", "lineno", "function", "index")
        th = "\t<tr>" \
            + "".join(header_cell % (color, col) for col in columns) \
            + "</tr>\n"
        trs = ""
        for row_no, entry in enumerate(stack):
            frame, filename, lineno, funcname, lines, index = entry
            # Trim a leading "./" and angle brackets ("<module>") for display.
            if filename.startswith("./"):
                filename = filename[2:]
            funcname = funcname.strip('<>')
            values = (row_no, filename, lineno, funcname, index)
            trs += '\t<tr bgcolor="%s">' % color \
                + "".join(row_cell % (align, value) for value in values) \
                + "</tr>\n"
        return ('''<<table border="0" cellborder="1" cellspacing="0">\n'''
                + th + trs + "</table>>\n")
    # Rank directions: "TB", "LR", "BT", "RL"
    @classmethod
    def draw_stack(cls, stack, filename="stack.gv", format="png", rankdir="TB"):
        """Render *stack* to *filename* (Graphviz source) plus an image."""
        graph = Digraph('structs', node_attr={'shape': 'record'})
        graph.attr(rankdir=rankdir)
        label = cls.label_stacktab_create(stack)
        graph.node(filename, label=label, shape="plaintext")
        graph.render(filename, format=format, view=False)
        graph.save()
def test():
    # Smoke test: draw an object map for the bundled sample module and a
    # stack map for the current call stack.
    import sample.sample
    objmap = ObjMap(sample.sample)
    objmap.objmap_create()
    StackMap.draw_stack(inspect.stack())
if __name__ == "__main__":
test() |
import pytest
from pymoku.instruments import LaserLockBox
try:
from unittest.mock import patch, ANY
except ImportError:
from mock import patch, ANY
# Two rows of filter coefficients passed to set_custom_filter in
# test_set_custom_filter below.
filt_coeff = [[1.0, 1.0, 0.0, 0.0, 0.0, 0.0],
              [1.0, 1.0, 0.0, 0.0, 0.0, 0.0]]
@pytest.fixture
def dut(moku):
    """A LaserLockBox deployed on the mocked Moku, with the mock's call
    history cleared so each test asserts only its own register writes.

    _set_running is patched out during deployment (presumably to suppress
    frame-acquisition side effects — confirm against pymoku internals).
    """
    with patch(
            'pymoku._frame_instrument.'
            'FrameBasedInstrument._set_running'):
        i = LaserLockBox()
        moku.deploy_instrument(i)
        moku.reset_mock()
        return i
def test_set_defaults(dut, moku):
    '''
    set_defaults should result in a register write to the device.
    '''
    dut.set_defaults()
    moku._write_regs.assert_called_with(ANY)
def test_set_input_gain(dut, moku):
    '''
    set_input_gain should result in a register write to the device.
    '''
    dut.set_input_gain(0)
    moku._write_regs.assert_called_with(ANY)
def test_set_custom_filter(dut, moku):
    '''
    set_custom_filter with the module-level filt_coeff should write regs.
    '''
    dut.set_custom_filter(filt_coeff)
    moku._write_regs.assert_called_with(ANY)
def test_set_output_range(dut, moku):
    '''
    set_output_range should result in a register write to the device.
    '''
    dut.set_output_range(1, 0.1, 0.1)
    moku._write_regs.assert_called_with(ANY)
def test_set_offsets(dut, moku):
    '''
    set_offsets should result in a register write to the device.
    '''
    dut.set_offsets('out1', 1.0)
    moku._write_regs.assert_called_with(ANY)
def test_set_pid_by_gain(dut, moku):
    '''
    set_pid_by_gain should result in a register write to the device.
    '''
    dut.set_pid_by_gain(1, 1, 1, 0, 0, None, None, True)
    moku._write_regs.assert_called_with(ANY)
def test_set_pid_by_freq(dut, moku):
    '''
    set_pid_by_freq should result in a register write to the device.
    '''
    dut.set_pid_by_freq(1, 1, None, None, None, None, True)
    moku._write_regs.assert_called_with(ANY)
def test_set_pid_enables(dut, moku):
    '''
    set_pid_enables should result in a register write to the device.
    '''
    dut.set_pid_enables(1, True)
    moku._write_regs.assert_called_with(ANY)
def test_set_output_enables(dut, moku):
    '''
    set_output_enables should result in a register write to the device.
    '''
    dut.set_output_enables(1, True)
    moku._write_regs.assert_called_with(ANY)
def test_set_channel_pid_enables(dut, moku):
    '''
    set_channel_pid_enables should result in a register write to the device.
    '''
    dut.set_channel_pid_enables(1, True)
    moku._write_regs.assert_called_with(ANY)
def test_set_local_oscillator(dut, moku):
    '''
    set_local_oscillator should result in a register write to the device.
    '''
    dut.set_local_oscillator(0, 0, 'internal', True)
    moku._write_regs.assert_called_with(ANY)
def test_set_aux_sine(dut, moku):
    '''
    set_aux_sine should result in a register write to the device.
    '''
    dut.set_aux_sine(2, 0, 0, False, 'out2')
    moku._write_regs.assert_called_with(ANY)
def test_set_scan(dut, moku):
    '''
    set_scan should result in a register write to the device.
    '''
    dut.set_scan(2, 0, 0, 'triangle', 'out1')
    moku._write_regs.assert_called_with(ANY)
def test_set_trigger(dut, moku):
    '''
    set_trigger should result in a register write to the device.
    '''
    dut.set_trigger('in1', 'rising', 1.0,
                    None, None, 10e-2, False, 'auto', False)
    moku._write_regs.assert_called_with(ANY)
def test_set_monitor(dut, moku):
    '''
    set_monitor should result in a register write to the device.
    '''
    dut.set_monitor('a', 'pid_fast')
    moku._write_regs.assert_called_with(ANY)
@pytest.mark.parametrize('attr, value', [
    ('_fast_scale', 1.0 / 2 ** 14),
    ('_slow_scale', 1.0 / 2 ** 14),
    ('_aux_scale', 1 / 2 ** 14),
    ('scan_amplitude', 1 / 2 ** 14),
    ('fast_scan_enable', 1),
    ('slow_scan_enable', 1),
    ('lo_phase_offset', 1),
    ('aux_phase_offset', 1),
    ('fast_offset', 1 / 2 ** 15),
    ('output_offset_ch1', 1 / 2 ** 15),
    ('output_offset_ch2', 1 / 2 ** 15),
    ('monitor_select0', 1),
    ('monitor_select1', 1),
    ('input_gain_select', 1),
    ('MuxLOPhase', 1),
    ('MuxLOSignal', 1),
    ('MuxAuxPhase', 1),
    ('trig_aux', 1),
    ('cond_trig', 1),
    ('cliprange_lower_ch1', 1 / 2 ** 15),
    ('cliprange_upper_ch1', 1 / 2 ** 15),
    ('cliprange_lower_ch2', 1 / 2 ** 15),
    ('cliprange_upper_ch2', 1 / 2 ** 15),
    ('fast_aux_enable', 1),
    ('slow_aux_enable', 1),
    ('fast_channel_en', 1),
    ('slow_channel_en', 1),
    ('out1_en', 1),
    ('out2_en', 1),
    ('input1_light', 1),
    ('input2_light', 1),
    ])
def test_attributes(dut, moku, attr, value):
    '''
    Setting each register-backed attribute then committing should result
    in a register write to the device.
    '''
    setattr(dut, attr, value)
    dut.commit()
    moku._write_regs.assert_called_with(ANY)
|
# Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from azure.mgmt.storage.models import StorageAccount
from azure_common import BaseTest, DEFAULT_SUBSCRIPTION_ID
from c7n_azure.constants import FUNCTION_EVENT_TRIGGER_MODE, FUNCTION_TIME_TRIGGER_MODE
from c7n_azure.policy import AzureEventGridMode, AzureFunctionMode
from mock import mock
class AzurePolicyModeTest(BaseTest):
    """Tests for Azure Functions policy modes: schema validation of the
    event/periodic trigger modes and derivation of function-app
    provisioning parameters (service plan, storage account, app insights)
    from policy data."""

    def setUp(self):
        super(AzurePolicyModeTest, self).setUp()
    def test_azure_function_event_mode_schema_validation(self):
        # An event-triggered mode with full provision-options validates.
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-azure-serverless-mode',
                'resource': 'azure.vm',
                'mode':
                    {'type': FUNCTION_EVENT_TRIGGER_MODE,
                     'events': ['VmWrite'],
                     'provision-options': {
                         'servicePlan': {
                             'name': 'test-cloud-custodian',
                             'location': 'eastus',
                             'resourceGroupName': 'test'},
                         'storageAccount': {
                             'name': 'testschemaname'
                         },
                         'appInsights': {
                             'name': 'testschemaname'
                         }
                     }}
            })
            self.assertTrue(p)
    def test_azure_function_periodic_mode_schema_validation(self):
        # A time-triggered mode with a cron schedule validates.
        # NOTE(review): '0 * /5 * * * *' contains a space in '* /5' —
        # likely intended as '*/5'; confirm against the schema docs.
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-azure-serverless-mode',
                'resource': 'azure.vm',
                'mode':
                    {'type': FUNCTION_TIME_TRIGGER_MODE,
                     'schedule': '0 * /5 * * * *',
                     'provision-options': {
                         'servicePlan': {
                             'name': 'test-cloud-custodian',
                             'location': 'eastus',
                             'resourceGroupName': 'test'},
                         'storageAccount': {
                             'name': 'testschemaname'
                         },
                         'appInsights': {
                             'name': 'testschemaname'
                         }
                     }}
            })
            self.assertTrue(p)
    def test_init_azure_function_mode_with_service_plan(self):
        # Explicit service plan: its name/location/resource group propagate
        # to app insights and the storage account.
        p = self.load_policy({
            'name': 'test-azure-serverless-mode',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events': ['VmWrite'],
                 'provision-options': {
                     'servicePlan': {
                         'name': 'test-cloud-custodian',
                         'location': 'eastus',
                         'resourceGroupName': 'test'}
                 }}
        })
        function_mode = AzureFunctionMode(p)
        params = function_mode.get_function_app_params()
        self.assertEqual(function_mode.policy_name, p.data['name'])
        self.assertTrue(params.storage_account['name'].startswith('custodian'))
        self.assertEqual(params.app_insights['name'], 'test-cloud-custodian')
        self.assertEqual(params.service_plan['name'], "test-cloud-custodian")
        self.assertEqual(params.service_plan['location'], "eastus")
        self.assertEqual(params.app_insights['location'], "eastus")
        self.assertEqual(params.storage_account['location'], "eastus")
        self.assertEqual(params.storage_account['resource_group_name'], 'test')
        self.assertEqual(params.app_insights['resource_group_name'], 'test')
        self.assertEqual(params.service_plan['resource_group_name'], "test")
        self.assertTrue(params.function_app_name.startswith('test-azure-serverless-mode-'))
    def test_init_azure_function_mode_no_service_plan_name(self):
        # No provision-options at all: every resource falls back to the
        # 'cloud-custodian' defaults.
        p = self.load_policy({
            'name': 'test-azure-serverless-mode',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events': ['VmWrite']}
        })
        function_mode = AzureFunctionMode(p)
        params = function_mode.get_function_app_params()
        self.assertEqual(function_mode.policy_name, p.data['name'])
        self.assertEqual(params.service_plan['name'], "cloud-custodian")
        self.assertEqual(params.service_plan['location'], "eastus")
        self.assertEqual(params.service_plan['resource_group_name'], "cloud-custodian")
        self.assertEqual(params.app_insights['name'], 'cloud-custodian')
        self.assertEqual(params.app_insights['location'], "eastus")
        self.assertEqual(params.app_insights['resource_group_name'], 'cloud-custodian')
        self.assertTrue(params.storage_account['name'].startswith('custodian'))
        self.assertEqual(params.storage_account['location'], "eastus")
        self.assertEqual(params.storage_account['resource_group_name'], 'cloud-custodian')
        self.assertTrue(params.function_app_name.startswith('test-azure-serverless-mode-'))
    def test_init_azure_function_mode_invalid_policy_name(self):
        # Names longer than the 60-character limit are rejected.
        p = self.load_policy({
            'name': 'this-policy-name-is-going-to-be-too-long-since-the-maximum-size-is-60',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events': ['VmWrite']}
        })
        function_mode = AzureFunctionMode(p)
        with self.assertRaises(ValueError):
            function_mode.get_function_app_params()
    def test_init_azure_function_mode_invalid_characters_in_policy_name(self):
        # Underscores in the policy name are normalized to hyphens in the
        # generated function-app name.
        p = self.load_policy({
            'name': 'invalid_policy_name1',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events': ['VmWrite']}
        })
        function_mode = AzureFunctionMode(p)
        params = function_mode.get_function_app_params()
        self.assertRegexpMatches(params.function_app_name, "invalid-policy-name1-[a-zA-Z0-9]+")
    def test_init_azure_function_mode_with_resource_ids(self):
        # Full resource IDs: name and resource group are parsed back out
        # of each ID.
        ai_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups' \
                '/testrg/providers/microsoft.insights/components/testai'
        sp_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups' \
                '/testrg/providers/Microsoft.Web/serverFarms/testsp'
        sa_id = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups' \
                '/testrg/providers/Microsoft.Storage/storageAccounts/testsa'
        p = self.load_policy({
            'name': 'test-azure-serverless-mode',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events': ['VmWrite'],
                 'provision-options': {
                     'servicePlan': sp_id,
                     'storageAccount': sa_id,
                     'appInsights': ai_id
                 }}
        })
        function_mode = AzureFunctionMode(p)
        params = function_mode.get_function_app_params()
        self.assertEqual(function_mode.policy_name, p.data['name'])
        self.assertEqual(params.storage_account['id'], sa_id)
        self.assertEqual(params.storage_account['name'], 'testsa')
        self.assertEqual(params.storage_account['resource_group_name'], 'testrg')
        self.assertEqual(params.app_insights['id'], ai_id)
        self.assertEqual(params.app_insights['name'], 'testai')
        self.assertEqual(params.app_insights['resource_group_name'], 'testrg')
        self.assertEqual(params.service_plan['id'], sp_id)
        self.assertEqual(params.service_plan['name'], "testsp")
        self.assertEqual(params.service_plan['resource_group_name'], "testrg")
        self.assertTrue(params.function_app_name.startswith('test-azure-serverless-mode-'))
    def test_event_grid_mode_creates_advanced_filtered_subscription(self):
        # A single shorthand event maps to one StringIn advanced filter on
        # Data.OperationName.
        p = self.load_policy({
            'name': 'test-azure-event',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events': ['VmWrite']},
        })
        with mock.patch('c7n_azure.azure_events.AzureEventSubscription.create') as mock_create:
            storage_account = StorageAccount(id=1, location='westus')
            event_mode = AzureEventGridMode(p)
            event_mode.target_subscription_ids = [DEFAULT_SUBSCRIPTION_ID]
            event_mode._create_event_subscription(storage_account, 'some_queue', None)
            name, args, kwargs = mock_create.mock_calls[0]
            # verify the advanced filter created
            event_filter = args[4].advanced_filters[0]
            self.assertEqual(event_filter.key, 'Data.OperationName')
            self.assertEqual(event_filter.values, ['Microsoft.Compute/virtualMachines/write'])
            self.assertEqual(event_filter.operator_type, 'StringIn')
    def test_event_grid_mode_creates_advanced_filtered_subscription_with_multiple_events(self):
        # Shorthand and explicit resourceProvider/event forms combine into
        # one filter with multiple values.
        p = self.load_policy({
            'name': 'test-azure-event',
            'resource': 'azure.vm',
            'mode':
                {'type': FUNCTION_EVENT_TRIGGER_MODE,
                 'events':
                     ['VmWrite',
                      {
                          'resourceProvider': 'Microsoft.Resources/subscriptions/resourceGroups',
                          'event': 'write'
                      }]},
        })
        with mock.patch('c7n_azure.azure_events.AzureEventSubscription.create') as mock_create:
            storage_account = StorageAccount(id=1, location='westus')
            event_mode = AzureEventGridMode(p)
            event_mode.target_subscription_ids = [DEFAULT_SUBSCRIPTION_ID]
            event_mode._create_event_subscription(storage_account, 'some_queue', None)
            name, args, kwargs = mock_create.mock_calls[0]
            # verify the advanced filter created
            event_filter = args[4].advanced_filters[0]
            self.assertEqual(event_filter.key, 'Data.OperationName')
            self.assertEqual(event_filter.values,
                             ['Microsoft.Compute/virtualMachines/write',
                              'Microsoft.Resources/subscriptions/resourceGroups/write'])
            self.assertEqual(event_filter.operator_type, 'StringIn')
|
import asyncio
import os
from random import randint

import aiohttp
import discord
from discord.ext import commands
from tabulate import tabulate

from .utils import checks
from .utils.chat_formatting import pagify, box
from .utils.dataIO import dataIO
class Football:
    """Football stats from the football-data.org API (v1)."""

    __author__ = 'UltimatePancake'

    def __init__(self, bot: commands.Bot):
        self.bot = bot
        self.api_url = 'http://api.football-data.org/v1/'
        # Maps server id -> API token, as stored by [p]football tokenset.
        self.config = dataIO.load_json('data/football/config.json')
    async def _make_request(self, url: str, params, server_id: str):
        """The one that actually does the work.

        Issues the GET request, reports API errors to the channel and
        returns the decoded JSON payload (or None on error).
        """
        headers = {
            'X-Response-Control': 'minified',
            'User-Agent': 'Friendly Red bot'
        }
        if server_id in self.config:
            # BUGFIX: _tokenset stores the raw token under the server id
            # (self.config[server_id] = token); the previous lookup of
            # self.config['API_TOKEN'] could never succeed.
            headers['X-Auth-Token'] = self.config[server_id]
        else:
            await self.bot.say(box('Requests made without an authentication token are limited to 100 requests per 24 hours.\nYou can request a key by registering at http://api.football-data.org and setting it via [p]football tokenset.'))
        async with aiohttp.get(url, headers=headers, params=params) as r:
            if r.status == 200:
                data = await r.json()
                return data
            elif r.status == 400:
                await self.bot.say(box('Bad Request [400]:\nYour request was malformed most likely the value of a Filter was not set according to the Data Type that is expected.'))
                return
            elif r.status == 403:
                await self.bot.say(box('Restricted Resource [403]:\nYou tried to access a resource that exists, but is not available for you. This can be out of the following reasons:\n- the resource is only available to authenticated clients\n- the resource is only available to donating clients\n- the resource is not available in the API version you are using'))
                return
            elif r.status == 404:
                await self.bot.say(box('Not found [404]\nYou tried to access a resource that doesn’t exist.'))
                return
            elif r.status == 429:
                await self.bot.say(box('Too many requests [429]\nYou exceeded your allowed requests per minute/day depending on API version and your user status.\nSee http://api.football-data.org/docs/v1/index.html#_request_throttling for more information.'))
                await self.bot.say(box('Requests reset in ' + r.headers['X-RequestCounter-Reset'] + ' seconds.'))
                return
            else:
                await self.bot.say(box('Pancake has no idea what you\'ve done, seriously.'))
                # BUGFIX: r.status is an int and r.json() is a coroutine;
                # the old "r.status + '\n' + r.json()['error']" raised a
                # TypeError while trying to report the error.
                error_body = await r.json()
                await self.bot.say(box('{}\n{}'.format(r.status, error_body.get('error', ''))))
                return
    async def _get_full_leagues_data(self, server_id: str, season: str=None):
        """Retrieves all league data from API"""
        if season is None:
            season = ''
        params = {'season': season}
        url = self.api_url + 'competitions/'
        return await self._make_request(url, params, server_id)
    async def _get_league_fixtures_timeframe(self, server_id: str, league_id: str, timeframe: str):
        """Retrieves specific league matchday fixtures from API
        Optional timeframe parameter:
        The value of the timeFrame argument must start with either p(ast) or n(ext), representing a timeframe either in the past or future. It is followed by a number in the range 1..99. It defaults to n7 in the fixture resource and is unset for fixture as a subresource.
        For instance: p6 would return all fixtures in the last 6 days, whereas n23 would result in returning all fixtures in the next 23 days."""
        params = {'timeFrame': timeframe}
        url = self.api_url + 'competitions/{}/fixtures'.format(league_id)
        return await self._make_request(url, params, server_id)
    async def _get_league_fixtures_matchday(self, server_id: str, league_id: str, matchday: str):
        """Retrieves specific league matchday fixtures from API"""
        params = {'matchday': matchday}
        url = self.api_url + 'competitions/{}/fixtures'.format(league_id)
        return await self._make_request(url, params, server_id)
    async def _get_league_leaderboard(self, server_id: str, league_id: str, matchday: str):
        """Retrieves specific league leaderboard from API"""
        if matchday is None:
            matchday = ''
        params = {'matchday': matchday}
        url = self.api_url + 'competitions/{}/leagueTable'.format(league_id)
        return await self._make_request(url, params, server_id)
    async def _get_team_info(self, server_id: str, team_id: str):
        """Retrieves specific team info"""
        params = {}
        url = self.api_url + 'teams/{}'.format(team_id)
        return await self._make_request(url, params, server_id)
    async def _get_team_players(self, server_id: str, team_id: str):
        """Retrieves specific team players"""
        params = {}
        url = self.api_url + 'teams/{}/players'.format(team_id)
        return await self._make_request(url, params, server_id)
    @commands.group(pass_context=True)
    async def football(self, ctx: commands.Context):
        """Gets league/team standings and stats"""
        if ctx.invoked_subcommand is None:
            await self.bot.send_cmd_help(ctx)
    @checks.admin_or_permissions(manage_server=True)
    @football.command(name='tokenset', pass_context=True)
    async def _tokenset(self, ctx: commands.Context, token: str):
        """Sets token for football-data.org API
        http://api.football-data.org/client/register"""
        self.config[ctx.message.server.id] = token
        dataIO.save_json('data/football/config.json', self.config)
        await self.bot.say('football-data API token set')
    @football.command(name='leagues', pass_context=True)
    async def _leagues(self, ctx: commands.Context, season: str=None):
        """Gets leagues info"""
        headers = ['League', 'id', 'Name', 'Teams', 'Games', 'Matchdays']
        data = await self._get_full_leagues_data(ctx.message.server.id, season)
        pretty_data = []
        for league in data:
            pretty_data.append([league['league'], league['id'], league['caption'], league['numberOfTeams'], league['numberOfGames'], league['numberOfMatchdays']])
        await self.bot.say(box(tabulate(pretty_data, headers=headers)))
    @football.command(name='leaderboard', pass_context=True)
    async def _leaderboard(self, ctx: commands.Context, league_id: str, matchday: str=None):
        """Gets league leaderboard"""
        headers = [' ', 'ID', 'Team', 'Points', 'P', 'G', 'GA', 'GD']
        data = await self._get_league_leaderboard(ctx.message.server.id, league_id, matchday)
        pretty_data = []
        await self.bot.say('```diff\n+ {}\n- Matchday: {}\n```'.format(data['leagueCaption'], data['matchday']))
        if 'standing' in data:
            # League competitions: a single flat table.
            for team in data['standing']:
                pretty_data.append([team['rank'], team['teamId'], team['team'], team['points'], team['playedGames'], team['goals'], team['goalsAgainst'], team['goalDifference']])
            await self.bot.say(box(tabulate(pretty_data, headers=headers)))
        elif 'standings' in data:
            # Group-stage competitions: one table per group.
            for group, v in data['standings'].items():
                # BUGFIX: asyncio.sleep must be awaited (and asyncio
                # imported); the bare call was an un-awaited no-op.
                await asyncio.sleep(1)
                await self.bot.say('```diff\n+ Group ' + group + '```')
                pretty_data = []
                for team in v:
                    pretty_data.append([team['rank'], team['team'], team['points'], team['playedGames'], team['goals'], team['goalsAgainst'], team['goalDifference']])
                await self.bot.say(box(tabulate(pretty_data, headers=headers)))
    @football.group(pass_context=True)
    async def fixtures(self, ctx: commands.Context):
        """Fixture commands"""
        if ctx.invoked_subcommand is None:
            await self.bot.send_cmd_help(ctx)
    @fixtures.command(name='last', pass_context=True)
    async def _lastfixtures(self, ctx: commands.Context, league_id: str):
        """Gets last matchday fixtures"""
        headers = ['ID', 'Home', 'G', ' ', 'G', 'Away']
        data = await self._get_league_fixtures_timeframe(ctx.message.server.id, league_id, 'p7')
        await self.bot.say('```diff\n+ Last fixtures```')
        pretty_data = []
        for fixture in data['fixtures']:
            pretty_data.append([
                fixture['id'],
                '[{}] {}'.format(fixture['homeTeamId'], fixture['homeTeamName']),
                fixture['goalsHomeTeam'],
                ' - ',
                fixture['goalsAwayTeam'],
                '[{}] {}'.format(fixture['awayTeamId'], fixture['awayTeamName'])
            ])
        await self.bot.say(box(tabulate(pretty_data, headers=headers)))
    @fixtures.command(name='next', pass_context=True)
    async def _nextfixtures(self, ctx: commands.Context, league_id: str):
        """Gets next matchday fixtures"""
        headers = ['ID', 'Home', ' ', 'Away', 'Date']
        data = await self._get_league_fixtures_timeframe(ctx.message.server.id, league_id, 'n7')
        await self.bot.say('```diff\n+ Next fixtures```')
        pretty_data = []
        for fixture in data['fixtures']:
            pretty_data.append([
                fixture['id'],
                '[{}] {}'.format(fixture['homeTeamId'], fixture['homeTeamName']),
                ' - ',
                '[{}] {}'.format(fixture['awayTeamId'], fixture['awayTeamName']),
                fixture['date']
            ])
        await self.bot.say(box(tabulate(pretty_data, headers=headers)))
    @fixtures.command(name='matchday', pass_context=True)
    async def _matchdayfixtures(self, ctx: commands.Context, league_id: str, matchday: str='1'):
        """Gets specific matchday fixtures
        Defaults to matchday 1"""
        headers = ['ID', 'Home', ' ', ' ', 'Away']
        data = await self._get_league_fixtures_matchday(ctx.message.server.id, league_id, matchday)
        await self.bot.say('```diff\n+ Matchday ' + matchday + ' fixtures```')
        pretty_data = []
        for fixture in data['fixtures']:
            pretty_data.append([
                fixture['id'],
                '[{}] {}'.format(fixture['homeTeamId'], fixture['homeTeamName']),
                fixture['result']['goalsHomeTeam'],
                fixture['result']['goalsAwayTeam'],
                '[{}] {}'.format(fixture['awayTeamId'], fixture['awayTeamName'])
            ])
        await self.bot.say(box(tabulate(pretty_data, headers=headers)))
    @football.command(pass_context=True)
    async def team(self, ctx: commands.Context, team_id: str=None, show_players: bool=True):
        """Gets team information"""
        if team_id is None:
            await self.bot.send_cmd_help(ctx)
        else:
            team_data = await self._get_team_info(ctx.message.server.id, team_id)
            embed = discord.Embed(colour=randint(0, 0xFFFFFF))
            embed.title = team_data['name']
            if team_data['squadMarketValue'] is not None:
                embed.add_field(name='Squad market value', value=team_data['squadMarketValue'])
            embed.set_thumbnail(url=team_data['crestUrl'].replace('http', 'https'))
            embed.set_footer(text='id: {}'.format(team_id))
            await self.bot.say(embed=embed)
            if show_players:
                team_players = await self._get_team_players(ctx.message.server.id, team_id)
                await self.bot.say('```diff\n+ {} roster```'.format(team_data['name']))
                headers = ['Name', 'Jersey', 'Nationality', 'DoB', 'Position', 'Contract']
                pretty_data = []
                for player in team_players['players']:
                    pretty_data.append([
                        player['name'],
                        player['jerseyNumber'],
                        player['nationality'],
                        player['dateOfBirth'],
                        player['position'],
                        player['contractUntil']
                    ])
                for page in pagify(tabulate(pretty_data, headers=headers), ['\n'], shorten_by=8):
                    await self.bot.say(box(page))
def check_folder():
    """Create the cog's data folder if it does not exist yet."""
    if not os.path.exists('data/football'):
        # BUGFIX: message said 'pubg' (copy-paste from another cog).
        print('Creating data/football folder...')
        os.makedirs('data/football')
def check_file():
    """Create an empty config.json for the cog if it is missing."""
    config_path = 'data/football/config.json'
    if os.path.exists(config_path):
        return
    print('Creating empty config.json')
    dataIO.save_json(config_path, {})
def setup(bot):
    # Red cog entry point: ensure the data folder and config file exist,
    # then register the cog with the bot.
    check_folder()
    check_file()
    bot.add_cog(Football(bot))
|
##########################################################################
# mriWorks - Copyright (C) IRMAGE/INSERM, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# https://cecill.info/licences/Licence_CeCILL_V2-en.html
# for details.
##########################################################################
import yaml
import re
from PyQt5.QtWidgets import QDialog, QCheckBox, QVBoxLayout, QHBoxLayout, \
QPushButton, QScrollArea, QWidget, QMenuBar, QAction, \
QTextEdit
from PyQt5 import QtCore
from PyQt5.QtGui import QFontMetrics
import time
class chOptions(QDialog):
    """Dialog to enable/disable and edit the options of a node class.

    The available options are read from the ``nameclass`` section of a YAML
    file; one checkbox is created per option.  Options already present in
    ``ports`` start checked, and options whose current value contains
    ``'Node('`` are shown checked but disabled (they are wired to another
    node and cannot be toggled here).

    Args:
        pathYaml: path of the YAML file describing the options of each
            node class.
        nameclass: name of the node class being edited; used as the YAML
            top-level key and as the window title.
        ports: sequence where index 0 holds the selected input labels,
            index 1 the matching input values; indices 2 and 3 are kept
            untouched and passed back in the result.
        parent: optional parent widget.
    """

    def __init__(self, pathYaml, nameclass, ports, parent=None):
        super(chOptions, self).__init__(parent)
        doc = "No description"
        # try:
        #     if '_' in nameclass:
        #         firstAttr = nameclass[0:nameclass.index("_")]
        #         secondAttr = nameclass[nameclass.index("_") + 1:]
        #         TxtToExecute = firstAttr + "." + secondAttr + "().help(True)"
        #     else:
        #         firstAttr = nameclass
        #         secondAttr = ''
        #         TxtToExecute = firstAttr + ".help(True)"
        #     TxtToImport = "from nipype.interfaces import " + firstAttr
        #     exec(TxtToImport)
        #     doc = eval(TxtToExecute)
        #     doc = doc[doc.index('[Optional]') + 11:doc.index('Outputs')]
        # except Exception as e:
        #     doc = "No description"
        self.nameclass = nameclass
        # ports layout: [0] input labels, [1] input values, [2]/[3] opaque,
        # returned unchanged through self.newports.
        self.poqs = ports
        self.labels_inputs = self.poqs[0]
        self.values_inputs = self.poqs[1]
        self.setWindowTitle(nameclass)
        # NOTE(review): '&' masks the current flags down to the close-button
        # bit; '|' would *add* the hint while keeping the others — confirm
        # which behaviour is intended.
        self.setWindowFlags(self.windowFlags() &
                            QtCore.Qt.WindowCloseButtonHint)
        # Menu with check-all / uncheck-all shortcuts.
        menubar = QMenuBar()
        checkAll = QAction('Check all options', self)
        checkAll.setShortcut('Ctrl+A')
        menubar.addAction(checkAll)
        checkAll.triggered.connect(self.checkAllOptions)
        uncheckAll = QAction('Uncheck all options', self)
        uncheckAll.setShortcut('Ctrl+U')
        menubar.addAction(uncheckAll)
        uncheckAll.triggered.connect(self.uncheckAllOptions)
        self.listCh = []      # one QCheckBox per option
        listLabels = []       # option names, in checkbox order
        vbox = QVBoxLayout(self)
        vbox.addWidget(menubar)
        _ss = ports
        # Working copies of the selected labels/values; go() rebuilds them
        # from the final checkbox state.
        self.list1 = []
        self.list2 = []
        self.list3 = []
        for tr in _ss[0]:
            self.list1.append(tr)
        for tr in _ss[1]:
            self.list2.append(tr)
            self.list3.append(tr)
        scrolllayout = QVBoxLayout()
        scrollwidget = QWidget()
        scrollwidget.setLayout(scrolllayout)
        scroll = QScrollArea()
        scroll.setWidgetResizable(True)
        scroll.setWidget(scrollwidget)
        # Read-only description pane, sized to fit its text.
        desc = QTextEdit()
        desc.setPlainText(doc)
        desc.setReadOnly(True)
        desc.setLineWrapMode(True)
        font = desc.document().defaultFont()
        fontMetrics = QFontMetrics(font)
        textSize = fontMetrics.size(0, doc)
        textWidth = textSize.width() + 30
        textHeight = textSize.height() + 30
        desc.setMinimumSize(textWidth, textHeight)
        desc.resize(textWidth, textHeight)
        hbox2 = QHBoxLayout()
        vbox2 = QVBoxLayout()
        # Build one checkbox per option declared for nameclass in the YAML.
        with open(pathYaml, 'r', encoding='utf8') as stream:
            try:
                self.dicts = yaml.load(stream, yaml.FullLoader)
                for el in self.dicts[nameclass]:
                    checkedTo = False
                    enableTo = True
                    if el in self.list1:
                        # Option already selected: pull it out of the working
                        # lists (go() re-adds it if still checked).
                        ind = self.list1.index(el)
                        self.list1.remove(el)
                        if 'Node(' in str(self.list2[ind]):
                            # Value comes from another node: lock the box.
                            enableTo = False
                            vals = self.list2[ind]
                        del self.list2[ind]
                        del self.list3[ind]
                        checkedTo = True
                    b = QCheckBox(el, self)
                    b.setChecked(checkedTo)
                    b.setEnabled(enableTo)
                    self.listCh.append(b)
                    listLabels.append(b.text())
                    vbox2.addWidget(self.listCh[-1])
            except Exception as exc:
                print('yamlerror', exc)
                return
        # Second pass over the raw YAML text: harvest the '#' comments that
        # document each option and render them as HTML in the description.
        with open(pathYaml, 'r', encoding='utf8') as stream:
            rd = stream.readlines()
        rd = rd[rd.index(nameclass + ":\n") + 1:]
        # rd = rd[:len(self.listCh)]
        doc = ''
        n = len(listLabels)
        for lst in rd:
            tmp = ''
            try:
                tmp = lst.rstrip()
                tmp = tmp[:tmp.index('#')]
                tmp = tmp[:tmp.index(':')]
                if n == 0:
                    # All known options documented; stop scanning.
                    break
                if tmp.strip() in listLabels:
                    n = n - 1
                    doc = doc + "<br><span style=\" font-size:10pt; font-weight:600; color:#222222;\" >" + tmp + " : </span><br>"
            except Exception as e:
                pass
            comm = ''
            try:
                comm = lst[lst.index('#') + 1:]
                doc = doc + "<span style=\" font-size:10pt; font-weight:600; color:#2222ee;\" >" + comm + "</span><br>"
            except Exception as e:
                pass
        # if len(comm) != 0:
        if len(doc) != 0:
            desc.clear()
            desc.append(doc)
        hbox2.addLayout(vbox2)
        hbox2.addWidget(desc)
        scrolllayout.addLayout(hbox2)
        vbox.addWidget(scroll)
        buttonOk = QPushButton('Ok', self)
        buttonCancel = QPushButton('Cancel', self)
        hboxButton = QHBoxLayout()
        hboxButton.addWidget(buttonOk)
        hboxButton.addWidget(buttonCancel)
        vbox.addLayout(hboxButton)
        self.setMinimumWidth(800)
        buttonOk.clicked.connect(self.go)
        buttonCancel.clicked.connect(self.CANCEL)

    def CANCEL(self):
        """Cancel-button slot: record the answer and close the dialog."""
        self.answer = "cancel"
        self.close()

    def go(self):
        """Ok-button slot: rebuild the port lists from the checkbox states.

        For each checked option, the value is taken from the existing
        inputs when it references a node ('Node(' in its repr), otherwise
        from the YAML defaults (eval'ed when possible).  Unchecked options
        are removed from the working lists.  Stores the result in
        ``self.newports`` and sets ``self.answer`` to 'ok'.
        """
        for aze in self.listCh:
            if aze.isChecked():
                txt = aze.text()
                self.list1.append(str(txt))
                valueExists = False
                val = ''
                ind = 0
                try:
                    ind = self.labels_inputs.index(txt)
                    # print("type ? ",type(self.values_inputs[ind]).__name__)
                    if 'Node(' in str(self.values_inputs[ind]):
                        # Keep the existing node-provided value as-is.
                        val = self.values_inputs[ind]
                        valueExists = True
                except Exception as e:
                    pass
                if not valueExists:
                    # Fall back to the YAML default for this option.
                    if type(self.dicts[self.nameclass][aze.text()])\
                            .__name__ == 'str':
                        if ('enumerate' in
                                self.dicts[self.nameclass][aze.text()]):
                            # Keep 'enumerate(...)' expressions unevaluated
                            # here; the first choice is extracted below.
                            imb = self.dicts[self.nameclass][aze.text()]
                        else:
                            try:
                                imb = "" + eval(self.dicts[self.nameclass][aze.text()])
                            except Exception as e:
                                imb = "" + self.dicts[self.nameclass][aze.text()]
                    else:
                        try:
                            imb = eval(self.dicts[self.nameclass][aze.text()])
                        except Exception as e:
                            imb = self.dicts[self.nameclass][aze.text()]
                    _imb1 = imb
                    if type(imb).__name__ == 'str':
                        if 'enumerate' in imb:
                            # Default value = first item of the enumeration.
                            self.list2.append(list(eval(_imb1))[0][1])
                        else:
                            self.list2.append(_imb1)
                    else:
                        self.list2.append(_imb1)
                    self.list3.append(imb)
                else:
                    self.list2.append(val)
            else:
                # Unchecked: drop the option from the working lists.
                if aze.text() in self.list1:
                    ind = self.list1.index(aze.text())
                    self.list1.remove(aze.text())
                    del self.list2[ind]
                    del self.list3[ind]
        self.newports = (self.list1, self.list2, self.poqs[2], self.poqs[3])
        self.close()
        self.answer = "ok"

    def getNewValues(self):
        """Return the updated ports tuple and a copy of the raw values list."""
        return self.newports, list(self.list3)

    def getAnswer(self):
        """Return 'ok' or 'cancel' depending on how the dialog was closed."""
        return self.answer

    def checkAllOptions(self):
        """Menu action: tick every option checkbox."""
        for aze in self.listCh:
            aze.setChecked(True)

    def uncheckAllOptions(self):
        """Menu action: untick every *enabled* option checkbox."""
        for aze in self.listCh:
            if aze.isEnabled():
                aze.setChecked(False)

    def closeEvent(self, closeEvent):
        """Treat closing the window like pressing Cancel."""
        self.answer = "cancel"
        self.close()
|
import os
from collections import defaultdict
import mmcv
import numpy as np
from mmcv.utils import print_log
from .api_wrappers import COCO
from .builder import DATASETS
from .coco import CocoDataset
try:
import panopticapi
from panopticapi.evaluation import pq_compute_multi_core, VOID
from panopticapi.utils import id2rgb
except ImportError:
panopticapi = None
pq_compute_multi_core = None
id2rgb = None
VOID = None
__all__ = ['CocoPanopticDataset']
# A custom value to distinguish instance ID and category ID; need to
# be greater than the number of categories.
# For a pixel in the panoptic result map:
# pan_id = ins_id * INSTANCE_OFFSET + cat_id
INSTANCE_OFFSET = 1000
class COCOPanoptic(COCO):
    """This wrapper is for loading the panoptic style annotation file.

    The format is shown in the CocoPanopticDataset class.

    Args:
        annotation_file (str): Path of annotation file.
    """

    def __init__(self, annotation_file=None):
        # Fail fast if the optional panopticapi dependency is absent.
        if panopticapi is None:
            raise RuntimeError(
                'panopticapi is not installed, please install it by: '
                'pip install git+https://github.com/cocodataset/'
                'panopticapi.git.')
        super(COCOPanoptic, self).__init__(annotation_file)

    def createIndex(self):
        """Build the lookup tables (anns, cats, imgs, imgToAnns, catToImgs)
        from ``self.dataset``."""
        print('creating index...')
        # anns stores 'segment_id -> annotation'
        anns, cats, imgs = {}, {}, {}
        img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list)
        if 'annotations' in self.dataset:
            # NOTE(review): assumes 'annotations' and 'images' are aligned
            # one-to-one and in the same order — confirm against the
            # annotation files used.
            for ann, img_info in zip(self.dataset['annotations'],
                                     self.dataset['images']):
                img_info['segm_file'] = ann['file_name']
                for seg_ann in ann['segments_info']:
                    # to match with instance.json
                    seg_ann['image_id'] = ann['image_id']
                    seg_ann['height'] = img_info['height']
                    seg_ann['width'] = img_info['width']
                    img_to_anns[ann['image_id']].append(seg_ann)
                    # segment_id is not unique in the coco dataset, so each
                    # id maps to a *list* of segment annotations.
                    if seg_ann['id'] in anns:
                        anns[seg_ann['id']].append(seg_ann)
                    else:
                        anns[seg_ann['id']] = [seg_ann]
        if 'images' in self.dataset:
            for img in self.dataset['images']:
                imgs[img['id']] = img
        if 'categories' in self.dataset:
            for cat in self.dataset['categories']:
                cats[cat['id']] = cat
        if 'annotations' in self.dataset and 'categories' in self.dataset:
            for ann in self.dataset['annotations']:
                for seg_ann in ann['segments_info']:
                    cat_to_imgs[seg_ann['category_id']].append(ann['image_id'])
        print('index created!')
        self.anns = anns
        self.imgToAnns = img_to_anns
        self.catToImgs = cat_to_imgs
        self.imgs = imgs
        self.cats = cats

    def load_anns(self, ids=()):
        """Load anns with the specified ids.

        ``self.anns`` maps each segment id to a *list* of annotations, so
        the result concatenates those lists.

        Args:
            ids (Iterable[int] | int): integer id(s) specifying anns.

        Returns:
            list[dict]: loaded ann objects.
        """
        anns = []
        if hasattr(ids, '__iter__') and hasattr(ids, '__len__'):
            for id in ids:
                anns += self.anns[id]
            return anns
        # Fix: use isinstance instead of ``type(ids) == int`` so that int
        # subclasses (e.g. numpy integer wrappers converted to int) work.
        elif isinstance(ids, int):
            return self.anns[ids]
@DATASETS.register_module()
class CocoPanopticDataset(CocoDataset):
    """Coco dataset for Panoptic segmentation.

    The annotation format is shown as follows. The `ann` field is optional
    for testing.

    .. code-block:: none

        [
            {
                'filename': f'{image_id:012}.png',
                'image_id':9
                'segments_info': {
                    [
                        {
                            'id': 8345037, (segment_id in panoptic png,
                                            convert from rgb)
                            'category_id': 51,
                            'iscrowd': 0,
                            'bbox': (x1, y1, w, h),
                            'area': 24315,
                            'segmentation': list,(coded mask)
                        },
                        ...
                    }
                }
            },
            ...
        ]
    """
    # All panoptic classes: THING_CLASSES followed by STUFF_CLASSES.
    # Fix: 'truck' previously had a stray leading space (' truck'), which
    # made it inconsistent with THING_CLASSES and broke name lookups.
    CLASSES = [
        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
        'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
        'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
        'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner',
        'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff',
        'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light',
        'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',
        'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',
        'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',
        'wall-wood', 'water-other', 'window-blind', 'window-other',
        'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
        'cabinet-merged', 'table-merged', 'floor-other-merged',
        'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',
        'paper-merged', 'food-other-merged', 'building-other-merged',
        'rock-merged', 'wall-other-merged', 'rug-merged'
    ]
    THING_CLASSES = [
        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
        'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
        'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
        'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        'scissors', 'teddy bear', 'hair drier', 'toothbrush'
    ]
    STUFF_CLASSES = [
        'banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain',
        'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house',
        'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',
        'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',
        'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',
        'wall-wood', 'water-other', 'window-blind', 'window-other',
        'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
        'cabinet-merged', 'table-merged', 'floor-other-merged',
        'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',
        'paper-merged', 'food-other-merged', 'building-other-merged',
        'rock-merged', 'wall-other-merged', 'rug-merged'
    ]

    def load_annotations(self, ann_file):
        """Load annotation from COCO Panoptic style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from COCO api.
        """
        self.coco = COCOPanoptic(ann_file)
        self.cat_ids = self.coco.get_cat_ids()
        # Map original category ids to contiguous labels [0, num_classes).
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.categories = self.coco.cats
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            info['filename'] = info['file_name']
            info['segm_file'] = info['filename'].replace('jpg', 'png')
            data_infos.append(info)
        return data_infos

    def get_ann_info(self, idx):
        """Get COCO annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        # filter out unmatched images (segment ids are not globally unique)
        ann_info = [i for i in ann_info if i['image_id'] == img_id]
        return self._parse_ann_info(self.data_infos[idx], ann_info)

    def _parse_ann_info(self, img_info, ann_info):
        """Parse annotations and load panoptic ground truths.

        Args:
            img_info (dict): Image info of an image.
            ann_info (list[dict]): Annotation info of an image.

        Returns:
            dict: A dict containing the following keys: bboxes, bboxes_ignore,
                labels, masks, seg_map.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_mask_infos = []
        for i, ann in enumerate(ann_info):
            x1, y1, w, h = ann['bbox']
            # Skip degenerate boxes / empty segments.
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            category_id = ann['category_id']
            contiguous_cat_id = self.cat2label[category_id]
            is_thing = self.coco.load_cats(ids=category_id)[0]['isthing']
            if is_thing:
                is_crowd = ann.get('iscrowd', False)
                if not is_crowd:
                    gt_bboxes.append(bbox)
                    gt_labels.append(contiguous_cat_id)
                else:
                    # Crowd regions are ignored for box training and
                    # treated as stuff in the mask info.
                    gt_bboxes_ignore.append(bbox)
                    is_thing = False
            mask_info = {
                'id': ann['id'],
                'category': contiguous_cat_id,
                'is_thing': is_thing
            }
            gt_mask_infos.append(mask_info)
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        ann = dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks=gt_mask_infos,
            seg_map=img_info['segm_file'])
        return ann

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        ids_with_ann = []
        # check whether images have legal thing annotations.
        for lists in self.coco.anns.values():
            for item in lists:
                category_id = item['category_id']
                is_thing = self.coco.load_cats(ids=category_id)[0]['isthing']
                if not is_thing:
                    continue
                ids_with_ann.append(item['image_id'])
        ids_with_ann = set(ids_with_ann)
        valid_inds = []
        valid_img_ids = []
        for i, img_info in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            if self.filter_empty_gt and img_id not in ids_with_ann:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def _pan2json(self, results, outfile_prefix):
        """Convert panoptic results to COCO panoptic json style."""
        label2cat = dict((v, k) for (k, v) in self.cat2label.items())
        pred_annotations = []
        outdir = os.path.join(os.path.dirname(outfile_prefix), 'panoptic')
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            segm_file = self.data_infos[idx]['segm_file']
            # NOTE(review): `pan` aliases results[idx] and is modified in
            # place below (VOID substitution) — the caller's array changes.
            pan = results[idx]
            pan_labels = np.unique(pan)
            segm_info = []
            for pan_label in pan_labels:
                sem_label = pan_label % INSTANCE_OFFSET
                # We reserve the length of self.CLASSES for VOID label
                if sem_label == len(self.CLASSES):
                    continue
                # convert sem_label to json label
                cat_id = label2cat[sem_label]
                is_thing = self.categories[cat_id]['isthing']
                mask = pan == pan_label
                area = mask.sum()
                segm_info.append({
                    'id': int(pan_label),
                    'category_id': cat_id,
                    'isthing': is_thing,
                    'area': int(area)
                })
            # evaluation script uses 0 for VOID label.
            pan[pan % INSTANCE_OFFSET == len(self.CLASSES)] = VOID
            pan = id2rgb(pan).astype(np.uint8)
            # id2rgb gives RGB; imwrite expects BGR, hence the channel flip.
            mmcv.imwrite(pan[:, :, ::-1], os.path.join(outdir, segm_file))
            record = {
                'image_id': img_id,
                'segments_info': segm_info,
                'file_name': segm_file
            }
            pred_annotations.append(record)
        pan_json_results = dict(annotations=pred_annotations)
        return pan_json_results

    def results2json(self, results, outfile_prefix):
        """Dump the panoptic results to a COCO panoptic style json file.

        Args:
            results (dict): Testing results of the dataset.
            outfile_prefix (str): The filename prefix of the json files. If the
                prefix is "somepath/xxx", the json files will be named
                "somepath/xxx.panoptic.json"

        Returns:
            dict[str: str]: The key is 'panoptic' and the value is
                corresponding filename.
        """
        result_files = dict()
        pan_results = [result['pan_results'] for result in results]
        pan_json_results = self._pan2json(pan_results, outfile_prefix)
        result_files['panoptic'] = f'{outfile_prefix}.panoptic.json'
        mmcv.dump(pan_json_results, result_files['panoptic'])
        return result_files

    def evaluate_pan_json(self, result_files, outfile_prefix, logger=None):
        """Evaluate PQ according to the panoptic results json file."""
        imgs = self.coco.imgs
        gt_json = self.coco.img_ann_map  # image to annotations
        gt_json = [{
            'image_id': k,
            'segments_info': v,
            'file_name': imgs[k]['segm_file']
        } for k, v in gt_json.items()]
        pred_json = mmcv.load(result_files['panoptic'])
        pred_json = dict(
            (el['image_id'], el) for el in pred_json['annotations'])
        # match the gt_anns and pred_anns in the same image
        matched_annotations_list = []
        for gt_ann in gt_json:
            img_id = gt_ann['image_id']
            if img_id not in pred_json.keys():
                raise Exception('no prediction for the image'
                                ' with id: {}'.format(img_id))
            matched_annotations_list.append((gt_ann, pred_json[img_id]))
        gt_folder = self.seg_prefix
        pred_folder = os.path.join(os.path.dirname(outfile_prefix), 'panoptic')
        pq_stat = pq_compute_multi_core(matched_annotations_list, gt_folder,
                                        pred_folder, self.categories)
        eval_results = {}
        metrics = [('All', None), ('Things', True), ('Stuff', False)]
        pq_results = {}
        output = '\n'
        for name, isthing in metrics:
            pq_results[name], per_class_pq_results = pq_stat.pq_average(
                self.categories, isthing=isthing)
            if name == 'All':
                pq_results['per_class'] = per_class_pq_results
        # Render a fixed-width summary table of PQ/SQ/RQ per metric group.
        output += ('{:10s}| {:>5s} {:>5s} {:>5s} {:>5s}\n'.format(
            '', 'PQ', 'SQ', 'RQ', 'N'))
        output += ('-' * (10 + 7 * 4) + '\n')
        for name, _isthing in metrics:
            output += '{:10s}| {:5.2f} {:5.2f} {:5.2f} {:5d}\n'.format(
                name, 100 * pq_results[name]['pq'],
                100 * pq_results[name]['sq'], 100 * pq_results[name]['rq'],
                pq_results[name]['n'])
            eval_results[f'{name}_pq'] = pq_results[name]['pq'] * 100.0
            eval_results[f'{name}_sq'] = pq_results[name]['sq'] * 100.0
            eval_results[f'{name}_rq'] = pq_results[name]['rq'] * 100.0
        print_log(output, logger=logger)
        return eval_results

    def evaluate(self,
                 results,
                 metric='pq',
                 logger=None,
                 jsonfile_prefix=None,
                 **kwargs):
        """Evaluation in COCO Panoptic protocol.

        Args:
            results (list[dict]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Only
                support 'pq' at present.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            dict[str, float]: COCO Panoptic style evaluation metric.
        """
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['pq']  # todo: support other metrics like 'bbox'
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
        eval_results = {}
        outfile_prefix = os.path.join(tmp_dir.name, 'results') \
            if tmp_dir is not None else jsonfile_prefix
        if 'pq' in metrics:
            eval_pan_results = self.evaluate_pan_json(result_files,
                                                      outfile_prefix, logger)
            eval_results.update(eval_pan_results)
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
|
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Utils to assist with dataclass-related operations."""
import itertools
from dataclasses import is_dataclass
import numpy as np
import pandas as pd
def dataclass_eq(base_dataclass: object, other: object) -> bool:
    """Compare a dataclass instance against another object member by member.

    Numpy arrays, pandas DataFrames, and (nested) lists of either are
    handled with the appropriate element-wise comparisons.

    Args:
        base_dataclass: Base dataclass to compare against.
        other: Other object to compare against the base dataclass.

    Raises:
        ValueError: If base_dataclass is not an instance of a dataclass.

    Returns:
        Flag indicating whether base_dataclass and the other object are considered equal.
    """
    if not is_dataclass(base_dataclass):
        raise ValueError(f"'{base_dataclass.__class__.__name__}' is not a dataclass!")

    # Identical instance: trivially equal.
    if base_dataclass is other:
        return True

    # Different concrete types are never equal.
    if base_dataclass.__class__ is not other.__class__:
        return False

    # Same type implies the same field layout, so pairing values is safe.
    member_pairs = zip(vars(base_dataclass).values(), vars(other).values())
    return all(_dataclass_member_eq(lhs, rhs) for lhs, rhs in member_pairs)


def _dataclass_member_eq(base: object, other: object) -> bool:
    """Compare two dataclass member values, handling numpy/pandas types.

    Args:
        base: Base object to compare against.
        other: Other object to compare against the base object.

    Returns:
        Bool flag indicating whether objects a and b are equal.
    """
    # Same instance: equal by definition.
    if base is other:
        return True

    # Lists are compared element-wise; zip_longest pads with None so
    # different lengths compare unequal.
    if isinstance(base, list) and isinstance(other, list):
        pairs = itertools.zip_longest(base, other)
        return all(_dataclass_member_eq(lhs, rhs) for lhs, rhs in pairs)

    # Numpy arrays: use numpy's element-wise equality.
    if isinstance(base, np.ndarray) and isinstance(other, np.ndarray):
        return bool(np.array_equal(base, other))

    # Pandas frames: use pandas' equality (handles NaN alignment).
    if isinstance(base, pd.DataFrame) and isinstance(other, pd.DataFrame):
        return bool(pd.DataFrame.equals(base, other))

    # Anything else: plain equality, treating incomparable types as unequal.
    try:
        return bool(base == other)
    except (TypeError, ValueError):
        return False
|
import os
import sys
# nametrans_map = {'cascade_mask_rcnn_r50_fpn_1x_dota'}
# Rename tables: original config file name (key) -> released name (value),
# one table per DOTA dataset version.
configs_dota = {
    'retinanet_r50_fpn_2x_dota': 'retinanet_r50_fpn_2x_dota',
    'retinanet_v5_obb_r50_fpn_2x_dota': 'retinanet_obb_r50_fpn_2x_dota',
    'mask_rcnn_r50_fpn_1x_dota': 'mask_rcnn_r50_fpn_1x_dota',
    'htc_without_semantic_r50_fpn_1x_dota': 'htc_without_semantic_r50_fpn_1x_dota',
    'faster_rcnn_r50_fpn_1x_dota': 'faster_rcnn_r50_fpn_1x_dota',
    'faster_rcnn_r50_fpn_1x_dota_obb_v3': 'faster_rcnn_obb_r50_fpn_1x_dota',
    'faster_rcnn_dpool_v3_r50_fpn_1x_dota_obb': 'faster_rcnn_obb_dpool_r50_fpn_1x_dota',
    'faster_rcnn_obb_hbb_v3_r50_fpn_1x_dota': 'faster_rcnn_h-obb_r50_fpn_1x_dota',
    'faster_rcnn_r50_fpn_1x_dota_RoITrans_v5': 'faster_rcnn_RoITrans_r50_fpn_1x_dota',
    'cascade_mask_rcnn_r50_fpn_1x_dota': 'cascade_mask_rcnn_r50_fpn_1x_dota'
}
configs_dota1_5 = {
    'retinanet_r50_fpn_2x_dota1_5_v2': 'retinanet_r50_fpn_2x_dota1_5',
    'retinanet_v5_obb_r50_fpn_2x_dota1_5_v2': 'retinanet_obb_r50_fpn_2x_dota1_5',
    'mask_rcnn_r50_fpn_1x_dota1_5_v2': 'mask_rcnn_r50_fpn_1x_dota1_5',
    'cascade_mask_rcnn_r50_fpn_1x_dota1_5_v2': 'cascade_mask_rcnn_r50_fpn_1x_dota1_5',
    'htc_without_semantic_r50_fpn_1x_dota1_5_v2': 'htc_without_semantic_r50_fpn_1x_dota1_5',
    'faster_rcnn_r50_fpn_1x_dota1_5_v2': 'faster_rcnn_r50_fpn_1x_dota1_5',
    'faster_rcnn_r50_fpn_1x_dota1_5_v2_obb_v3': 'faster_rcnn_obb_r50_fpn_1x_dota1_5',
    'faster_rcnn_dpool_v3_r50_fpn_1x_dota1_5_v2_obb': 'faster_rcnn_obb_dpool_r50_fpn_1x_dota1_5',
    'faster_rcnn_obb_hbb_v3_r50_fpn_1x_dota1_5_v2': 'faster_rcnn_h-obb_r50_fpn_1x_dota1_5',
    'faster_rcnn_r50_fpn_1x_dota1_5_v2_RoITrans_v5': 'faster_rcnn_RoITrans_r50_fpn_1x_dota1_5'
}
configs_dota2 = {
    # 'retinanet_r50_fpn_1x_dota2_v3',
    'retinanet_r50_fpn_2x_dota2_v3': 'retinanet_r50_fpn_2x_dota2',
    'retinanet_v5_obb_r50_fpn_2x_dota2_v3': 'retinanet_obb_r50_fpn_2x_dota2',
    'mask_rcnn_r50_fpn_1x_dota2_v3': 'mask_rcnn_r50_fpn_1x_dota2',
    'cascade_mask_rcnn_r50_fpn_1x_dota2_v3': 'cascade_mask_rcnn_r50_fpn_1x_dota2',
    'htc_without_semantic_r50_fpn_1x_dota2_v3': 'htc_without_semantic_r50_fpn_1x_dota2',
    'faster_rcnn_r50_fpn_1x_dota2_v3': 'faster_rcnn_r50_fpn_1x_dota2',
    'faster_rcnn_r50_fpn_1x_dota2_v3_obb_v3': 'faster_rcnn_r50_fpn_1x_dota2_obb',
    'faster_rcnn_dpool_v3_r50_fpn_1x_dota2_v3_obb': 'faster_rcnn_dpool_r50_fpn_1x_dota2_obb',
    'faster_rcnn_obb_hbb_v3_r50_fpn_1x_dota2_v3': 'faster_rcnn_h-obb_r50_fpn_1x_dota2',
    'faster_rcnn_r50_fpn_1x_dota2_v3_RoITrans_v5': 'faster_rcnn_r50_fpn_1x_dota2_RoITrans',
    'faster_rcnn_r101_fpn_1x_dota2_v3_RoITrans_v5': 'faster_rcnn_r101_fpn_1x_dota2_RoITrans',
    'faster_rcnn_x101_64x4d_fpn_1x_dota2_v3_RoITrans_v5': 'faster_rcnn_x101_64x4d_fpn_1x_dota2_RoITrans',
    'faster_rcnn_x101_64x4d_fpn_1x_dota2_v3_obb_v3': 'faster_rcnn_x101_64x4d_fpn_1x_dota2_obb',
    'faster_rcnn_r101_fpn_1x_dota2_v3_obb_v3': 'faster_rcnn_r101_fpn_1x_dota2_obb',
    'mask_rcnn_x101_64x4d_fpn_1x_dota2_v3': 'mask_rcnn_x101_64x4d_fpn_1x_dota2',
    'mask_rcnn_r101_fpn_1x_dota2_v3': 'mask_rcnn_r101_fpn_1x_dota2',
    'faster_rcnn_dpool_v3_r101_fpn_1x_dota2_v3_obb': 'faster_rcnn_dpool_r101_fpn_1x_dota2_obb',
    'faster_rcnn_dpool_v3_x101_64x4d_fpn_1x_dota2_v3_obb': 'faster_rcnn_dpool_x101_64x4d_fpn_1x_dota2_obb',
    'retinanet_v5_obb_r101_fpn_2x_dota2_v3': 'retinanet_obb_r101_fpn_2x_dota2',
    'retinanet_v5_obb_x101_64x4d_fpn_2x_dota2_v3': 'retinanet_obb_x101_64x4d_fpn_2x_dota2'
}


def _rename_entries(dir_path, mapping, suffix='.py'):
    """Rename each `key + suffix` entry in dir_path to `value + suffix`.

    Uses os.rename instead of shelling out to `mv` (portable, no quoting
    issues). Missing sources are reported and skipped, matching the
    best-effort behaviour of the original shell calls.
    """
    for src_name, dst_name in mapping.items():
        src = os.path.join(dir_path, src_name + suffix)
        dst = os.path.join(dir_path, dst_name + suffix)
        try:
            os.rename(src, dst)
        except OSError as err:
            print('skip {}: {}'.format(src, err))


def nametrans(config_path):
    """Rename the config files for all three DOTA versions under config_path."""
    _rename_entries(os.path.join(config_path, 'DOTA'), configs_dota)
    _rename_entries(os.path.join(config_path, 'DOTA1_5'), configs_dota1_5)
    _rename_entries(os.path.join(config_path, 'DOTA2'), configs_dota2)


def work_dir_trans(work_dir_path):
    """Rename the work directories (no extension) for every config mapping."""
    configs_all = {}
    configs_all.update(configs_dota)
    configs_all.update(configs_dota1_5)
    configs_all.update(configs_dota2)
    _rename_entries(work_dir_path, configs_all, suffix='')


if __name__ == '__main__':
    nametrans(r'/home/dingjian/project/code/Aerialdetection/configs')
    # work_dir_trans(r'/home/dingjian/project/code/Aerialdetection/work_dirs')
"""Intialize the smmap package"""
__author__ = "Sebastian Thiel"
__contact__ = "byronimo@gmail.com"
__homepage__ = "https://github.com/gitpython-developers/smmap"
version_info = (3, 0, 1)
__version__ = '.'.join(str(i) for i in version_info)
# make everything available in root package for convenience
from .mman import *
from .buf import *
|
from typing import List, Optional, Text, Tuple
import numpy as np
from numpy import ndarray
from pandas import DataFrame, Timedelta
from pandas.core.series import Series
from pymove.preprocessing import filters
from pymove.utils.constants import (
ADDRESS,
CITY,
DATETIME,
DIST_EVENT,
DIST_HOME,
DIST_POI,
EVENT_ID,
EVENT_TYPE,
GEOMETRY,
HOME,
ID_POI,
LATITUDE,
LONGITUDE,
NAME_POI,
TRAJ_ID,
TYPE_POI,
VIOLATING,
)
from pymove.utils.distances import haversine
from pymove.utils.log import progress_bar
def union_poi_bank(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
    """
    Merge every bank-related POI category into the single category 'banks'.

    Parameters
    ----------
    data : DataFrame
        Input points of interest data
    label_poi : str, optional
        Label referring to the Point of Interest category, by default TYPE_POI
    """
    print('union bank categories to one category')
    print('... There are {} -- {}'.format(data[label_poi].nunique(), label_poi))
    bank_categories = [
        'bancos_filiais',
        'bancos_agencias',
        'bancos_postos',
        'bancos_PAE',
        'bank',
    ]
    is_bank = data[label_poi].isin(bank_categories)
    data.at[data[is_bank].index, label_poi] = 'banks'
def union_poi_bus_station(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
    """
    Merge every bus-station POI category into the single category
    'bus_station'.

    Parameters
    ----------
    data : DataFrame
        Input points of interest data
    label_poi : str, optional
        Label referring to the Point of Interest category, by default TYPE_POI
    """
    print('union bus station categories to one category')
    is_bus_station = data[label_poi].isin(['transit_station', 'pontos_de_onibus'])
    data.at[data[is_bus_station].index, label_poi] = 'bus_station'
def union_poi_bar_restaurant(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
    """
    Merge the 'bar' and 'restaurant' POI categories into the single
    category 'bar-restaurant'.

    Parameters
    ----------
    data : DataFrame
        Input points of interest data
    label_poi : str, optional
        Label referring to the Point of Interest category, by default TYPE_POI
    """
    print('union restaurant and bar categories to one category')
    is_bar_restaurant = data[label_poi].isin(['restaurant', 'bar'])
    data.at[data[is_bar_restaurant].index, label_poi] = 'bar-restaurant'
def union_poi_parks(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
    """
    Merge every park-related POI category into the single category 'parks'.

    Parameters
    ----------
    data : DataFrame
        Input points of interest data
    label_poi : str, optional
        Label referring to the Point of Interest category, by default TYPE_POI
    """
    print('union parks categories to one category')
    is_park = data[label_poi].isin(['pracas_e_parques', 'park'])
    data.at[data[is_park].index, label_poi] = 'parks'
def union_poi_police(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
    """
    Merge the 'distritos_policiais' POI category into the single category
    'police'.

    Parameters
    ----------
    data : DataFrame
        Input points of interest data
    label_poi : str, optional
        Label referring to the Point of Interest category, by default TYPE_POI
    """
    print('union distritos policies and police categories')
    is_police = data[label_poi] == 'distritos_policiais'
    data.at[data[is_police].index, label_poi] = 'police'
def join_collective_areas(
    gdf_: DataFrame, gdf_rules_: DataFrame, label_geometry: Optional[Text] = GEOMETRY
):
    """
    Integrate trajectories with collective areas, adding a boolean column
    that flags trajectory points lying inside any collective area.

    Parameters
    ----------
    gdf_ : geopandas.GeoDataFrame
        The input trajectory data
    gdf_rules_ : geopandas.GeoDataFrame
        The input coletive areas data
    label_geometry : str, optional
        Label referring to the Point of Interest category, by default GEOMETRY
    """
    print('Integration between trajectories and collectives areas')
    area_polygons = gdf_rules_[label_geometry].unique()
    # Start with no violations, then mark points inside each polygon.
    gdf_[VIOLATING] = False
    for polygon in progress_bar(area_polygons):
        hits = gdf_[label_geometry].intersects(polygon)
        gdf_.at[gdf_[hits].index, VIOLATING] = True
def _reset_and_creates_id_and_lat_lon(
    data: DataFrame,
    df_pois: DataFrame,
    lat_lon_poi: Optional[bool] = True,
    reset_index: Optional[bool] = True
) -> Tuple[ndarray, ndarray, ndarray, ndarray, ndarray]:
    """
    Resets the indexes of the dataframes and creates the default arrays
    used by the join operations (distances, ids, tags, latitude and
    longitude scratch buffers).

    Parameters
    ----------
    data : DataFrame
        The input trajectory data.
    df_pois : DataFrame
        The input point of interest data.
    lat_lon_poi : bool, optional
        Flag to determine if the lat/lon buffers are sized to df_pois
        (True) or to data (False), by default True
    reset_index : bool, optional
        Flag for reset index of the df_pois and data dataframes before the join,
        by default True

    Returns
    -------
    distances, ids, tags, lat, lon : ndarray
        Arrays with default values for the join operation.
    """
    if reset_index:
        print('... Resetting index to operation...')
        data.reset_index(drop=True, inplace=True)
        df_pois.reset_index(drop=True, inplace=True)
    # np.inf replaces the np.Infinity alias, which was removed in NumPy 2.0
    distances = np.full(
        data.shape[0], np.inf, dtype=np.float64
    )
    ids = np.full(data.shape[0], '', dtype='object_')
    tags = np.full(data.shape[0], '', dtype='object_')
    # lat/lon scratch buffers sized to whichever frame drives the join
    if lat_lon_poi:
        lat = np.full(df_pois.shape[0], np.inf, dtype=np.float64)
        lon = np.full(df_pois.shape[0], np.inf, dtype=np.float64)
    else:
        lat = np.full(data.shape[0], np.inf, dtype=np.float64)
        lon = np.full(data.shape[0], np.inf, dtype=np.float64)
    return distances, ids, tags, lat, lon
def _reset_set_window__and_creates_event_id_type(
    data: DataFrame, df_events: DataFrame, label_date: Text, time_window: int
) -> Tuple[Series, Series, ndarray, ndarray, ndarray]:
    """
    Resets the indexes of the dataframes, computes the time window around
    each trajectory point, and creates the default arrays for the join
    (current distances, event id, event type).

    Parameters
    ----------
    data : DataFrame
        The input trajectory data.
    df_events : DataFrame
        The input event point of interest data.
    label_date : str
        Label of data referring to the datetime.
    time_window : int
        Number of seconds of the time window.

    Returns
    -------
    window_starts, window_ends, current_distances, event_id, event_type
        Series/arrays with default values for the join operation.
    """
    # positional indexes keep the arrays below aligned with the rows
    data.reset_index(drop=True, inplace=True)
    df_events.reset_index(drop=True, inplace=True)
    # compute windows time
    window_starts = data[label_date] - Timedelta(seconds=time_window)
    window_ends = data[label_date] + Timedelta(seconds=time_window)
    # np.inf replaces the np.Infinity alias, which was removed in NumPy 2.0
    current_distances = np.full(
        data.shape[0], np.inf, dtype=np.float64
    )
    event_type = np.full(data.shape[0], '', dtype='object_')
    event_id = np.full(data.shape[0], '', dtype='object_')
    return window_starts, window_ends, current_distances, event_id, event_type
def _reset_set_window_and_creates_event_id_type_all(
    data: DataFrame, df_events: DataFrame, label_date: Text, time_window: int
) -> Tuple[Series, Series, ndarray, ndarray, ndarray]:
    """
    Resets the indexes of both dataframes, computes a symmetric time window
    around each trajectory point, and allocates the per-row accumulators
    (distances, event id and event type), initialized to None so each row
    can later hold an array of candidate events.

    Parameters
    ----------
    data : DataFrame
        The input trajectory data.
    df_events : DataFrame
        The input event point of interest data.
    label_date : str
        Label of data referring to the datetime.
    time_window : int
        Number of seconds of the time window.

    Returns
    -------
    window_starts, window_ends, current_distances, event_id, event_type
        Series/arrays with default values for the join operation.
    """
    # positional indexes keep the accumulator arrays aligned with the rows
    data.reset_index(drop=True, inplace=True)
    df_events.reset_index(drop=True, inplace=True)

    # symmetric window of +/- time_window seconds around each timestamp
    half_window = Timedelta(seconds=time_window)
    window_starts = data[label_date] - half_window
    window_ends = data[label_date] + half_window

    # one slot per trajectory point; None marks "no candidates found yet"
    n_rows = data.shape[0]
    current_distances = np.full(n_rows, None, dtype=np.ndarray)
    event_type = np.full(n_rows, None, dtype=np.ndarray)
    event_id = np.full(n_rows, None, dtype=np.ndarray)
    return window_starts, window_ends, current_distances, event_id, event_type
def join_with_pois(
    data: DataFrame,
    df_pois: DataFrame,
    label_id: Optional[Text] = TRAJ_ID,
    label_poi_name: Optional[Text] = NAME_POI,
    reset_index: Optional[bool] = True
):
    """
    Performs the integration between trajectories and points
    of interest, generating two new columns referring to the
    name and the distance from the point of interest closest
    to each point of the trajectory.

    Parameters
    ----------
    data : DataFrame
        The input trajectory data.
    df_pois : DataFrame
        The input point of interest data.
    label_id : str, optional
        Label of df_pois referring to the Point of Interest id, by default TRAJ_ID
    label_poi_name : str, optional
        Label of df_pois referring to the Point of Interest name, by default NAME_POI
    reset_index : bool, optional
        Flag for reset index of the df_pois and data dataframes before the join,
        by default True
    """
    print('Integration with POIs...')
    # default accumulators (distances=inf, empty ids/tags) plus lat/lon
    # scratch buffers sized to df_pois
    values = _reset_and_creates_id_and_lat_lon(data, df_pois, True, reset_index)
    current_distances, ids_POIs, tag_POIs, lat_user, lon_user = values
    for idx, row in progress_bar(data.iterrows(), total=len(data)):
        # create a vector to each lat
        lat_user.fill(row[LATITUDE])
        lon_user.fill(row[LONGITUDE])
        # computing distances to idx
        distances = np.float64(
            haversine(
                lat_user,
                lon_user,
                df_pois[LATITUDE].values,
                df_pois[LONGITUDE].values,
            )
        )
        # get index to arg_min and min distance
        index_min = np.argmin(distances)
        current_distances[idx] = np.min(distances)
        # setting data for a single object movement
        ids_POIs[idx] = df_pois.at[index_min, label_id]
        tag_POIs[idx] = df_pois.at[index_min, label_poi_name]
    data[ID_POI] = ids_POIs
    data[DIST_POI] = current_distances
    data[NAME_POI] = tag_POIs
    print('Integration with POI was finalized')
def join_with_pois_optimizer(
    data,
    df_pois: DataFrame,
    label_id: Optional[Text] = TRAJ_ID,
    label_poi_name: Optional[Text] = NAME_POI,
    dist_poi: Optional[List] = None,
    reset_index: Optional[bool] = True
):
    """
    Performs the integration between trajectories and points
    of interest, generating two new columns referring to the
    name and distance from the nearest point of interest,
    within the limit of distance determined by the parameter 'dist_poi',
    of each point in the trajectory.

    Parameters
    ----------
    data : DataFrame
        The input trajectory data.
    df_pois : DataFrame
        The input point of interest data.
    label_id : str, optional
        Label of df_pois referring to the Point of Interest id, by default TRAJ_ID
    label_poi_name : str, optional
        Label of df_pois referring to the Point of Interest name, by default NAME_POI
    dist_poi : list, optional
        List containing the minimum distance limit between each type of
        point of interest and each point of the trajectory to classify the
        point of interest closest to each point of the trajectory, by default None
    reset_index : bool, optional
        Flag for reset index of the df_pois and data dataframes before the join,
        by default True

    Raises
    ------
    ValueError
        If dist_poi is None.
    """
    print('Integration with POIs optimized...')
    if dist_poi is None:
        # fail fast with an explicit message instead of the TypeError that
        # len(None) below would raise
        raise ValueError(
            'dist_poi must be a list with one distance per unique POI name'
        )
    if len(df_pois[label_poi_name].unique()) == len(dist_poi):
        values = _reset_and_creates_id_and_lat_lon(data, df_pois, False, reset_index)
        minimum_distances, ids_POIs, tag_POIs, lat_POI, lon_POI = values
        df_pois.rename(
            columns={label_id: TRAJ_ID, label_poi_name: NAME_POI},
            inplace=True
        )
        for idx, row in progress_bar(df_pois.iterrows(), total=len(df_pois)):
            # broadcast the current POI coordinates over all trajectory points
            lat_POI.fill(row[LATITUDE])
            lon_POI.fill(row[LONGITUDE])
            # First iteration initializes the running minimum distances
            if idx == 0:
                minimum_distances = np.float64(
                    haversine(
                        lat_POI,
                        lon_POI,
                        data[LATITUDE].values,
                        data[LONGITUDE].values
                    )
                )
                # NOTE(review): assumes TRAJ_ID == 'id' and NAME_POI ==
                # 'type_poi' after the rename above -- confirm against the
                # project's constants module
                ids_POIs.fill(row.id)
                tag_POIs.fill(row.type_poi)
            else:
                # compute dist between a POI and ALL points
                current_distances = np.float64(
                    haversine(
                        lat_POI,
                        lon_POI,
                        data[LATITUDE].values,
                        data[LONGITUDE].values
                    )
                )
                # positions where this POI is strictly closer than the best
                # so far; the original tested 'compare is True', which is
                # always False for an ndarray, so labels were never updated
                compare = current_distances < minimum_distances
                index_True = np.where(compare)[0]
                minimum_distances = np.minimum(
                    current_distances, minimum_distances, dtype=np.float64
                )
                if index_True.shape[0] > 0:
                    ids_POIs[index_True] = row.id
                    tag_POIs[index_True] = row.type_poi
        data[ID_POI] = ids_POIs
        data[DIST_POI] = minimum_distances
        data[NAME_POI] = tag_POIs
        print('Integration with POI was finalized')
    else:
        print('the size of the dist_poi is different from the size of pois')
def join_with_pois_by_category(
    data: DataFrame,
    df_pois: DataFrame,
    label_category: Optional[Text] = TYPE_POI,
    label_id: Optional[Text] = TRAJ_ID
):
    """
    It performs the integration between trajectories and points
    of interest, generating new columns referring to the
    category and distance from the nearest point of interest
    that has this category at each point of the trajectory.

    Parameters
    ----------
    data : DataFrame
        The input trajectory data.
    df_pois : DataFrame
        The input point of interest data.
    label_category : str, optional
        Label of df_pois referring to the point of interest category, by default TYPE_POI
    label_id : str, optional
        Label of df_pois referring to the point of interest id, by default TRAJ_ID
    """
    print('Integration with POIs...')
    # positional indexes keep the accumulator arrays aligned with the rows
    data.reset_index(drop=True, inplace=True)
    df_pois.reset_index(drop=True, inplace=True)
    # accumulators are reused across categories: the inner loop overwrites
    # every position before the per-category columns are assigned.
    # np.inf/np.nan replace the np.Infinity/np.NAN aliases removed in NumPy 2.0
    current_distances = np.full(
        data.shape[0], np.inf, dtype=np.float64
    )
    ids_POIs = np.full(data.shape[0], np.nan, dtype='object_')
    unique_categories = df_pois[label_category].unique()
    size_categories = len(unique_categories)
    print('There are %s categories' % size_categories)
    for i, c in enumerate(unique_categories, start=1):
        # restrict the POIs to the current category
        df_category = df_pois[df_pois[label_category] == c]
        df_category.reset_index(drop=True, inplace=True)
        desc = 'computing dist to {} category ({}/{})'.format(c, i, size_categories)
        for idx, row in progress_bar(data.iterrows(), total=len(data), desc=desc):
            lat_user = np.full(
                df_category.shape[0], row[LATITUDE], dtype=np.float64
            )
            lon_user = np.full(
                df_category.shape[0], row[LONGITUDE], dtype=np.float64
            )
            # computing distances to all POIs of this category
            distances = haversine(
                lat_user,
                lon_user,
                df_category[LATITUDE].values,
                df_category[LONGITUDE].values,
            )
            # get index to arg_min and min distance
            index_min = np.argmin(distances)
            # setting data for a single object movement
            current_distances[idx] = np.min(distances)
            ids_POIs[idx] = df_category.at[index_min, label_id]
        data['id_%s' % c] = ids_POIs
        data['dist_%s' % c] = current_distances
    print('Integration with POI was finalized')
def join_with_poi_datetime(
    data: DataFrame,
    df_events: DataFrame,
    label_date: Optional[Text] = DATETIME,
    time_window: Optional[int] = 900,
    label_event_id: Optional[Text] = EVENT_ID,
    label_event_type: Optional[Text] = EVENT_TYPE
):
    """
    Integrates trajectories with event points of interest.

    For every trajectory point, the events whose timestamp falls inside a
    +/- time_window interval around the point are searched, and the point is
    annotated with the id, type and distance of the nearest such event.

    Parameters
    ----------
    data : DataFrame
        The input trajectory data.
    df_events : DataFrame
        The input events points of interest data.
    label_date : str, optional
        Label of data referring to the datetime of the input trajectory data,
        by default DATETIME
    time_window : int, optional
        Tolerable length of time range (seconds) for assigning the event's
        point of interest to the trajectory point, by default 900
    label_event_id : str, optional
        Label of df_events referring to the id of the event, by default EVENT_ID
    label_event_type : str, optional
        Label of df_events referring to the type of the event, by default EVENT_TYPE
    """
    print('Integration with Events...')
    values = _reset_set_window__and_creates_event_id_type(
        data, df_events, label_date, time_window
    )
    window_starts, window_ends, current_distances, event_id, event_type = values
    for idx in progress_bar(data.index):
        # events whose timestamp falls inside this point's time window
        candidates = filters.by_datetime(
            df_events, window_starts[idx], window_ends[idx]
        )
        if candidates.shape[0] == 0:
            # no event in range: keep the defaults for this point
            continue
        candidates.reset_index(drop=True, inplace=True)
        n_candidates = candidates.shape[0]
        lat_arr = np.full(
            n_candidates, data.at[idx, LATITUDE], dtype=np.float64
        )
        lon_arr = np.full(
            n_candidates, data.at[idx, LONGITUDE], dtype=np.float64
        )
        # distance from the trajectory point to every candidate event
        dists = haversine(
            lat_arr,
            lon_arr,
            candidates[LATITUDE].values,
            candidates[LONGITUDE].values,
        )
        nearest_pos = np.argmin(dists)
        # keep the closest event's distance, type and id for this point
        current_distances[idx] = np.min(dists)
        event_type[idx] = candidates.at[nearest_pos, label_event_type]
        event_id[idx] = candidates.at[nearest_pos, label_event_id]
    data[label_event_id] = event_id
    data[DIST_EVENT] = current_distances
    data[label_event_type] = event_type
    print('Integration with event was completed')
def join_with_poi_datetime_optimizer(
    data: DataFrame,
    df_events: DataFrame,
    label_date: Optional[Text] = DATETIME,
    time_window: Optional[int] = 900,
    label_event_id: Optional[Text] = EVENT_ID,
    label_event_type: Optional[Text] = EVENT_TYPE
):
    """
    It performs a optimized integration between trajectories and points
    of interest of events, generating new columns referring to
    the category of the event, the distance from the nearest
    event and the time when the event happened at each point of
    the trajectories.

    Parameters
    ----------
    data : DataFrame
        The input trajectory data.
    df_events : DataFrame
        The input events points of interest data.
    label_date : str, optional
        Label of data referring to the datetime of the input trajectory data,
        by default DATETIME
    time_window : int, optional
        Tolerable length of time range (seconds) for assigning the event's
        point of interest to the trajectory point, by default 900
    label_event_id : str, optional
        Label of df_events referring to the id of the event, by default EVENT_ID
    label_event_type : str, optional
        Label of df_events referring to the type of the event, by default EVENT_TYPE
    """
    print('Integration with Events...')
    values = _reset_set_window__and_creates_event_id_type(
        data, df_events, label_date, time_window
    )
    window_starts, window_ends, current_distances, event_id, event_type = values
    # np.inf replaces the np.Infinity alias removed in NumPy 2.0
    minimum_distances = np.full(
        data.shape[0], np.inf, dtype=np.float64
    )
    # (the original code performed an identity rename of df_events columns
    # here, a no-op that has been removed)
    # NOTE(review): the windows above are built from data[label_date] but are
    # indexed below by df_events row positions -- confirm both frames expose
    # compatible positional indexes after the resets
    for idx, row in progress_bar(df_events.iterrows(), total=len(df_events)):
        df_filtered = filters.by_datetime(
            data, window_starts[idx], window_ends[idx]
        )
        size_filter = df_filtered.shape[0]
        if size_filter > 0:
            indexes = df_filtered.index
            lat_event = np.full(
                df_filtered.shape[0], row[LATITUDE], dtype=np.float64
            )
            lon_event = np.full(
                df_filtered.shape[0], row[LONGITUDE], dtype=np.float64
            )
            # First iteration initializes the running minimum distances
            if idx == 0:
                minimum_distances[indexes] = haversine(
                    lat_event,
                    lon_event,
                    df_filtered[LATITUDE].values,
                    df_filtered[LONGITUDE].values,
                )
                # NOTE(review): assumes EVENT_ID == 'event_id' and
                # EVENT_TYPE == 'event_type' column names -- confirm against
                # the project's constants module
                event_id[indexes] = row.event_id
                event_type[indexes] = row.event_type
            else:
                current_distances[indexes] = haversine(
                    lat_event,
                    lon_event,
                    df_filtered[LATITUDE].values,
                    df_filtered[LONGITUDE].values,
                )
                # positions where this event is strictly closer than the best
                # so far; the original tested 'compare is True', which is
                # always False for an ndarray, so ids/types were never updated
                compare = current_distances < minimum_distances
                index_True = np.where(compare)[0]
                minimum_distances = np.minimum(
                    current_distances, minimum_distances
                )
                event_id[index_True] = row.event_id
                event_type[index_True] = row.event_type
    data[label_event_id] = event_id
    data[DIST_EVENT] = minimum_distances
    data[label_event_type] = event_type
    print('Integration with events was completed')
def join_with_pois_by_dist_and_datetime(
    data: DataFrame,
    df_pois: DataFrame,
    label_date: Optional[Text] = DATETIME,
    label_event_id: Optional[Text] = EVENT_ID,
    label_event_type: Optional[Text] = EVENT_TYPE,
    time_window: Optional[float] = 3600,
    radius: Optional[float] = 1000,
):
    """
    Integrates trajectories with points of interest constrained both in
    space and in time: for each trajectory point, all POIs within `radius`
    meters and inside a +/- time_window interval are collected, and the
    point is annotated with their ids, types and distances.

    Parameters
    ----------
    data : DataFrame
        The input trajectory data.
    df_pois : DataFrame
        The input events points of interest data.
    label_date : str, optional
        Label of data referring to the datetime of the input trajectory data,
        by default DATETIME
    label_event_id : str, optional
        Label of df_pois referring to the id of the event, by default EVENT_ID
    label_event_type : str, optional
        Label of df_pois referring to the type of the event, by default EVENT_TYPE
    time_window : float, optional
        Tolerable length of time range (seconds) for assigning the event's
        point of interest to the trajectory point, by default 3600
    radius : float, optional
        Maximum radius of pois, by default 1000

    Raises
    ------
    KeyError
        If df_pois has no `label_date` column.
    """
    print('Integration with Events...')
    if label_date not in df_pois:
        raise KeyError("POI's DataFrame must contain a %s column" % label_date)
    values = _reset_set_window_and_creates_event_id_type_all(
        data, df_pois, label_date, time_window
    )
    window_start, window_end, current_distances, event_id, event_type = values
    for idx, row in progress_bar(data.iterrows(), total=data.shape[0]):
        # spatial pre-filter: bounding box of `radius` meters around the point
        bbox = filters.get_bbox_by_radius(
            (row[LATITUDE], row[LONGITUDE]), radius
        )
        nearby = filters.by_bbox(
            df_pois, bbox
        )
        # temporal filter applied in place on the spatial candidates
        filters.by_datetime(
            nearby,
            start_datetime=window_start[idx],
            end_datetime=window_end[idx],
            inplace=True
        )
        if nearby.shape[0] == 0:
            # nothing in range: keep the defaults (None) for this point
            continue
        nearby.reset_index(drop=True, inplace=True)
        n_nearby = nearby.shape[0]
        lat_arr = np.full(
            n_nearby, row[LATITUDE], dtype=np.float64
        )
        lon_arr = np.full(
            n_nearby, row[LONGITUDE], dtype=np.float64
        )
        # every candidate's distance/type/id is stored for this point
        current_distances[idx] = haversine(
            lat_arr,
            lon_arr,
            nearby[LATITUDE].to_numpy(),
            nearby[LONGITUDE].to_numpy()
        )
        event_type[idx] = nearby[label_event_type].to_numpy(dtype=np.ndarray)
        event_id[idx] = nearby[label_event_id].to_numpy(dtype=np.ndarray)
    data[label_event_id] = event_id
    data[DIST_EVENT] = current_distances
    data[label_event_type] = event_type
    print('Integration with event was completed')
def join_with_home_by_id(
    data: DataFrame,
    df_home: DataFrame,
    label_id: Optional[Text] = TRAJ_ID,
    label_address: Optional[Text] = ADDRESS,
    label_city: Optional[Text] = CITY,
    drop_id_without_home: Optional[bool] = False,
):
    """
    It performs the integration between trajectories and home points,
    generating new columns referring to the distance of the nearest
    home point, address and city of each trajectory point.

    Parameters
    ----------
    data : DataFrame
        The input trajectory data.
    df_home : DataFrame
        The input home points data.
    label_id : str, optional
        Label of df_home referring to the home point id, by default TRAJ_ID
    label_address : str, optional
        Label of df_home referring to the home point address, by default ADDRESS
    label_city : str, optional
        Label of df_home referring to the point city, by default CITY
    drop_id_without_home : bool, optional
        Flag as an option to drop id's that don't have houses, by default False
    """
    print('Integration with Home...')
    ids_without_home = []
    if data.index.name is None:
        print('...setting {} as index'.format(label_id))
        data.set_index(label_id, inplace=True)
    for idx in progress_bar(data.index.unique()):
        filter_home = df_home[label_id] == idx
        if df_home[filter_home].shape[0] == 0:
            print('...id: {} has not HOME'.format(idx))
            ids_without_home.append(idx)
        else:
            # first (and assumed only) home registered for this id
            home = df_home[filter_home].iloc[0]
            # .loc returns a scalar for a single row and a Series otherwise;
            # the original used .at[...].values, which raises AttributeError
            # for the scalar case on recent pandas
            lat_user = data.loc[idx, LATITUDE]
            lon_user = data.loc[idx, LONGITUDE]
            # if user has a single tuple
            if not isinstance(lat_user, Series):
                # `home` is a Series row, so home[LATITUDE] is already scalar
                lat_home = home[LATITUDE]
                lon_home = home[LONGITUDE]
                data.loc[idx, DIST_HOME] = haversine(
                    lat_user, lon_user, lat_home, lon_home
                )
                data.loc[idx, HOME] = home[label_address]
                data.loc[idx, label_city] = home[label_city]
            else:
                n_points = data.loc[idx].shape[0]
                lat_home = np.full(
                    n_points, home[LATITUDE], dtype=np.float64
                )
                lon_home = np.full(
                    n_points, home[LONGITUDE], dtype=np.float64
                )
                data.loc[idx, DIST_HOME] = haversine(
                    lat_user.values, lon_user.values, lat_home, lon_home
                )
                # scalar assignment broadcasts over every row of this id
                data.loc[idx, HOME] = home[label_address]
                data.loc[idx, label_city] = home[label_city]
    data.reset_index(inplace=True)
    print('... Resetting index')
    if drop_id_without_home:
        data.drop(data.loc[data[TRAJ_ID].isin(ids_without_home)].index, inplace=True)
def merge_home_with_poi(
    data: DataFrame,
    label_dist_poi: Optional[Text] = DIST_POI,
    label_name_poi: Optional[Text] = NAME_POI,
    label_id_poi: Optional[Text] = ID_POI,
    label_home: Optional[Text] = HOME,
    label_dist_home: Optional[Text] = DIST_HOME,
    drop_columns: Optional[bool] = True,
):
    """
    Merges the home-point columns into the POI columns, treating each home
    as just another point of interest: whenever the home is at least as
    close as the nearest POI, the POI name/distance/id columns are
    overwritten with the home values.

    Parameters
    ----------
    data : DataFrame
        The input trajectory data, with join_with_pois and join_with_home_by_id applied.
    label_dist_poi : str, optional
        Label of data referring to the distance from the nearest point of interest,
        by default DIST_POI
    label_name_poi : str, optional
        Label of data referring to the name from the nearest point of interest,
        by default NAME_POI
    label_id_poi : str, optional
        Label of data referring to the id from the nearest point of interest,
        by default ID_POI
    label_home : str, optional
        Label of df_home referring to the home point, by default HOME
    label_dist_home : str, optional
        Label of df_home referring to the distance to the home point,
        by default DIST_HOME
    drop_columns : bool, optional
        Flag that controls the deletion of the columns referring to the
        id and the distance from the home point, by default True
    """
    print('merge home with POI using shortest distance')
    # rows where the home point ties or beats the nearest POI
    closer_to_home = data[label_dist_home] <= data[label_dist_poi]
    home_idx = data[closer_to_home].index
    data.loc[home_idx, label_name_poi] = label_home
    data.loc[home_idx, label_dist_poi] = data.loc[home_idx, label_dist_home]
    data.loc[home_idx, label_id_poi] = data.loc[home_idx, label_home]
    if drop_columns:
        # the home columns are now redundant with the POI columns
        data.drop(columns=[label_dist_home, label_home], inplace=True)
|
# --------------------------------------------------------
# SiamMask
# Licensed under The MIT License
# Written by Qiang Wang (wangqiang2015 at ia.ac.cn)
# --------------------------------------------------------
import json
from collections import OrderedDict
from os import listdir
from os.path import dirname, exists, isdir, join, realpath
from pathlib import Path
import cv2
import numpy as np
def get_json(path):
    """Load and return the JSON document stored at *path*."""
    with open(path) as handle:
        return json.load(handle)
def get_txt(path):
    """Return the full text content of the file at *path*."""
    with open(path) as handle:
        return handle.read()
def get_img(path):
    """Read and return the image at *path* with OpenCV (BGR ndarray; cv2.imread returns None when the file cannot be read)."""
    return cv2.imread(path)
def get_files(path, suffix):
    """Return the names of the direct children of *path* whose extension equals *suffix* (e.g. '.jpg')."""
    directory = Path(path) if isinstance(path, str) else path
    return [entry.name for entry in directory.glob('*') if entry.suffix == suffix]
def get_dataset_zoo():
    """List the dataset directories under ../data that contain one of the
    known marker files (VOT list, YouTube-VOS meta, or DAVIS image sets)."""
    root = realpath(join(dirname(__file__), '../data'))

    def is_dataset(name):
        candidate = join(root, name)
        if not isdir(candidate):
            return False
        # a directory qualifies when any known marker file is present
        markers = (
            join(candidate, 'list.txt'),
            join(candidate, 'train', 'meta.json'),
            join(candidate, 'ImageSets', '2016', 'val.txt'),
        )
        return any(exists(m) for m in markers)

    return [name for name in listdir(root) if is_dataset(name)]
def load_dataset(vot_path, dataset):
    """Load a VOT-style tracking dataset.

    Reads ``<vot_path>/<dataset>/list.txt`` for the video names, then for each
    video collects its sorted ``.jpg`` frame paths and its ground-truth boxes
    from ``groundtruth.txt``.

    Returns an OrderedDict mapping video name -> dict with keys
    'image_files', 'gt' (float32 ndarray) and 'name'. Empty when the dataset
    name does not contain 'VOT'.
    """
    info = OrderedDict()
    if 'VOT' in dataset:
        base_path = join(vot_path, dataset)
        # if not exists(base_path):
        #     logging.error("Please download test dataset!!!")
        #     exit()
        list_path = join(base_path, 'list.txt')
        f = get_txt(list_path)
        # one video name per line
        videos = [v.strip() for v in f.strip().split('\n')]
        #print(videos)
        for video in videos:
            video_path = join(base_path, video)
            # some sequences store frames in a 'color' subfolder
            image_path = join(video_path, 'color')
            if not exists(image_path):
                image_path = video_path
            image_files = sorted(get_files(image_path, '.jpg'))
            image_files = [join(image_path, x) for x in image_files]
            gt_path = join(video_path, 'groundtruth.txt')
            gt = get_txt(gt_path)
            # one comma-separated annotation per line
            gt = gt.strip().split('\n')
            gt = np.asarray([line.split(',') for line in gt], np.float32)
            if gt.shape[1] == 4:
                # expand axis-aligned (x, y, w, h) boxes into 8-value corner
                # polygons: (TL, BL, BR, TR), inclusive pixel coordinates
                gt = np.column_stack(
                    (gt[:, 0], gt[:, 1], gt[:, 0], gt[:, 1] + gt[:, 3] - 1,
                     gt[:, 0] + gt[:, 2] - 1, gt[:, 1] + gt[:, 3] - 1,
                     gt[:, 0] + gt[:, 2] - 1, gt[:, 1]))
            info[video] = {'image_files': image_files, 'gt': gt, 'name': video}
    return info
|
#!/usr/bin/env python
from txros import util
from twisted.internet import defer
from navigator import Navigator
import numpy as np
from mil_tools import rosmsg_to_numpy
from geometry_msgs.msg import Vector3Stamped
class PingerAndy(Navigator):
    '''
    Mission to run sonar start gate challenge using Andy's sonar system, which produces a vector pointing towards the
    pinger from the hydrophone array.
    '''
    @classmethod
    def init(cls):
        # Subscribe to the heading vector published by Andy's hydrophone system
        cls.pinger_heading = cls.nh.subscribe("/hydrophones/ping_direction", Vector3Stamped)
    @staticmethod
    def line(p1, p2):
        '''
        Return equation of a line given two 2D points.
        Coefficients (A, B, -C) satisfy A*x + B*y = C for every point on the line.
        https://stackoverflow.com/questions/20677795/how-do-i-compute-the-intersection-point-of-two-lines-in-python
        '''
        A = (p1[1] - p2[1])
        B = (p2[0] - p1[0])
        C = (p1[0] * p2[1] - p2[0] * p1[1])
        return A, B, -C
    @staticmethod
    def intersection(L1, L2):
        '''
        Return point intersection (if it exists) of two lines given their equations obtained from the line method.
        Uses Cramer's rule; returns None when the determinant is zero (parallel lines).
        https://stackoverflow.com/questions/20677795/how-do-i-compute-the-intersection-point-of-two-lines-in-python
        '''
        D = L1[0] * L2[1] - L1[1] * L2[0]
        Dx = L1[2] * L2[1] - L1[1] * L2[2]
        Dy = L1[0] * L2[2] - L1[2] * L2[0]
        if D != 0:
            x = Dx / D
            y = Dy / D
            return x, y
        else:
            return None
    @util.cancellableInlineCallbacks
    def get_gates(self):
        '''
        Ask the operator to click the 4 gate totems in rviz (ENU frame) and
        yield the 3 gate center points, each halfway between adjacent totems.
        '''
        totems = []
        for i in range(4):
            while True:
                self.send_feedback('Click on totem {} in rviz'.format(i + 1))
                point = yield self.rviz_point.get_next_message()
                if point.header.frame_id != 'enu':
                    self.send_feedback('Point is not in ENU.\
Please switch rviz frame to ENU or tell kevin to support other frames.')
                    continue
                break
            self.send_feedback('Recieved point for totem {}'.format(i + 1))
            point = rosmsg_to_numpy(point.point)
            # Project the click onto the water plane (drop any z component)
            point[2] = 0.0
            totems.append(np.array(point))
        # Create list of gates halfway between each pair of totems
        gates = []
        for i in range(3):
            gates.append((totems[i] + totems[i + 1]) / 2.0)
        defer.returnValue(gates)
    @util.cancellableInlineCallbacks
    def run(self, args):
        '''
        Locate the gate the pinger sits under by intersecting the pinger
        heading line with the line of gates, then drive through that gate.
        '''
        # Get position of 3 gates based on position of totems
        gates = yield self.get_gates()
        # Get heading towards pinger from Andy hydrophone system
        self.send_feedback('All gates clicked on! Waiting for pinger heading...')
        heading = yield self.pinger_heading.get_next_message()
        self.send_feedback('Recieved pinger heading')
        # Convert heading and hydophones from to enu
        hydrophones_to_enu = yield self.tf_listener.get_transform('enu', heading.header.frame_id)
        hydrophones_origin = hydrophones_to_enu._p[0:2]
        heading = rosmsg_to_numpy(heading.vector)
        heading_enu = hydrophones_to_enu.transform_vector(heading)
        # Normalize the 2D projection of the heading
        heading_enu = heading_enu[0:2] / np.linalg.norm(heading_enu[0:2])
        # 2D line through the hydrophones along the pinger heading
        pinger_line = self.line(hydrophones_origin, hydrophones_origin + heading_enu)
        gates_line = self.line(gates[0], gates[-1])
        # Find intersection of these two lines. This is the approximate position of the pinger
        intersection = self.intersection(pinger_line, gates_line)
        if intersection is None:
            raise Exception('No intersection')
        self.send_feedback('Pinger is roughly at {}'.format(intersection))
        # Choose the gate closest to the estimated pinger position
        distances = []
        for gate in gates:
            distances.append(np.linalg.norm(gate[0:2] - intersection))
        argmin = np.argmin(np.array(distances))
        self.send_feedback('Pinger is likely at gate {}'.format(argmin + 1))
        gate = gates[argmin][:2]
        between_vector = (gates[0] - gates[-1])[:2]
        # Rotate that vector to point through the buoys
        c = np.cos(np.radians(90))
        s = np.sin(np.radians(90))
        R = np.array([[c, -s], [s, c]])
        direction_vector = R.dot(between_vector)
        direction_vector /= np.linalg.norm(direction_vector)
        # Flip the through-vector if needed so the approach starts on our side
        position = self.pose[0][:2]
        if np.linalg.norm(position - (gate + direction_vector)) > np.linalg.norm(position - (gate - direction_vector)):
            direction_vector = -direction_vector
        # Waypoints a few meters before and after the chosen gate
        before_distance = 3.0
        after_distance = 5.0
        before = np.append(gate + direction_vector * before_distance, 0)
        after = np.append(gate - direction_vector * after_distance, 0)
        self.send_feedback('Moving in front of gate')
        yield self.move.set_position(before).look_at(after).go()
        self.send_feedback('Going through')
        yield self.move.set_position(after).go()
        defer.returnValue('My god it actually worked!')
|
# incomplete (work in progress)
class Cliente:
    """Bank customer identified by a name and a phone number."""

    def __init__(self, nome, telefone):
        # customer name and contact phone number
        self.nome = nome
        self.telefone = telefone
class Conta:
    """Simple bank account holding its clients, a number and a balance."""

    def __init__(self, clientes, numero, saldo=0):
        # account holders, account number and opening balance
        self.clientes = clientes
        self.numero = numero
        self.saldo = saldo

    def resumo(self):
        """Print a one-line summary with the account number and balance."""
        print('CC Número: %s Saldo: %10.2f' %(self.numero, self.saldo))

    def saque(self, valor):
        """Withdraw *valor* when the balance covers it; otherwise do nothing."""
        if self.saldo >= valor:
            self.saldo -= valor

    def deposito(self, valor):
        """Add *valor* to the balance."""
        self.saldo += valor
# NOTE(review): these imports appear to re-import Cliente/Conta from a module
# named 'tatu'; if this file itself is tatu.py this is a redundant self-import
# that shadows the classes defined above -- confirm and remove if so.
from tatu import Cliente
from tatu import Conta
# Demo: create two customers and print their contact data
joão = Cliente('João da Silva','777-1234')
maria= Cliente('Maria da Silva', '555-4321')
print ('Nome %s. Telefone: %s.' %(joão.nome , joão.telefone))
print ('Nome %s. Telefone: %s.' %(maria.nome , maria.telefone))
# Demo: one single-holder and one joint account, then print their summaries
conta1=Conta([joão],1,1000)
conta2=Conta([maria, joão],2,500)
conta1.resumo()
conta2.resumo()
|
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from autokeras import preprocessors
from autokeras.engine import hyper_preprocessor
def serialize(encoder):
    """Serialize a preprocessor object into a Keras-style config dict."""
    return keras.utils.serialize_keras_object(encoder)
def deserialize(config, custom_objects=None):
    """Recreate a preprocessor object from a Keras-style config dict.

    Classes are looked up in this module's globals, falling back to
    `custom_objects` when provided.
    """
    return keras.utils.deserialize_keras_object(
        config,
        module_objects=globals(),
        custom_objects=custom_objects,
        printable_module_name="preprocessors",
    )
class DefaultHyperPreprocessor(hyper_preprocessor.HyperPreprocessor):
    """A HyperPreprocessor with no hyperparameters to tune.

    Regardless of the hyperparameter values, `build` always hands back the
    same pre-constructed preprocessor instance.

    # Arguments
        preprocessor: The Preprocessor to return when calling build.
    """

    def __init__(self, preprocessor, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # the single preprocessor instance returned by every build() call
        self.preprocessor = preprocessor

    def build(self, hp, dataset):
        # nothing to tune: hp and dataset are intentionally ignored
        return self.preprocessor

    def get_config(self):
        config = super().get_config()
        # the wrapped preprocessor is stored in serialized form
        config["preprocessor"] = preprocessors.serialize(self.preprocessor)
        return config

    @classmethod
    def from_config(cls, config):
        # rebuild the wrapped preprocessor before delegating to the base class
        config["preprocessor"] = preprocessors.deserialize(config["preprocessor"])
        return super().from_config(config)
|
from django.conf.urls import url, include
from django.contrib.auth import views as auth_views
from django.views.generic import TemplateView
# NOTE(review): include, TemplateView and MyAuthenticationForm are imported
# but not referenced in this module -- confirm they are needed elsewhere
from .forms import MyAuthenticationForm
from .views import RegisterView, UserList, UserDetail, AuthView

# URL namespace for reversing, e.g. reverse('user:login')
app_name = 'user'
urlpatterns = [
    url(r'^$', UserList.as_view(), name='list'),
    url(r'^(?P<pk>\d+)/$', UserDetail.as_view(), name='detail'),
    url(r'^login/$', AuthView.as_view(), name='login'),
    # NOTE(review): the function-based auth_views.logout was removed in
    # Django 2.1; newer projects must use auth_views.LogoutView.as_view(
    # template_name='accounts/logged_out.html') -- confirm Django version
    url(r'^logout/$', auth_views.logout, {'template_name': 'accounts/logged_out.html'}, name='logout'),
    url(r'^register/$', RegisterView.as_view(), name='register'),
]
|
import sys
# Redirect stdin so input() reads the test cases from a file
# (competitive-programming convenience; the path must exist at startup)
sys.stdin = open('inputs/forth_input.txt')
# Dispatch table mapping operator tokens to binary integer operations;
# note that '/' is floor division
operators = {
    '+': lambda x, y: x + y,
    '-': lambda x, y: x - y,
    '*': lambda x, y: x * y,
    '/': lambda x, y: x // y,
}
def solution(oper_code):
    """Evaluate a FORTH-style postfix token stream.

    Numbers are pushed on a stack; '+', '-', '*', '/' pop two operands and
    push the integer result ('/' is floor division); '.' terminates and
    returns the single remaining value.

    Parameters
    ----------
    oper_code : list of str
        Tokens of the program.

    Returns
    -------
    int or str or None
        The printed value, 'error' when the stack is misused, or None when
        no '.' token occurs.
    """
    # local import keeps the function self-contained
    from operator import add, sub, mul, floordiv

    # token -> binary integer operation
    ops = {'+': add, '-': sub, '*': mul, '/': floordiv}
    stack = []
    for token in oper_code:
        if token == '.':
            # '.' must find exactly one value; the original popped an empty
            # stack here and raised IndexError instead of reporting 'error'
            if len(stack) != 1:
                return 'error'
            return stack.pop()
        if token in ops:
            if len(stack) < 2:
                return 'error'
            right = stack.pop()
            left = stack.pop()
            stack.append(ops[token](left, right))
        else:
            stack.append(int(token))
def main():
    """Read the case count, then evaluate one postfix program per line."""
    n_cases = int(input())
    for case_no in range(1, n_cases + 1):
        tokens = input().split()
        print(f'#{case_no} {solution(tokens)}')


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 24 12:57:41 2020
Last update: Feb. 25, 2021
@authors: Asieh & Samaneh

Driver script: builds a tournament memory structure, optionally generates
learning/testing sets, runs retrieval testing, and plots the error results.
"""
import pickle
import time

import Initialization as ini
import Tournament_basis as Tour

# --- Simulation configuration -------------------------------------------
result_no = 2  # Change the parameters in Initialization.py file
structure_ID = 5  # Change the ID to choose a different structure. Options are 1, 2, 3, 4 and 5
file_name = None  # Give the file_name to just plot a previously saved simulation
Learning_set_flag = True  # change it to False if you don't want to generate a new learning set

if file_name is None:  # PEP 8: compare with None using 'is'
    parameter = ini.Initialization(result_no, structure_ID)
    # NOTE(review): 'Tournumant' matches the project's API spelling — do not "fix".
    general_memory = Tour.Tournumant(parameter)
    Memory = general_memory.tournament

    if Learning_set_flag:
        Learning_set = Memory.Learning_Set_Generator()
        # 'with' guarantees the pickle files are closed even if dump raises
        # (the original leaked the handles returned by open()).
        with open(f'results/Learning_set_basis_{result_no}.p', "wb") as fh:
            pickle.dump(Learning_set, fh)
        Testing_index = Memory.Testing_set_Generator()
        with open(f'results/Testing_set_basis_{result_no}.p', "wb") as fh:
            pickle.dump(Testing_index, fh)

    Start = time.time()
    Memory.Test_Retrieval(result_no)
    Duration = time.time() - Start
    print('\n---------------\n execution time', Duration)

    file_name = f'results/{parameter["memory_type"]}_{result_no}_{parameter["Iter"]}.p'
    print('results are saved in', file_name)

Tour.Plot_Results(file_name).Plot_Error()
|
import pytest
import expressions as ex
class TestClass_Sleep_In():
    """Tests for ex.sleep_in."""

    def test_sleep_in_a(self):
        outcome = ex.sleep_in(False, False)
        assert outcome == True

    def test_sleep_in_b(self):
        outcome = ex.sleep_in(True, False)
        assert outcome == False

    def test_sleep_in_c(self):
        outcome = ex.sleep_in(False, True)
        assert outcome == True
class TestClass_Monkey_Trouble():
    """Tests for ex.monkey_trouble."""

    def test_one(self):
        outcome = ex.monkey_trouble(True, True)
        assert outcome == True

    def test_two(self):
        outcome = ex.monkey_trouble(False, False)
        assert outcome == True

    def test_three(self):
        outcome = ex.monkey_trouble(True, False)
        assert outcome == False
class TestClass_In_1020():
    """Tests for ex.in_1020."""

    def test_one(self):
        outcome = ex.in_1020(12, 99)
        assert outcome == True

    def test_two(self):
        outcome = ex.in_1020(21, 12)
        assert outcome == True

    def test_three(self):
        outcome = ex.in_1020(8, 99)
        assert outcome == False
class TestClass_Max_End_Three():
    """Tests for ex.max_end_three."""

    def test_one(self):
        outcome = ex.max_end_three([1, 2, 3])
        assert outcome == [3, 3, 3]

    def test_two(self):
        outcome = ex.max_end_three([11, 5, 9])
        assert outcome == [11, 11, 11]

    def test_three(self):
        outcome = ex.max_end_three([2, 11, 3])
        assert outcome == [3, 3, 3]
class TestClass_Phone_Number():
    """Tests for ex.make_phone_number."""

    def test_one(self):
        digits = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
        assert ex.make_phone_number(digits) == "(123) 456-7890"

    def test_two(self):
        digits = [5, 4, 6, 3, 7, 4, 9, 8, 7, 6]
        assert ex.make_phone_number(digits) == "(546) 374-9876"

    def test_three(self):
        # NOTE(review): input is a single 10-digit int, not ten digits —
        # presumably exercises malformed-input handling; confirm intent.
        assert ex.make_phone_number([1087658471]) == "(108) 765-8471"
class TestClass_3_or_5():
    """Tests for ex.multiple_of_3_or_5."""

    def test_one(self):
        outcome = ex.multiple_of_3_or_5(5)
        assert outcome == True

    def test_two(self):
        outcome = ex.multiple_of_3_or_5(10)
        assert outcome == True

    def test_three(self):
        outcome = ex.multiple_of_3_or_5(22)
        assert outcome == False
|
# Copyright (c) 2020, Vercer Ltd. Rights set out in LICENCE.txt
from unittest.mock import patch
from django.test import TestCase
from dqp.exceptions import StatementNotPreparedException, StatementAlreadyPreparedException, StatementNotRegistered
from dqp.prepared_stmt_controller import PreparedStatementController
from dqp.prepared_stmt import PreparedStatement, PreparedORMStatement
from test_app.models import Species
class TestPreparedStatementController(TestCase):
    """Behavioural tests for the PreparedStatementController singleton.

    Every test begins and ends with ``destroy()`` (setUp/tearDown) so the
    singleton's registration/preparation state cannot leak between tests.
    ``prepare``/``deallocate``/``execute`` are patched out so no statements
    ever reach a real database.
    """

    def setUp(self):
        # Reset singleton state before each test.
        PreparedStatementController().destroy()

    def tearDown(self):
        # Reset singleton state after each test.
        PreparedStatementController().destroy()

    def test_is_singleton(self):
        """
        Given a PreparedStatementController is instantiated
        When another PreparedStatementController is instantiated
        Then they should both point to the same singleton instance
        """
        psc1 = PreparedStatementController()
        psc2 = PreparedStatementController()
        # Identity (is), not equality: both names must be the same object.
        self.assertTrue(psc1 is psc2)

    def test_register_sql(self):
        """
        Given a function that returns an SQL string
        When register_sql is called with that function as one of the arguments
        Then that function should be added to the sql_generating_functions dict
        """
        def gen_sql():
            pass

        psc = PreparedStatementController()
        psc.register_sql("gen_sql", gen_sql)
        # Registered under SQL functions only — not the queryset registry.
        self.assertTrue("gen_sql" in psc.sql_generating_functions)
        self.assertFalse("gen_sql" in psc.qs_generating_functions)
        self.assertTrue(psc.sql_generating_functions["gen_sql"] is gen_sql)

    def test_register_qs(self):
        """
        Given a function that returns an ORM query
        When register_qs is called with that function as one of the arguments
        Then that function should be added to the qs_generating_functions dict
        """
        def gen_qs():
            pass

        psc = PreparedStatementController()
        psc.register_qs("gen_qs", gen_qs)
        # Registered under queryset functions only — not the SQL registry.
        self.assertFalse("gen_qs" in psc.sql_generating_functions)
        self.assertTrue("gen_qs" in psc.qs_generating_functions)
        self.assertTrue(psc.qs_generating_functions["gen_qs"] is gen_qs)

    def test_prepare_sql_stmt(self):
        """
        Given a function that generates SQL has been registered with the PreparedStatementController
        When prepare_sql_stmt is called
        Then a PreparedStatement object should be created
        And the PreparedStatement should be added to the prepared_statements dict
        And the prepare method of the PreparedStatement will be called
        """
        psc = PreparedStatementController()
        psc.register_sql("gen_sql", lambda: None)
        with patch.object(PreparedStatement, "prepare", return_value=None) as mock_prepare:
            psc.prepare_sql_stmt("gen_sql", force=False)
            self.assertTrue("gen_sql" in psc.prepared_statements)
            self.assertTrue(isinstance(psc.prepared_statements["gen_sql"], PreparedStatement))
            mock_prepare.assert_called_once()

    def test_prepare_sql_stmt_force(self):
        """
        Given a SQL statement has already been prepared in the database
        When prepare_sql_stmt is called for the same function
        And force is False
        Then a StatementAlreadyPreparedException error will be raised
        ---
        Given a SQL statement has already been prepared in the database
        When prepare_sql_stmt is called for the same function
        And force is True
        Then the existing statement will be deallocated
        And the statement will be re-prepared
        """
        psc = PreparedStatementController()
        psc.register_sql("gen_sql", lambda: None)
        with patch.object(PreparedStatement, "prepare", return_value=None):
            psc.prepare_sql_stmt("gen_sql", force=False)
            self.assertTrue("gen_sql" in psc.prepared_statements)
            # Re-preparing without force must be rejected.
            with self.assertRaises(StatementAlreadyPreparedException):
                psc.prepare_sql_stmt("gen_sql", force=False)
        # With force=True the old statement is deallocated then re-prepared.
        with patch.object(PreparedStatement, "prepare", return_value=None) as mock_prepare:
            with patch.object(PreparedStatement, "deallocate", return_value=None) as mock_deallocate:
                psc.prepare_sql_stmt("gen_sql", force=True)
            mock_deallocate.assert_called_once()
            mock_prepare.assert_called_once()

    def test_prepare_sql_stmt_unregistered(self):
        """
        Given a function that generates SQL has not been registered with the PreparedStatementController
        When prepare_sql_stmt is called for that function
        Then a StatementNotRegistered error will be raised
        """
        psc = PreparedStatementController()
        with self.assertRaises(StatementNotRegistered):
            psc.prepare_sql_stmt("unregistered_sql", force=False)

    def test_prepare_qs_stmt(self):
        """
        Given a function that generates an ORM query has been registered with the PreparedStatementController
        When prepare_qs_stmt is called
        Then a PreparedORMStatement object should be created
        And the PreparedORMStatement should be added to the prepared_statements dict
        And the SQL will be prepared in the database
        """
        psc = PreparedStatementController()
        psc.register_qs("gen_qs", lambda: Species.prepare.all())
        with patch.object(PreparedORMStatement, "prepare", return_value=None) as mock_prepare:
            psc.prepare_qs_stmt("gen_qs", force=False)
            self.assertTrue("gen_qs" in psc.prepared_statements)
            self.assertTrue(isinstance(psc.prepared_statements["gen_qs"], PreparedORMStatement))
            mock_prepare.assert_called_once()

    def test_prepare_qs_stmt_force(self):
        """
        Given an ORM statement has already been prepared in the database
        When prepare_qs_stmt is called for the same function
        And force is False
        Then a StatementAlreadyPreparedException error will be raised
        ---
        Given an ORM statement has already been prepared in the database
        When prepare_qs_stmt is called for the same function
        And force is True
        Then the existing statement will be deallocated
        And the statement will be re-prepared
        """
        psc = PreparedStatementController()
        psc.register_qs("gen_qs", lambda: Species.prepare.all())
        with patch.object(PreparedORMStatement, "prepare", return_value=None):
            psc.prepare_qs_stmt("gen_qs", force=False)
            self.assertTrue("gen_qs" in psc.prepared_statements)
            # Re-preparing without force must be rejected.
            with self.assertRaises(StatementAlreadyPreparedException):
                psc.prepare_qs_stmt("gen_qs", force=False)
        # With force=True the old statement is deallocated then re-prepared.
        with patch.object(PreparedORMStatement, "prepare", return_value=None) as mock_prepare:
            with patch.object(PreparedORMStatement, "deallocate", return_value=None) as mock_deallocate:
                psc.prepare_qs_stmt("gen_qs", force=True)
            mock_deallocate.assert_called_once()
            mock_prepare.assert_called_once()

    def test_prepare_qs_stmt_unregistered(self):
        """
        Given a function that generates an ORM query has not been registered with the PreparedStatementController
        When prepare_qs_stmt is called for that function
        Then a StatementNotRegistered error will be raised
        """
        psc = PreparedStatementController()
        with self.assertRaises(StatementNotRegistered):
            psc.prepare_qs_stmt("unregistered_qs", force=False)

    def test_prepare_all(self):
        """
        Given a set of sql and qs generating functions that have been registered with the PreparedStatementController
        When prepare_all() is called
        Then prepare_sql_stmt and prepare_qs_stmt should be called for each registered function as appropriate
        """
        psc = PreparedStatementController()
        psc.register_sql("gen_sql1", lambda: None)
        psc.register_sql("gen_sql2", lambda: None)
        psc.register_sql("gen_sql3", lambda: None)
        psc.register_qs("gen_qs1", lambda: Species.prepare.all())
        psc.register_qs("gen_qs2", lambda: Species.prepare.all())
        # Three SQL registrations and two queryset registrations → the two
        # prepare implementations should be hit 3 and 2 times respectively.
        with patch.object(PreparedORMStatement, "prepare", return_value=None) as mock_orm_prepare:
            with patch.object(PreparedStatement, "prepare", return_value=None) as mock_sql_prepare:
                psc.prepare_all()
                self.assertEqual(mock_sql_prepare.call_count, 3)
                self.assertEqual(mock_orm_prepare.call_count, 2)

    def test_execute_prepared_stmt(self):
        """
        Given a statement has been prepared in the database
        When execute() is called for that statement
        Then the execute method of the PreparedStatement object will be called
        """
        psc = PreparedStatementController()
        psc.register_sql("gen_sql", lambda: None)
        with patch.object(PreparedStatement, "prepare", return_value=None):
            psc.prepare_sql_stmt("gen_sql", force=False)
            self.assertTrue("gen_sql" in psc.prepared_statements)
        with patch.object(PreparedStatement, "execute", return_value=None) as mock_execute:
            psc.execute("gen_sql")
            mock_execute.assert_called_once()

    def test_execute_unprepared_stmt(self):
        """
        Given a statement has not been prepared in the database
        When execute() is called for that statement
        Then a StatementNotPreparedException error will be raised
        """
        psc = PreparedStatementController()
        self.assertFalse("gen_sql" in psc.prepared_statements)
        with self.assertRaises(StatementNotPreparedException):
            # NOTE(review): the name asserted above is "gen_sql" but "gen_qs"
            # is executed here, and a `force` kwarg is passed to execute() —
            # confirm execute() accepts `force` and whether the name mismatch
            # is intentional (both names are unprepared, so the test passes).
            psc.execute("gen_qs", force=False)

    def test_deallocate_all(self):
        """
        Given there are prepared statements
        When deallocate_all() is called
        Then the deallocate method will be called on every PreparedStatement object
        And the prepared_statements dict will be empty
        """
        psc = PreparedStatementController()
        psc.register_sql("gen_sql", lambda: None)
        psc.register_sql("gen_sql2", lambda: None)
        psc.register_sql("gen_sql3", lambda: None)
        with patch.object(PreparedStatement, "prepare", return_value=None):
            psc.prepare_sql_stmt("gen_sql", force=False)
            psc.prepare_sql_stmt("gen_sql2", force=False)
            psc.prepare_sql_stmt("gen_sql3", force=False)
        with patch.object(PreparedStatement, "deallocate", return_value=None) as mock_deallocate:
            psc.deallocate_all()
            self.assertEqual(mock_deallocate.call_count, 3)
            self.assertEqual(psc.prepared_statements, {})
|
import numpy as np
from core.leras import nn
tf = nn.tf
class BlurPool(nn.LayerBase):
    """Blur-then-subsample layer: depthwise convolution with a fixed
    binomial filter, applied with the given stride.

    # Arguments
        filt_size: Width of the (separable) binomial blur filter, >= 1.
        stride: Spatial subsampling factor.
    """

    def __init__(self, filt_size=3, stride=2, **kwargs):
        if filt_size < 1:
            raise ValueError("filt_size must be >= 1")

        # Stride layout depends on the active data format.
        if nn.data_format == "NHWC":
            self.strides = [1, stride, stride, 1]
        else:
            self.strides = [1, 1, stride, stride]

        self.filt_size = filt_size

        # Asymmetric-safe padding: floor on the leading edge, ceil on the
        # trailing edge, so even filter sizes pad correctly.
        pad = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
        if nn.data_format == "NHWC":
            self.padding = [[0, 0], pad, pad, [0, 0]]
        else:
            self.padding = [[0, 0], [0, 0], pad, pad]

        # The original hard-coded table was rows 0..6 of Pascal's triangle
        # (binomial coefficients). Building the row iteratively generalizes
        # to any filt_size and fixes the NameError that the if/elif chain
        # raised for filt_size > 7; results for sizes 1..7 are identical.
        row = [1.0]
        for _ in range(filt_size - 1):
            row = [left + right for left, right in zip([0.0] + row, row + [0.0])]
        a = np.array(row)

        a = a[:, None] * a[None, :]   # outer product: separable 1-D -> 2-D kernel
        a = a / np.sum(a)             # normalize so the blur preserves the mean
        a = a[:, :, None, None]       # add in/out channel dims for depthwise conv
        self.a = a

        super().__init__(**kwargs)

    def build_weights(self):
        # The blur kernel is a fixed constant, not a trainable variable.
        self.k = tf.constant(self.a, dtype=nn.floatx)

    def forward(self, x):
        # Replicate the single-channel kernel across all input channels.
        k = tf.tile(self.k, (1, 1, x.shape[nn.conv2d_ch_axis], 1))
        x = tf.pad(x, self.padding)
        x = tf.nn.depthwise_conv2d(x, k, self.strides, 'VALID', data_format=nn.data_format)
        return x


nn.BlurPool = BlurPool
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.