content stringlengths 5 1.05M |
|---|
import pytest
from share.util.graph import MutableGraph
# Blank-node IDs referenced both inside the JSON-LD fixture data and by the
# parametrized tests below.
work_id = '_:6203fec461bb4b3fa956772acbd9c50d'
org_id = '_:d486fd737bea4fbe9566b7a2842651ef'
person_id = '_:f4cec0271c7d4085bac26dbb2b32a002'
creator_id = '_:a17f28109536459ca02d99bf777400ae'
identifier_id = '_:a27f2810e536459ca02d99bf707400be'
@pytest.fixture
def mutable_graph_nodes():
    """Return a JSON-LD node list describing one Article with five creators.

    Shape of the graph: 1 work + 1 work identifier + 5 people + 5 Creator
    relations + 2 organizations + 5 affiliation relations = 19 nodes.
    """
    return [
        {'@id': org_id, '@type': 'Organization', 'name': 'Department of Physics'},
        {'@id': '_:c4f10e02785a4b4d878f48d08ffc7fce', 'related': {'@type': 'Organization', '@id': org_id}, '@type': 'IsAffiliatedWith', 'subject': {'@type': 'Person', '@id': '_:7e742fa3377e4f119e36f8629144a0bc'}},
        {'@id': '_:7e742fa3377e4f119e36f8629144a0bc', 'agent_relations': [{'@type': 'IsAffiliatedWith', '@id': '_:c4f10e02785a4b4d878f48d08ffc7fce'}], '@type': 'Person', 'family_name': 'Prendergast', 'given_name': 'David'},
        {'@id': '_:687a4ba2cbd54ab7a2f2c3cd1777ea8a', '@type': 'Creator', 'creative_work': {'@type': 'Article', '@id': work_id}, 'agent': {'@type': 'Person', '@id': '_:7e742fa3377e4f119e36f8629144a0bc'}},
        {'@id': '_:69e859cefed140bd9b717c5b610d300c', '@type': 'Organization', 'name': 'NMRC, University College, Cork, Ireland'},
        {'@id': '_:2fd829eeda214adca2d4d34d02b10328', 'related': {'@type': 'Organization', '@id': '_:69e859cefed140bd9b717c5b610d300c'}, '@type': 'IsAffiliatedWith', 'subject': {'@type': 'Person', '@id': '_:ed3cc2a50f6d499db933a28d16bca5d6'}},
        {'@id': '_:ed3cc2a50f6d499db933a28d16bca5d6', 'agent_relations': [{'@type': 'IsAffiliatedWith', '@id': '_:2fd829eeda214adca2d4d34d02b10328'}], '@type': 'Person', 'family_name': 'Nolan', 'given_name': 'M.'},
        {'@id': '_:27961f3c7c644101a500772477aff304', '@type': 'Creator', 'creative_work': {'@type': 'Article', '@id': work_id}, 'agent': {'@type': 'Person', '@id': '_:ed3cc2a50f6d499db933a28d16bca5d6'}},
        {'@id': '_:d4f10e02785a4b4d878f48d08ffc7fce', 'related': {'@type': 'Organization', '@id': org_id}, '@type': 'IsAffiliatedWith', 'subject': {'@type': 'Person', '@id': '_:9a1386475d314b9bb524931e24361aaa'}},
        {'@id': '_:9a1386475d314b9bb524931e24361aaa', 'agent_relations': [{'@type': 'IsAffiliatedWith', '@id': '_:d4f10e02785a4b4d878f48d08ffc7fce'}], '@type': 'Person', 'family_name': 'Filippi', 'given_name': 'Claudia'},
        {'@id': '_:bf7726af4542405888463c796e5b7686', '@type': 'Creator', 'creative_work': {'@type': 'Article', '@id': work_id}, 'agent': {'@type': 'Person', '@id': '_:9a1386475d314b9bb524931e24361aaa'}},
        {'@id': '_:e4f10e02785a4b4d878f48d08ffc7fce', 'related': {'@type': 'Organization', '@id': org_id}, '@type': 'IsAffiliatedWith', 'subject': {'@type': 'Person', '@id': '_:78639db07e2e4ee88b422a8920d8a095'}},
        {'@id': '_:78639db07e2e4ee88b422a8920d8a095', 'agent_relations': [{'@type': 'IsAffiliatedWith', '@id': '_:e4f10e02785a4b4d878f48d08ffc7fce'}], '@type': 'Person', 'family_name': 'Fahy', 'given_name': 'Stephen'},
        {'@id': '_:18d151204d7c431388a7e516defab1bc', '@type': 'Creator', 'creative_work': {'@type': 'Article', '@id': work_id}, 'agent': {'@type': 'Person', '@id': '_:78639db07e2e4ee88b422a8920d8a095'}},
        {'@id': '_:5fd829eeda214adca2d4d34d02b10328', 'related': {'@type': 'Organization', '@id': '_:69e859cefed140bd9b717c5b610d300c'}, '@type': 'IsAffiliatedWith', 'subject': {'@type': 'Person', '@id': person_id}},
        {'@id': person_id, 'agent_relations': [{'@type': 'IsAffiliatedWith', '@id': '_:5fd829eeda214adca2d4d34d02b10328'}], '@type': 'Person', 'family_name': 'Greer', 'given_name': 'J.'},
        {'@id': creator_id, '@type': 'Creator', 'creative_work': {'@type': 'Article', '@id': work_id}, 'agent': {'@type': 'Person', '@id': person_id}},
        {'@id': identifier_id, '@type': 'WorkIdentifier', 'creative_work': {'@type': 'Article', '@id': work_id}, 'uri': 'http://example.com/things'},
        {'@id': work_id, 'date_updated': '2016-10-20T00:00:00+00:00', 'identifiers': [{'@type': 'WorkIdentifier', '@id': identifier_id}], 'agent_relations': [{'@type': 'Creator', '@id': '_:687a4ba2cbd54ab7a2f2c3cd1777ea8a'}, {'@type': 'Creator', '@id': '_:27961f3c7c644101a500772477aff304'}, {'@type': 'Creator', '@id': '_:bf7726af4542405888463c796e5b7686'}, {'@type': 'Creator', '@id': '_:18d151204d7c431388a7e516defab1bc'}, {'@type': 'Creator', '@id': creator_id}], 'title': 'Impact of Electron-Electron Cusp on Configuration Interaction Energies', '@type': 'Article', 'description': ' The effect of the electron-electron cusp on the convergence of configuration\ninteraction (CI) wave functions is examined. By analogy with the\npseudopotential approach for electron-ion interactions, an effective\nelectron-electron interaction is developed which closely reproduces the\nscattering of the Coulomb interaction but is smooth and finite at zero\nelectron-electron separation. The exact many-electron wave function for this\nsmooth effective interaction has no cusp at zero electron-electron separation.\nWe perform CI and quantum Monte Carlo calculations for He and Be atoms, both\nwith the Coulomb electron-electron interaction and with the smooth effective\nelectron-electron interaction. We find that convergence of the CI expansion of\nthe wave function for the smooth electron-electron interaction is not\nsignificantly improved compared with that for the divergent Coulomb interaction\nfor energy differences on the order of 1 mHartree. This shows that, contrary to\npopular belief, description of the electron-electron cusp is not a limiting\nfactor, to within chemical accuracy, for CI calculations.\n'} # noqa
    ]
@pytest.fixture
def mutable_graph(mutable_graph_nodes):
    """Build a MutableGraph from the JSON-LD node fixture."""
    return MutableGraph.from_jsonld(mutable_graph_nodes)
class TestMutableGraph:
    """Behavioral tests for share.util.graph.MutableGraph."""

    def test_graph(self, mutable_graph):
        # 19 = 1 work + 1 identifier + 5 people + 5 creators + 2 orgs + 5 affiliations
        assert mutable_graph.number_of_nodes() == 19

    @pytest.mark.parametrize('node_id', [work_id, org_id, person_id, creator_id])
    def test_get_node(self, mutable_graph, node_id):
        assert mutable_graph.get_node(node_id).id == node_id

    def test_get_nonexistent_node(self, mutable_graph):
        # Missing ids resolve to None rather than raising.
        assert mutable_graph.get_node('not_an_id') is None

    def test_edge(self, mutable_graph):
        # Forward edges resolve to actual node objects, not raw id refs.
        creator_node = mutable_graph.get_node(creator_id)
        assert creator_node['creative_work'] == mutable_graph.get_node(work_id)
        assert creator_node['agent'] == mutable_graph.get_node(person_id)

    @pytest.mark.parametrize('node_id, key, value', [
        (work_id, 'title', 'title title'),
        (work_id, 'description', 'woo'),
        (identifier_id, 'creative_work', None),
    ])
    def test_set_attrs(self, mutable_graph, node_id, key, value):
        n = mutable_graph.get_node(node_id)
        n[key] = value
        assert n[key] == value

    @pytest.mark.parametrize('set_none', [True, False])
    def test_del_attrs(self, mutable_graph, set_none):
        # Setting None and `del` should behave identically for attrs and edges.
        work = mutable_graph.get_node(work_id)
        assert work['title']
        if set_none:
            work['title'] = None
        else:
            del work['title']
        assert work['title'] is None
        assert 'title' not in work.attrs()
        identifier = mutable_graph.get_node(identifier_id)
        assert identifier['creative_work'] == work
        if set_none:
            identifier['creative_work'] = None
        else:
            del identifier['creative_work']

    @pytest.mark.parametrize('node_id, reverse_edge_name, count', [
        (work_id, 'agent_relations', 5),
        (work_id, 'incoming_creative_work_relations', 0),
        (work_id, 'identifiers', 1),
        (org_id, 'incoming_agent_relations', 3),
    ])
    def test_reverse_edge(self, mutable_graph, node_id, reverse_edge_name, count):
        node = mutable_graph.get_node(node_id)
        assert len(node[reverse_edge_name]) == count

    @pytest.mark.parametrize('node_id, count', [
        (work_id, 12),
        (org_id, 15),
        (person_id, 16),
        (creator_id, 18),
    ])
    def test_remove_node_cascades(self, mutable_graph, node_id, count):
        # Removing a node also removes nodes that referenced it (cascade);
        # `count` is the expected remaining node total out of 19.
        mutable_graph.remove_node(node_id)
        assert mutable_graph.number_of_nodes() == count

    def test_add_node(self, mutable_graph):
        identifier_id = '_:foo'
        uri = 'mailto:person@example.com'
        person = mutable_graph.get_node(person_id)
        node_count = mutable_graph.number_of_nodes()
        assert len(person['identifiers']) == 0
        mutable_graph.add_node(identifier_id, 'AgentIdentifier', {'uri': uri, 'agent': person})
        assert mutable_graph.number_of_nodes() == node_count + 1
        identifier_node = mutable_graph.get_node(identifier_id)
        assert identifier_node['uri'] == uri
        assert identifier_node['agent'] == person
        # The new node is reachable via the reverse edge on the person.
        identifiers = person['identifiers']
        assert len(identifiers) == 1
        assert identifier_node == next(iter(identifiers))

    @pytest.mark.parametrize('count, filter', [
        (5, lambda n, g: n.type == 'person'),
        (0, lambda n, g: not g.degree(n.id)),
        (1, lambda n, g: len(g.out_edges(n.id)) == 1),
    ])
    def test_filter_nodes(self, mutable_graph, filter, count):
        filtered = list(mutable_graph.filter_nodes(lambda n: filter(n, mutable_graph)))
        assert len(filtered) == count

    def test_jsonld(self, mutable_graph_nodes, mutable_graph):
        # Round-trip check: input JSON-LD == output JSON-LD after normalizing
        # @type casing, sorting lists by @id, and dropping list-valued keys
        # (reverse-edge lists are excluded via in_edges=False).
        def clean_jsonld(value):
            if isinstance(value, list):
                return [clean_jsonld(n) for n in sorted(value, key=lambda n: n['@id'])]
            if isinstance(value, dict):
                return {
                    k: v.lower() if k == '@type' else clean_jsonld(v)
                    for k, v in value.items() if not isinstance(v, list)
                }
            return value
        assert clean_jsonld(mutable_graph_nodes) == clean_jsonld(mutable_graph.to_jsonld(in_edges=False))
|
from adjudicator.decisions import Outcomes
class PieceTypes:
    """String constants naming the two kinds of pieces."""
    ARMY = 'army'
    FLEET = 'fleet'
class Piece:
    """Base class for a unit on the board, resolved via adjudicator decisions."""

    # Subclasses flip the flag that matches their type.
    is_army = False
    is_fleet = False

    def __init__(self, _id, nation, territory, attacker_territory=None):
        self.id = _id
        self.nation = nation
        self.territory = territory
        # Assigned externally before resolution; `moves`/`stays` assume it is set.
        self.order = None
        self.dislodged_decision = Outcomes.UNRESOLVED
        self.dislodged_by = None
        self.attacker_territory = attacker_territory

    # TODO test
    def __str__(self):
        return f'{self.__class__.__name__} {self.territory}'

    # TODO test
    def __repr__(self):
        return f'{self.__class__.__name__} {self.territory}'

    @property
    def moves(self):
        # True only for a move order resolved as MOVES.
        if self.order.is_move:
            return self.order.move_decision == Outcomes.MOVES
        return False

    @property
    def stays(self):
        # Non-move orders always stay; a move stays only when it FAILS.
        if self.order.is_move:
            return self.order.move_decision == Outcomes.FAILS
        return True

    def set_dislodged_decision(self, outcome, dislodged_by=None):
        """Record the dislodged outcome (and attacker, if any); return the outcome."""
        self.dislodged_decision = outcome
        self.dislodged_by = dislodged_by
        if dislodged_by:
            # Only record the attacker's origin for non-convoyed attacks
            # (presumably relevant to retreat restrictions — confirm with rules).
            if not dislodged_by.order.via_convoy:
                self.attacker_territory = dislodged_by.territory
        return self.dislodged_decision

    def update_dislodged_decision(self):
        """Re-evaluate SUSTAINS/DISLODGED for this piece.

        Returns the new decision, or None (implicitly) when the attackers'
        move decisions are not yet resolved enough to decide.
        """
        attacking_pieces = list(self.territory.attacking_pieces)
        # sustains if...
        if not attacking_pieces:
            return self.set_dislodged_decision(Outcomes.SUSTAINS)
        if self.order.is_move:
            # cannot be dislodged if successfully moved
            if self.order.move_decision == Outcomes.MOVES:
                return self.set_dislodged_decision(Outcomes.SUSTAINS)
        # TODO this is messy
        # Sustains when every attacking move has resolved as FAILS.
        if [p for p in attacking_pieces if p.order.move_decision == Outcomes.FAILS] \
                and all([p.order.move_decision == Outcomes.FAILS for p in attacking_pieces]):
            return self.set_dislodged_decision(Outcomes.SUSTAINS)
        # dislodged if...
        if self.order.is_move:
            # A failed move is dislodged by any attacker that moves in.
            if self.order.move_decision == Outcomes.FAILS and \
                    any([p for p in attacking_pieces if p.order.move_decision == Outcomes.MOVES]):
                piece = [p for p in attacking_pieces if p.order.move_decision == Outcomes.MOVES][0]
                return self.set_dislodged_decision(Outcomes.DISLODGED, piece)
        else:
            # A stationary piece is dislodged by any successful attacker.
            if any([p.order.move_decision == Outcomes.MOVES for p in attacking_pieces]):
                piece = [p for p in attacking_pieces
                         if p.order.move_decision == Outcomes.MOVES][0]
                return self.set_dislodged_decision(Outcomes.DISLODGED, piece)

    def to_dict(self):
        """Serialize resolution state using ids instead of object references."""
        data = {
            'id': self.id,
            'dislodged_decision': self.dislodged_decision,
            'dislodged_by': None,
            'attacker_territory': None,
        }
        if self.dislodged_by:
            data['dislodged_by'] = self.dislodged_by.id
        if self.attacker_territory:
            data['attacker_territory'] = self.attacker_territory.id
        return data
class Army(Piece):
    is_army = True

    def can_reach(self, target, *args):
        """
        Determines whether the army can reach the given territory, regardless
        of whether the necessary convoying fleets exist or not.

        * Args:
            * `target` - `territory`

        Returns:
            * `bool`
        """
        # Any pair of coastal territories is reachable via a (possible) convoy.
        if self.territory.is_coastal and target.is_coastal:
            return True
        # Otherwise a direct land move is required.
        if not self.territory.adjacent_to(target):
            return False
        return target.accessible_by_piece_type(self)

    def can_reach_support(self, target):
        """
        Determines whether the army can reach the given territory in the
        context of providing support. Cannot provide support through a convoy.

        * Args:
            * `target` - `territory`

        Returns:
            * `bool`
        """
        if not self.territory.adjacent_to(target):
            return False
        return target.accessible_by_piece_type(self)
# TODO move to decisions
class Fleet(Piece):
    is_fleet = True

    def __init__(self, _id, nation, territory, named_coast=None):
        super().__init__(_id, nation, territory)
        self.named_coast = named_coast

    def can_reach(self, target, named_coast=None):
        """
        Determines whether the fleet can reach the given territory and named
        coast.

        Args:
            * `target` - `Territory`
            * `[named_coast]` - `NamedCoast`

        Returns:
            * `bool`
        """
        if target.is_complex and not named_coast:
            raise ValueError(
                'Must specify coast if target is complex territory.'
            )
        # Moving onto a specific coast: adjacency to that coast decides.
        if named_coast:
            return self.territory in named_coast.neighbours
        return self._reachable(target)

    def can_reach_support(self, target):
        """
        Determines whether the fleet can reach the given territory in the
        context of providing support. In this context the fleet does not need
        to be able to reach the target named coast.

        * Args:
            * `target` - `territory`

        Returns:
            * `bool`
        """
        return self._reachable(target)

    def _reachable(self, target):
        # Shared reachability rules for moves (without a named coast) and support.
        if self.territory.is_complex:
            # Sitting on a named coast: use that coast's neighbour list.
            return target in self.named_coast.neighbours
        if self.territory.is_coastal and target.is_coastal:
            # Coast-to-coast movement requires a shared coastline.
            return target in self.territory.shared_coasts
        return self.territory.adjacent_to(target) and \
            target.accessible_by_piece_type(self)
|
from buidl.bech32 import (
cbor_encode,
cbor_decode,
bc32encode,
bc32decode,
uses_only_bech32_chars,
)
from buidl.helper import is_intable
from binascii import a2b_base64, b2a_base64
from math import ceil
import hashlib
class BCURStringFormatError(RuntimeError):
    """Raised when a string is not valid BCUR (`ur:bytes/...`) format."""
    pass
def bcur_encode(data):
    """Returns bcur encoded string and checksum"""
    # CBOR-wrap the payload, then bc32-encode both it and its SHA-256 digest.
    cbor_payload = cbor_encode(data)
    digest = hashlib.sha256(cbor_payload).digest()
    return bc32encode(cbor_payload), bc32encode(digest)
def bcur_decode(data, checksum=None):
    """Returns decoded data, verifies checksum if provided"""
    cbor = bc32decode(data)
    if checksum is not None:
        # Compare the decoded checksum against the payload's SHA-256 digest.
        expected = bc32decode(checksum)
        calculated_digest = hashlib.sha256(cbor).digest()
        if expected != calculated_digest:
            raise ValueError(f"Calculated digest {calculated_digest} != {expected}")
    return cbor_decode(cbor)
def _parse_bcur_helper(bcur_string):
    """
    This parses a bcur string and returns the following (or raises an error):
        payload, checksum, x, y

    Notes:
      - Works for both BCURSingle and BCURMulti.
      - All entries may be empty except for payload.
      - Checksums are not validated here, as checksum validation is different
        for single vs multi.
    """
    if type(bcur_string) is not str:
        raise BCURStringFormatError(
            f"{bcur_string} is of type {type(bcur_string)}, not a string"
        )
    string = bcur_string.lower().strip()
    if not string.startswith("ur:bytes/"):
        raise BCURStringFormatError(f"String {string} doesn't start with ur:bytes/")
    bcur_parts = string.split("/")
    if len(bcur_parts) == 2:
        # Non-animated QR code (just 1 qr, doesn't display 1of1 nor checksum)
        _, payload = bcur_parts
        checksum, x_int, y_int = None, 1, 1
    elif len(bcur_parts) == 3:
        # Non-animated QR code (just 1 qr, doesn't display 1of1 but does have checksum)
        _, checksum, payload = bcur_parts
        x_int, y_int = 1, 1
    elif len(bcur_parts) == 4:
        # Animated QR code
        _, xofy, checksum, payload = bcur_parts
        xofy_parts = xofy.split("of")
        if len(xofy_parts) != 2:
            raise BCURStringFormatError(f"x-of-y section malformed: {xofy_parts}")
        if not is_intable(xofy_parts[0]) or not is_intable(xofy_parts[1]):
            raise BCURStringFormatError(
                f"x and y (in x-of-y) must both be integers: {xofy_parts}"
            )
        x_int = int(xofy_parts[0])
        y_int = int(xofy_parts[1])
        if x_int > y_int:
            # BUG FIX: this message previously read "x must be >= y" (the
            # constraint is the opposite) and was missing the f-prefix, so
            # "{xofy_parts}" was emitted literally.
            raise BCURStringFormatError(f"x must be <= y (in x-of-y): {xofy_parts}")
    else:
        # BUG FIX: 2-4 parts means 1-3 slashes (message used to say "2-4 slashes").
        raise BCURStringFormatError(f"{string} doesn't have 1-3 slashes")
    # Checksums, when present, are fixed-length bc32 strings.
    if checksum:
        if len(checksum) != 58:
            raise BCURStringFormatError("Checksum must be 58 chars")
        if not uses_only_bech32_chars(checksum):
            raise BCURStringFormatError(
                f"checksum can only contain bech32 characters: {checksum}"
            )
    if not uses_only_bech32_chars(payload):
        raise BCURStringFormatError(
            f"Payload can only contain bech32 characters: {payload}"
        )
    return payload, checksum, x_int, y_int
class BCURSingle:
    """A single (non-animated) BCUR-encoded payload."""

    def __init__(self, text_b64, encoded=None, checksum=None):
        # Always re-derive the encoding/checksum from the base64 text and
        # cross-check any caller-supplied values against them.
        raw = a2b_base64(text_b64)
        enc, enc_hash = bcur_encode(data=raw)
        if encoded and encoded != enc:
            raise ValueError(f"Calculated encoding {enc} != {encoded}")
        if checksum and checksum != enc_hash:
            raise ValueError(f"Calculated checksum {enc_hash} != {checksum}")
        self.text_b64 = text_b64
        self.encoded = enc
        self.enc_hash = enc_hash

    def __repr__(self):
        return self.encode()

    def encode(self, use_checksum=True):
        """Render as a single `ur:bytes/...` string (no x-of-y section)."""
        if not use_checksum:
            return f"ur:bytes/{self.encoded}"
        return f"ur:bytes/{self.enc_hash}/{self.encoded}"

    @classmethod
    def parse(cls, to_parse):
        """Parses (decodes) a BCURSingle from a single BCUR string"""
        payload, checksum, x, y = _parse_bcur_helper(bcur_string=to_parse)
        if (x, y) != (1, 1):
            raise BCURStringFormatError(
                f"BCURSingle must have x=1 and y=1, instead got x={x} and y={y}"
            )
        # bcur_decode raises if the checksum does not match the payload.
        decoded = bcur_decode(data=payload, checksum=checksum)
        return cls(
            text_b64=b2a_base64(decoded).strip().decode(),
            encoded=payload,
            checksum=checksum,
        )
class BCURMulti:
    """A BCUR payload that can be split across multiple (animated) QR parts."""

    def __init__(self, text_b64, encoded=None, checksum=None):
        """Encode base64 text, cross-checking `encoded`/`checksum` if supplied."""
        binary_b64 = a2b_base64(text_b64)
        enc, enc_hash = bcur_encode(data=binary_b64)
        if encoded and encoded != enc:
            raise ValueError(f"Calculated encoding {enc} != {encoded}")
        if checksum and checksum != enc_hash:
            raise ValueError(f"Calculated checksum {enc_hash} != {checksum}")
        # NOTE(review): self.checksum stores the *supplied* checksum (may be
        # None), while self.enc_hash is always the calculated one; encode()
        # uses enc_hash — confirm the asymmetry is intended.
        self.checksum = checksum
        self.encoded = enc
        self.text_b64 = text_b64
        self.enc_hash = enc_hash

    def __repr__(self):
        return f"bcur: {self.checksum}\n{self.text_b64}\n"

    def encode(self, max_size_per_chunk=300, animate=True):
        """
        Take some base64 text (i.e. a PSBT string) and encode it into multiple
        QR codes using Blockchain Commons Uniform Resources.

        If animate=False, then max_size_per_chunk is ignored and this returns
        a 1of1 with checksum.

        Use parse() to return a BCURMulti object from this encoded result.

        This algorithm makes all the chunks of about equal length. This makes
        sure that the last chunk is not (too) different in size, which is
        visually noticeable when animation occurs.

        Inspired by this JS implementation:
        https://github.com/cryptoadvance/specter-desktop/blob/da35e7d88072475746077432710c77f799017eb0/src/cryptoadvance/specter/templates/includes/qr-code.html
        """
        if animate is False:
            number_of_chunks = 1
        else:
            number_of_chunks = ceil(len(self.encoded) / max_size_per_chunk)
        # Equal-size chunks; the final slice may run past the end harmlessly.
        chunk_length = ceil(len(self.encoded) / number_of_chunks)
        # For number_of_chunks == 1 (with no checksum) use BCURSingle
        resulting_chunks = []
        for cnt in range(number_of_chunks):
            start_idx = cnt * chunk_length
            finish_idx = (cnt + 1) * chunk_length
            resulting_chunks.append(
                f"ur:bytes/{cnt+1}of{number_of_chunks}/{self.enc_hash}/{self.encoded[start_idx:finish_idx]}"
            )
        return resulting_chunks

    @classmethod
    def parse(cls, to_parse):
        """Parses a BCURMulti from a list of BCUR strings"""
        if type(to_parse) not in (list, tuple):
            raise BCURStringFormatError(
                f"{to_parse} is of type {type(to_parse)}, not a list/tuple"
            )
        payloads = []
        global_checksum, global_y = "", 0
        for cnt, bcur_string in enumerate(to_parse):
            entry_payload, entry_checksum, entry_x, entry_y = _parse_bcur_helper(
                bcur_string=bcur_string
            )
            # Parts must arrive in 1..y order.
            if cnt + 1 != entry_x:
                raise ValueError(
                    f"BCUR strings not in order: got {entry_x} and was expecting {cnt+1}"
                )
            # Initialize checksum and y (as in x-of-y) on first loop
            if cnt == 0:
                global_checksum = entry_checksum
                global_y = entry_y
            elif entry_checksum != global_checksum:
                raise ValueError(
                    f"Entry {bcur_string} has checksum {entry_checksum} but we're expecting {global_checksum}"
                )
            elif entry_y != global_y:
                raise ValueError(
                    f"Entry {bcur_string} wants {entry_y} parts but we're expecting {global_y} parts"
                )
            # All checks pass
            payloads.append(entry_payload)
        # will throw an error if checksum is incorrect
        enc = bcur_decode(data="".join(payloads), checksum=global_checksum)
        return cls(text_b64=b2a_base64(enc).strip().decode(), checksum=global_checksum)
|
#4-1
# pizzas = ['pepperoni', 'cheese', 'black olive']
# for pizza in pizzas:
#     print(f"I love {pizza} pizza!\n")
# print("As much as I love pizza - I am a bit lactose intolerant :(")

#4-2
# Print a line about each candidate pet, then a closing remark.
pets = ['cat', 'dog', 'horse']
for pet in pets:
    print(f"A {pet} would make a great pet.\n")
print("Any of these would make a great pet, maybe less a horse tho...")
import os
import re
import asyncio
import discord
from discord.ext import commands
import typing
import traceback
# Bot configuration comes from the environment; the command prefix defaults to '!'.
TOKEN = os.getenv('DISCORD_TOKEN')
client = commands.Bot(command_prefix=os.getenv('COMMAND_PREFIX') or '!', help_command=None)


@client.event
async def on_ready():
    # Log once the gateway connection is established.
    print(f'{client.user} has connected to Discord!')

# Human-readable usage string, echoed back to users in error replies.
USAGE = client.command_prefix + 'migrate to_channel from_message [to_message] [except_messages_with_this_emoji_reaction]'
@commands.guild_only()
@client.command(name='migrate', aliases=['move'])
async def move_messages(ctx, to_channel: discord.TextChannel, msg_begin: discord.Message, msg_end: typing.Optional[discord.Message]=None, except_emoji: typing.Optional[discord.Emoji]=None):
    # Placeholder: only echoes the parsed arguments back; no messages are
    # actually moved yet. NOTE(review): confirm this is still work-in-progress.
    await ctx.send(f'We would move the range of messages from {msg_begin} to {msg_end}, excluding those which have this emoji: {except_emoji}, to channel: {to_channel}')
@move_messages.error
async def error(ctx, e):
    """Report a command error to the user with the traceback in an embed."""
    tb_lines = traceback.format_exception(None, e, e.__traceback__)
    print(*tb_lines)
    embed = discord.Embed()
    embed.color = discord.Color.red()
    # BUG FIX: the original applied [:5500] to the *list* of traceback lines
    # (a no-op for any realistic traceback) instead of truncating the joined
    # text, so long tracebacks blew past Discord's 4096-char embed description
    # limit and the send failed. Join first, then truncate with headroom for
    # the code fences.
    embed.description = '```\n' + ''.join(tb_lines)[:4000] + '\n```'
    embed.title = 'Error description for nerds:'
    await ctx.send('There was an error running your command. Please check that you are using the command with the correct arguments:\n```\n' + USAGE + '\n```\n', embed=embed)
client.run(TOKEN)
|
from .autoregressive_2d import *
from .autoregressive_linear_2d import *
from .autoregressive_splines_2d import *
from .autoregressive_mixtures_2d import *
|
"""Cryptography functions used by BasicWallet."""
from collections import OrderedDict
from typing import Callable, Optional, Sequence, Tuple, Union, List
import nacl.bindings
import nacl.exceptions
import nacl.utils
from marshmallow import ValidationError
from ..utils.jwe import JweRecipient, b64url, JweEnvelope, from_b64url
from .error import WalletError
from .util import bytes_to_b58, b64_to_bytes, b58_to_bytes, random_seed
from .key_type import KeyType
from .bbs import (
create_bls12381g2_keypair,
verify_signed_messages_bls12381g2,
BbsException,
sign_messages_bls12381g2,
)
def create_keypair(key_type: KeyType, seed: bytes = None) -> Tuple[bytes, bytes]:
    """
    Create a public and private keypair from a seed value.

    Args:
        key_type: The type of key to generate
        seed: Seed for keypair

    Raises:
        WalletError: If the key type is not supported

    Returns:
        A tuple of (public key, secret key)

    """
    # Dispatch per key type; the bbs-backed branch only touches the bbs
    # module when actually requested, so python won't crash if bbs is not
    # installed and not used.
    if key_type == KeyType.BLS12381G2:
        return create_bls12381g2_keypair(seed)
    if key_type == KeyType.ED25519:
        return create_ed25519_keypair(seed)
    raise WalletError(f"Unsupported key type: {key_type.key_type}")
def create_ed25519_keypair(seed: bytes = None) -> Tuple[bytes, bytes]:
    """
    Create a public and private ed25519 keypair from a seed value.

    Args:
        seed: Seed for keypair; generated randomly when omitted

    Returns:
        A tuple of (public key, secret key)

    """
    return nacl.bindings.crypto_sign_seed_keypair(seed or random_seed())
def seed_to_did(seed: str) -> str:
    """
    Derive a DID from a seed value.

    Args:
        seed: The seed to derive

    Returns:
        The DID derived from the seed

    """
    seed = validate_seed(seed)
    verkey, _ = create_ed25519_keypair(seed)
    # The DID is the base58 encoding of the first 16 bytes of the verkey.
    did = bytes_to_b58(verkey[:16])
    return did
def sign_pk_from_sk(secret: bytes) -> bytes:
    """Extract the verkey from a secret signing key."""
    # A libsodium signing secret key is laid out as seed || public key,
    # so the verkey is everything after the seed prefix.
    return secret[nacl.bindings.crypto_sign_SEEDBYTES:]
def validate_seed(seed: Union[str, bytes]) -> bytes:
    """
    Convert a seed parameter to standard format and check length.

    Args:
        seed: The seed to validate

    Returns:
        The validated and encoded seed, or None for an empty input

    """
    if not seed:
        return None
    if isinstance(seed, str):
        # '=' suggests base64 padding; otherwise treat the string as raw ASCII.
        seed = b64_to_bytes(seed) if "=" in seed else seed.encode("ascii")
    if not isinstance(seed, bytes):
        raise WalletError("Seed value is not a string or bytes")
    if len(seed) != 32:
        raise WalletError("Seed value must be 32 bytes in length")
    return seed
def sign_message(
    message: Union[List[bytes], bytes], secret: bytes, key_type: KeyType
) -> bytes:
    """
    Sign message(s) using a private signing key.

    Args:
        message: The message(s) to sign
        secret: The private signing key
        key_type: The key type to derive the signature algorithm from

    Returns:
        bytes: The signature

    """
    # Normalize to a list so both variants share the checks below.
    msgs = message if isinstance(message, list) else [message]
    if key_type == KeyType.BLS12381G2:
        return sign_messages_bls12381g2(messages=msgs, secret=secret)
    if key_type == KeyType.ED25519:
        if len(msgs) > 1:
            raise WalletError("ed25519 can only sign a single message")
        return sign_message_ed25519(message=msgs[0], secret=secret)
    raise WalletError(f"Unsupported key type: {key_type.key_type}")
def sign_message_ed25519(message: bytes, secret: bytes) -> bytes:
    """Sign message using a ed25519 private signing key.

    Args:
        message (bytes): The message to sign
        secret (bytes): The private signing key

    Returns:
        bytes: The signature

    """
    # crypto_sign returns signature || message; keep only the signature prefix.
    result = nacl.bindings.crypto_sign(message, secret)
    sig = result[: nacl.bindings.crypto_sign_BYTES]
    return sig
def verify_signed_message(
    message: Union[List[bytes], bytes],
    signature: bytes,
    verkey: bytes,
    key_type: KeyType,
) -> bool:
    """
    Verify a signed message according to a public verification key.

    Args:
        message: The message(s) to verify
        signature: The signature to verify
        verkey: The verkey to use in verification
        key_type: The key type to derive the signature verification algorithm from

    Returns:
        True if verified, else False

    """
    # Normalize to a list so both variants share the checks below.
    msgs = message if isinstance(message, list) else [message]
    if key_type == KeyType.BLS12381G2:
        try:
            return verify_signed_messages_bls12381g2(
                messages=msgs, signature=signature, public_key=verkey
            )
        except BbsException as e:
            raise WalletError("Unable to verify message") from e
    if key_type == KeyType.ED25519:
        if len(msgs) > 1:
            raise WalletError("ed25519 can only verify a single message")
        return verify_signed_message_ed25519(
            message=msgs[0], signature=signature, verkey=verkey
        )
    raise WalletError(f"Unsupported key type: {key_type.key_type}")
def verify_signed_message_ed25519(
    message: bytes, signature: bytes, verkey: bytes
) -> bool:
    """
    Verify an ed25519 signed message according to a public verification key.

    Args:
        message: The message to verify
        signature: The signature to verify
        verkey: The verkey to use in verification

    Returns:
        True if verified, else False

    """
    try:
        # crypto_sign_open expects the combined signature || message form.
        nacl.bindings.crypto_sign_open(signature + message, verkey)
        return True
    except nacl.exceptions.BadSignatureError:
        return False
def add_pack_recipients(
    wrapper: JweEnvelope,
    cek: bytes,
    to_verkeys: Sequence[bytes],
    from_secret: bytes = None,
):
    """
    Assemble the recipients block of a packed message.

    Adds one JweRecipient per verkey: authcrypt style (boxed CEK plus an
    encrypted, sealed sender verkey) when `from_secret` is given, otherwise
    anoncrypt style (sealed CEK only).

    Args:
        wrapper: The envelope to add recipients to
        cek: The content encryption key
        to_verkeys: Verkeys of recipients
        from_secret: Secret to use for signing keys

    """
    for target_vk in to_verkeys:
        # Recipients hold ed25519 verkeys; boxing needs curve25519 form.
        target_pk = nacl.bindings.crypto_sign_ed25519_pk_to_curve25519(target_vk)
        if from_secret:
            # Authcrypt: seal the sender's b58 verkey to the recipient, then
            # box the CEK with the sender's converted secret key and a fresh nonce.
            sender_pk = sign_pk_from_sk(from_secret)
            sender_vk = bytes_to_b58(sender_pk).encode("utf-8")
            enc_sender = nacl.bindings.crypto_box_seal(sender_vk, target_pk)
            sk = nacl.bindings.crypto_sign_ed25519_sk_to_curve25519(from_secret)
            nonce = nacl.utils.random(nacl.bindings.crypto_box_NONCEBYTES)
            enc_cek = nacl.bindings.crypto_box(cek, nonce, target_pk, sk)
            wrapper.add_recipient(
                JweRecipient(
                    encrypted_key=enc_cek,
                    header=OrderedDict(
                        [
                            ("kid", bytes_to_b58(target_vk)),
                            ("sender", b64url(enc_sender)),
                            ("iv", b64url(nonce)),
                        ]
                    ),
                )
            )
        else:
            # Anoncrypt: no sender identity; the CEK is sealed anonymously.
            enc_sender = None
            nonce = None
            enc_cek = nacl.bindings.crypto_box_seal(cek, target_pk)
            wrapper.add_recipient(
                JweRecipient(
                    encrypted_key=enc_cek, header={"kid": bytes_to_b58(target_vk)}
                )
            )
def ed25519_pk_to_curve25519(public_key: bytes) -> bytes:
    """Convert a public Ed25519 key to a public Curve25519 key as bytes."""
    return nacl.bindings.crypto_sign_ed25519_pk_to_curve25519(public_key)
def encrypt_plaintext(
    message: str, add_data: bytes, key: bytes
) -> Tuple[bytes, bytes, bytes]:
    """
    Encrypt the payload of a packed message.

    Args:
        message: Message to encrypt
        add_data: Additional authenticated data (the protected header bytes)
        key: Key used for encryption

    Returns:
        A tuple of (ciphertext, nonce, tag)

    """
    nonce = nacl.utils.random(nacl.bindings.crypto_aead_chacha20poly1305_ietf_NPUBBYTES)
    message_bin = message.encode("utf-8")
    output = nacl.bindings.crypto_aead_chacha20poly1305_ietf_encrypt(
        message_bin, add_data, nonce, key
    )
    # BUG FIX: the split point must be the *encoded* byte length. The original
    # used len(message) — a character count — so any non-ASCII message made
    # the ciphertext/tag split wrong (trailing ciphertext bytes ended up in
    # the tag field of the serialized envelope). For the AEAD construction,
    # ciphertext length always equals plaintext byte length.
    mlen = len(message_bin)
    ciphertext = output[:mlen]
    tag = output[mlen:]
    return ciphertext, nonce, tag
def decrypt_plaintext(
    ciphertext: bytes, recips_bin: bytes, nonce: bytes, key: bytes
) -> str:
    """
    Decrypt the payload of a packed message.

    Args:
        ciphertext: Ciphertext with the AEAD tag appended
        recips_bin: Protected header bytes, used as additional authenticated data
        nonce: Nonce used at encryption time
        key: Content encryption key

    Returns:
        The decrypted string

    """
    output = nacl.bindings.crypto_aead_chacha20poly1305_ietf_decrypt(
        ciphertext, recips_bin, nonce, key
    )
    return output.decode("utf-8")
def encode_pack_message(
    message: str, to_verkeys: Sequence[bytes], from_secret: bytes = None
) -> bytes:
    """
    Assemble a packed message for a set of recipients, optionally including the sender.

    Args:
        message: The message to pack
        to_verkeys: The verkeys to pack the message for
        from_secret: The sender secret

    Returns:
        The encoded message

    """
    wrapper = JweEnvelope()
    # Fresh random 32-byte content encryption key.
    cek = nacl.bindings.crypto_secretstream_xchacha20poly1305_keygen()
    add_pack_recipients(wrapper, cek, to_verkeys, from_secret)
    # The protected header must be set before encryption: its serialized
    # bytes are the AEAD additional authenticated data below.
    wrapper.set_protected(
        OrderedDict(
            [
                ("enc", "xchacha20poly1305_ietf"),
                ("typ", "JWM/1.0"),
                ("alg", "Authcrypt" if from_secret else "Anoncrypt"),
            ]
        ),
        auto_flatten=False,
    )
    ciphertext, nonce, tag = encrypt_plaintext(message, wrapper.protected_bytes, cek)
    wrapper.set_payload(ciphertext, nonce, tag)
    return wrapper.to_json().encode("utf-8")
def decode_pack_message(
    enc_message: bytes, find_key: Callable
) -> Tuple[str, Optional[str], str]:
    """
    Decode a packed message.

    Disassemble and unencrypt a packed message, returning the message content,
    verification key of the sender (if available), and verification key of the
    recipient.

    Args:
        enc_message: The encrypted message
        find_key: Function to retrieve private key

    Returns:
        A tuple of (message, sender_vk, recip_vk)

    Raises:
        ValueError: If the packed message is invalid
        ValueError: If the packed message recipients are invalid
        ValueError: If the pack algorithm is unsupported
        ValueError: If the sender's public key was not provided

    """
    wrapper, recips, is_authcrypt = decode_pack_message_outer(enc_message)
    payload_key, sender_vk = None, None
    # Use the first listed recipient for which we hold the private key.
    for recip_vk in recips:
        recip_secret = find_key(recip_vk)
        if recip_secret:
            payload_key, sender_vk = extract_payload_key(recips[recip_vk], recip_secret)
            break
    if not payload_key:
        raise ValueError(
            "No corresponding recipient key found in {}".format(tuple(recips))
        )
    # Authcrypt messages must identify their sender.
    if not sender_vk and is_authcrypt:
        raise ValueError("Sender public key not provided for Authcrypt message")
    message = decode_pack_message_payload(wrapper, payload_key)
    # recip_vk is the loop variable left at the matched recipient.
    return message, sender_vk, recip_vk
def decode_pack_message_outer(enc_message: bytes) -> Tuple[dict, dict, bool]:
    """
    Decode the outer wrapper of a packed message and extract the recipients.

    Args:
        enc_message: The encrypted message

    Returns: a tuple of the decoded wrapper, recipients, and authcrypt flag

    """
    try:
        wrapper = JweEnvelope.from_json(enc_message)
    except ValidationError:
        raise ValueError("Invalid packed message")
    alg = wrapper.protected.get("alg")
    # Only the two known pack algorithms are accepted.
    if alg not in ("Authcrypt", "Anoncrypt"):
        raise ValueError("Unsupported pack algorithm: {}".format(alg))
    is_authcrypt = alg == "Authcrypt"
    recips = extract_pack_recipients(wrapper.recipients())
    return wrapper, recips, is_authcrypt
def decode_pack_message_payload(wrapper: JweEnvelope, payload_key: bytes) -> str:
    """
    Decode the payload of a packed message once the CEK is known.

    Args:
        wrapper: The decoded message wrapper
        payload_key: The decrypted payload key

    """
    # The AEAD tag is appended to the ciphertext for decryption.
    return decrypt_plaintext(
        wrapper.ciphertext + wrapper.tag,
        wrapper.protected_bytes,
        wrapper.iv,
        payload_key,
    )
def extract_pack_recipients(recipients: Sequence[JweRecipient]) -> dict:
    """
    Extract the pack message recipients into a dict indexed by verkey.

    Args:
        recipients: Recipients to locate

    Raises:
        ValueError: If the recipients block is mal-formatted

    """
    found = {}
    for entry in recipients:
        vk_b58 = entry.header.get("kid")
        if not vk_b58:
            raise ValueError("Blank recipient key")
        if vk_b58 in found:
            raise ValueError("Duplicate recipient key")
        sender_b64 = entry.header.get("sender")
        nonce_b64 = entry.header.get("iv")
        # An iv must accompany a sender entry, and only a sender entry.
        if sender_b64 and not nonce_b64:
            raise ValueError("Missing iv")
        if nonce_b64 and not sender_b64:
            raise ValueError("Unexpected iv")
        found[vk_b58] = {
            "sender": from_b64url(sender_b64) if sender_b64 else None,
            "nonce": from_b64url(nonce_b64) if nonce_b64 else None,
            "key": entry.encrypted_key,
        }
    return found
def extract_payload_key(sender_cek: dict, recip_secret: bytes) -> Tuple[bytes, str]:
    """
    Extract the payload key from pack recipient details.

    Args:
        sender_cek: A recipient entry as built by `extract_pack_recipients`
            (dict with "sender", "nonce" and "key" values)
        recip_secret: The recipient's ed25519 secret (signing) key

    Returns: A tuple of the CEK and sender verkey

    """
    recip_vk = sign_pk_from_sk(recip_secret)
    # Convert the ed25519 signing keypair to curve25519 for use with crypto_box.
    recip_pk = nacl.bindings.crypto_sign_ed25519_pk_to_curve25519(recip_vk)
    recip_sk = nacl.bindings.crypto_sign_ed25519_sk_to_curve25519(recip_secret)
    if sender_cek["nonce"] and sender_cek["sender"]:
        # Authcrypt path: the sender verkey is sealed to the recipient.
        # Unseal it, then open the CEK box authenticated by the sender's key.
        sender_vk_bin = nacl.bindings.crypto_box_seal_open(
            sender_cek["sender"], recip_pk, recip_sk
        )
        sender_vk = sender_vk_bin.decode("utf-8")
        sender_pk = nacl.bindings.crypto_sign_ed25519_pk_to_curve25519(
            b58_to_bytes(sender_vk_bin)
        )
        cek = nacl.bindings.crypto_box_open(
            sender_cek["key"], sender_cek["nonce"], sender_pk, recip_sk
        )
    else:
        # Anoncrypt path: no sender identity; the CEK is in an anonymous
        # sealed box addressed to the recipient.
        sender_vk = None
        cek = nacl.bindings.crypto_box_seal_open(sender_cek["key"], recip_pk, recip_sk)
    return cek, sender_vk
|
import os
import sys

# Verify required third-party packages up front so the test framework fails
# fast with an actionable install hint.
try:
    import pymake
except ImportError:
    # Catch only ImportError: a bare `except:` would also swallow
    # SystemExit/KeyboardInterrupt and mask unrelated failures.
    msg = "Error. Pymake package is not available.\n"
    msg += "Try installing using the following command:\n"
    msg += " pip install https://github.com/modflowpy/pymake/zipball/master"
    raise Exception(msg)

try:
    import flopy
except ImportError:
    msg = "Error. FloPy package is not available.\n"
    msg += "Try installing using the following command:\n"
    msg += " pip install flopy"
    raise Exception(msg)
def running_on_CI():
    """Return True when executing under a CI service (Travis or generic CI)."""
    return any(key in os.environ for key in ("TRAVIS", "CI"))
def set_teardown_test():
    """Return whether test artifacts should be torn down after the run.

    Teardown defaults to True and is disabled when ``--keep`` (case
    insensitive) is present anywhere on the command line.

    Returns
    -------
    bool
        False if ``--keep`` was passed, True otherwise.
    """
    # The original loop tracked an unused enumerate() index; a membership
    # test over the arguments is equivalent and clearer.
    return not any(arg.lower() == "--keep" for arg in sys.argv)
class testing_framework(object):
    """Minimal driver for building and running MODFLOW 6 autotests."""

    def __init__(self):
        return

    def build_mf6_models(self, build_function, idx, exdir):
        """
        Build base and regression MODFLOW 6 models

        Parameters
        ----------
        build_function : function
            user defined function that builds a base model and optionally
            builds a regression model. If a regression model is not built
            then None must be returned from the function for the regression
            model.
        idx : int
            counter that corresponds to exdir entry
        exdir : str
            path to regression model files
        """
        base, regression = build_function(idx, exdir)
        base.write_simulation()
        if regression is None:
            return
        # MF6 simulations and legacy models expose different writer APIs.
        if isinstance(regression, flopy.mf6.MFSimulation):
            regression.write_simulation()
        else:
            regression.write_input()

    def build_mf6_models_legacy(self, build_function, idx, exdir):
        """
        Build base and regression for older MODFLOW 6 models

        Parameters
        ----------
        build_function : function
            user defined function that builds a base model and optionally
            builds a regression model. If a regression model is not built
            then None must be returned from the function for the regression
            model.
        idx : int
            counter that corresponds to exdir entry
        exdir : str
            path to regression model files
        """
        base, regression = build_function(idx, exdir)
        base.write_simulation()
        if regression is None:
            return
        regression.write_input()

    def run_mf6(self, sim):
        """
        Run the MODFLOW 6 simulation and compare to existing head file or
        appropriate MODFLOW-2005, MODFLOW-NWT, MODFLOW-USG, or MODFLOW-LGR run.

        Parameters
        ----------
        sim : Simulation object
            MODFLOW 6 autotest simulation object that runs the base and
            regression models, compares the results, and tears down the
            test if successful.
        """
        print(os.getcwd())
        sim.set_model(sim.name, testModel=False)
        sim.run()
        sim.compare()
        if sim.exfunc is not None:
            sim.exfunc(sim)
        sim.teardown()
|
from nmigen.build import *
__all__ = ('DALIResource',)
def DALIResource(*args, rx, tx, conn = None, attrs = None):
    """Build a DALI bus resource with an input ``rx`` and output ``tx`` pin.

    Extra positional args and ``conn`` are forwarded to the nmigen resource
    machinery; ``attrs`` (e.g. IO attributes) is appended when provided.
    """
    signals = [
        Subsignal('rx', Pins(rx, dir = 'i', conn = conn, assert_width = 1)),
        Subsignal('tx', Pins(tx, dir = 'o', conn = conn, assert_width = 1)),
    ]
    if attrs is not None:
        signals.append(attrs)
    return Resource.family(*args, default_name = 'dali', ios = signals)
|
#!/usr/bin/python3
from botbase import *
# Regexes for scraping the German Landau press release text:
# district-wide totals for cases, recovered, and deaths...
_gesamt_c = re.compile(r"Insgesamt wurden *([0-9.]+) *Fälle")
_gesamt_g = re.compile(r"([0-9.]+) *Personen sind gesundet")
_gesamt_d = re.compile(r"Insgesamt sind *([0-9.]+) *Personen verstorben")
# ...and the per-city line for Landau: cases, recovered, deaths.
_landau = re.compile(r"Stadt Landau *: *([0-9.]+) Personen *\( *([0-9.]+) *davon gesundet, *([0-9.]+) *verstorben")
def landau(sheets):
    """Scrape the newest Landau press release and update case numbers for
    the city of Landau (7313) and Südliche Weinstraße (7337).

    Raises NotYetAvailableException when today's numbers are not out yet.
    Returns True on success.
    """
    soup = get_soup("https://www.landau.de/Verwaltung-Politik/Pressemitteilungen/")
    m = next(x for x in soup.find("section").findAll(class_="mitteilungen") if "Fallzahlen im" in x.get_text())
    date = check_date(m.find(class_="date").get_text(), "Landau")
    url = urljoin("https://www.landau.de/", m.find(class_="liste_titel").find("a")["href"])
    print("Getting", url)
    soup = get_soup(url)
    text = soup.find(class_="mitteilungen_detail").get_text(" ").strip()
    if not today().strftime("%d.%m.%Y") in text:
        # BUG FIX: the previous message referenced the undefined name `ps`,
        # so this path raised NameError instead of NotYetAvailableException.
        raise NotYetAvailableException("Landau noch alt: " + text[:200])
    c, g, d = map(force_int, _landau.search(text).groups())
    update(sheets, 7313, c=c, d=d, g=g, sig="Bot")  # Landau
    # District totals minus the city figures give the Südliche Weinstraße share.
    c2 = force_int(_gesamt_c.search(text).group(1)) - c
    d2 = force_int(_gesamt_d.search(text).group(1)) - d
    g2 = force_int(_gesamt_g.search(text).group(1)) - g
    update(sheets, 7337, c=c2, d=d2, g=g2, sig="Bot")  # Südl. Weinstraße
    return True
# Register the scraper with the bot scheduler for district 7337.
# NOTE(review): Task's positional arguments (13, 1, 17, 35, 600) look like a
# scheduling window and retry interval — confirm against botbase.Task.
schedule.append(Task(13, 1, 17, 35, 600, landau, 7337))
# Allow running this scraper standalone for debugging.
if __name__ == '__main__': landau(googlesheets())
|
import torch
import numpy as np
import pickle
from railrl.misc.asset_loader import load_local_or_remote_file
import railrl.torch.pytorch_util as ptu
# Load a pretrained conditional VAE checkpoint and a trajectory dataset, then
# plot the latent-space distance from each step of one trajectory to its
# final (goal) state.
vae_path = '/home/khazatsky/rail/data/rail-khazatsky/sasha/PCVAE/DCVAE/run20/id0/itr_600.pkl'
# vae_path = '/home/shikharbahl/research/railrl-private/data/local/shikhar/corl2019/pointmass/real/run0/id0/vae.pkl'
vae = load_local_or_remote_file(vae_path)
dataset_path = '/home/khazatsky/rail/data/train_data.npy'
dataset = load_local_or_remote_file(dataset_path).item()
import matplotlib.pyplot as plt
# Pick one trajectory; assumes dataset['observations'] is indexable by
# trajectory and each trajectory is a (timesteps, obs_dim) array — TODO confirm.
traj = dataset['observations'][17]
n = traj.shape[0]
# First observation conditions the VAE (x0); last observation is the goal.
x0 = traj[0]
x0 = ptu.from_numpy(x0.reshape(1, -1))
goal = traj[-1]
vae = vae.cpu()
# distrib=False presumably returns a point estimate rather than a
# distribution — verify against the VAE's encode() implementation.
latent_goal = vae.encode(ptu.from_numpy(goal.reshape(1,-1)), x0, distrib=False)
decoded_goal, _ = vae.decode(latent_goal,x0)
log_probs = []
distances = []
for i in range(n):
    x = traj[i]
    latent = vae.encode(ptu.from_numpy(x.reshape(1,-1)), x0, distrib=False)
    decoded, _ = vae.decode(latent, x0)
    # Euclidean distance between the step's latent and the goal latent.
    distances.append(np.linalg.norm(ptu.get_numpy(latent) - ptu.get_numpy(latent_goal)))
    # NOTE: log_probs is collected but never plotted below.
    log_probs.append(ptu.get_numpy(vae.logprob(decoded_goal, decoded, mean=False).exp())[0])
plt.plot(np.arange(n), np.array(distances))
# Dead experimental code kept for reference (string literal, never executed).
'''
dataset_path = '/home/shikharbahl/research/visual_foresight/examples/train_data.npy'
dataset = np.load(dataset_path).item()
traj = dataset['observations'][0]
n = traj.shape[0]
import matplotlib.pyplot as plt
def get_distances(i):
    global vae
    traj = dataset['observations'][i]
    x0 = traj[0]
    x0 = ptu.from_numpy(x0.reshape(1, -1))
    goal = traj[-1]
    vae = vae.cpu()
    latent_goal = vae.encode(ptu.from_numpy(goal.reshape(1,-1)), x0, distrib=False)
    decoded_goal, _ = vae.decode(latent_goal)
    n = traj.shape[0]
    log_probs = []
    distances = []
    for i in range(n):
        x = traj[i]
        latent = vae.encode(ptu.from_numpy(x.reshape(1,-1)), x0, distrib=False)
        decoded, _ = vae.decode(latent)
        distances.append(np.linalg.norm(ptu.get_numpy(latent) - ptu.get_numpy(latent_goal)))
        log_probs.append(ptu.get_numpy(vae.logprob(decoded_goal, decoded, mean=False).exp())[0])
    return np.array(distances)
dists = np.array([get_distances(i) for i in range(1)])
# import ipdb; ipdb.set_trace()
plt.plot(np.arange(n), np.mean(dists, axis=0))
'''
plt.show()
|
"""API module."""
import operator as ops
from flask_restx import Namespace, Resource, fields, reqparse
from sqlalchemy import func
from publications_microservice import __version__
from publications_microservice.constants import BlockChainStatus
from publications_microservice.exceptions import (
BlockedPublication,
DistanceFilterMissingParameters,
PublicationDoesNotExist,
)
from publications_microservice.models import (
Publication,
PublicationImage,
PublicationStar,
db,
)
from publications_microservice.namespaces.questions import publication_question_model
from publications_microservice.utils import FilterParam
# Flask-RESTX namespace under which all publication endpoints are registered.
api = Namespace("Publications", description="Publications operations")
@api.errorhandler(DistanceFilterMissingParameters)
def handle_missing_distance_parameters(_error: DistanceFilterMissingParameters):
    """Handle missing distance params."""
    body = {
        "message": "Either all of max_distance, latitude and longitude should be passed to perform distance based filtering or none of them"
    }
    return body, 400
# https://github.com/python-restx/flask-restx/issues/268
# https://github.com/noirbizarre/flask-restplus/issues/707
# @api.errorhandler(BlockedPublication)
# def handle_publication_has_been_blocked(_error: BlockedPublication):
# """Handle blocked user."""
# return {"message": "The publication has been blocked"}, 403
@api.errorhandler(PublicationDoesNotExist)
def handle_publication_does_not_exist(_error: PublicationDoesNotExist):
    """Handle non-existing publication exception."""
    body = {'message': "No publication by that id was found."}
    return body, 404
# Serializer for an image attached to a publication.
publication_image_model = api.model(
    "Publication Image",
    {
        "url": fields.String(required=True, description="URL location for the image"),
        "id": fields.String(readonly=True, description="UUID for this image"),
    },
)
# Incoming location payload: plain latitude/longitude floats.
loc_model = api.model(
    "Location",
    {
        "latitude": fields.Float(description="latitude", required=True),
        "longitude": fields.Float(description="longitude", required=True),
    },
)
# Outgoing location: extracts coordinates from the stored PostGIS point.
# NOTE(review): each coordinate issues a session round trip per row via
# db.session.scalar — verify this is acceptable for list endpoints.
point_model = api.model(
    "Point",
    {
        "latitude": fields.Float(attribute=lambda x: db.session.scalar(func.ST_X(x))),
        "longitude": fields.Float(attribute=lambda x: db.session.scalar(func.ST_Y(x))),
    },
)
# Fields shared by incoming and outgoing publication representations.
base_publication_model = api.model(
    'Base Publication',
    {
        "id": fields.Integer(
            readonly=True, description="The unique identifier of the publication"
        ),
        "user_id": fields.Integer(description="Id of owner user"),
        "title": fields.String(
            required=True, description="The title of the publication."
        ),
        # BUG FIX: the description text was passed as `name=` instead of
        # `description=`, unlike every other field in this model.
        "description": fields.String(
            required=True, description="A description of the publication"
        ),
        "rooms": fields.Integer(
            required=True,
            description="The amount of rooms in the published rental place.",
        ),
        "beds": fields.Integer(
            required=True,
            description="The amount of beds in the published rental place",
        ),
        "bathrooms": fields.Integer(
            required=True, description="The amount of bathrooms in the rental place"
        ),
        "price_per_night": fields.Float(
            required=True, description="How much a night costs in the rental place"
        ),
        "images": fields.List(
            fields.Nested(publication_image_model),
            required=True,
            description="List of images URLs",
        ),
    },
)
# Request body for creating a publication: base fields plus a raw location.
new_publication_model = api.inherit(
    "New Publication Model",
    base_publication_model,
    {
        "loc": fields.Nested(
            loc_model, required=True, description="Location of the rental place",
        ),
    },
)
# PATCH body: only blockchain-related fields may be partially updated.
publication_patch_model = api.model(
    'Publication patch model',
    {
        "blockchain_status": fields.String(
            required=False,
            description="The status on the blockchain",
            enum=[x.value for x in BlockChainStatus],
            default=BlockChainStatus.UNSET.value,
            attribute='blockchain_status.value',
        ),
        "blockchain_transaction_hash": fields.String(
            required=False, description="The hash of the transaction on the blockchain"
        ),
        "blockchain_id": fields.Integer(
            required=False, description="The id on the blockchain"
        ),
    },
)
# Serializer for a single star (favorite) on a publication.
new_star_model = api.model(
    "Starred publication",
    {
        "user_id": fields.Integer(
            description="The unique identifier for the user starring the publication"
        ),
        "created_at": fields.DateTime(
            description="Time when the publication was starred"
        ),
        "publication_id": fields.Integer(
            description="The unique identifier for the publication being starred"
        ),
    },
)
# Full outgoing representation: base fields plus server-managed state.
publication_model = api.inherit(
    'Created Publication',
    base_publication_model,
    {
        "loc": fields.Nested(point_model),
        "publication_date": fields.DateTime(description="Date of the publication"),
        "blocked": fields.Boolean(description="Is blocked?"),
        "questions": fields.List(
            fields.Nested(publication_question_model),
            description="Questions regarding the publication",
        ),
        "blockchain_status": fields.String(
            required=False,
            description="The status on the blockchain",
            enum=[x.value for x in BlockChainStatus],
            default=BlockChainStatus.UNSET.value,
            attribute='blockchain_status.value',
        ),
        "blockchain_id": fields.Integer(description="The id on the blockchain"),
        "blockchain_transaction_hash": fields.String(
            description="The hash of the transaction on the blockchain"
        ),
        "stars": fields.List(
            fields.Nested(new_star_model), description="Stars given to the publication"
        ),
    },
)
# Query-string parser for listing publications. Most filters use
# store_missing=False so absent params never reach the filter loop.
publication_parser = reqparse.RequestParser()
publication_parser.add_argument(
    "bathrooms",
    type=FilterParam("bathrooms", ops.ge),
    help="minimum amount of bathrooms needed",
    store_missing=False,
)
publication_parser.add_argument(
    "rooms",
    type=FilterParam("rooms", ops.ge),
    help="minimum amount of rooms needed",
    store_missing=False,
)
publication_parser.add_argument(
    "beds",
    type=FilterParam("beds", ops.ge),
    help="minimum amount of beds needed",
    store_missing=False,
)
publication_parser.add_argument(
    "price_per_night_min",
    type=FilterParam(
        "price_per_night_min", ops.ge, attribute="price_per_night", schema="number"
    ),
    help="min price per night",
    store_missing=False,
)
publication_parser.add_argument(
    "price_per_night_max",
    type=FilterParam(
        "price_per_night_max", ops.le, attribute="price_per_night", schema="number"
    ),
    help="max price per night",
    store_missing=False,
)
publication_parser.add_argument(
    "user_id",
    type=FilterParam("user_id", ops.eq),
    help="id of owner user",
    store_missing=False,
)
# Defaults to CONFIRMED so unconfirmed publications are hidden unless asked for.
publication_parser.add_argument(
    "blockchain_status",
    type=FilterParam("blockchain_status", ops.eq, schema=str),
    help="blockchain_status",
    default=BlockChainStatus.CONFIRMED.value,
)
publication_parser.add_argument(
    "starring_user_id",
    type=FilterParam("starring_user_id", ops.eq, attribute="stars.user_id"),
    help="Id of starring user",
    store_missing=False,
)
publication_parser.add_argument(
    "blockchain_transaction_hash",
    type=FilterParam("blockchain_transaction_hash", ops.eq, schema=str),
    help="The hash of the transaction that created the publication on the blockchain",
    store_missing=False,
)
# The three distance params use store_missing=True on purpose: the GET
# handler validates that they are passed all-or-none.
publication_parser.add_argument(
    "latitude",
    type=float,
    help="The latitude for the point near to look for. Note: max_distance and longitude are required when using latitude.",
    store_missing=True,
)
publication_parser.add_argument(
    "longitude",
    type=float,
    help="The longitude for the point near to look for. Note: max_distance and latitude are required when using longitude.",
    store_missing=True,
)
publication_parser.add_argument(
    "max_distance",
    type=float,
    help="The maximum distance (in km.) for the point near to look for. Note: latitude and longitude are required when using max_distance.",
    store_missing=True,
)
def conditional_filter(attr, val):
    """Build the SQLAlchemy criterion for the `filter_blocked` parameter.

    When val is True, keep only rows whose `attr` (the `blocked` column) is
    False; otherwise return a tautology so no filtering is applied.
    """
    # `== True` / `== False` are intentional: `attr` is a SQLAlchemy column
    # and `==` builds a SQL expression; `is` comparisons would not.
    if val == True:  # noqa: E712
        return attr == False  # noqa: E712
    else:
        return 1 == 1
# Hide blocked publications by default; pass filter_blocked=false to include
# them. The incoming string is mapped to a bool via `transform`.
# NOTE(review): the default is the string "true" — confirm FilterParam applies
# `transform` to defaults as well, otherwise `val == True` never matches.
publication_parser.add_argument(
    "filter_blocked",
    type=FilterParam(
        "filter_blocked",
        conditional_filter,
        attribute="blocked",
        schema=bool,
        transform={"true": True, "false": False}.get,
    ),
    store_missing=True,
    default="true",
)
@api.route('')
class PublicationsResource(Resource):
    """Collection endpoint: create publications and list/filter them."""

    @api.doc('create_publication')
    @api.expect(new_publication_model)
    @api.marshal_with(publication_model)
    def post(self):
        """Create a new publication."""
        data = api.payload
        # TODO: it'd be cool to marshal this on the model
        # Convert the {latitude, longitude} payload to a WKT POINT string for
        # the geography column.
        data['loc'] = f"POINT({data['loc']['latitude']} {data['loc']['longitude']})"
        images = []
        for img_data in data["images"]:
            new_img = PublicationImage(**img_data)
            images.append(new_img)
            db.session.add(new_img)
        data["images"] = images
        new_publication = Publication(**data)
        db.session.add(new_publication)
        db.session.commit()
        return new_publication

    @api.doc('list_publication')
    @api.marshal_list_with(publication_model)
    @api.expect(publication_parser)
    def get(self):
        """Get all publications, applying any query-string filters."""
        params = publication_parser.parse_args()
        # Distance filtering needs all three of latitude/longitude/max_distance.
        has_lat = params.latitude is not None
        has_lon = params.longitude is not None
        has_dist = params.max_distance is not None
        if any((has_lat, has_lon, has_dist)) and not all((has_lat, has_lon, has_dist)):
            raise DistanceFilterMissingParameters
        query = Publication.query  # noqa: E712
        for filter_name, filter_op in params.items():
            if not isinstance(filter_op, FilterParam):
                if filter_op is None:
                    continue
                # Values that arrived as raw defaults (not parsed through their
                # FilterParam type) are coerced here by looking up the matching
                # parser argument and applying its type.
                for i in publication_parser.args:
                    if i.name == filter_name:
                        filter_op = i.type(filter_op)
                        break
                # Non-FilterParam params (latitude, longitude, ...) are skipped.
                if not isinstance(filter_op, FilterParam):
                    continue
            query = filter_op.apply(query, Publication)
        if params.max_distance:
            # max_distance is in km; ST_DWithin on geography works in meters.
            point = func.ST_GeographyFromText(
                f"POINT({params.latitude} {params.longitude})", srid=4326
            )
            query = query.filter(
                func.ST_DWithin(Publication.loc, point, params.max_distance * 1000)
            )
        return query.all()
@api.route('/<int:publication_id>')
@api.param('publication_id', 'The publication unique identifier')
@api.response(403, "Publication has been blocked")
class PublicationResource(Resource):
    """Single-publication endpoint: fetch, replace, patch and block."""

    # NOTE: get/put return the 403 inline rather than raising
    # BlockedPublication — see the commented-out errorhandler above
    # (flask-restx errorhandler issues #268/#707).
    @api.doc('get_publication')
    @api.response(200, "Publication found", model=publication_model)
    @api.response(404, 'Publication not found')
    @api.response(403, "Publication blocked")
    def get(self, publication_id):
        """Get a publication by id."""
        publication = Publication.query.filter(Publication.id == publication_id).first()
        if publication is None:
            raise PublicationDoesNotExist
        if publication.blocked:
            return {"message": "Publication is blocked"}, 403
        return api.marshal(publication, publication_model), 200

    @api.doc("put_publication")
    @api.response(200, "Publication found", model=publication_model)
    @api.response(403, "Publication blocked")
    @api.response(404, 'Publication not found')
    @api.expect(new_publication_model)
    def put(self, publication_id):
        """Replace a publication by id."""
        publication = Publication.query.filter(Publication.id == publication_id).first()
        if publication is None:
            raise PublicationDoesNotExist
        if publication.blocked:
            return {"message": "Publication is blocked"}, 403
        data = api.payload
        # TODO: it'd be cool to marshal this on the model
        data['loc'] = f"POINT({data['loc']['latitude']} {data['loc']['longitude']})"
        # Replace semantics: old images are dropped before the new set is added.
        for image in publication.images:
            db.session.delete(image)
        images = []
        for img_data in data["images"]:
            new_img = PublicationImage(**img_data)
            images.append(new_img)
            db.session.add(new_img)
        data["images"] = images
        publication.update_from_dict(**data)
        db.session.merge(publication)
        db.session.commit()
        return api.marshal(publication, publication_model), 200

    @api.doc("patch_publication")
    @api.response(200, "Publication found", model=publication_model)
    @api.response(404, 'Publication not found')
    @api.expect(publication_patch_model)
    def patch(self, publication_id):
        """Partially update a publication (blockchain fields) by id."""
        publication = Publication.query.filter(Publication.id == publication_id).first()
        if publication is None:
            raise PublicationDoesNotExist
        if publication.blocked:
            raise BlockedPublication
        data = api.payload
        publication.update_from_dict(**data)
        db.session.merge(publication)
        db.session.commit()
        return api.marshal(publication, publication_model), 200

    @api.doc("block_publication")
    @api.response(200, "Publication successfully blocked")
    def delete(self, publication_id):
        """Block a publication."""
        publication = Publication.query.filter(Publication.id == publication_id).first()
        if publication is None:
            raise PublicationDoesNotExist
        if publication.blocked:
            raise BlockedPublication
        publication.blocked = True
        db.session.merge(publication)
        db.session.commit()
        return {"message": "Publication was successfully blocked"}, 200
# Parser for filtering star listings (GET /<id>/star).
publication_star_parser = reqparse.RequestParser()
publication_star_parser.add_argument(
    "user_id",
    type=FilterParam("user_id", ops.eq),
    help="Unique identifier for the user",
    store_missing=False,
)
# Parser for star/unstar bodies: requires the acting user's id.
publication_star_uid_parser = reqparse.RequestParser()
publication_star_uid_parser.add_argument(
    "user_id", type=int, help="Unique identifier for the user", required=True,
)
@api.route('/<int:publication_id>/star')
@api.param('publication_id', 'The publication unique identifier')
class PublicationStarResource(Resource):
    """Star (favorite) operations for a single publication."""

    @api.doc('star_publication')
    @api.response(200, "Publication starred")
    @api.response(403, "Publication has been blocked")
    @api.response(404, 'Publication not found')
    @api.expect(publication_star_uid_parser)
    @api.marshal_with(new_star_model)
    def post(self, publication_id):
        """Star a publication."""
        publication = Publication.query.filter(Publication.id == publication_id).first()
        if publication is None:
            raise PublicationDoesNotExist
        if publication.blocked:
            raise BlockedPublication
        args = publication_star_uid_parser.parse_args()
        new_star = PublicationStar(user_id=args.user_id, publication_id=publication_id)
        db.session.add(new_star)
        db.session.commit()
        return new_star

    @api.doc('unstar_publication')
    @api.response(200, "Publication unstarred")
    @api.response(400, "Bad request")
    @api.expect(publication_star_uid_parser)
    def delete(self, publication_id):
        """Unstar a publication."""
        args = publication_star_uid_parser.parse_args()
        publication_star = PublicationStar.query.filter(
            PublicationStar.publication_id == publication_id,
            PublicationStar.user_id == args.user_id,
        ).first()
        if publication_star is None:
            return (
                {
                    "message": f"Publication {publication_id} was not starred by user {args.user_id}"
                },
                400,
            )
        db.session.delete(publication_star)
        db.session.commit()
        return {"message": "Successfully deleted"}, 200

    @api.doc('get_starrings')
    @api.marshal_list_with(new_star_model)
    @api.response(200, "Publications filtered")
    @api.response(400, "Bad request")
    @api.expect(publication_star_parser)
    def get(self, publication_id):
        """Get the stars for this publication, optionally filtered by user."""
        params = publication_star_parser.parse_args()
        query = PublicationStar.query.filter(
            PublicationStar.publication_id == publication_id,
        )
        for _, filter_op in params.items():
            if not isinstance(filter_op, FilterParam):
                continue
            query = filter_op.apply(query, PublicationStar)
        return query.all()
|
# coding: utf-8
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse as r
from django.shortcuts import render, redirect, get_object_or_404, render_to_response
from django.http import HttpResponse, HttpResponseBadRequest
from django.utils.translation import ugettext as _
from django.template import RequestContext
import datetime
from parsifal.reviews.models import *
from parsifal.reviews.decorators import author_required, author_or_visitor_required, visitor_required
@author_or_visitor_required
@login_required
def comments(request, username, review_name):
    """Render the comments page for a review: all visitor comments, the
    closed subset, and the ones this user has not seen yet."""
    review = get_object_or_404(Review, name=review_name, author__username__iexact=username)
    visitor_comments = review.get_visitors_comments(request.user)
    context = {
        'review': review,
        'comments': visitor_comments,
        'unseen_comments': review.get_visitors_unseen_comments(request.user),
        'closed_comments': visitor_comments.filter(is_open=False),
    }
    return render(request, 'comments/comments.html', context)
@author_or_visitor_required
@login_required
def comment_detailed(request, username, review_name, comment_id):
    """Render a single visitor comment and record that this user saw it."""
    review = get_object_or_404(Review, name=review_name, author__username__iexact=username)
    unseen = review.get_visitors_unseen_comments(request.user)
    comment = VisitorComment.objects.get(pk=comment_id)
    # Mark the comment as seen by the current user.
    CommentSeen(review=review, user=request.user, comment=comment).save()
    context = { 'review': review, 'comment': comment, 'unseen_comments': unseen }
    return render(request, 'comments/comment_detailed.html', context)
@login_required
def save_visitor_comment(request):
    """Create a VisitorComment from POST data.

    Renders the child-comment partial for replies (when `parent` is given),
    or returns a plain success message for top-level comments. Any failure
    yields a 400 response.
    """
    try:
        review_id = request.POST['review-id']
        # Renamed from `comment` so the model instance below does not shadow
        # the raw POST text.
        comment_text = request.POST['comment']
        about = request.POST['about']
        parent_id = request.POST.get('parent', None)
        user = request.user
        to = request.POST['to']
        date = datetime.datetime.now()
        review = Review.objects.get(pk=review_id)
        comment = VisitorComment(
            review=review,
            comment=comment_text,
            about=about,
            to=to,
            date=date,
            user=user)
        if parent_id:
            parent_comment = VisitorComment.objects.get(pk=parent_id)
            comment.parent = parent_comment
        comment.save()
        if parent_id:
            context = RequestContext(request, {'answer': comment})
            return render_to_response('comments/partial_children_comment.html', context)
        # NOTE(review): the grammar of this message is wrong ("have been
        # sended"), but it is a translatable msgid — fix it together with the
        # translation catalog, not here.
        return HttpResponse(_('Your comment have been sended successfully!'))
    except Exception as e:
        # BUG FIX: `print e` is Python 2-only syntax; the parenthesized form
        # behaves identically on Python 2 and is valid on Python 3.
        print(e)
        return HttpResponseBadRequest()
def close_comment_thread(request):
    """Mark a visitor comment thread as closed and render the closed badge.

    Returns a 400 response on any failure.
    """
    # NOTE(review): unlike the other views in this module, this one has no
    # access decorators — confirm whether @login_required is intended here.
    try:
        comment_id = request.POST['comment_id']
        comment = VisitorComment.objects.get(pk=comment_id)
        comment.is_open = False
        comment.save()
        context = RequestContext(request, {'comment': comment})
        return render_to_response('comments/partial_is_closed.html', context)
    except Exception as e:
        # BUG FIX: `print e` is Python 2-only syntax; the parenthesized form
        # behaves identically on Python 2 and is valid on Python 3.
        print(e)
        return HttpResponseBadRequest()
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# CONFIGURATION EXAMPLE
# "tts": {
# "module": "ivonaComand",
# "ivonaComand": {
# "path": "~/projects/mycroft/ivona/IVONA/ivona-telecom-pc_linux-8khz-1.6.38.186-pl_maja/bin/ivonacl",
# "params": {
# "-l": "~/projects/mycroft/ivona/IVONA/ivona-telecom-pc_linux-8khz-1.6.38.186-pl_maja/lib/ivona/voices/64/libvoice_pl_maja.so.1.6",
# "-x": "~/projects/mycroft/ivona/IVONA/ivona-telecom-pc_linux-8khz-1.6.38.186-pl_maja/lib/ivona/voices/vox_pl_maja8v",
# "-c": "~/projects/mycroft/ivona/IVONA//Certificate_of_authenticity_Cityparking.ca",
# "--encoding": "utf-8",
# "--dur": 75,
# "--vol": 95,
# }
# }
# }
import subprocess
from mycroft.tts.tts import TTS, TTSValidator
from mycroft.configuration import Configuration
from mycroft.util import LOG
class IvonaTTSComand(TTS):
    """TTS backend that shells out to the Ivona command line binary.

    Configuration comes from the "tts"/"ivonaComand" section (see the example
    at the top of this file): "path" is the binary and "params" a mapping of
    extra command line flags to values.
    """

    def __init__(self, lang, config):
        super(IvonaTTSComand, self).__init__(lang, config, IvonaTTSComandValidator(self))

    def get_tts(self, sentence, wav_file):
        """Synthesize `sentence` into `wav_file` via the external binary.

        Returns (wav_file, None): no phoneme data is produced.
        """
        config = Configuration.get().get("tts").get("ivonaComand")
        binary = config.get("path", "")
        config_params = config.get("params", {})
        # Command shape: <bin> -t <sentence> [<flag> <value> ...] <wav_file>
        # (The previous version also built an unused shell-style string in
        # parallel and printed it for debugging; that dead code is removed.)
        args = [binary, "-t", sentence]
        for key, value in config_params.items():
            args.append(key)
            args.append(str(value))
        args.append(wav_file)
        subprocess.call(args)
        return (wav_file, None)  # No phonemes
class IvonaTTSComandValidator(TTSValidator):
    """Validator that checks the configured Ivona binary can be executed."""

    def __init__(self, tts):
        super(IvonaTTSComandValidator, self).__init__(tts)

    def validate_lang(self):
        # TODO
        pass

    def validate_connection(self):
        """Raise when the configured Ivona binary cannot be invoked."""
        try:
            cfg = Configuration.get().get("tts").get("ivonaComand")
            binary = cfg.get("path", "")
            subprocess.call([binary, '--help'])
        except Exception:
            raise Exception(
                'there is no Ivona')

    def get_tts_class(self):
        return IvonaTTSComand
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Taylor Childers (john.taylor.childers@cern.ch)
from mpi4py import MPI
import logging
import threading
from pandayoda.common import MessageTypes
logger = logging.getLogger(__name__)
'''
This module should provide all the messaging functions for communication between Yoda & Droid.
The current implementation uses MPI, but could easily be replaced with another form.
'''
# Yoda is always Rank 0
YODA_RANK = 0
# Message tags used to route MPI messages between Yoda and Droid components.
# NOTE(review): FROM_DROID is assigned 1 here and reassigned to 6 below, and
# FROM_YODA is assigned 2 then reassigned to 8 — the first assignments are
# dead. Confirm the effective values (6 and 8) are the intended ones.
FROM_DROID = 1
FROM_YODA = 2
TO_YODA_WORKMANAGER = 3
FROM_YODA_WORKMANAGER = 4
TO_DROID = 5
FROM_DROID = 6
TO_YODA = 7
FROM_YODA = 8
TO_YODA_FILEMANAGER = 9
# Serializes MPI calls made from multiple threads (see send/receive below).
using_mpi_lock = threading.Lock()
# droid sends job request to yoda
def send_job_request():
    """Droid -> Yoda: ask the work manager for a new panda job."""
    msg = {'type': MessageTypes.REQUEST_JOB}
    return send_message(msg, dest=YODA_RANK, tag=TO_YODA_WORKMANAGER)
# yoda receives job request from droid
def recv_job_request():
    """Yoda: receive a job request from any droid rank."""
    return receive_message(MPI.ANY_SOURCE, tag=TO_YODA_WORKMANAGER)
# droid sends event ranges request to yoda
def send_eventranges_request(pandaid, taskid, jobsetid):
    """Droid -> Yoda: request event ranges for the given job/task/jobset ids."""
    msg = {'type': MessageTypes.REQUEST_EVENT_RANGES, 'pandaID': pandaid, 'taskID': taskid, 'jobsetID': jobsetid}
    return send_message(msg, dest=YODA_RANK, tag=TO_YODA_WORKMANAGER)
# yoda receives event ranges request from droid
def recv_eventranges_request():
    """Yoda: receive an event ranges request from any droid rank."""
    return receive_message(MPI.ANY_SOURCE, tag=TO_YODA_WORKMANAGER)
# yoda sends new job to droid
def send_droid_new_job(job, droid_rank):
    """Yoda -> Droid: deliver a new panda job definition."""
    msg = {'type': MessageTypes.NEW_JOB, 'job': job}
    return send_message(msg, dest=droid_rank, tag=FROM_YODA_WORKMANAGER)
# droid receives new job from yoda
def recv_job():
    """Droid: receive a new job from yoda's work manager."""
    return receive_message(YODA_RANK, FROM_YODA_WORKMANAGER)
# yoda sends new event ranges to droid
def send_droid_new_eventranges(eventranges, droid_rank):
    """Yoda -> Droid: deliver new event ranges."""
    msg = {'type': MessageTypes.NEW_EVENT_RANGES, 'eventranges': eventranges}
    return send_message(msg, dest=droid_rank, tag=FROM_YODA_WORKMANAGER)
# droid receives new event ranges from yoda
def recv_eventranges():
    """Droid: receive new event ranges from yoda's work manager."""
    return receive_message(YODA_RANK, FROM_YODA_WORKMANAGER)
def send_droid_no_job_left(droid_rank):
    """Yoda -> Droid: signal that no more jobs are available."""
    msg = {'type': MessageTypes.NO_MORE_JOBS}
    return send_message(msg, dest=droid_rank, tag=FROM_YODA_WORKMANAGER)
def send_droid_exit(droid_rank):
msg = {'type': MessageTypes.DROID_EXIT}
return send_message(msg, dest=droid_rank, tag=TO_DROID)
def recv_yoda_message():
return receive_message(YODA_RANK, FROM_YODA)
def send_droid_has_exited(msg):
msg = {'type': MessageTypes.DROID_HAS_EXITED, 'message': msg}
return send_message(msg, dest=YODA_RANK, tag=TO_YODA)
def send_droid_wallclock_expiring(droid_rank):
msg = {'type': MessageTypes.WALLCLOCK_EXPIRING}
return send_message(msg, dest=droid_rank, tag=FROM_YODA)
def send_file_for_stage_out(output_file_data):
msg = {'type': MessageTypes.OUTPUT_FILE, 'output_file_data': output_file_data}
return send_message(msg, dest=YODA_RANK, tag=TO_YODA_FILEMANAGER)
def get_droid_message_for_yoda():
return receive_message(MPI.ANY_SOURCE, TO_YODA)
def get_droid_message_for_workmanager():
return receive_message(MPI.ANY_SOURCE, TO_YODA_WORKMANAGER)
def get_droid_message_for_filemanager():
return receive_message(MPI.ANY_SOURCE, TO_YODA_FILEMANAGER)
def send_message(data, dest=None, tag=None):
    """ basic MPI isend; mpi4py handles the object translation for sending
    over MPI so your message can be python objects.
    data: this is the object you want to send, e.g. a dictionary, list, class object, etc.
    dest: this is the destination rank
    tag: this tag can be used to filter messages
    return: returns a Request object which is used to test for communication completion,
            through a blocking call, Request.wait(), and a non-blocking call, Request.test(). """
    global using_mpi_lock
    try:
        logger.debug('Rank %05d: send message: %s', MPI.COMM_WORLD.Get_rank(), data)
        using_mpi_lock.acquire()
        try:
            request = MPI.COMM_WORLD.isend(data, dest=dest, tag=tag)
        finally:
            # BUG FIX: release in `finally` -- previously an exception raised by
            # isend() left the lock held forever, deadlocking every later MPI call.
            using_mpi_lock.release()
        logger.debug('Rank %05d: message sent', MPI.COMM_WORLD.Get_rank())
    except Exception:
        logger.exception('Rank %05i: exception received during sending request for a job.', MPI.COMM_WORLD.Get_rank())
        raise
    return request
def receive_message(source=MPI.ANY_SOURCE, tag=None):
    """ basic MPI irecv; mpi4py handles the object translation so received
    messages can be python objects.
    source: this is the source rank
    tag: this tag can be used to filter messages
    return: returns a Request object which is used to test for communication completion,
            through a blocking call, Request.wait(), and a non-blocking call, Request.test(). """
    global using_mpi_lock
    # using non-blocking MPI irecv
    try:
        logger.debug('Rank %05d: requesting message', MPI.COMM_WORLD.Get_rank())
        using_mpi_lock.acquire()
        try:
            #buf = bytearray(1<<30)
            if tag is not None:
                request = MPI.COMM_WORLD.irecv(source=source, tag=tag)
            else:
                request = MPI.COMM_WORLD.irecv(source=source)
        finally:
            # BUG FIX: release in `finally` -- previously an exception raised by
            # irecv() left the lock held forever, deadlocking every later MPI call.
            using_mpi_lock.release()
        logger.debug('Rank %05d: done requesting message', MPI.COMM_WORLD.Get_rank())
    except Exception:
        logger.exception('Rank %05i: exception received while trying to receive a message.', MPI.COMM_WORLD.Get_rank())
        raise
    return request
|
import numpy as np
import pandas as pd
from datetime import datetime
from handwriting_sample.base import HandwritingDataBase
from handwriting_sample.reader import HandwritingSampleReader
from handwriting_sample.writer import HandwritingSampleWriter
from handwriting_sample.validator import HandwritingSampleValidator
from handwriting_sample.transformer import HandwritingSampleTransformer, TransformerAngleTypeException
from handwriting_sample.visualizer import HandwritingSampleVisualizer
class HandwritingSample(HandwritingDataBase):
    """Class implementing the management of sample handwriting samples"""
    # Handwriting data helpers (reading, writing, validation, transformer, visualizer)
    # NOTE(review): these are class-level singletons shared by every instance
    reader = HandwritingSampleReader()
    writer = HandwritingSampleWriter()
    validator = HandwritingSampleValidator()
    transformer = HandwritingSampleTransformer()
    visualizer = HandwritingSampleVisualizer()
    # TODO: idea: I think np.column_stack is going to work if X, Y, etc. are 1D numpy arrays as well
    def __init__(self, x, y, time, pen_status, azimuth, tilt, pressure, meta_data=None, validate=True, verbose=False):
        """
        Initializes the HandwritingSample object.
        :param x: X axis
        :type x: list[uint]
        :param y: Y axis
        :type y: list[uint]
        :param time: timestamp
        :type time: list[uint]
        :param pen_status: indication of pen location (on-surface=1 | in-air=0)
        :type pen_status: list[bool]
        :param azimuth: azimuth of the pen
        :type azimuth: list[uint]
        :param tilt: tilt of the pen
        :type tilt: list[uint]
        :param pressure: pressure value
        :type pressure: list[uint]
        :param meta_data: dictionary with meta data
        :type meta_data: dict
        :param validate: true to validate input data
        :type validate: bool
        :param verbose: true if log should be verbose
        :type verbose: bool
        """
        # Create pandas DataFrame object from the input handwriting variables
        # (column order must match self.COLUMNS)
        df = pd.DataFrame(np.column_stack([x, y, time, pen_status, azimuth, tilt, pressure]), columns=self.COLUMNS)
        # Validate and store input data (kept as the "original" data)
        self._data = self.validator.validate_data(df, verbose=verbose) if validate else df
        # Store meta data of any kind
        # NOTE(review): stays None when meta_data is not provided
        self.meta = meta_data
        # Set the handwriting variables (working copies of the validated data;
        # the transform_*/normalize_* methods reassign these attribute arrays)
        self.x = self._data[self.AXIS_X].to_numpy()
        self.y = self._data[self.AXIS_Y].to_numpy()
        self.time = self._data[self.TIME].to_numpy()
        self.pen_status = self._data[self.PEN_STATUS].to_numpy(dtype=bool)
        self.azimuth = self._data[self.AZIMUTH].to_numpy()
        self.tilt = self._data[self.TILT].to_numpy()
        self.pressure = self._data[self.PRESSURE].to_numpy()
    def __repr__(self):
        """Multi-line dump of the working data arrays and the meta data."""
        return f"<HandwritingSampleObject: \n" \
               f"DATA:\n" \
               f"   x = {self.x}, \n" \
               f"   y = {self.y}, \n" \
               f"   time = {self.time}, \n" \
               f"   pen_status = {self.pen_status}, \n" \
               f"   azimuth = {self.azimuth}, \n" \
               f"   tilt = {self.tilt}, \n" \
               f"   pressure = {self.pressure}> \n\n\n" \
               f"METADATA:\n" \
               f"{self.meta.items() if self.meta else None}"
    # ---------- #
    # Properties #
    # ---------- #
    # "non-original" = the working attribute arrays (possibly transformed);
    # "original" = the validated input kept untouched in self._data
    @property
    def data_list(self):
        """Returns list for the non-original data"""
        return [self.x, self.y, self.time, self.pen_status, self.azimuth, self.tilt, self.pressure]
    @property
    def data_numpy_array(self):
        """Returns numpy array for the non-original data"""
        return np.column_stack(self.data_list)
    @property
    def data_pandas_dataframe(self):
        """Returns pandas DataFrame for the non-original data"""
        return pd.DataFrame(self.data_numpy_array, columns=self.COLUMNS)
    @property
    def original_data_list(self):
        """Returns list for the original data"""
        return [self._data[column].to_numpy() for column in self._data.columns]
    @property
    def original_numpy_array(self):
        """Returns numpy array for the original data"""
        return self._data.values
    @property
    def original_data_pandas_dataframe(self):
        """Returns pandas DataFrame for the original data"""
        return self._data
    @property
    def xy(self):
        """Returns general movement of X and Y (element-wise Euclidean magnitude)"""
        return np.sqrt(np.power(self.x, 2) + np.power(self.y, 2))
    # --------------- #
    # Reading methods #
    # --------------- #
    # All readers delegate to the shared class-level `reader`; each read_from_*
    # call is expected to return a (data, meta_data) pair (see the * unpacking).
    @classmethod
    def from_json(cls, path, columns=None):
        """
        Creates a HandwritingSample instance from a JSON file.
        :param path: path to a JSON file
        :type path: str
        :param columns: handwriting variables, defaults to cls.COLUMNS
        :type columns: list, optional
        :return: instance of HandwritingSample
        :rtype: HandwritingSample
        """
        return cls._from_data_and_metadata(*cls.reader.read_from_json(path, columns or cls.COLUMNS))
    @classmethod
    def from_svc(cls, path, columns=None):
        """
        Creates a HandwritingSample instance from an SVC file.
        :param path: path to an SVC file
        :type path: str
        :param columns: handwriting variables, defaults to cls.COLUMNS
        :type columns: list, optional
        :return: instance of HandwritingSample
        :rtype: HandwritingSample
        """
        return cls._from_data_and_metadata(*cls.reader.read_from_svc(path, columns or cls.COLUMNS))
    @classmethod
    def from_list(cls, data, columns=None):
        """
        Creates a HandwritingSample instance from a list.
        :param data: data representing handwriting sample
        :type data: list
        :param columns: handwriting variables, defaults to cls.COLUMNS
        :type columns: list, optional
        :return: instance of HandwritingSample
        :rtype: HandwritingSample
        """
        return cls._from_data_and_metadata(*cls.reader.read_from_list(data, columns or cls.COLUMNS))
    @classmethod
    def from_numpy_array(cls, data, columns=None):
        """
        Creates a HandwritingSample instance from a numpy array.
        :param data: data representing handwriting sample
        :type data: np.ndarray
        :param columns: handwriting variables, defaults to cls.COLUMNS
        :type columns: list, optional
        :return: instance of HandwritingSample
        :rtype: HandwritingSample
        """
        return cls._from_data_and_metadata(*cls.reader.read_from_numpy_array(data, columns or cls.COLUMNS))
    @classmethod
    def from_pandas_dataframe(cls, data, columns=None):
        """
        Creates a HandwritingSample instance from a pandas DataFrame.
        :param data: data representing handwriting sample
        :type data: pd.DataFrame
        :param columns: handwriting variables, defaults to cls.COLUMNS
        :type columns: list, optional
        :return: instance of HandwritingSample
        :rtype: HandwritingSample
        """
        return cls._from_data_and_metadata(*cls.reader.read_from_pandas_dataframe(data, columns or cls.COLUMNS))
    @classmethod
    def _from_data_and_metadata(cls, data, meta_data=None):
        """
        Creates a HandwritingSample instance from data and meta data.
        :param data: data of the handwriting sample
        :type data: dict
        :param meta_data: meta data of the handwriting sample, defaults to None
        :type meta_data: dict, optional
        :return: instance of HandwritingSample
        :rtype: HandwritingSample
        """
        # `meta_data or {}` guarantees a dict even when the reader supplies None
        return cls(**data, meta_data=meta_data or {})
    # --------------- #
    # Writing methods #
    # --------------- #
    def to_json(self, path, file_name=None, store_original_data=False):
        """
        Writes sample data to a JSON file.
        :param path: path where data should be stored
        :type path: str
        :param file_name: custom file name, defaults to None
        :type file_name: str, optional
        :param store_original_data: store original data, defaults to False
        :type store_original_data: bool, optional
        :return: None
        :rtype: None type
        """
        return self.writer.write_to_json(self, path, file_name=file_name, store_original_data=store_original_data)
    def to_svc(self, path, file_name=None, store_original_data=False):
        """
        Writes sample data to an SVC file.
        :param path: path where data should be stored
        :type path: str
        :param file_name: custom file name, defaults to None
        :type file_name: str, optional
        :param store_original_data: store original data, defaults to False
        :type store_original_data: bool, optional
        :return: None
        :rtype: None type
        """
        return self.writer.write_to_svc(self, path, file_name=file_name, store_original_data=store_original_data)
# ----------------------------- #
# Handwriting data manipulation #
# ----------------------------- #
def get_on_surface_data(self):
"""Returns on-surface data as a HandwritingSample object"""
# Get all on-surface data
df = self.data_pandas_dataframe
df = df[df[self.PEN_STATUS] == 1]
# Return a new instance of HandwritingSample with only on-surface data
return HandwritingSample(**df.to_dict(orient="list"), validate=False)
def get_in_air_data(self):
"""Returns in-air data as a HandwritingSample object"""
# Return all in-air data
df = self.data_pandas_dataframe
df = df[df[self.PEN_STATUS] == 0]
# Return a new instance of HandwritingSample with only in-air data
return HandwritingSample(**df.to_dict(orient="list"), validate=False)
    # Convenience filters over get_strokes()
    def get_on_surface_strokes(self):
        """Returns strokes on-surface"""
        return self.get_strokes(on_surface_only=True)
    def get_in_air_strokes(self):
        """Returns strokes in-air"""
        return self.get_strokes(in_air_only=True)
def get_strokes(self, on_surface_only=False, in_air_only=False):
"""
Splits the movement into strokes.
:param on_surface_only: on-surface strokes only, defaults to False
:type on_surface_only: bool, optional
:param in_air_only: in-air strokes only, defaults to True
:type in_air_only: bool, optional
:return: list of strokes in tuples with the status of strokes
:rtype: tuple('status', HandwritingSample)
"""
# Handle the edge cases
if all((on_surface_only, in_air_only)):
on_surface_only, in_air_only = False, False
# Get accessible data of the sample as a pandas DataFrame
df = self.data_pandas_dataframe
# Get index values of the pen status column changes
idx_change = df.ne(df.shift()).filter(like=self.PEN_STATUS).apply(lambda x: x.index[x].tolist())
idx_array = idx_change[self.PEN_STATUS].values.tolist()
# Add the last index value
idx_array.append(df.index[-1])
# Get strokes
strokes = [df.iloc[idx_array[n]:idx_array[n + 1]] for n in range(len(idx_array) - 1)]
# Prepare the list of strokes
list_of_strokes = []
# Fill the list of strokes (add 'on_surface'/'in_air' flag in front of each stroke)
for stroke in strokes:
# Skip empty strokes
if stroke.empty:
continue
# If on surface strokes only are wanted filter out in air
if on_surface_only and stroke[self.PEN_STATUS].iloc[0] == 0:
continue
# If in air strokes only are wanted filter out on surface
if in_air_only and stroke[self.PEN_STATUS].iloc[0] == 1:
continue
# Prepare the stroke information
status = "on_surface" if stroke[self.PEN_STATUS].iloc[0] == 1 else "in_air"
stroke = HandwritingSample(**stroke.to_dict(orient="list"), validate=False)
# Append to the list of strokes
list_of_strokes.append((status, stroke))
# Return the list of strokes
return list_of_strokes
    # ------------------------------- #
    # Handwriting data transformation #
    # ------------------------------- #
    # TODO: use **kwargs
    def transform_all_units(
            self,
            conversion_type=transformer.LPI,
            lpi_value=transformer.LPI_VALUE,
            lpmm_value=transformer.LPMM_VALUE,
            max_raw_azimuth=transformer.MAX_AZIMUTH_VALUE,
            max_raw_tilt=transformer.MAX_TILT_VALUE,
            max_degree_azimuth=transformer.MAX_AZIMUTH_DEGREE,
            max_degree_tilt=transformer.MAX_TILT_DEGREE,
            max_pressure=transformer.MAX_PRESSURE_VALUE,
            pressure_levels=transformer.PRESSURE_LEVELS,
            angles_to_degrees=True,
            shift_to_zero=True):
        """
        Transforms all units of sample object:
            - transforms X,Y to millimeters.
            - transform time to seconds
            - normalize or transform to degrees angles
            - normalize pressure
        :param conversion_type: OPTIONAL ["lpi"|"lpmm"], DEFAULT="lpi".
                                Set the capturing method used for mapping;
                                "lpi" for inch; "lpmm" for millimeters
        :type conversion_type: str
        :param lpi_value: OPTIONAL , DEFAULT = 5080
                          Set lpi value of digitizing tablet.
        :type lpi_value: int
        :param lpmm_value: OPTIONAL, DEFAULT = 200
                           Set lpmm value of digitizing tablet.
        :type lpmm_value: int
        :param max_raw_azimuth: OPTIONAL, DEFAULT = 3600
                                Maximum theoretical value of azimuth.
        :type max_raw_azimuth: int
        :param max_raw_tilt: OPTIONAL, DEFAULT = 900
                             Maximum theoretical value of tilt.
        :type max_raw_tilt: int
        :param max_degree_azimuth: OPTIONAL, DEFAULT = 360
                                   Maximum degree value of azimuth.
        :type max_degree_azimuth: int
        :param max_degree_tilt: OPTIONAL, DEFAULT = 90
                                Maximum degree value of tilt.
        :type max_degree_tilt: int
        :param max_pressure: OPTIONAL, DEFAULT = 32767
                             Maximum theoretical value of pressure.
        :type max_pressure: int
        :param pressure_levels: OPTIONAL, DEFAULT = 8192
                                Level of pressures of the device.
        :type pressure_levels: int
        :param angles_to_degrees: OPTIONAL, DEFAULT = True
                                  Transform angles to degrees
        :type angles_to_degrees: bool
        :param shift_to_zero: OPTIONAL, DEFAULT = True
                              Shift axis values to start from 0,0 coordinates
        :type shift_to_zero: bool
        """
        # Delegates the full in-place conversion to the shared transformer,
        # which receives this sample instance and updates its attribute arrays
        self.transformer.transform_all_units(
            self,
            conversion_type=conversion_type,
            lpi_value=lpi_value,
            lpmm_value=lpmm_value,
            max_raw_azimuth=max_raw_azimuth,
            max_raw_tilt=max_raw_tilt,
            max_degree_azimuth=max_degree_azimuth,
            max_degree_tilt=max_degree_tilt,
            max_pressure=max_pressure,
            pressure_levels=pressure_levels,
            angles_to_degrees=angles_to_degrees,
            shift_to_zero=shift_to_zero)
    def transform_axis_to_mm(
            self,
            conversion_type=transformer.LPI,
            lpi_value=transformer.LPI_VALUE,
            lpmm_value=transformer.LPMM_VALUE,
            shift_to_zero=True):
        """
        Transforms X,Y axis to millimeters.
        :param conversion_type: OPTIONAL ["lpi"|"lpmm"], DEFAULT="lpi".
                                Set the capturing method used for mapping;
                                "lpi" for inch; "lpmm" for millimeters
        :type conversion_type: str
        :param lpi_value: OPTIONAL, DEFAULT = 5080
                          Set lpi value of digitizing tablet
        :type lpi_value: int
        :param lpmm_value: OPTIONAL, DEFAULT = 200
                           Set lpmm value of digitizing tablet
        :type lpmm_value: int
        :param shift_to_zero: OPTIONAL, DEFAULT = True
                              Shift axis values to start from 0,0 coordinates
        :type shift_to_zero: bool
        """
        # Delegates the in-place X/Y conversion to the shared transformer
        self.transformer.transform_axis(self, conversion_type=conversion_type, lpi_value=lpi_value,
                                        lpmm_value=lpmm_value, shift_to_zero=shift_to_zero)
    def transform_time_to_seconds(self):
        """ Transform time to seconds (replaces self.time in place) """
        self.time = self.transformer.transform_time_to_seconds(self.time)
    def normalize_pressure(
            self,
            max_pressure=transformer.MAX_PRESSURE_VALUE,
            pressure_levels=transformer.PRESSURE_LEVELS):
        """
        Normalizes pressure to pressure level of the device (replaces
        self.pressure in place).
        :param max_pressure: OPTIONAL, DEFAULT = 32767
                             max theoretical raw pressure value
        :type max_pressure: int
        :param pressure_levels: OPTIONAL, DEFAULT = 8192
                                level of pressure of the device
        :type pressure_levels: int
        """
        self.pressure = self.transformer.normalize_pressure(self.pressure,
                                                            max_value=max_pressure,
                                                            pressure_levels=pressure_levels)
    def transform_angle_to_degree(self, angle=None, max_raw_value=None, max_degree_value=None):
        """
        Transforms raw angle to degrees (replaces the attribute in place).
        :param angle: Angle that should be converted [tilt, azimuth]
        :type angle: str
        :param max_raw_value: OPTIONAL, Maximal theoretical value of raw angle
        :type max_raw_value: int
        :param max_degree_value: OPTIONAL, Maximal value of angle in degrees
        :type max_degree_value: int
        :raises TransformerAngleTypeException: if angle is neither TILT nor AZIMUTH
        """
        # For tilt
        if angle == self.TILT:
            self.tilt = self.transformer.transform_angle(
                self.tilt,
                max_raw_value=max_raw_value or self.transformer.MAX_TILT_VALUE,
                max_degree_value=max_degree_value or self.transformer.MAX_TILT_DEGREE)
        # For Azimuth
        elif angle == self.AZIMUTH:
            self.azimuth = self.transformer.transform_angle(
                self.azimuth,
                max_raw_value=max_raw_value or self.transformer.MAX_AZIMUTH_VALUE,
                max_degree_value=max_degree_value or self.transformer.MAX_AZIMUTH_DEGREE)
        else:
            raise TransformerAngleTypeException(angle)
# ---------------------- #
# Meta data manipulation #
# ---------------------- #
def add_meta_data(self, meta_data):
"""Adds meta data to the HandwritingSample object from dictionary"""
self.meta.update({"updated_on": datetime.utcnow().strftime(self.DATE_FORMAT)})
self.meta.update({**meta_data})
    # -------------- #
    # Visualisation  #
    # -------------- #
    # All plotting delegates to the shared class-level visualizer; the
    # `save_path` argument maps to the visualizer's `save_as` keyword.
    def plot_on_surface(self, x_label=None, y_label=None, save_path=None):
        """
        Plot on surface data
        :param x_label: OPTIONAL, label of X axis
        :type x_label: str
        :param y_label: OPTIONAL, label of Y axis
        :type y_label: str
        :param save_path: OPTIONAL, set save path if you wish to save the figure
        :type save_path: str
        :return: axis and plot objects
        """
        return self.visualizer.plot_on_surface_movement(self, x_label=x_label, y_label=y_label, save_as=save_path)
    def plot_in_air(self, x_label=None, y_label=None, save_path=None):
        """
        Plot in air data
        :param x_label: OPTIONAL, label of X axis
        :type x_label: str
        :param y_label: OPTIONAL, label of Y axis
        :type y_label: str
        :param save_path: OPTIONAL, set save path if you wish to save the figure
        :type save_path: str
        :return: axis and plot objects
        """
        return self.visualizer.plot_in_air_movement(self, x_label=x_label, y_label=y_label, save_as=save_path)
    def plot_separate_movements(self, x_label=None, y_label=None, save_path=None):
        """
        Plot separate movement in one plot (on_surface + in_air)
        :param x_label: OPTIONAL, label of X axis
        :type x_label: str
        :param y_label: OPTIONAL, label of Y axis
        :type y_label: str
        :param save_path: OPTIONAL, set save path if you wish to save the figure
        :type save_path: str
        :return: axis and plot objects
        """
        return self.visualizer.plot_separate_movements(self, x_label=x_label, y_label=y_label, save_as=save_path)
    def plot_strokes(self, x_label=None, y_label=None, save_path=None):
        """
        Plot separate strokes in one plot
        :param x_label: OPTIONAL, label of X axis
        :type x_label: str
        :param y_label: OPTIONAL, label of Y axis
        :type y_label: str
        :param save_path: OPTIONAL, set save path if you wish to save the figure
        :type save_path: str
        :return: axis and plot objects
        """
        return self.visualizer.plot_strokes(self, x_label=x_label, y_label=y_label, save_as=save_path)
    def plot_all_data(self, x_label=None, save_path=None):
        """
        Plot individual plots for each data attribute (x,y,time,azimuth,tilt,pressure)
        :param x_label: OPTIONAL, label of X axis
        :type x_label: str
        :param save_path: OPTIONAL, set save path if you wish to save the figure
        :type save_path: str
        """
        return self.visualizer.plot_all_modalities(self, x_label=x_label, save_as=save_path)
|
from enum import Enum
from typing import Tuple, Union, Callable, Any, List, Optional
from plover import system
STROKE_TYPE = str
# BUG FIX: outlines have a variable number of strokes (see len(s[0]) and
# num_score below); Tuple[STROKE_TYPE] declares a 1-tuple, so use ellipsis.
OUTLINE_TYPE = Tuple[STROKE_TYPE, ...]
class SortingType(Enum):
    """Available orderings for stroke suggestions."""
    FREQUENCY = 0
    FREQUENCY_NUM = 1
    FREQUENCY_ALPHA = 2
    STROKE_COUNT = 3
    ALPHABETICAL = 4
    SYSTEM_DEFINED = 5
# Human-readable labels; list indices parallel the SortingType values above
sorting_descriptions = [
    "Frequency",
    "Frequency (Prioritize Numbers)",
    "Frequency (Prioritize Non-numeric)",
    "Stroke Count",
    "Alphabetical",
    "System Defined"
]
def to_int(string: str, default: int) -> int:
    """Parse *string* as an integer, falling back to *default* on failure."""
    try:
        result = int(string)
    except ValueError:
        result = default
    return result
def num_score(outline: OUTLINE_TYPE) -> Tuple[int, ...]:
    """Score each stroke numerically; non-numeric strokes sort last (999999)."""
    scores = [to_int(stroke, 999999) for stroke in outline]
    return tuple(scores)
def get_sorter(sorting_type: SortingType) -> Callable[[Tuple[OUTLINE_TYPE, str]], Any]:
    """Return a sort key function for (outline, translation) pairs.

    NOTE(review): system.ORTHOGRAPHY_WORDS appears to map words to frequency
    ranks (unranked words sort last via 999999) -- confirm against Plover docs.
    SYSTEM_DEFINED (or any unhandled member) falls through and returns None;
    callers are expected to normalize it first (see sort_suggestions).
    """
    if sorting_type == SortingType.FREQUENCY:
        if system.ORTHOGRAPHY_WORDS is not None:
            return lambda s: (len(s[0]), system.ORTHOGRAPHY_WORDS.get(s[1], 999999))
        else:
            return lambda s: len(s[0])
    elif sorting_type == SortingType.FREQUENCY_NUM:
        if system.ORTHOGRAPHY_WORDS is not None:
            return lambda s: (num_score(s[0]), system.ORTHOGRAPHY_WORDS.get(s[1], 999999))
        else:
            return lambda s: num_score(s[0])
    elif sorting_type == SortingType.FREQUENCY_ALPHA:
        # Non-alphabetic last characters (numbers, punctuation) sort after words
        if system.ORTHOGRAPHY_WORDS is not None:
            return lambda s: (not s[0][-1].isalpha(), len(s[0]), system.ORTHOGRAPHY_WORDS.get(s[1], 999999))
        else:
            return lambda s: (not s[0][-1].isalpha(), len(s[0]))
    elif sorting_type == SortingType.STROKE_COUNT:
        return lambda s: len(s[0])
    elif sorting_type == SortingType.ALPHABETICAL:
        return lambda s: s[1].lower()
def sort_suggestions(
    suggestions: List[Tuple[OUTLINE_TYPE, str]],
    sorting_type: SortingType,
    stroke_formatter: Optional[Callable[[STROKE_TYPE], STROKE_TYPE]] = None,
    translation_formatter: Optional[Callable[[str], str]] = None,
    system_sorter: Optional[Callable[[Tuple[OUTLINE_TYPE, str]], Any]] = None
) -> List[Tuple[OUTLINE_TYPE, str]]:
    """Apply the optional formatters, then sort per the requested ordering."""
    formatted = []
    for outline, translation in suggestions:
        strokes = outline if stroke_formatter is None else tuple(stroke_formatter(s) for s in outline)
        text = translation if translation_formatter is None else translation_formatter(translation)
        formatted.append((strokes, text))
    if sorting_type == SortingType.SYSTEM_DEFINED:
        if system_sorter is not None:
            return sorted(formatted, key=system_sorter)
        # No system-defined sorter available: fall back to frequency ordering
        sorting_type = SortingType.FREQUENCY
    return sorted(formatted, key=get_sorter(sorting_type))
|
import requests
class SSOClient():
    """Minimal client for the AWS SSO portal REST API.

    NOTE(review): the endpoint paths and the 'x-amz-sso_bearer_token' header
    are not a documented public API -- verify against current AWS behavior.
    """
    def __init__(self, token, region='eu-west-1'):
        # One shared Session so the bearer token header rides on every request
        self._token = token
        self._region = region
        self._s = requests.Session()
        self._s.headers.update({
            'x-amz-sso_bearer_token': self._token
        })
    def whoami(self):
        """Return the caller's identity as reported by the portal (JSON dict)."""
        r = self._s.get(f'https://portal.sso.{self._region}.amazonaws.com/token/whoAmI')
        return r.json()
    def get_instances(self):
        """Return app instances, filtered down to plain 'AWS Account' entries."""
        r = self._s.get(f'https://portal.sso.{self._region}.amazonaws.com/instance/appinstances')
        return [i for i in r.json()['result'] if i['applicationName'] == 'AWS Account']
    def get_profiles(self, instance_id):
        """Return the permission profiles available for one app instance."""
        r = self._s.get(f'https://portal.sso.{self._region}.amazonaws.com/instance/appinstance/{instance_id}/profiles')
        return r.json()['result']
    def get_saml_payload(self, instance_id, profile_id):
        """Return the encoded SAML response for one profile.

        NOTE(review): returns None implicitly when profile_id is not found.
        """
        for profile in self.get_profiles(instance_id):
            if profile['id'] == profile_id:
                url = profile['url']
                r = self._s.get(url)
                return r.json()['encodedResponse']
|
"""
================================================================
Logistic regression comparison: ``scikit-learn`` versus ``tick``
================================================================
In this example we give a naive comparison of ``tick`` and ``scikit-learn`` for
binary classification using logistic regression with :math:`\ell_1`
penalization.
This comparison is done using the well-known ``adult`` dataset, a standard
benchmark dataset for binary classification.
Some remarks are the following:
* Both classifiers have the same performance in terms of AUC (area under the
ROC curve)
* Learned model-weights are slightly different. This is explained by the fact
that ``scikit-learn`` uses ``liblinear`` for optimization of the
:math:`\ell_1`-penalized likelihood. When using this solver, the
``intercept`` is penalized like the model weights (``coeff_``), while this is
not the case in `tick`. Note that this difference can be reduced by tuning the
``intercept_scaling`` parameter from ``scikit-learn``'s
``LogisticRegression``
* In this example, the computational time of ``tick`` is better than ``scikit``'s
"""
import numpy as np
from time import time
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.linear_model import LogisticRegression as LogRegScikit
from tick.dataset import fetch_tick_dataset
from tick.linear_model import LogisticRegression as LogRegTick

train_set = fetch_tick_dataset('binary/adult/adult.trn.bz2')
test_set = fetch_tick_dataset('binary/adult/adult.tst.bz2')

clf_tick = LogRegTick(C=1e5, penalty='l1', tol=1e-8)
# Explicit solver: the narrative above assumes liblinear, and recent
# scikit-learn defaults to 'lbfgs', which rejects penalty='l1'.
clf_scikit = LogRegScikit(penalty='l1', tol=1e-8, solver='liblinear')

# Time the training of both classifiers on the same data
t1 = time()
clf_tick.fit(train_set[0], train_set[1])
t_tick = time() - t1
t1 = time()
clf_scikit.fit(train_set[0], train_set[1])
t_scikit = time() - t1

# Probability of the positive class on the held-out test set
pred_tick = clf_tick.predict_proba(test_set[0])
pred_scikit = clf_scikit.predict_proba(test_set[0])
fpr_tick, tpr_tick, _ = roc_curve(test_set[1], pred_tick[:, 1])
fpr_scikit, tpr_scikit, _ = roc_curve(test_set[1], pred_scikit[:, 1])

plt.figure(figsize=(10, 8))
ax1 = plt.subplot2grid((2, 2), (0, 0))
plt.stem(clf_tick.weights)
plt.title(r'Model-weights in $\mathtt{tick}$', fontsize=16)
plt.ylim((-2, 2.5))
ax2 = plt.subplot2grid((2, 2), (0, 1))
plt.stem(np.ravel(clf_scikit.coef_))
# plt.legend()
plt.ylim((-2, 2.5))
plt.title(r'Model-weights in $\mathtt{scikit-learn}$', fontsize=16)
plt.subplot2grid((2, 2), (1, 0))
plt.plot(fpr_tick, tpr_tick, lw=2)
plt.plot(fpr_scikit, tpr_scikit, lw=2)
plt.legend([
    "tick (AUC = {:.2f})".format(auc(fpr_tick, tpr_tick)),
    # BUG FIX: the scikit-learn legend entry previously reused the tick ROC
    # points, so both displayed AUCs were always identical.
    "scikit-learn (AUC = {:.2f})".format(auc(fpr_scikit, tpr_scikit))
], loc='center right', fontsize=12)
plt.ylabel("True Positive Rate", fontsize=14)
plt.xlabel("False Positive Rate", fontsize=14)
plt.title('ROC curves comparison', fontsize=16)
ax4 = plt.subplot2grid((2, 2), (1, 1))
plt.bar([1, 2], [t_tick, t_scikit])
ax4.set_xticks([1, 2])
ax4.set_xticklabels(['tick', 'scikit-learn'], fontsize=14)
plt.title('Computational time (seconds)', fontsize=16)
plt.tight_layout()
plt.show()
|
# https://www.acmicpc.net/problem/2703
def match(x):
    """Decode one character through the global `cryptoquote` substitution key.

    NOTE(review): assumes `x` is an uppercase letter or the module-level
    `space`; `ord(x) - 65` maps 'A'..'Z' to key indices 0..25.
    """
    if x == space:
        return space
    return cryptoquote[ord(x) - 65]
if __name__ == '__main__':
    input = __import__('sys').stdin.readline  # rebind input() to raw stdin reads
    space = ' '
    new_line = '\n'
    result = list()
    t = int(input())  # number of test cases
    for _ in range(t):
        input_data = input().rstrip()   # encrypted line to decode
        cryptoquote = input().rstrip()  # substitution key consumed by match()
        output_data = ''.join(map(match, input_data))
        result.append(output_data)
    print(new_line.join(result))
from plexapi.myplex import MyPlexAccount
import utils
import trakt
import trakt.core
# Interactive one-time setup: obtain a Plex token and a Trakt login,
# then tell the user what to paste into the .env file.
plex_needed = utils.input_yesno("Are you logged into this server with a Plex account?")
if plex_needed:
    username = input("Please enter your Plex username: ")
    password = input("Please enter your Plex password: ")
    servername = input("Now enter the server name: ")
    account = MyPlexAccount(username, password)
    plex = account.resource(servername).connect()  # returns a PlexServer instance
    # The server instance's private _token attribute is the auth token we need
    print("Copy this Plex token to the .env file:", plex._token)
else:
    print("Add this as the PLEX_TOKEN in the .env file: PLEX_TOKEN=-")
# Trakt OAuth login; store=True presumably persists the token -- confirm
# against trakt.py documentation
trakt.APPLICATION_ID = '65370'
trakt.core.AUTH_METHOD=trakt.core.OAUTH_AUTH
trakt_user = input("Please input your Trakt username: ")
trakt.init(trakt_user, store=True)
print("You are now logged into Trakt. Add your username in .env: TRAKT_USER=" + trakt_user)
print("Once the PLEX_TOKEN and TRAKT_USER are in your .env file, you can run 'python3 main.py' and enjoy!")
import unittest
input_value = 'MCMXCIV'  # sample input: a Roman numeral
output_value = 1994  # its expected integer conversion
class funcTest(unittest.TestCase):
    """Single sanity test for Solution.romanToInt."""
    def test(self):
        solution = Solution()
        self.assertEqual(solution.romanToInt(input_value), output_value)
class Solution:
    def romanToInt(self, s: str) -> int:
        """
        Convert a Roman numeral to its integer value.
        :type s: str
        :rtype: int
        """
        values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
        total = 0
        previous = 0
        # Scan right-to-left: a symbol strictly smaller than the symbol to its
        # right (seen on the previous iteration) is subtractive, else additive.
        for ch in reversed(s):
            value = values[ch]
            if value < previous:
                total -= value
            else:
                total += value
            previous = value
        return total
|
#!/usr/bin/env python
import os , sys , argparse ,errno , yaml
import rospy
class RVDIR(object):
    """Interactively remove one labelled image folder under <package>/image
    and keep param/image_label.yaml consistent with what is on disk."""
    def __init__(self):
        self.package = "img_recognition"
        self.parser = argparse.ArgumentParser(
            description="remove a folder in ~/ROSKY/catkin_ws/src/" + self.package + "/image",
            epilog="remove your image")
        self.parser.add_argument("--name", "-rm", type=str, required=True,
                                 help="Name of the image folder to remove.")
        self.args = self.parser.parse_args()
        # Package root: one directory above this script
        self.path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
        self.remove = False
        # The constructor drives the whole workflow: read counts, remove, persist
        self.read_param_from_file()
        self.try_remove(self.args.name)
        self.write_to_file(self.remove, self.args.name)
    def try_remove(self, name):
        """Remove <package>/image/<name>, asking for confirmation when the
        label still has images according to image_label.yaml."""
        import shutil  # local import keeps the module's import line untouched
        remove_path = self.path + "/image/" + name
        if os.path.isdir(remove_path):
            # BUG FIX: .get() -- the folder may exist on disk without a YAML entry
            if self.yaml_dict.get(name, 0) == 0:
                try:
                    os.rmdir(remove_path)
                    self.remove = True
                    print("Done! Remove folder : {}".format(remove_path))
                except OSError as e:
                    # BUG FIX: a missing directory raises ENOENT (the old code
                    # tested EEXIST, which never matches a vanished directory)
                    if e.errno == errno.ENOENT:
                        print("Note! Directory does not exist. ")
                    else:
                        raise
            else:
                msg = "There are {} images in folder [{}]. Do you want to remove?(y/N): ".format(self.yaml_dict[name], name)
                remove = input(msg)
                if remove == "y" or remove == "Y":
                    # BUG FIX: os.rmdir() always fails on a non-empty directory;
                    # shutil.rmtree() removes the folder together with its images
                    shutil.rmtree(remove_path)
                    self.remove = True
                    print("Done! Remove folder: {}".format(remove_path))
                else:
                    print("Skip remove [{}]".format(self.path + "/image/" + name))
        else:
            print("[{}] is not exist.".format(remove_path))
            sys.exit(0)
    def read_param_from_file(self):
        """Load param/image_label.yaml and refresh each label's image count
        by walking the corresponding folder on disk."""
        fname = self.path + "/param/image_label.yaml"
        self.yaml_dict = None
        with open(fname, 'r') as in_file:
            try:
                # BUG FIX: safe_load -- yaml.load() without a Loader is unsafe
                # on files we did not author and rejected by modern PyYAML
                self.yaml_dict = yaml.safe_load(in_file)
            except yaml.YAMLError:
                print(" YAML syntax error. File: {}".format(fname))
        if self.yaml_dict is not None:
            for label_name in self.yaml_dict:
                image_count = 0
                for dir_path, dir_names, file_names in os.walk(self.path + "/image/" + str(label_name) + "/"):
                    for image in file_names:
                        if image.endswith(('jpg', 'jpeg')):
                            image_count += 1
                self.yaml_dict[label_name] = image_count
        else:
            # Missing/empty/broken YAML: start from an empty mapping
            self.yaml_dict = {}
    def write_to_file(self, remove, name):
        """Persist the (possibly updated) label/count mapping back to YAML."""
        fname = self.path + "/param/image_label.yaml"
        if remove:
            # BUG FIX: pop with default -- the label may be absent from the map
            self.yaml_dict.pop(name, None)
        data = self.yaml_dict
        with open(fname, 'w') as outfile:
            outfile.write(yaml.dump(data, default_flow_style=False))
if __name__ == "__main__" :
    # Instantiating RVDIR runs the full remove workflow via its constructor
    remove = RVDIR()
|
import json
from datetime import datetime, date
def repr_response(resp, full=False):
    """One-line summary of a requests.models.Response; truncates bodies over
    128 bytes unless full=True."""
    body = resp.content
    if not full and len(body) > 128:
        body = '{}...{}b'.format(body[:128], len(body))
    return '{} {} {}: {}'.format(
        resp.request.method,
        resp.status_code,
        resp.url,
        body
    )
def repr_str_short(value, length=32):
    """Truncate *value* to *length* characters, appending '...' if shortened."""
    return value if len(value) <= length else value[:length] + '...'
class ReprMixin:
    """Mixin providing a compact repr and dict conversion for plain objects."""
    def __repr__(self, *args, full=False, required=False, **kwargs):
        # Shorten each attribute's repr unless full=True is requested
        attrs = self.to_dict(*args, required=required, **kwargs)
        attrs = ', '.join(u'{}={}'.format(k, repr(v) if full else repr_str_short(repr(v)))
                          for k, v in attrs.items())
        return '<{}({})>'.format(self.__class__.__name__, attrs)
    def to_dict(self, *args, exclude=(), required=True):
        """Public attributes as a dict; positional *args select specific keys.

        BUG FIX: ``exclude`` now defaults to an immutable tuple -- the old
        ``exclude=[]`` was a shared mutable default argument.
        """
        keys = args or (self.__dict__.keys() if hasattr(self, '__dict__') else self.__slots__)
        return {k: getattr(self, k) for k in keys
                if not k.startswith('_') and
                (hasattr(self, k) or (args and required and k in args)) and
                k not in exclude}
    def _pprint(self, *args, **kwargs):
        # Delegates to the module-level pprint helper
        return pprint(self, *args, **kwargs)
def pprint(obj, indent=2, colors=True):
    """Pretty-print *obj* as indented JSON, with optional pygments coloring."""
    def _default(o):
        # Fallback serialization: ISO dates, to_dict() objects, then repr()
        if isinstance(o, (datetime, date)):
            return o.isoformat()
        if hasattr(o, 'to_dict'):
            return o.to_dict()
        return repr(o)
    text = json.dumps(obj, default=_default, indent=indent, ensure_ascii=False)
    if colors:
        try:
            from pygments import highlight
            from pygments.lexers import JsonLexer
            from pygments.formatters import TerminalFormatter
        except ImportError:
            # pygments is optional; fall back to plain output
            pass
        else:
            text = highlight(text, JsonLexer(), TerminalFormatter())
    print(text)
|
import tensorflow as tf
def constructCnnBackbone(imageSize = 224):
    """Build a headless EfficientNetB0 backbone wrapped with its preprocessing.

    Returns a (wrapped model, raw backbone) pair. The wrapped model maps raw
    RGB input (imageSize, imageSize, 3) through EfficientNet preprocessing and
    the pooling-free backbone.
    """
    netInput = tf.keras.Input(shape=(imageSize, imageSize, 3), name="backboneInput")
    backbone = tf.keras.applications.EfficientNetB0(
        weights=None, #'imagenet',
        include_top=False,
        input_shape=(imageSize, imageSize, 3),
        # it should have exactly 3 inputs channels,
        pooling=None) # Tx7x7x1280 in case of None pooling and image side size of 224
    converted = tf.keras.applications.efficientnet.preprocess_input(netInput)
    print("converted cnn backbone input shape {0}".format(converted.shape))
    #print("Backbone")
    #print(backbone.summary())
    result = backbone(converted)
    return tf.keras.Model(name="Backbone", inputs=netInput, outputs=result), backbone
def constructFeatureExtractor(backboneModel, seriesLen, l2regAlpha, DORate, seed, imageSize = 224):
    """Build a model mapping an image series to a single feature vector.

    The backbone is applied per frame (TimeDistributed), then 2-D convolutions
    shrink the spatial map to 1x1, a dense layer compresses channels, and a GRU
    folds the time axis into one vector.

    Parameters: backboneModel - per-frame CNN; seriesLen - frames per series;
    l2regAlpha - L2 regularization weight; DORate - AlphaDropout rate;
    seed - base value for the dropout seeds; imageSize - frame side length.
    """
    netInput = tf.keras.Input(shape=(seriesLen, imageSize, imageSize, 3), name="featureExtractorInput")
    backboneApplied = tf.keras.layers.TimeDistributed(backboneModel,name="backbone")(netInput) # 7x7x1280 (for image size 224) = 62,720
    backboneOutChannelsCount = 1280
    # we will do 2D convolution until the image become 1x1
    cnn1out = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(backboneOutChannelsCount // 2, kernel_size=1,strides=(1,1),padding='valid',activation='selu'),name="postBackboneConv2D")(backboneApplied)
    # NOTE(review): noise_shape omits the batch axis for 5-D (B,T,H,W,C)
    # activations — confirm this produces the intended dropout mask sharing.
    cnn1DoOut = tf.keras.layers.AlphaDropout(DORate, noise_shape=(seriesLen,1,1,backboneOutChannelsCount // 2),seed=seed+2334)(cnn1out) # 7 x 7 x 640
    cnn2out = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(backboneOutChannelsCount // 4, kernel_size=3,strides=(2,2),padding='valid',activation='selu'),name="postBackboneConv2D_2")(cnn1DoOut)
    cnn2DoOut = tf.keras.layers.AlphaDropout(DORate, noise_shape=(seriesLen,1,1,backboneOutChannelsCount // 4),seed=seed+34632)(cnn2out) # 3 x 3 x 320
    cnn3out = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(backboneOutChannelsCount // 4, kernel_size=3,strides=(1,1),padding='valid',activation='selu'),name="postBackboneConv2D_3")(cnn2DoOut)
    cnn3DoOut = tf.keras.layers.AlphaDropout(DORate, noise_shape=(seriesLen,1,1,backboneOutChannelsCount // 4),seed=seed+2346)(cnn3out) # 1 x 1 x 320
    # Spatial dims are now 1x1: flatten to (seriesLen, channels).
    cnnFinal = tf.keras.layers.Reshape((seriesLen, backboneOutChannelsCount // 4))(cnn3DoOut)
    fc1out = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(backboneOutChannelsCount // 8, activation="selu", kernel_regularizer=tf.keras.regularizers.L1L2(l2=l2regAlpha)),name="fc1")(cnnFinal)
    fc1DoOut = tf.keras.layers.AlphaDropout(DORate, noise_shape=(seriesLen,backboneOutChannelsCount // 8),seed=seed+245334)(fc1out)
    # GRU collapses the time axis (return_sequences=False -> last state only).
    rnnOut = \
        tf.keras.layers.GRU(
            backboneOutChannelsCount // 16, dropout=DORate,
            kernel_regularizer = tf.keras.regularizers.L1L2(l2=l2regAlpha),
            recurrent_regularizer=tf.keras.regularizers.L1L2(l2=l2regAlpha),
            return_sequences=False)(fc1DoOut)
    result = tf.keras.Model(name="FeatureExtractor", inputs=netInput, outputs=rnnOut)
    #print("Feature extractor")
    #print(result.summary())
    return result
class TripletCosineSimilarityMetricLayer(tf.keras.layers.Layer):
    """Pass-through layer that records mean anchor/positive and anchor/negative
    cosine similarity as Keras metrics."""

    def __init__(self, **kwargs):
        super(TripletCosineSimilarityMetricLayer, self).__init__(**kwargs)

    def call(self, anchorFeatures, positiveFeatures, negativeFeatures):
        # tf.keras.losses.cosine_similarity returns -1 for perfectly aligned
        # vectors, so negate to report an intuitive "higher is more similar".
        anchorPositive = 0.0 - tf.reduce_mean(
            tf.keras.losses.cosine_similarity(anchorFeatures, positiveFeatures, axis=-1))
        anchorNegative = 0.0 - tf.reduce_mean(
            tf.keras.losses.cosine_similarity(anchorFeatures, negativeFeatures, axis=-1))
        self.add_metric(anchorPositive, name='pos_cossim')
        self.add_metric(anchorNegative, name='neg_cossim')
        # Identity on the features so the layer can be spliced into the graph.
        return anchorFeatures, positiveFeatures, negativeFeatures
class TripletCosineDistanceLossLayer(tf.keras.layers.Layer):
    """Identity layer that attaches a cosine-similarity triplet loss."""
    def __init__(self, optimizationMargin = 2.0, **kwargs):
        super(TripletCosineDistanceLossLayer, self).__init__(**kwargs)
        # Margin added to the positive-similarity term of the loss.
        self.optimizationMargin = optimizationMargin
    def call(self, anchorFeatures, positiveFeatures, negativeFeatures):
        posSim = tf.keras.losses.cosine_similarity(anchorFeatures, positiveFeatures, axis=-1) # -1.0 is perfect alignment
        negSim = tf.keras.losses.cosine_similarity(anchorFeatures, negativeFeatures, axis=-1)
        #print("posSim shape {0}".format(posSim.shape))
        # NOTE(review): this clips only the negative term via tf.minimum(negSim, 0)
        # instead of the classic hinge max(0, margin + pos - neg) on the whole
        # expression — confirm the asymmetric form is intentional.
        loss = tf.reduce_mean(self.optimizationMargin + posSim - tf.minimum(negSim, 0))
        self.add_loss(loss)
        return (anchorFeatures, positiveFeatures, negativeFeatures)
def constructSiameseTripletModel(seriesLen, l2regAlpha, DORate, imageSize = 224, optimizationMargin = 2.0, seed = 0):
    """Assemble the triplet network: three shared-weight feature extractors,
    a triplet loss layer and a similarity metric layer.

    Returns (siamese model, backbone wrapper, feature extractor).
    seed (new parameter, default 0) feeds the extractor's dropout seeds.
    """
    anchorInput = tf.keras.Input(shape=(seriesLen, imageSize, imageSize, 3), name="anchorInput")
    positiveInput = tf.keras.Input(shape=(seriesLen, imageSize, imageSize, 3), name="positiveInput")
    negativeInput = tf.keras.Input(shape=(seriesLen, imageSize, imageSize, 3), name="negativeInput")
    backbone, backboneCore = constructCnnBackbone(imageSize)
    # Bug fix: imageSize was previously passed positionally into the `seed`
    # parameter of constructFeatureExtractor, so the extractor was always
    # built for the default 224 image size regardless of `imageSize`.
    featureExtractor = constructFeatureExtractor(backbone, seriesLen, l2regAlpha, DORate, seed, imageSize)
    anchorFeatures = featureExtractor(anchorInput)  # B x features
    positiveFeatures = featureExtractor(positiveInput)
    negativeFeatures = featureExtractor(negativeInput)
    # Bug fix: the margin was hard-coded to 1.0, silently ignoring the
    # optimizationMargin parameter.
    anchorFeatures, positiveFeatures, negativeFeatures = TripletCosineDistanceLossLayer(
        optimizationMargin=optimizationMargin,
        name="TripletCosineDistanceLoss")(anchorFeatures, positiveFeatures, negativeFeatures)
    anchorFeatures, positiveFeatures, negativeFeatures = TripletCosineSimilarityMetricLayer(
        name="TripletCosineSimilarityMetric")(anchorFeatures, positiveFeatures, negativeFeatures)
    result = tf.keras.Model(name="SiameseTripletModel",
                            inputs=[anchorInput, positiveInput, negativeInput],
                            outputs=[anchorFeatures, positiveFeatures, negativeFeatures])
    return result, backbone, featureExtractor
"""Wrapper for Custom System Environments."""
from abc import ABCMeta, abstractmethod
import torch
from gym import Env
from rllib.util.utilities import tensor_to_distribution
from .abstract_environment import AbstractEnvironment, EnvironmentBuilder
class SystemEnvironment(AbstractEnvironment, Env):
    """Gym-compatible wrapper around an AbstractSystem.

    Parameters
    ----------
    system: AbstractSystem
        underlying system
    initial_state: callable, optional
        callable that returns an initial state
    reward: callable, optional
        callable that, given state and action returns a rewards
    termination_model: callable, optional
        callable that checks if interaction should terminate.
    """

    def __init__(self, system, initial_state=None, reward=None, termination_model=None):
        super().__init__(
            dim_state=system.dim_state,
            dim_action=system.dim_action,
            dim_observation=system.dim_observation,
            action_space=system.action_space,
            observation_space=system.observation_space,
            dim_reward=reward.dim_reward if reward is not None else (1,),
        )
        self.system = system
        self.reward = reward
        self.termination_model = termination_model
        self._time = 0

        # Normalize `initial_state` into a zero-argument callable.
        if initial_state is None:
            initial_state = self.system.observation_space.sample
        self.initial_state = initial_state if callable(initial_state) else (lambda: initial_state)

    def render(self, mode="human"):
        """See `AbstractEnvironment.render'."""
        return self.system.render(mode=mode)

    def step(self, action):
        """See `AbstractEnvironment.step'."""
        self._time += 1
        state = self.system.state  # this might be noisy.

        # Reward defaults to NaN when no reward model was supplied.
        if self.reward is None:
            reward = torch.tensor([float("nan")])
        else:
            reward = tensor_to_distribution(self.reward(state, action, None)).sample()

        next_state = self.system.step(action)

        done = False
        if self.termination_model is not None:
            termination = tensor_to_distribution(
                self.termination_model(state, action, next_state)
            )
            done = termination.sample().squeeze(-1)
        return next_state, reward, done, {}

    def reset(self):
        """See `AbstractEnvironment.reset'."""
        start = self.initial_state()
        self._time = 0
        return self.system.reset(start)

    @property
    def state(self):
        """See `AbstractEnvironment.state'."""
        return self.system.state

    @state.setter
    def state(self, value):
        self.system.state = value

    @property
    def time(self):
        """See `AbstractEnvironment.time'."""
        return self._time

    @property
    def name(self):
        """Return class name of the wrapped system."""
        return type(self.system).__name__
class SystemEnvironmentBuilder(EnvironmentBuilder, metaclass=ABCMeta):
    """Default builder that assembles SystemEnvironment instances."""

    def create_environment(self):
        """Create environment from the builder's model factories."""
        return SystemEnvironment(
            system=self.get_system_model(),
            initial_state=self.initial_distribution_fn(),
            reward=self.get_reward_model(),
            termination_model=self.get_termination_model(),
        )

    @abstractmethod
    def get_system_model(self):
        """Get dynamical model."""
        raise NotImplementedError

    def initial_distribution_fn(self):
        """Get Initial Distribution Sample function (None by default)."""
        return None
|
# -*- coding: utf-8 -*-
import getopt
import json
import os
import platform
import sys
from builder.core.env import env
import builder.core.colorconsole as cc
import builder.core.utils as utils
from builder.core.context import *
# Registry of menu entries built by make_options(); each real entry is a dict
# with 'id', 'name', 'disp' and 'bits' keys, separators use id '--SPLIT-LINE--'.
options = list()
# Monotonic id counter for add_option().
options_idx = 0
# Global build context (target/dist selection, host OS).
ctx = BuildContext()
def main():
    """Entry point: parse CLI selectors/action, then run one action or the menu."""
    cc.set_default(sep='', end='\n')

    if not env.init(warn_miss_tool=True):
        return

    # Scan argv: target/dist selectors configure the context; anything else is
    # taken as the action (a menu option name, or -h/--help).
    action = None
    for arg in sys.argv[1:]:
        if 'debug' == arg:
            ctx.set_target(TARGET_DEBUG)
        elif 'release' == arg:
            ctx.set_target(TARGET_RELEASE)
        elif arg in ctx.dist_all:
            ctx.set_dist(arg)
        else:
            action = arg

    make_options()

    if action is not None:
        if action == '-h' or action == '--help':
            # Align option descriptions on the longest option name.
            max_name_len = 0
            for x in options:
                if x['id'] != '--SPLIT-LINE--':
                    max_name_len = max(len(x['name']), max_name_len)
            max_name_len += 4
            for x in options:
                if x['id'] != '--SPLIT-LINE--':
                    name_pad = max_name_len - len(x['name'])
                    cc.o((cc.CR_INFO, x['name']), (cc.CR_VERBOSE, ' ' * name_pad), (cc.CR_VERBOSE, x['disp']))
            return

        opt = select_option_by_name(action)
        if opt is None:
            cc.e('unknown config: ', action)
            return
        do_opt(opt)
        return

    # Interactive menu mode.
    show_logo()
    while True:
        x = show_menu()
        if x == 'q':
            break

        try:
            x = int(x)
        except ValueError:
            # Narrowed from a bare `except:` so unrelated errors (including
            # KeyboardInterrupt) are no longer swallowed here.
            cc.e('invalid input.')
            continue

        opt = select_option_by_id(int(x))
        if opt is None:
            cc.e('unknown selection: ', x)
            continue

        do_opt(opt)

        cc.w('\ntask finished, press Enter to continue or Q to quit...', end='')
        try:
            x = input()
        except EOFError:
            x = 'q'
        if x == 'q':
            break
def clean_all():
    """Remove the build output directory ('out') under the project root."""
    utils.remove(os.path.join(env.root_path, 'out'))
def clean_everything():
    """Remove the build output plus every fetched/built external dependency.

    Rewritten from 18 copy-pasted utils.remove calls into one data-driven loop.
    """
    for parts in (
        ('out',),
        ('external', 'jsoncpp'),
        ('external', 'libuv'),
        ('external', 'mbedtls'),
        ('external', 'mongoose'),
        ('external', 'openssl'),
        ('external', 'python'),
        ('external', 'libssh-win-static', 'lib'),
        ('external', 'libssh-win-static', 'src'),
        ('external', 'linux', 'tmp'),
        ('external', 'linux', 'release', 'lib', 'libmbedcrypto.a'),
        ('external', 'linux', 'release', 'lib', 'libmbedtls.a'),
        ('external', 'linux', 'release', 'lib', 'libmbedx509.a'),
        ('external', 'linux', 'release', 'lib', 'libsqlite3.a'),
        ('external', 'linux', 'release', 'lib', 'libssh.a'),
        ('external', 'linux', 'release', 'lib', 'libssh_threads.a'),
        ('external', 'linux', 'release', 'lib', 'libuv.a'),
    ):
        utils.remove(os.path.join(env.root_path, *parts))
def clean_external():
    """Remove rebuilt external sources and the built Linux release libraries.

    Deliberately narrower than clean_everything(): 'out', openssl, python,
    libssh-win-static and linux/tmp are left in place.
    """
    for parts in (
        ('external', 'jsoncpp'),
        ('external', 'libuv'),
        ('external', 'mbedtls'),
        ('external', 'mongoose'),
        ('external', 'linux', 'release', 'lib', 'libmbedcrypto.a'),
        ('external', 'linux', 'release', 'lib', 'libmbedtls.a'),
        ('external', 'linux', 'release', 'lib', 'libmbedx509.a'),
        ('external', 'linux', 'release', 'lib', 'libsqlite3.a'),
        ('external', 'linux', 'release', 'lib', 'libssh.a'),
        ('external', 'linux', 'release', 'lib', 'libssh_threads.a'),
        ('external', 'linux', 'release', 'lib', 'libuv.a'),
    ):
        utils.remove(os.path.join(env.root_path, *parts))
def do_opt(opt):
    """Map a menu option to its builder script and run it via the python launcher."""
    name = opt['name']
    arg = ''
    if name == 'ver':
        script = 'build-version.py'
    elif name == 'pysrt':
        script = 'build-pysrt.py'
    elif name in ('ext-client', 'ext-server', 'clear-ext-client', 'clear-ext-server'):
        script = 'build-external.py'
        arg = '%s %s %s' % (name, ctx.target_path, opt['bits'])
    elif name == 'server':
        script = 'build-server.py'
        arg = '%s %s server' % (ctx.target_path, opt['bits'])
    elif name == 'server-installer':
        script = 'build-installer.py'
        arg = '%s %s server-installer' % (ctx.dist, opt['bits'])
    elif name == 'client':
        script = 'build-assist.py'
        arg = '%s %s exe' % (ctx.target_path, opt['bits'])
    elif name == 'client-installer':
        script = 'build-assist.py'
        arg = '%s %s installer' % (ctx.dist, opt['bits'])
    else:
        cc.e('unknown option: ', name)
        return

    # -B: do not write .pyc files for the builder scripts.
    cmd = '%s -B %s %s' % (env.py_exec, os.path.join(env.builder_path, script), arg)
    os.system(cmd)
def select_option_by_name(name):
    """Return the menu entry whose name matches *name*, or None."""
    for candidate in options:
        if candidate['id'] == '--SPLIT-LINE--':
            continue  # separator rows have no name
        if candidate['name'] == name:
            return candidate
    return None
def select_option_by_id(_id):
    """Return the menu entry with numeric id *_id*, or None."""
    return next((candidate for candidate in options if candidate['id'] == _id), None)
def add_option(bits, name, disp):
    """Append a selectable menu entry, assigning it the next sequential id."""
    global options_idx
    options_idx += 1
    entry = {'id': options_idx, 'name': name, 'disp': disp, 'bits': bits}
    options.append(entry)
def add_split(title=None):
    """Append a non-selectable separator row, optionally with a section title."""
    options.append({'id': '--SPLIT-LINE--', 'title': title})
def make_options():
    """Populate the global `options` menu for the current host OS.

    Windows builds are 32-bit ('x86'); macOS and other (Linux) builds are
    64-bit ('x64'). Separator titles group related actions.
    """
    if ctx.host_os in ['windows']:
        add_split('prepare external [build once]')
        # add_option('x86', 'external', '[OBSOLETE] Build external dependency')
        add_option('x86', 'ext-client', '[client] Build external libraries for client')
        # add_split('prepare for server [build once]')
        add_option('x86', 'pysrt', '[server] Make Python-Runtime for python%s-x86' % env.py_ver_str)
        add_option('x86', 'ext-server', '[server] Build external libraries for server')
        add_split('version [build every release]')
        add_option('x86', 'ver', 'Update version setting')
        add_split('client side')
        # add_option('x86', 'assist-exe', '[OBSOLETE] Assist Execute [%s]' % ctx.target_path)
        add_option('x86', 'client', 'Build client applications [%s]' % ctx.target_path)
        # add_option('x86', 'assist-rdp', 'Teleport RDP [%s]' % ctx.target_path)
        # add_option('x86', 'assist-installer', '[OBSOLETE] Assist Installer')
        add_option('x86', 'client-installer', 'Make client installer')
        add_split('server side')
        add_option('x86', 'pysrt', 'Make Python-Runtime for python%s-x86' % env.py_ver_str)
        add_option('x86', 'ext-server', 'Build external libraries for server')
        # add_option('x86', 'server', 'Teleport Server [%s]' % ctx.target_path)
        add_option('x86', 'server-installer', 'Teleport Installer for %s' % ctx.host_os)
        # add_option('x86', 'installer', '[OBSOLETE] Teleport Installer for %s' % ctx.host_os)
        add_split('clear')
        add_option('x86', 'clear-ext-client', 'Clear external libraries for client')
        add_option('x86', 'clear-ext-server', 'Clear external libraries for server')
    elif ctx.host_os == 'macos':
        add_split('client side')
        add_option('x64', 'ext-client', 'build external libraries for client')
        add_option('x64', 'client', 'build client applications [%s]' % ctx.target_path)
        add_option('x64', 'client-installer', 'make client installer')
        add_split('server side')
        add_option('x64', 'ext-server', '(DEV-ONLY) build external libraries for server')
        add_option('x64', 'server', '(DEV-ONLY) build server applications for MacOS [%s]' % ctx.target_path)
        add_split('clear')
        add_option('x64', 'clear-ext-client', 'clear external libraries for client')
        add_option('x64', 'clear-ext-server', 'clear external libraries for server')
        add_split('misc')
        add_option('x64', 'ver', 'update version setting')
    else:
        # Linux and other unix-like hosts: server-side builds only.
        add_split('prepare for server [build once]')
        add_option('x64', 'pysrt', 'Make Python-Runtime for python%s-x64' % env.py_ver_str)
        add_split('server side')
        add_option('x64', 'ext-server', 'build external libraries for server')
        add_option('x64', 'server', 'build server applications [%s]' % ctx.target_path)
        add_option('x64', 'server-installer', 'make server installer for %s' % ctx.host_os)
        add_split('clear')
        # add_option('x64', 'clear-ext-client', 'Clear external libraries for client')
        add_option('x64', 'clear-ext-server', 'clear external libraries for server')
        add_split('misc')
        add_option('x64', 'ver', 'update version setting')
def get_input(msg, log_func=cc.w):
    """Print *msg* via *log_func* and return the user's input ('' on EOF)."""
    log_func(msg, end=' ')
    try:
        line = input()
    except EOFError:
        line = ''
    return line
def show_logo():
    """Print the builder banner."""
    for banner_line in (
        '[]==========================================================[]',
        ' | Teleport Projects Builder v2.0 |',
        ' | auth: apex.liu@qq.com |',
        '[]==========================================================[]',
    ):
        cc.v(banner_line)
def show_menu():
    """Render the interactive menu and return the user's selection, lowercased."""
    cc.v('\n=====================[ MENU ]===============================')
    for o in options:
        if o['id'] == '--SPLIT-LINE--':
            # Separator rows: titled section header or a plain rule.
            if o['title'] is not None:
                cc.w('\n {}:'.format(o['title']))
            else:
                cc.v('\n ----------------------------------------------------------')
            continue
        cc.o((cc.CR_NORMAL, ' ['), (cc.CR_INFO, '%2d' % o['id']), (cc.CR_NORMAL, '] ', o['disp']))
    cc.v('\n ----------------------------------------------------------')
    cc.o((cc.CR_NORMAL, ' ['), (cc.CR_INFO, ' Q'), (cc.CR_NORMAL, '] exit'))
    cc.w('\nselect action: ', end='')
    try:
        x = input()
    except EOFError:
        # Closed stdin is treated as a quit request.
        x = 'q'
    cc.n('')
    return x.lower()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
    except RuntimeError as e:
        cc.e(e.__str__())
    except Exception:
        # Narrowed from a bare `except:` so SystemExit (and other BaseException
        # exits) propagate instead of being reported as a generic failure.
        cc.f('got an exception.')
|
class User:
    """A chat participant that communicates through a mediator chatroom."""

    def __init__(self, name: str):
        self.name = name
        # Active mediator; None until join() is called.
        self.chatroom = None

    def join(self, chatroom) -> None:
        """Register with *chatroom* and remember it as the active mediator."""
        chatroom.add_user(self)
        self.chatroom = chatroom

    def leave(self) -> None:
        """Deregister from the current chatroom.

        Bug fix: also clear the stored reference, so a user who left is no
        longer treated as a member (previously self.chatroom kept pointing at
        the room after leave()).
        """
        self.chatroom.remove_user(self)
        self.chatroom = None

    def send_message(self, message) -> None:
        """Relay *message* to the current chatroom (requires a prior join())."""
        self.chatroom.send_message(self, message)
import os
import configargparse
import argparse
# Parse the chunk index selecting which slice of the input frames to fit.
parser = argparse.ArgumentParser()
parser.add_argument('--chunkn',
                    required=True,
                    type=int,
                    help='The number of the chunk to be processed')
args = parser.parse_args()
# e.g. chunkn=7 -> '007_chunk/' under ./input and ./output
chunk_n = str(args.chunkn).zfill(3) + '_chunk/'
# Assemble the smplify-x fitting command for this chunk.
# NOTE(review): executed through the shell via os.system; chunkn is an int so
# injection is not possible here, but subprocess.run with an argument list
# would be safer if more options become dynamic.
command = 'python3 smplifyx/main.py \
--config cfg_files/fit_smpl.yaml \
--data_folder ./input/' + chunk_n + ' \
--output_folder ./output/' + chunk_n + ' \
--visualize=True \
--model_folder ./models \
--vposer_ckpt ../vposer_v1_0 \
--part_segm_fn smplx_parts_segm.pkl \
--interpenetration False \
--use_face False \
--use_hands False \
--gender=female \
'
print(command)
os.system(command)
|
import numpy as np
from keras.losses import sparse_categorical_crossentropy
from keras.models import Sequential
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
def _test_model(model, input_shape, output_sequence_length, french_vocab_size):
    """Shared shape/compile checks used by every model-building exercise.

    NOTE(review): `model.loss_functions` only exists on older Keras releases —
    confirm against the pinned keras version.
    """
    # Sequential wraps the functional model it builds internally.
    if isinstance(model, Sequential):
        model = model.model
    assert model.input_shape == (None, *input_shape[1:]),\
        'Wrong input shape. Found input shape {} using parameter input_shape={}'.format(model.input_shape, input_shape)
    assert model.output_shape == (None, output_sequence_length, french_vocab_size),\
        'Wrong output shape. Found output shape {} using parameters output_sequence_length={} and french_vocab_size={}'\
        .format(model.output_shape, output_sequence_length, french_vocab_size)
    assert len(model.loss_functions) > 0,\
        'No loss function set. Apply the `compile` function to the model.'
    #assert sparse_categorical_crossentropy in model.loss_functions,\
    #    'Not using `sparse_categorical_crossentropy` function for loss.'
def test_tokenize(tokenize):
sentences = [
'The quick brown fox jumps over the lazy dog .',
'By Jove , my quick study of lexicography won a prize .',
'This is a short sentence .']
tokenized_sentences, tokenizer = tokenize(sentences)
assert tokenized_sentences == tokenizer.texts_to_sequences(sentences),\
'Tokenizer returned and doesn\'t generate the same sentences as the tokenized sentences returned. '
def test_pad(pad):
tokens = [
[i for i in range(4)],
[i for i in range(6)],
[i for i in range(3)]]
padded_tokens = pad(tokens)
padding_id = padded_tokens[0][-1]
true_padded_tokens = np.array([
[i for i in range(4)] + [padding_id]*2,
[i for i in range(6)],
[i for i in range(3)] + [padding_id]*3])
assert isinstance(padded_tokens, np.ndarray),\
'Pad returned the wrong type. Found {} type, expected numpy array type.'
assert np.all(padded_tokens == true_padded_tokens), 'Pad returned the wrong results.'
padded_tokens_using_length = pad(tokens, 9)
assert np.all(padded_tokens_using_length == np.concatenate((true_padded_tokens, np.full((3, 3), padding_id)), axis=1)),\
'Using length argument return incorrect results'
def test_simple_model(simple_model):
    """Build the model with the canonical dataset dimensions and validate it."""
    shape, seq_len, en_vocab, fr_vocab = (137861, 21, 1), 21, 199, 344
    _test_model(simple_model(shape, seq_len, en_vocab, fr_vocab), shape, seq_len, fr_vocab)


def test_embed_model(embed_model):
    """Validate the embedding-based model (2-D input, no feature axis)."""
    shape, seq_len, en_vocab, fr_vocab = (137861, 21), 21, 199, 344
    _test_model(embed_model(shape, seq_len, en_vocab, fr_vocab), shape, seq_len, fr_vocab)


def test_encdec_model(encdec_model):
    """Validate the encoder-decoder model (input sequences of length 15)."""
    shape, seq_len, en_vocab, fr_vocab = (137861, 15, 1), 21, 199, 344
    _test_model(encdec_model(shape, seq_len, en_vocab, fr_vocab), shape, seq_len, fr_vocab)


def test_bd_model(bd_model):
    """Validate the bidirectional model."""
    shape, seq_len, en_vocab, fr_vocab = (137861, 21, 1), 21, 199, 344
    _test_model(bd_model(shape, seq_len, en_vocab, fr_vocab), shape, seq_len, fr_vocab)


def test_model_final(model_final):
    """Validate the final model (2-D input, sequences of length 15)."""
    shape, seq_len, en_vocab, fr_vocab = (137861, 15), 21, 199, 344
    _test_model(model_final(shape, seq_len, en_vocab, fr_vocab), shape, seq_len, fr_vocab)
|
# This program is free software: you can redistribute it and/or modify it under the
# terms of the Apache License (v2.0) as published by the Apache Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the Apache License for more details.
#
# You should have received a copy of the Apache License along with this program.
# If not, see <https://www.apache.org/licenses/LICENSE-2.0>.
"""Database initialization for StreamKit."""
# type annotations
from __future__ import annotations
from typing import TypeVar
# standard libs
import json
import logging
from datetime import datetime
# internal libs
from .engine import engine, db_config, schema
from .session import Session
from .orm import Table, tables
from ... import assets
# initialize module level logger
log = logging.getLogger(__name__)
def init() -> None:
    """Initialize database objects (e.g., tables)."""
    # create_all is idempotent: tables that already exist are left untouched.
    Table.metadata.create_all(engine)
# Value types accepted from the JSON fixtures, and the possibly-coerced result.
__VT = TypeVar('__VT', int, float, str)
__RT = TypeVar('__RT', int, float, str, datetime)


def _coerce_datetime(field: str, value: __VT) -> __RT:
    """Passively coerce formatted datetime strings if necessary.

    Only string values whose field name ends in 'time' are parsed; everything
    else passes through unchanged.
    """
    if not (isinstance(value, str) and field.endswith('time')):
        return value
    return datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
def load_records(name: str) -> list:
    """Instantiate ORM rows for table *name* from its bundled JSON asset."""
    raw = json.loads(assets.load_asset(f'database/test/{name}.json'))
    model = tables[name]
    return [model(**{key: _coerce_datetime(key, value) for key, value in record.items()})
            for record in raw]
def init_extensions() -> None:
    """Initialize database extensions/extras."""
    init()  # no-op when all tables already exist
    session = Session()
    backend = db_config['backend']
    for script_path in assets.find_files(f'/database/extensions/{backend}/*.sql'):
        # Substitute the schema placeholder before executing the script.
        sql = assets.load_asset(script_path).replace('{{ SCHEMA }}', schema or 'public')
        log.info(f'running {script_path}')
        session.execute(sql)
    session.commit()
def init_test_data() -> None:
    """Initialize database objects and test data for unit tests."""
    init()  # no-op when all tables already exist
    session = Session()
    for table_name in tables:
        session.add_all(load_records(table_name))
    session.commit()
|
#!/usr/bin/env python
import logging
import os
import re
import scrapy
import yaml
from bs4 import BeautifulSoup
from scrapy import Request
from scrapy.crawler import CrawlerProcess
LOGGER = logging.getLogger(__name__)
class WordpressSpider(scrapy.Spider):
    """
    Download articles from a wordpress blog.
    Start with the index page(s), and get all the articles listed there.
    """

    def __init__(self, articles_index_urls, dst_folder):
        # Bug fix: `super(WordpressSpider).__init__()` created an UNBOUND super
        # object, so scrapy.Spider.__init__ never actually ran.
        super().__init__()
        self.start_urls = articles_index_urls
        self.dst_folder = dst_folder

    def parse(self, response):
        """Yield a download request for every '?p=<id>' article link on an index page."""
        soup = BeautifulSoup(response.body, features='lxml')
        for a in soup.find_all('a'):
            href = a['href']
            if re.search(r'\?p=[0-9]+$', href):
                LOGGER.info(f'processing {a}')
                yield Request(href, self.save_article)

    def save_article(self, response):
        """Write the raw article HTML into dst_folder, named after its post id."""
        url = response.url
        html = response.body
        filename = url.split('/')[-1]
        # ?p=12345 -> 12345.html (assumes the last URL segment is '?p=<id>' —
        # guaranteed by the regex filter in parse()).
        filename = filename[3:] + '.html'
        filepath = os.path.join(self.dst_folder, filename)
        LOGGER.info(f'Saving article to {filepath}')
        with open(filepath, 'wb') as f:
            f.write(html)
if __name__ == '__main__':
    # File `urls.yaml` is intentionally not committed in order to hide the identity
    # of the scraped blogs
    with open('urls.yaml') as f:
        # safe_load: yaml.load without an explicit Loader is deprecated and can
        # execute arbitrary constructors.
        urls = yaml.safe_load(f.read())
    articles_index_urls = urls['wordpress']
    crawler = CrawlerProcess({
        'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
        'DOWNLOAD_DELAY': 3
    })
    crawler.crawl(WordpressSpider, articles_index_urls, dst_folder='data/html/wordpress/')
    crawler.start()
|
# @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
import stack.commands
from stack.argument_processors.scope import ScopeArgProcessor
from stack.exception import CommandError
class Command(ScopeArgProcessor, stack.commands.remove.command):
    """
    Remove a global static route.
    <param type='string' name='address' optional='0'>
    The address of the static route to remove.
    </param>
    <example cmd='remove route address=1.2.3.4'>
    Remove the global static route that has the network address '1.2.3.4'.
    </example>
    """
    def run(self, params, args):
        # Get the scope and make sure the args are valid
        scope, = self.fillParams([('scope', 'global')])
        scope_mappings = self.getScopeMappings(args, scope)
        # Now validate the params
        (address, syncnow) = self.fillParams([
            ('address', None, True),
            ('syncnow', None)
        ])
        syncnow = self.str2bool(syncnow)
        scope_ids = []
        for scope_mapping in scope_mappings:
            # Check that the route address exists for the scope.
            # `<=>` is presumably MySQL's NULL-safe equality, matching mappings
            # where the appliance/os/environment/node ids may be NULL — confirm.
            rows = self.db.select("""
                scope_map.id FROM routes,scope_map
                WHERE routes.scope_map_id = scope_map.id
                AND routes.address = %s
                AND scope_map.scope = %s
                AND scope_map.appliance_id <=> %s
                AND scope_map.os_id <=> %s
                AND scope_map.environment_id <=> %s
                AND scope_map.node_id <=> %s
            """, (address, *scope_mapping))
            if not rows:
                raise CommandError(
                    self, f'route with address "{address}" does not exist'
                )
            scope_ids.append(rows[0][0])
        # Routes existed for all the scope mappings, so delete them
        # Note: We just delete the scope mapping, the ON DELETE CASCADE takes
        # care of removing the routes table entries for us.
        self.db.execute('delete from scope_map where id in %s', (scope_ids,))
        # Sync the routes, if requested and we are 'host' scoped
        if scope == 'host' and syncnow:
            # Need to get the node ID for ourselves
            node_id = self.db.select(
                'id from nodes where name=%s',
                self.db.getHostname()
            )[0][0]
            for scope_mapping in scope_mappings:
                # Only sync mappings that target this very host.
                if scope_mapping.node_id == node_id:
                    # Remove the route
                    self._exec(f'ip route del {address}', shlexsplit=True)
                    # Sync the routes file
                    self._exec("""
                        /opt/stack/bin/stack report host route localhost |
                        /opt/stack/bin/stack report script |
                        bash > /dev/null 2>&1
                    """, shell=True)
|
#
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import logging
from managesf.services.storyboard import SoftwareFactoryStoryboard
logger = logging.getLogger(__name__)
# Valid project/group names: alphanumeric runs separated by single
# '_', '-', '.' or '/' characters.
NAME_RE = "^[a-zA-Z0-9]+([_\-\./]?[a-zA-Z0-9]+)*$"
PROJECT_NAME_RE = re.compile(NAME_RE)
# Inclusive length bounds enforced by extra_validations().
NAME_MIN_LEN = 5
NAME_MAX_LEN = 50
class StoryboardOps(object):
    """Sync managesf resource definitions into Storyboard projects/groups."""

    def __init__(self, conf, new=None):
        """conf: managesf configuration; new: resource tree being applied.

        Bug fix: `new` previously defaulted to a mutable `{}` shared by every
        instance created without the argument.
        """
        self.conf = conf
        self.new = new if new is not None else {}
        self.client = None  # lazily created by _set_client()

    def is_activated(self, **kwargs):
        """True when the SFStoryboard service is enabled and selected as tracker."""
        if ("SFStoryboard" in self.conf.services and
                kwargs.get('issue-tracker') == "SFStoryboard"):
            return True
        return False

    def _set_client(self):
        # Create the API client on first use only.
        if not self.client:
            stb = SoftwareFactoryStoryboard(self.conf)
            self.client = stb.get_client()

    def extra_validations(self, **kwargs):
        """Return a list of validation-error strings (empty when valid)."""
        logs = []
        if len(kwargs['name']) < NAME_MIN_LEN:
            logs.append("Storyboard project group name %s length is invalid"
                        " (Minimal len is %s)" % (
                            kwargs['name'], NAME_MIN_LEN))
        if len(kwargs['name']) > NAME_MAX_LEN:
            logs.append("Storyboard project group name %s length is invalid"
                        " (Maximal len is %s)" % (
                            kwargs['name'], NAME_MAX_LEN))
        sources_repositories = kwargs['source-repositories']
        for name in sources_repositories:
            if len(name) < NAME_MIN_LEN:
                logs.append(
                    "Storyboard project name %s length is invalid"
                    " (Minimal len is %s)" % (name, NAME_MIN_LEN))
            if len(name) > NAME_MAX_LEN:
                logs.append(
                    "Storyboard project name %s length is invalid"
                    " (Maximal len is %s)" % (name, NAME_MAX_LEN))
            if not PROJECT_NAME_RE.match(name):
                logs.append(
                    "Storyboard project name %s is invalid"
                    " (It should match the RE(%s))" % (name, NAME_RE))
        return logs

    def update_project(self, name, description):
        """Create the project or update its description when it already exists."""
        self._set_client()
        project = [p for p in self.client.projects.get_all(name=name)
                   if p.name == name]
        if project:
            project = project[0]
            self.client.projects.update(
                id=project.id, description=description)
        else:
            # Create the project
            self.client.projects.create(
                name=name, description=description)

    def delete_project(self, name):
        raise NotImplementedError('Not supported by Storyboard')

    def update_project_groups(self, **kwargs):
        """Reconcile a project group's membership with source-repositories."""
        name = kwargs['name']
        sources_repositories = kwargs['source-repositories']
        self._set_client()
        pg = [p for p in self.client.project_groups.get_all(name=name)
              if p.name == name]
        if not pg:
            # Create the project group
            pg = self.client.project_groups.create(
                name=name, title=name)
        else:
            pg = pg[0]
        included_ids = [
            p.id for p in
            self.client.project_groups.get(id=pg.id).projects.get_all()]
        # Ensure every wanted project exists (and is up to date) first.
        wanted_included = []
        for sr_name in sources_repositories:
            sr = self.new['resources']['repos'][sr_name]
            self.update_project(name=sr_name,
                                description=sr['description'])
            project = [p for p in self.client.projects.get_all(name=sr_name)
                       if p.name == sr_name]
            if project:
                wanted_included.append(project[0].id)
        # Diff current vs wanted membership and apply both directions.
        to_add = set(wanted_included) - set(included_ids)
        to_remove = set(included_ids) - set(wanted_included)
        for id in to_add:
            self.client.project_groups.update(id=pg.id).projects.put(id=id)
        for id in to_remove:
            self.client.project_groups.update(id=pg.id).projects.delete(id=id)

    def delete_project_groups(self, **kwargs):
        """Empty then delete the project group named in kwargs.

        NOTE(review): assumes the group exists (pg[0] raises IndexError
        otherwise) — confirm callers guarantee this.
        """
        name = kwargs['name']
        self._set_client()
        pg = [p for p in self.client.project_groups.get_all(name=name)
              if p.name == name]
        pg = pg[0]
        included_ids = [
            p.id for p in
            self.client.project_groups.get(id=pg.id).projects.get_all()]
        # A group must be emptied before Storyboard allows deleting it.
        for id in included_ids:
            self.client.project_groups.update(id=pg.id).projects.delete(id=id)
        self.client.project_groups.delete(id=pg.id)
if __name__ == '__main__':
    # Manual smoke test; requires a local managesf configuration file.
    from pecan import configuration
    conf = configuration.conf_from_file('/etc/managesf/config.py')
    ops = StoryboardOps(conf)
    ops._set_client()
    # Warn there is a minimal name length for project name
    ops.update_project('project1', 'the project p1')
|
import random
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import Graphics as artist
from matplotlib import rcParams
from actor import actor
rcParams['text.usetex'] = True
# Simulation parameters: network size/attachment, drawing and histogram styles.
params = {'n':{'nodes':1000,'edges':3},
          'network_drawing_props':{'cmap':plt.get_cmap('binary'),'width':0.5,'node_size':20,'edge_width':0.5,'with_labels':False},
          'hist_props' : {'color':'k','range':(0,1),'histtype':'stepfilled'}}
# Build a directed multigraph from a Barabasi-Albert scale-free graph.
G = nx.MultiDiGraph()
tmp = nx.barabasi_albert_graph(params['n']['nodes'],params['n']['edges'])
G.add_edges_from(tmp.edges())
G.add_nodes_from(tmp)
# One agent per node (NOTE: this list is rebuilt below before it is used).
actors = [actor() for _ in xrange(params['n']['nodes'])]
nrows = 2
ncols = 3
#--create mirror image
# Add reversed edges of a second BA graph so every node has predecessors.
G.add_edges_from(nx.MultiDiGraph(nx.barabasi_albert_graph(params['n']['nodes'],params['n']['edges'])).reverse().edges())
max_degree = max(nx.degree(G).values())
# Sigmoid rescaled so that inputs in [0, 1] map onto [0, 1].
min_sigmoid = 0.5
max_sigmoid = np.exp(1)/(1.+np.exp(1))
degrees = nx.degree(G)
sigmoid = lambda value: (1./(1+np.exp(-value))-min_sigmoid)/(max_sigmoid-min_sigmoid)
# Replace raw degrees with a squashed, normalized susceptibility in [0, 1].
for node in degrees:
    tmp = degrees[node]
    degrees[node] = sigmoid(tmp/float(max_degree))
# Influence of a node = in-degree / out-degree ratio.
influence = {node:len(G.predecessors(node))/float(len(G.successors(node))) for node in G.nodes_iter()}
#Link an agent with each node
actors = [actor() for _ in xrange(params['n']['nodes'])]
# NOTE(review): the loop variable shadows the imported `actor` class.
for node_idx,actor in zip(G.nodes(),actors):
    G.node[node_idx]['actor'] = actor
# alpha weights social vs. internal influence per node.
alpha=degrees
timesteps = 5
#--initial conditions
INITIAL = 0
END=-1
attitudes = np.zeros((params['n']['nodes'],2*timesteps))
THRESHOLDS = np.random.random_sample(size=params['n']['nodes'],)
#--Random attitudes
RANDOM = np.random.random_sample(size=(params['n']['nodes'],))
# Initial attitudes drawn from a gamma distribution, normalized to [0, 1].
DRUG_PUSHING = np.random.gamma(2,2,size=(params['n']['nodes'],))
DRUG_PUSHING /= DRUG_PUSHING.max()
attitudes = np.tile(1-DRUG_PUSHING,(2*timesteps,1)).T
normalize = lambda arr: arr/arr.sum()
# Per-node kernel: normalized influence of each predecessor.
influence_kernel = {node:normalize(np.array([influence[predecessor] for predecessor in G.predecessors(node)]).astype(float))
                    for node in G.nodes_iter()}
print influence_kernel
epsilon = 0.1
# Phase 1: attitudes driven by internal intent plus social influence.
for t in range(1,timesteps+1):
    for agent in G.nodes():
        internal_influence = G.node[agent]['actor'].calculate_intent_to_drink()
        social_influence = attitudes[G.predecessors(agent),t-1].dot(influence_kernel[agent]) #kernel already normalized
        effect = (1-alpha[agent])*internal_influence + alpha[agent]*social_influence
        # Only super-threshold effects change the attitude.
        attitudes[agent,t] += epsilon*(effect if effect > THRESHOLDS[agent] else 0)
        #update agent's drinking behavior
        G.node[agent]['actor'].update(effect)
    '''
    #change the susceptibility of the most tolerant agent
    most_tolerant_agent = np.argmax(attitudes[:,t])
    alpha[most_tolerant_agent] = 0
    attitudes[most_tolerant_agent,t] = 1
    '''
# Phase 2: internal intent replaced by the agent's own previous attitude.
for t in xrange(timesteps,2*timesteps):
    for agent in G.nodes():
        social_influence = attitudes[G.predecessors(agent),t-1].dot(influence_kernel[agent]) #kernel already normalized
        effect = (1-alpha[agent])*attitudes[agent,t-1] + alpha[agent]*social_influence
        attitudes[agent,t] += epsilon*(effect if effect > THRESHOLDS[agent] else 0)
# Compare initial vs. final attitude distributions.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(attitudes[:,INITIAL],color='r',alpha=0.5,bins=20,label='Initial')
ax.hist(attitudes[:,END],color='k',alpha=0.5,bins=20,label='Final')
plt.legend(frameon=False)
plt.show()
initial_db = True
# Dashboard: histogram of each agent variable in an nrows x ncols grid.
if initial_db:
    fig,axs = plt.subplots(nrows=nrows,ncols=ncols,sharex=True,sharey=True)
    yvars = random.choice(actors).variables.keys()
    for i,col in enumerate(axs):
        for j,row in enumerate(col):
            axs[i,j].hist([actor.variables[yvars[i*ncols+j]] for actor in actors],**params['hist_props'])
            fig.canvas.mpl_connect('draw_event', artist.on_draw)
            artist.adjust_spines(axs[i,j])
            if 'attitude' not in yvars[i*ncols+j]:
                axs[i,j].set_xlabel(artist.format(yvars[i*ncols+j]))
            elif 'psychological' in yvars[i*ncols+j]:
                label = '\n'.join(map(artist.format,['Attitude to','psychological','consequences']))
                axs[i,j].set_xlabel(label)
            elif 'medical' in yvars[i*ncols+j]:
                label = '\n'.join(map(artist.format,['Attitude','to medical','consequences']))
                axs[i,j].set_xlabel(label)
    plt.tight_layout()
    plt.savefig('dashboard-f.png',dpi=300)
import importlib
from pathlib import PurePath, Path
from opera import stdlib
from opera.error import ParseError
from opera.parser import yaml
# Maps the TOSCA document version string to the internal parser module name.
SUPPORTED_VERSIONS = dict(
    tosca_simple_yaml_1_3="v_1_3",
)
def load(base_path: Path, service_template: PurePath):
    """Parse a TOSCA service template and return the merged service model.

    The TOSCA standard library for the detected version is parsed first,
    the user template is merged on top of it, and then path and reference
    resolution passes run over the combined model.
    """
    template_path = base_path / service_template
    with template_path.open() as input_fd:
        input_yaml = yaml.load(input_fd, str(service_template))
    if not isinstance(input_yaml.value, dict):
        raise ParseError("Top level structure should be a map.", "0:0")

    tosca_version = _get_tosca_version(input_yaml)
    parser = _get_parser(tosca_version)

    stdlib_yaml = stdlib.load(tosca_version)
    service = parser.parse_service_template(
        stdlib_yaml, base_path, PurePath("STDLIB"), set())[0]
    service.merge(parser.parse_service_template(
        input_yaml, base_path, service_template, set())[0])
    service.visit("resolve_path", base_path)
    service.visit("resolve_reference", service)
    return service
def _get_parser(tosca_version):
    """Return the Parser class for an internal TOSCA version name.

    `tosca_version` is one of the SUPPORTED_VERSIONS values (e.g. "v_1_3")
    and matches the sibling parser module's name, so use it instead of the
    previously hard-coded ".v_1_3" — identical today, but correct once a
    second version is added.
    """
    return importlib.import_module("." + tosca_version, __name__).Parser  # type: ignore
def _get_tosca_version(input_yaml):
    """Extract and validate tosca_definitions_version from the document."""
    for key, version in input_yaml.value.items():
        if key.value != "tosca_definitions_version":
            continue
        try:
            return SUPPORTED_VERSIONS[version.value]
        except (TypeError, KeyError) as e:
            supported = ", ".join(SUPPORTED_VERSIONS.keys())
            raise ParseError(
                "Invalid TOSCA version. Available: {}.".format(supported),
                version.loc) from e
    raise ParseError("Missing TOSCA version", input_yaml.loc)
|
'''
KLUDGE: This is actually an integration test, as it relies on access
to the index_of_data_fields file.
>>> def mn_census_data():
... from pathlib2 import Path
... # return Path('/d1/geo-census/mn-census-data')
... return Path('.')
>>> bk = pd.read_csv(
... (mn_census_data() /
... 'acs_20135a/index_of_data_fields__acs_20135a.csv').open('rb'))
>>> print table_def('acs_zcta_200', 'ge.00_file.dat.gz', bk[:5])
create table acs_zcta_200 (
FILEID VARCHAR2(6),
STUSAB VARCHAR2(2),
SUMLEVEL VARCHAR2(3),
COMPONENT VARCHAR2(2),
LOGRECNO INTEGER
) organization external (
type oracle_loader
default directory geo_census_stage
access parameters (
records delimited by newline
preprocessor staging_tools:'zcat.sh'
fields lrtrim
(
FILEID position (1-6) char(6),
STUSAB position (7-2) char(2),
SUMLEVEL position (9-3) char(3),
COMPONENT position (12-2) char(2),
LOGRECNO position (14-7) char(7) NULLIF LOGRECNO = '.'
)
)
location ('ge.00_file.dat.gz')
)
'''
from textwrap import dedent
import pandas as pd
MAX_COLUMNS = 768  # nicely splits columns into 6 groups
# Census summary levels: one set of external tables is emitted per entry.
GRANULARITIES = ['urb_area_400', 'county_050', 'cty_sub_060',
                 'zcta_860', 'place_070', 'blck_grp_150', 'tract_140']
# Note we're skipping acs_20135b, cph_2010_sf1a, and cph_2010_sf1b
DIR = 'acs_20135a'
def main(argv, cwd):
    """Write external-table DDL for every granularity into one SQL file.

    argv[1]: field-index CSV path; argv[2]: output SQL path (both relative
    to `cwd`, a pathlib-style object).
    NOTE(review): files are opened in binary mode and written str values —
    Python 2 style (the module uses pathlib2); confirm before running
    under Python 3.
    """
    [field_index_fn, out_fn] = argv[1:3]
    fields = pd.read_csv((cwd / field_index_fn).open('rb'))
    with (cwd / out_fn).open('wb') as out:
        for granularity in GRANULARITIES:
            for g_ix, grp in field_groups(fields, max_columns=MAX_COLUMNS):
                name = '%s_%s' % (DIR, granularity)
                dat_fn = name + '.dat.gz'
                # Each oversized granularity is split into numbered tables.
                out.write(table_def('MPC.%s_%d' % (name, g_ix), dat_fn, grp))
                out.write(';\n\n')
def column_def(field):
    """Render one column declaration for the CREATE TABLE column list."""
    # Numeric source fields become INTEGER; everything else keeps its
    # fixed character width.
    # TODO: implied_decimal_places, multiplier
    if field.data_type == 'n':
        dty = 'INTEGER'
    else:
        dty = 'VARCHAR2(%d)' % field.width
    return ' {name} {dty}'.format(name=field.variable_code, dty=dty)
def field_spec(field):
    """Render one fixed-position field clause for the loader field list."""
    # Numeric fields load '.' as NULL. Note the (start-width) pair mirrors
    # the index file layout rather than a (start-end) range; the doctest
    # output depends on this.
    nullif = ''
    if field.data_type == 'n':
        nullif = " NULLIF %s = '.'" % field.variable_code
    return ' %s position (%d-%d) char(%d)%s' % (
        field.variable_code,
        field.start_column, field.width,
        field.width,
        nullif)
def table_def(name, location, fields,
              data_dir='geo_census_stage',
              tools_dir='staging_tools'):
    """Render CREATE TABLE DDL for an Oracle external table.

    `fields` is a DataFrame of field-index rows; `location` is the gzip'd
    data file name served from Oracle directory `data_dir` and decompressed
    by the zcat.sh preprocessor found in `tools_dir`.
    """
    # Oracle hard limit; fail fast with the matching ORA error text.
    if len(fields) > 1000:
        raise ValueError(
            'ORA-01792: maximum number of columns in a table or view is 1000')
    coldefs = ',\n'.join(column_def(f)
                         for (_, f) in fields.iterrows())
    field_list = ',\n'.join(field_spec(f)
                            for (_, f) in fields.iterrows())
    return dedent('''\
create table {name} (
{coldefs}
) organization external (
type oracle_loader
default directory {data_dir}
access parameters (
records delimited by newline
preprocessor {tools_dir}:'zcat.sh'
fields lrtrim
(
{field_list}
)
)
location ('{location}')
)
''').strip().format(
        name=name, coldefs=coldefs, field_list=field_list, location=location,
        data_dir=data_dir, tools_dir=tools_dir)
def field_groups(all_fields,
                 last_key='NAME', max_columns=1000):
    """Yield (group_index, DataFrame) chunks of at most max_columns columns.

    The leading key fields (rows up to the one whose variable_code equals
    `last_key`) are repeated at the start of every chunk so each generated
    table loads independently; remaining fields are added one whole
    table_source_code at a time, never splitting a source table.

    NOTE(review): uses DataFrame.append, removed in pandas 2.0 — this
    module requires an older pandas.
    """
    n_keys = all_fields[all_fields.variable_code == last_key].index.values[0]
    group = all_fields[:n_keys]
    g_ix = 0
    for table_code in all_fields.table_source_code[n_keys + 1:].unique():
        # print '========', table_code
        table_fields = all_fields[all_fields.table_source_code == table_code]
        # print table_fields.index
        # Flush the current group when this table would overflow it.
        if len(group) + len(table_fields) > max_columns:
            yield g_ix, group
            g_ix += 1
            group = all_fields[:n_keys]
        group = group.append(table_fields)
    # Emit the trailing partial group if it gained any non-key fields.
    if len(group) > n_keys:
        yield g_ix, group
if __name__ == '__main__':
    def _script():
        # Wrapped in a function to keep argv/Path out of module scope on
        # import; pathlib2 is the Python 2 backport of pathlib.
        from sys import argv
        from pathlib2 import Path
        main(argv, cwd=Path('.'))
    _script()
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Map of service name -> local development port. To add a service, insert
# a key/value pair of the service's name and the port it should use when
# running locally.
SERVICES = {
    'default': 8000,
    'static': 8001
}
def init_app(app):
    """Attach the service-name -> base-URL map to the app config."""
    # GAE sets GAE_INSTANCE (or the legacy GAE_MODULE_INSTANCE) only when
    # running on App Engine, so its presence marks production.
    gae_instance = os.environ.get(
        'GAE_INSTANCE', os.environ.get('GAE_MODULE_INSTANCE'))
    if gae_instance is None:
        environment = 'development'
    else:
        environment = 'production'
    app.config['SERVICE_MAP'] = map_services(environment)
def map_services(environment):
    """Generates a map of services to correct urls for running locally
    or when deployed.

    Unknown environment values yield an empty map (previous behavior).
    """
    url_map = {}
    for name, port in SERVICES.items():
        if environment == 'production':
            url_map[name] = production_url(name)
        elif environment == 'development':
            url_map[name] = local_url(port)
    return url_map
def production_url(service_name):
    """Generates url for a service when deployed to App Engine."""
    # GAE_LONG_APP_ID holds the project id on App Engine.
    host = '{}.appspot.com'.format(os.environ.get('GAE_LONG_APP_ID'))
    # The default service lives at the bare project domain; every other
    # service uses the "-dot-" routing prefix.
    if service_name == 'default':
        return 'https://{}'.format(host)
    return 'https://{}-dot-{}'.format(service_name, host)
def local_url(port):
    """Generates url for a service when running locally"""
    return 'http://localhost:' + str(port)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torchvision
import torch.nn as nn
import torchvision.models as models
from maskrcnn_benchmark.utils.comm import get_rank
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
# from one_shot_supp import one_shot_supp_list
import random
from PIL import Image
import os
import pickle
import time
import numpy as np
import pickle
min_keypoints_per_image = 10
def _count_visible_keypoints(anno):
return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def has_valid_annotation(anno):
    """Decide whether an image's annotation list is usable for training."""
    # No annotations at all -> unusable.
    if not anno:
        return False
    # Only degenerate (near-zero area) boxes -> unusable.
    if _has_only_empty_bbox(anno):
        return False
    # Non-keypoint tasks: any valid box is enough.
    if "keypoints" not in anno[0]:
        return True
    # Keypoint tasks additionally need enough visible keypoints.
    return _count_visible_keypoints(anno) >= min_keypoints_per_image
def _has_only_small_bbox(anno):
return all(obj['area']<32*32 for obj in anno)
def has_valid_large_annotation(anno):
    """Valid annotation list that also contains at least one non-small box."""
    return has_valid_annotation(anno) and not _has_only_small_bbox(anno)
class ImagenetDataset(torchvision.datasets.coco.CocoDetection):
    def __init__(
        self,
        cfg,
        ann_file,
        root,
        is_train,
        remove_images_without_annotations,
        transforms=None,
        save_img=False,
    ):
        """Few-shot detection dataset over COCO-format annotations.

        Builds a per-category catalog of images containing that category
        (capped at 2000 each), then flattens it into a shuffled list of
        (image, query-category) pairs consumed by __getitem__.

        :param cfg: project config node (FEW_SHOT.* and INPUT.* are read)
        :param ann_file: COCO-format annotation json
        :param root: image root directory
        :param is_train: selects which category split gets excluded
        :param remove_images_without_annotations: ignored — forced True below
        :param transforms: must be [query_transform, support_transform];
            anything else raises
        :param save_img: ignored — overridden by cfg.FEW_SHOT.SAVE_IMAGE
        """
        super(ImagenetDataset, self).__init__(root, ann_file)
        self.rank = get_rank()
        # random.seed(6666)
        # NOTE(review): the constructor argument is ignored here.
        remove_images_without_annotations = True
        self.cfg = cfg
        self.neg_supp = cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON
        self.neg_supp_num_cls = cfg.FEW_SHOT.NEG_SUPPORT.NUM_CLS
        self.choose_close = False #cfg.FEW_SHOT.CHOOSE_CLOSE
        self.shot = cfg.FEW_SHOT.NUM_SHOT
        self.save_img = cfg.FEW_SHOT.SAVE_IMAGE
        self.training_exclude_cats = cfg.FEW_SHOT.TRAINING_EXCL_CATS
        self.test_exclude_cats = cfg.FEW_SHOT.TEST_EXCL_CATS
        self.is_train = is_train
        if self.rank == 0:
            print('Save image: '+str(self.save_img))
            print('number of shots: '+str(self.shot))
            print('Use negative supp: '+str(self.neg_supp))
            print('Choose close supp: '+str(self.choose_close))
        # train-test split
        # NOTE(review): voc_class is unused; kept as documentation of the
        # PASCAL-VOC category ids in contiguous 1-80 numbering.
        voc_class = [1, 2, 3, 4, 5, 6, 7, 9, 40, 57, 59, 61, 63, 15, 16, 17, 18, 19, 20] # in terms of 1-80
        # exclude_cont_catIds = voc_class
        if is_train:
            exclude_cont_catIds = self.training_exclude_cats
        else:
            exclude_cont_catIds = self.test_exclude_cats
        # printing excluded classes
        self.all_json_category_id_to_contiguous_id = {
            v: i + 1 for i, v in enumerate(self.coco.getCatIds())
        } # 1-90 to 1-80
        self.all_contiguous_category_id_to_json_id = {
            v: k for k, v in self.all_json_category_id_to_contiguous_id.items()
        } # 1-80 to 1-90
        is_train_str = 'training' if is_train else 'testing'
        real_excluded_class = [self.all_contiguous_category_id_to_json_id[catId] for catId in exclude_cont_catIds] # to 1-90
        cats = self.coco.loadCats(real_excluded_class)
        names=[cat['name'] for cat in cats]
        if self.rank == 0:
            print('Init COCO for ' + is_train_str + '...')
            print('excluding:')
            print(real_excluded_class)
            print(names)
        # self.json_category_id_to_contiguous_id = {
        #     v: i + 1 for i, v in enumerate(self.coco.getCatIds()) if v in [11] # 1-90 to 1-80 included cls
        # }
        self.json_category_id_to_contiguous_id = {
            v: i + 1 for i, v in enumerate(self.coco.getCatIds()) if i + 1 not in exclude_cont_catIds # 1-90 to 1-80 included cls
        }
        if self.rank == 0:
            print('number of ' + is_train_str + ' categories: ' + str(len(self.json_category_id_to_contiguous_id)))
        self.contiguous_category_id_to_json_id = {
            v: k for k, v in self.json_category_id_to_contiguous_id.items()
        }
        if isinstance(transforms, list): # which must be true for few shot learning
            self._transforms = transforms[0]
            self._supp_transforms = transforms[1]
        else:
            raise Exception('require a list of 2 _transforms for supp and query')
        self.json_cat_list = list(self.json_category_id_to_contiguous_id.keys()) # 1-80
        self.catalog = {} # cat to list of img_ids
        for cat in self.json_cat_list:
            self.catalog[cat] = []
            img_ids_cur_cat = self.coco.getImgIds(catIds=cat)
            img_ids_cur_cat = sorted(img_ids_cur_cat)
            # filter images without detection annotations
            if remove_images_without_annotations:
                for img_id in img_ids_cur_cat:
                    ann_ids = self.coco.getAnnIds(imgIds=img_id, catIds=cat, iscrowd=False) # filter out iscrowd
                    anno = self.coco.loadAnns(ann_ids)
                    if has_valid_large_annotation(anno):
                        self.catalog[cat].append(img_id)
                    # constrain each cat to only have at most 2000 images
                    # prevent overfitting on cats having more images
                    if len(self.catalog[cat]) >= 2000:
                        break
        # print #imgs per cat
        # if self.rank == 0:
        #     for cat in self.catalog:
        #         loaded_cat = self.coco.loadCats(cat)
        #         name=loaded_cat[0]['name']
        #         print(str(cat) + '(' + name + '): ' + str(len(self.catalog[cat])))
        # # sort indices for reproducible results
        # Flatten the catalog: one dataset entry per (image, category) pair.
        self.ids = []
        self.chosen_cats = []
        self.chosen_neg_cats = []
        for cat, ids in self.catalog.items(): # cat to image ids
            cats = [cat for i in range(len(ids))]
            # -1 is a placeholder; negative categories are not assigned here.
            neg_cats = [-1 for i in range(len(ids))]
            self.chosen_cats = self.chosen_cats + cats
            self.ids = self.ids + ids
            self.chosen_neg_cats = self.chosen_neg_cats + neg_cats
        assert(len(self.ids) == len(self.chosen_cats))
        assert(len(self.ids) == len(self.chosen_neg_cats))
        # Shuffle entries while keeping ids and categories aligned.
        index_arr = list(range(len(self.ids)))
        random.shuffle(index_arr)
        index_arr = np.asarray(index_arr)
        self.ids = np.asarray(self.ids)
        self.ids = self.ids[index_arr]
        self.ids = self.ids.tolist()
        self.chosen_cats = np.asarray(self.chosen_cats)[index_arr].tolist() # 0->1000 --> cat
        self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
        debug_check = False
        if debug_check:
            # Sanity pass: every entry must have ground truth and an image.
            for i in range(len(self.ids)):
                img_id = self.id_to_img_map[i]
                cur_cat = self.chosen_cats[i]
                ann_ids = self.coco.getAnnIds(imgIds=img_id, catIds=cur_cat, iscrowd=False) # filter out iscrowd
                anno = self.coco.loadAnns(ann_ids)
                if not anno:
                    print('ERROR: no gt')
                    print(i)
                    print(img_id)
                    print(cur_cat)
                loaded_img = self.coco.loadImgs([img_id])
                if not loaded_img:
                    print('no_img')
                    print(i)
                    print(img_id)
                    print(cur_cat)
                # path = loaded_img[0]['file_name']
        # for saving img for vis
        self.dirName = None
        if self.save_img:
            suffix = str(self.shot) + 'shot'
            if self.neg_supp:
                suffix = suffix + '_neg'
            if self.choose_close:
                suffix = suffix + '_close'
            timeStamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
            dirName = 'imgs/dist_' + suffix + timeStamp + '/'
            if not os.path.exists(dirName):
                os.makedirs(dirName)
            self.dirName = dirName
        if self.rank == 0:
            print('dataset length: ' + str(self.__len__()))
        # load close dict pkl
        if self.choose_close:
            if self.is_train:
                # Precomputed support-similarity tables, one pickle per cat.
                self.close_dict = {}
                for cat in self.json_cat_list:
                    close_dict_path = '/data/linz/few_shot/fcos_plus/fcos_plus/supp_sim/supp_similarity_'+str(cat)+'.pkl'
                    with open(close_dict_path, 'rb') as f:
                        self.close_dict[cat] = pickle.load(f)
                print('loaded all similarity')
                self.supp_root = '/data/xlide/fcos/supps'
            else: # test time load voc stuff
                pass
                cats = self.coco.loadCats(self.json_cat_list)
                names=[cat['name'] for cat in cats]
                print(names)
def get_one_preset_item_from_cat(self, catId):
"""
Args:
catId (int): json cat id
Returns:
tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
"""
assert(catId in self.json_cat_list)
catId -= 1 # 1-20 to 0-19
coco = self.coco
imgs_choices = self.catalog[catId].copy()
random.shuffle(imgs_choices)
img_meta_infos = coco.loadImgs(valid_img_ids)
paths = [img_meta_info['file_name'] for img_meta_info in img_meta_infos]
imgs = [Image.open(os.path.join(self.root, path)).convert('RGB') for path in paths]
coords = [chosen_anno['bbox'] for chosen_anno in valid_anns]
imgs = [imgs[i].crop((coords[i][0], coords[i][1], coords[i][0]+coords[i][2], coords[i][1]+coords[i][3])) for i in range(shot)]
if self.transform is not None:
imgs = [self.transform(img) for img in imgs]
return imgs
    def get_random_item_from_cat(self, catId, excludeImgId, shot=1):
        """
        Randomly pick `shot` cropped support images for a category.

        Args:
            catId (int): json cat id
            excludeImgId (int): image id to skip (usually the query image)
            shot (int): number of support crops to return

        Returns:
            list: cropped support images. May contain fewer than `shot`
            entries if not enough large-enough instances exist; the crop
            list-comprehension below then raises IndexError — TODO confirm
            whether that can happen with the configured threshold.
        """
        coco = self.coco
        assert(catId in self.json_cat_list)
        # Work on a shuffled copy so the catalog order stays untouched.
        imgs_choices = self.catalog[catId].copy()
        random.shuffle(imgs_choices)
        pp = False
        valid_img_ids = []
        valid_anns = []
        for img_id in imgs_choices:
            if img_id == excludeImgId:
                continue
            ann_ids = coco.getAnnIds(imgIds=img_id, catIds=catId, iscrowd=False)
            target = coco.loadAnns(ann_ids)
            # Keep the largest instance of the category in this image.
            chosen_anno = target[0]
            for anno in target:
                if anno['area'] > chosen_anno['area']:
                    chosen_anno = anno
            # Only accept images whose best instance is big enough.
            pp = chosen_anno['area'] > self.cfg.INPUT.SUPP_AREA_THRESHOLD
            if pp:
                valid_img_ids.append(img_id)
                valid_anns.append(chosen_anno)
            pp=False
            if len(valid_img_ids) == shot:
                break
        img_meta_infos = coco.loadImgs(valid_img_ids)
        paths = [img_meta_info['file_name'] for img_meta_info in img_meta_infos]
        imgs = [Image.open(os.path.join(self.root, path)).convert('RGB') for path in paths]
        # Crop each support image to the chosen instance's xywh bbox.
        coords = [chosen_anno['bbox'] for chosen_anno in valid_anns]
        imgs = [imgs[i].crop((coords[i][0], coords[i][1], coords[i][0]+coords[i][2], coords[i][1]+coords[i][3])) for i in range(shot)]
        # NOTE(review): self.transform comes from the torchvision parent and
        # appears to be None here (support transforms are applied later in
        # __getitem__) — confirm.
        if self.transform is not None:
            imgs = [self.transform(img) for img in imgs]
        return imgs
    def get_close_item_from_cat(self, queryImgId, catId, shot=1):
        """
        Pick the `shot` support crops most similar to the query image.

        Similarity scores come from the precomputed close_dict pickles
        loaded in __init__ (train only). At test time, or when the lookup
        fails, falls back to random supports.

        Args:
            queryImgId (int): coco image id of the query
            catId (int): json cat id
            shot (int): number of supports to return

        Returns:
            list: support images (transformed if self.transform is set).
        """
        if not self.is_train:
            # print('Not training, cannot get close item!')
            return self.get_random_item_from_cat(catId, queryImgId, shot=self.shot)
        try:
            annDict = self.close_dict[catId][queryImgId][catId]
        except Exception:
            # Lookup failed; dump context for debugging and fall back.
            print(queryImgId)
            print(catId)
            print(self.close_dict[catId][queryImgId])
            return self.get_random_item_from_cat(catId, queryImgId, shot=self.shot)
        annList = []
        # Sort each per-instance similarity dict by support name so the
        # columns align across instances.
        for len100dict in annDict.values(): # value is a len100 dict
            len100list = [(supp_name, sim) for supp_name, sim in len100dict.items()]
            len100list.sort(key=lambda x: x[0])
            annList.append(len100list)
        num_avail_supp = len(annList[0])
        # Average each support's similarity over all query instances.
        reduced_annList = [[annList[0][j][0], 0] for j in range(num_avail_supp)]
        for i in range(len(annList)):
            for j in range(num_avail_supp):
                reduced_annList[j][1] += annList[i][j][1] / len(annList)
        reduced_annList.sort(key=lambda x: x[1], reverse=True) # decreasing sim
        target_supp_names = [item[0] for item in reduced_annList[:shot]]
        catPath = os.path.join(self.supp_root, str(catId))
        suppPaths = [os.path.join(catPath, supp_name+'.jpg') for supp_name in target_supp_names]
        suppImgs = [Image.open(suppPath).convert('RGB') for suppPath in suppPaths]
        # NOTE(review): self.transform is the torchvision parent attribute,
        # presumably None here — confirm.
        if self.transform is not None:
            suppImgs = [self.transform(suppImg) for suppImg in suppImgs]
        return suppImgs
def get_cats_in_img(self, imgId):
annotationIds = self.coco.getAnnIds(imgIds=imgId, iscrowd=False)
annotations = self.coco.loadAnns(annotationIds)
catIds = [ann['category_id'] for ann in annotations if ann['category_id'] in self.json_cat_list]
return list(set(catIds))
def __getitem__(self, idx):
img, anno = super(COCODataset, self).__getitem__(idx)
cur_cat = self.chosen_cats[idx]
cur_neg_cat = self.chosen_neg_cats[idx]
img_id = self.ids[idx]
# filter crowd annotations
# TODO might be better to add an extra field
anno = [obj for obj in anno if obj["iscrowd"] == 0 and obj['area']>=32*32] # remove small objects
# filter catId in annotation
if self.is_train:
anno = [obj for obj in anno if obj["category_id"] == cur_cat or obj["category_id"] == cur_neg_cat]
else:
anno = [obj for obj in anno if obj["category_id"] == cur_cat ]
boxes = [obj["bbox"] for obj in anno]
boxes = torch.as_tensor(boxes).reshape(-1, 4) # guard against no boxes
target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
classes = []
for obj in anno:
if obj["category_id"] == cur_cat:
classes.append(1)
else:
classes.append(2)
classes = torch.tensor(classes)
target.add_field("labels", classes)
target = target.clip_to_image(remove_empty=True)
if self.choose_close:
img_supp = self.get_close_item_from_cat(img_id, cur_cat, shot=self.shot)
else:
img_supp = self.get_random_item_from_cat(cur_cat, excludeImgId=img_id, shot=self.shot)
if self.neg_supp:
img_neg_supp = self.get_random_item_from_cat(cur_neg_cat, excludeImgId=img_id, shot=self.shot)
# save for visualization
if self.save_img:
img.save(self.dirName+'{:08d}query.jpg'.format(idx))
for s in range(self.shot):
img_supp[s].save(self.dirName+'{:08d}supp{:02d}.jpg'.format(idx, s))
if self.neg_supp:
img_neg_supp[s].save(self.dirName+'{:08d}neg_supp{:02d}.jpg'.format(idx, s))
if self._transforms is not None:
img, target = self._transforms(img, target)
for s in range(self.shot):
img_supp[s], _ = self._supp_transforms(img_supp[s], target) # dummy target
if self.neg_supp:
img_neg_supp[s], _ = self._supp_transforms(img_neg_supp[s], target) # dummy target
results = {}
results['img'] = img
results['img_supp'] = img_supp
if self.neg_supp:
results['img_neg_supp'] = img_neg_supp
else:
results['img_neg_supp'] = img_supp
assert results['img_neg_supp'] is not None
results['target'] = target
# results['target_supp'] = target_supp
results['idx'] = idx
# ## debug
# img_isnan = torch.sum(torch.isnan(img))
# assert img_isnan == 0, img
# for img_sup in img_supp:
# img_sup_isnan = torch.sum(torch.isnan(img_sup))
# assert img_sup_isnan == 0, img_sup
# for img_sup in results['img_neg_supp']:
# img_sup_isnan = torch.sum(torch.isnan(img_sup))
# assert img_sup_isnan == 0, img_sup
return results
def get_img_info(self, index):
# return query info
img_id = self.id_to_img_map[index]
img_data = self.coco.imgs[img_id]
# print("index {} -> img id {}".format(index, img_id))
img_cur_cat = self.chosen_cats[index]
return img_data, img_cur_cat
|
#!/usr/bin/python
import argparse
import json
from collections import OrderedDict
# Ordered positional fields accepted by --create; zipped against the args.
CHANNEL_PARAMS = ["id", "name", "desc"]
def parse_args():
    """Build and parse the command-line arguments for the channel tool."""
    parser = argparse.ArgumentParser()
    # Positional: the JSON store to operate on.
    parser.add_argument(
        "filepath", help="path to the json list of channels", default="./channels"
    )
    # --create takes the channel fields positionally (id name desc ...).
    parser.add_argument(
        "-c", "--create",
        nargs="+",
        default=None,
        required=False,
        help="create a new channel",
    )
    # --delete matches either the channel id or its name.
    parser.add_argument(
        "-d", "--delete",
        default=None,
        required=False,
        help="delete channel by id or name",
    )
    return parser.parse_args()
def create_channel(filepath, channel_args):
    """Append a new channel built from positional args to the JSON file.

    channel_args are matched positionally against CHANNEL_PARAMS
    ("id", "name", "desc"); extra args are ignored and missing ones are
    simply absent from the stored channel.

    Cleanup: removed a leftover debug print of the raw arguments and the
    redundant list() wrapper around channel_args.
    """
    data = load_json_data(filepath)
    channel = OrderedDict(zip(CHANNEL_PARAMS, channel_args))
    if not data.get("channels"):
        data["channels"] = []
    data["channels"].append(channel)
    write_json_data(filepath, data)
def delete_channel(filepath, identifier):
    """Remove the first channel whose id or name equals identifier."""
    data = load_json_data(filepath)
    channels = data.get("channels", [])
    for index, channel in enumerate(channels):
        # Match on either field; only the first hit is removed.
        if identifier in (channel["id"], channel["name"]):
            del channels[index]
            break
    write_json_data(filepath, data)
def load_json_data(filepath):
    """Read and return the JSON document stored at filepath.

    Opens read-only: the previous "r+" mode required write permission
    (and fails on read-only files) even though nothing is written here.
    """
    with open(filepath, "r") as f:
        return json.load(f)
def write_json_data(filepath, data):
    """Serialize data as JSON, replacing the file's previous contents.

    Opens with "w" instead of "w+": the file is only written, never read
    back, so the readable handle was unnecessary.
    """
    with open(filepath, "w") as f:
        json.dump(data, f)
def main():
    """Entry point: dispatch to create or delete based on the CLI flags."""
    args = parse_args()
    # --create wins when both flags are given (previous behavior).
    if args.create:
        create_channel(args.filepath, args.create)
    elif args.delete:
        delete_channel(args.filepath, args.delete)


if __name__ == "__main__":
    main()
"""Initialize the Amplifier objects."""
import math
from rfdesigner import const
from rfdesigner.components import Generic, SUPPORTED
# Amplifiers support the generic categories plus the bandwidth attributes.
AMP_SUPPORTED = [const.ATTR_F3DB, const.ATTR_FBW] + SUPPORTED
class Amplifier(Generic):
    """Representation of a generic amplifier."""
    def __init__(self, **kwargs):
        """
        Initialize a generic amplifier object.
        Extends the Generic class with the following options inputs.
        :param f3db: 3dB bandwidth of the amplifier in MHz
        :param fbw: cutoff frequency of the amplifier in MHz
        """
        super().__init__(**kwargs)
        # Both bandwidths default to "unlimited" when not specified.
        self.f3db = kwargs.get("f3db", math.inf)
        self.fbw = kwargs.get("fbw", math.inf)
    @property
    def supported(self):
        """Return list of supported categories."""
        # Overrides Generic's list with the amplifier-specific one.
        return AMP_SUPPORTED
class LNA(Amplifier):
    """Representation of an LNA (low-noise amplifier).

    Behaviorally identical to Amplifier; the subclass exists so designs
    can distinguish component roles by type.
    """
class PowerAmp(Amplifier):
    """Representation of a Power Amplifier.

    Behaviorally identical to Amplifier; the subclass exists so designs
    can distinguish component roles by type.
    """
|
from collections import OrderedDict
import math
from typing import List
import torch
import torch.nn as nn
from torch import nn as nn
def apply_ema(teacher: torch.nn.Module, student: torch.nn.Module, decay: float) -> torch.nn.Module:
    """Update the teacher in-place as an EMA of the student and return it.

    Every teacher tensor shared by name with the student becomes
    decay * teacher + (1 - decay) * student.
    """
    teacher_state = teacher.state_dict()
    student_state = student.state_dict()
    blended = OrderedDict(
        (name, decay * teacher_state[name] + (1 - decay) * value)
        for name, value in student_state.items()
        if name in teacher_state
    )
    teacher.load_state_dict(blended)
    return teacher
class PositionalEncodingFourier(nn.Module):
    """
    Positional encoding relying on a fourier kernel matching the one used in the
    "Attention is all of Need" paper. The implementation builds on DeTR code
    https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
    """
    def __init__(self, hidden_dim=32, dim=768, temperature=10000):
        super().__init__()
        # 1x1 conv projects 2*hidden_dim sin/cos features to `dim` channels.
        self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
        self.scale = 2 * math.pi
        self.temperature = temperature
        self.hidden_dim = hidden_dim
        self.dim = dim
    def forward(self, B, H, W):
        # Returns a (B, dim, H, W) positional-encoding tensor.
        # No padding here, so the mask is all-False (every position valid).
        mask = torch.zeros(B, H, W).bool().to(self.token_projection.weight.device)
        not_mask = ~mask
        # Cumulative sums assign each position its (row, col) coordinate,
        # then both axes are normalized onto [0, 2*pi].
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        eps = 1e-6
        y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
        x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
        # Geometric frequency ladder, as in the transformer sinusoidal PE.
        dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=mask.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.hidden_dim)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin (even channels) with cos (odd channels).
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(),
                             pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(),
                             pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        pos = self.token_projection(pos)
        return pos
class DropPath(nn.Module):
    """Randomly zero elements with probability p and rescale the survivors.

    NOTE(review): the mask is drawn per-element, not per-sample, so this
    behaves like (inverted) dropout rather than the usual stochastic-depth
    "drop path" — confirm this is intended.
    """

    def __init__(self, p=0.0):
        super().__init__()
        assert 0. <= p <= 1.
        self.p = p

    def forward(self, input_vector):
        # Identity at eval time or when dropping is disabled.
        if not self.training or self.p == 0.0:
            return input_vector
        keep = (torch.rand(input_vector.shape, device=input_vector.device) > self.p).long()
        # Inverted scaling keeps the expected value unchanged.
        return input_vector / (1. - self.p) * keep
class Sequential(nn.Module):
    """Sequential container whose blocks also receive extra call arguments.

    Unlike nn.Sequential, forward passes *args/**kwargs through to every
    block (useful when all blocks share an extra input such as a
    positional encoding).
    """

    def __init__(self, blocks: List[nn.Module]):
        super(Sequential, self).__init__()
        # Bug fix: a plain Python list hides the blocks from nn.Module, so
        # their parameters were invisible to .parameters(), .to(), and
        # state_dict(). ModuleList registers them and stays iterable.
        self.blocks = nn.ModuleList(blocks)

    def forward(self, x, *args, **kwargs):
        for block in self.blocks:
            x = block(x, *args, **kwargs)
        return x
import pytest
import web3
from web3 import Web3
from computable.helpers.transaction import call
def test_deploy(voting):
    """Sanity-check the deployed voting fixture.

    Both the owning account and the contract address are expected to be
    42-character strings (presumably '0x' + 40 hex digits) and must
    differ from each other.
    """
    assert len(voting.account) == 42
    assert len(voting.address) == 42
    assert voting.account != voting.address
def test_is_candidate(voting):
    """An arbitrary, never-registered hash must not be a candidate."""
    # Renamed from `hash` to avoid shadowing the builtin; `assert not`
    # replaces the `== False` anti-idiom.
    digest = Web3.sha3(text='nope')
    assert not call(voting.is_candidate(digest))
def test_candidate_is(voting):
    """candidate_is must reject an unknown hash for kind 1."""
    # Renamed from `hash` to avoid shadowing the builtin; `assert not`
    # replaces the `== False` anti-idiom.
    digest = Web3.sha3(text='stillnope')
    assert not call(voting.candidate_is(digest, 1))
|
def column_index(ref):
    """Convert a spreadsheet column reference ('A'..'Z', 'AA'...) to a 1-based index.

    Letters are treated as base-26 digits where 'A' is 1.
    """
    total = 0
    for letter in ref:
        total = total * 26 + (ord(letter) - ord('A') + 1)
    return total
def test():
    """Spot-check column_index against known references."""
    cases = {'A': 1, 'Z': 26, 'AA': 27, 'AZ': 52}
    for ref, expected in cases.items():
        assert column_index(ref) == expected
def main():
    """Run the self-tests and report success."""
    test()
    # print() call form: the Python-2 statement `print 'pass'` is a
    # SyntaxError on Python 3.
    print('pass')

if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengjb
@ date : 2021/3/13
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : pengjianbiao@hotmail.com
-------------------------------------------------
Description :
-------------------------------------------------
"""
import os
# Python 读取一个目录下的所有文件
# recursion 设置为 false 就不循环读取子目录,默认为 True
def get_all_files(path, recursion=True, level=0):
    """Collect the files under ``path``, printing an indented tree as it goes.

    Hidden directories (names starting with '.') are skipped entirely.

    :param path: directory to scan
    :param recursion: when False, do not descend into subdirectories
        (subdirectories are then neither printed nor visited)
    :param level: current depth, used only for print indentation
    :returns: list of file paths in the form ``path + '/' + name``
    """
    collected = []
    subdirs = []
    filenames = []
    for entry in os.listdir(path):
        full = path + '/' + entry
        if os.path.isdir(full):
            # skip hidden directories — there tend to be many of them
            if not entry.startswith('.'):
                subdirs.append(entry)
        if os.path.isfile(full):
            filenames.append(entry)
    if recursion:
        for dirname in subdirs:
            # echo the directory, then recurse one level deeper
            print(' ' * level + '-', dirname)
            collected.extend(get_all_files(path + '/' + dirname, recursion, level + 1))
    for filename in filenames:
        # echo the file and record its path
        print(' ' * level + '-', filename)
        collected.append(path + '/' + filename)
    return collected
# ---------------------------------------------------------------------------
# Split per-task CULane image lists into train/val output lists.
# NOTE(review): all paths below are machine-specific; adjust before running.
# ---------------------------------------------------------------------------
base_dir = '/home/lab1008/Desktop/datasets/CULane'
val_path = os.path.join(base_dir, 'list/val.txt')
train_path = os.path.join(base_dir, 'list/train.txt')
my_save_dir = os.path.join(base_dir, 'my_list/lane_task')
my_dir = os.path.join(base_dir, 'my_list')
# Only files directly under my_list (no recursion into subdirectories).
my_files = get_all_files(my_dir, recursion=False)
val_imgs = set()
train_imgs = set()
# Load the official train/val image lists into sets for O(1) membership tests.
with open(train_path, 'r') as tp:
    lines = tp.readlines()
    for line in lines:
        line = line.strip('\n')
        train_imgs.add(line)
with open(val_path, 'r') as tp:
    lines = tp.readlines()
    for line in lines:
        line = line.strip('\n')
        val_imgs.add(line)
for file_path in my_files:
    file_name = os.path.basename(file_path)
    # File names look like '<prefix>_<task...>'; the parts after the first
    # '_' name the task and are reused for the output file names below.
    img_type = file_name.split('_')
    print('file_path,file_name,img_type', file_path, file_name, img_type)
    with open(file_path, 'r') as tp:
        lines = tp.readlines()
        for line in lines:
            line = line.strip('\n')
            img_path, a = line.split()
            # Keep only the last three path components, with a leading '/',
            # to match the format used in the official lists.
            img_paths = img_path.split('/')
            img_path = '/' + os.path.join(*img_paths[-3:])
            # Route the image into the val/train output file for this task.
            if img_path in val_imgs:
                save_file_name = '_'.join(['val'] + img_type[1:])
                save_path = os.path.join(my_save_dir, save_file_name)
                with open(save_path, 'a') as wp:
                    wp.write(img_path + '\n')
            if img_path in train_imgs:
                save_file_name = '_'.join(['train'] + img_type[1:])
                save_path = os.path.join(my_save_dir, save_file_name)
                with open(save_path, 'a') as wp:
                    wp.write(img_path + '\n')
# Echo everything that was written.
res = get_all_files(my_save_dir)
for r in res:
    print(r)
# Trifinger-with-cube simulation driven over LCM; logs [state; input] at
# output_dt and runs for 10 seconds of simulated time.
from pydrake.all import *
import numpy as np  # explicit import: `np` is used below for the initial state

from pydairlib.multibody import (addFlatTerrain, makeNameToPositionsMap)
from pydairlib.systems import AddActuationRecieverAndStateSenderLcm
import pydairlib.common

# Load the URDF and the cube
builder = DiagramBuilder()
sim_dt = 1e-4
output_dt = 5e-4
plant, scene_graph = AddMultibodyPlantSceneGraph(builder, sim_dt)
addFlatTerrain(plant=plant, scene_graph=scene_graph, mu_static=1.0,
               mu_kinetic=1.0)

# The package addition here seems necessary due to how the URDF is defined
parser = Parser(plant)
parser.package_map().Add("robot_properties_fingers",
                         "examples/trifinger/robot_properties_fingers")
parser.AddModelFromFile(pydairlib.common.FindResourceOrThrow(
    "examples/trifinger/robot_properties_fingers/urdf/trifinger_minimal_collision.urdf"))
parser.AddModelFromFile(pydairlib.common.FindResourceOrThrow(
    "examples/trifinger/robot_properties_fingers/cube/cube_v2.urdf"))

# Fix the base of the finger to the world
X_WI = RigidTransform.Identity()
plant.WeldFrames(plant.world_frame(), plant.GetFrameByName("base_link"), X_WI)
plant.Finalize()

drake_lcm = DrakeLcm()
lcm = builder.AddSystem(LcmInterfaceSystem(drake_lcm))
passthrough = AddActuationRecieverAndStateSenderLcm(
    builder=builder, plant=plant, lcm=lcm, actuator_channel="TRIFINGER_INPUT",
    state_channel="TRIFINGER_OUTPUT", publish_rate=1/output_dt,
    publish_efforts=True, actuator_delay=0.0)

# Construct the simulator and visualizer
DrakeVisualizer.AddToBuilder(builder=builder, scene_graph=scene_graph)

# Data logging [x;u]
nq = plant.num_positions()
nv = plant.num_velocities()
nu = plant.num_actuators()
logger = builder.AddSystem(VectorLogSink(nq + nv + nu, output_dt))

# Multiplex state and input for logger
mux = builder.AddSystem(Multiplexer([nq + nv, nu]))
builder.Connect(plant.get_state_output_port(), mux.get_input_port(0))
builder.Connect(passthrough.get_output_port(), mux.get_input_port(1))
builder.Connect(mux.get_output_port(0), logger.get_input_port(0))

diagram = builder.Build()
simulator = Simulator(diagram)
simulator.set_publish_every_time_step(False)
simulator.set_publish_at_initialization(False)

# Change the real-time rate to above 1 to simulate faster
simulator.set_target_realtime_rate(1)
plant_context = diagram.GetMutableSubsystemContext(
    plant, simulator.get_mutable_context())

# Set the initial state: all three fingers bent, cube resting near the floor.
q = np.zeros(nq)
q_map = makeNameToPositionsMap(plant)
q[q_map['finger_base_to_upper_joint_0']] = 0
q[q_map['finger_upper_to_middle_joint_0']] = -1
q[q_map['finger_middle_to_lower_joint_0']] = -1.5
# BUG FIX: this line previously re-set 'finger_base_to_upper_joint_0',
# leaving the 120-degree finger's base joint uninitialized.
q[q_map['finger_base_to_upper_joint_120']] = 0
q[q_map['finger_upper_to_middle_joint_120']] = -1
q[q_map['finger_middle_to_lower_joint_120']] = -1.5
q[q_map['finger_base_to_upper_joint_240']] = 0
q[q_map['finger_upper_to_middle_joint_240']] = -1
q[q_map['finger_middle_to_lower_joint_240']] = -1.5
# Cube pose: identity orientation (base_qy keeps its zero default from q).
q[q_map['base_qw']] = 1
q[q_map['base_qx']] = 0
q[q_map['base_qz']] = 0
q[q_map['base_x']] = 0
q[q_map['base_y']] = 0
q[q_map['base_z']] = .05
plant.SetPositions(plant_context, q)

simulator.Initialize()
# Simulate for 10 seconds
simulator.AdvanceTo(10)

# numpy array of data (nq+nv+nu) x n_time
data = logger.FindLog(simulator.get_context()).data()
|
"""network-related column changes
Revision ID: 23ad5a12e1fb
Revises: 8fed83f88b6b
Create Date: 2017-07-08 07:29:34.535717
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
Session = sessionmaker()
# revision identifiers, used by Alembic.
revision = '23ad5a12e1fb'
down_revision = '8fed83f88b6b'
branch_labels = None
depends_on = None
def upgrade():
    """Convert the address-like varchar columns to native PostgreSQL
    inet/cidr/macaddr types (with a USING cast of the existing data)."""
    bind = op.get_bind()
    session = Session(bind=bind)
    conversions = [
        ('lease', 'ipv4', 'inet'),
        ('interface', 'static_ipv4', 'inet'),
        ('interface', 'reserved_ipv4', 'inet'),
        ('network', 'subnet', 'cidr'),
        ('network', 'reserved_net', 'cidr'),
        ('network', 'static_net', 'cidr'),
        ('"discoveredMAC"', 'mac', 'macaddr'),
        ('lease', 'mac', 'macaddr'),
        ('interface', 'mac', 'macaddr'),
    ]
    for table, column, pg_type in conversions:
        session.execute(sa.sql.text(
            'ALTER TABLE {0} ALTER COLUMN {1} TYPE {2} USING({1}::{2})'
            .format(table, column, pg_type)))
def downgrade():
    """Revert the inet/cidr/macaddr columns back to plain varchar."""
    bind = op.get_bind()
    session = Session(bind=bind)
    reversions = [
        ('lease', 'ipv4'),
        ('interface', 'static_ipv4'),
        ('interface', 'reserved_ipv4'),
        ('network', 'subnet'),
        ('network', 'reserved_net'),
        ('network', 'static_net'),
        ('"discoveredMAC"', 'mac'),
        ('lease', 'mac'),
        ('interface', 'mac'),
    ]
    for table, column in reversions:
        session.execute(sa.sql.text(
            'ALTER TABLE {0} ALTER COLUMN {1} TYPE varchar USING({1}::varchar)'
            .format(table, column)))
|
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from optin.utils import USER_MODEL
# Create your models here.
BOOL_CHOICES = ((True, 'Yes'), (False, 'No'))
class Category (models.Model):
    ''' Model describing the different things that the user can opt in or out of
    '''
    # Short human-readable name of the opt-in category.
    title = models.CharField (
        verbose_name = 'Title',
        max_length = 255
    )
    # Shown to the user so they understand what this category is used for.
    description = models.TextField(
        verbose_name = 'Description',
        help_text = 'Description displayed to user of the usage of this Category'
    )
    # Preference applied when a UserOptin row is saved without an explicit
    # choice (see UserOptin.save).
    default = models.BooleanField(
        verbose_name = 'Default',
        default = False,
        help_text = 'Default used for new User Preference',
        choices=BOOL_CHOICES
    )
    def __str__(self):
        return self.title
class UserOptin(models.Model):
    '''Model describing the user selection for each Category.'''
    user = models.ForeignKey(
        USER_MODEL,
        related_name='optin',
        on_delete=models.CASCADE
    )
    category = models.ForeignKey(
        Category,
        on_delete=models.CASCADE
    )
    # None means "no explicit choice yet"; filled from Category.default on save.
    # NOTE(review): NullBooleanField is deprecated since Django 3.1 —
    # BooleanField(null=True) is the replacement (requires a migration).
    option = models.NullBooleanField(
        verbose_name='User Preference',
        null=True,
        blank=True,
        help_text='Select to opt in',
        choices=BOOL_CHOICES
    )

    class Meta:
        unique_together = ['user', 'category']

    def __str__(self):
        return str(self.user) + ' ' + str(self.category)

    def save(self, *args, **kwargs):
        # Apply the category default only when no explicit choice was made.
        # The previous check (`if not self.option`) also fired when the user
        # explicitly chose False, silently overwriting an opt-out with the
        # category default.
        if self.option is None:
            self.option = self.category.default
        super(UserOptin, self).save(*args, **kwargs)
from __future__ import division
import numpy as np
import sklearn
from sklearn.neighbors import KNeighborsClassifier
from brew.base import Ensemble
from brew.combination.combiner import Combiner
from brew.preprocessing.smote import smote
from .base import PoolGenerator
class SmoteBagging(PoolGenerator):
    """Bagging ensemble generator that oversamples minority classes with
    SMOTE inside each bootstrap sample (SMOTEBagging).

    The per-classifier resampling rate ``b`` sweeps 10, 20, ..., 100 and
    wraps around, so ensemble members see differently balanced samples.
    """
    def __init__(self, base_classifier=None,
                 n_classifiers=100,
                 combination_rule='majority_vote', k=5):
        # k: number of nearest neighbours used by SMOTE when synthesizing
        # minority-class samples.
        self.k = k
        self.n_classifiers = n_classifiers
        self.base_classifier = base_classifier
        self.ensemble = None
        self.combiner = Combiner(rule=combination_rule)
    def smote_bootstrap_sample(self, X, y, b, k):
        """Return one rebalanced bootstrap sample as ``(data, target)``.

        Majority class: plain 100% bootstrap. Each minority class:
        bootstrap at a rate scaled by ``b``, then top up to the majority
        count with SMOTE-synthesized samples.

        Assumes ``y`` holds non-negative integer labels (np.bincount).
        NOTE(review): the ``k`` argument is unused; the method reads
        ``self.k`` instead — confirm which was intended.
        """
        classes = np.unique(y)
        count = np.bincount(y)  # number of instances of each class
        majority_class = count.argmax()  # label of the majority class
        majority_count = count.max()  # size of the majority class
        data = np.empty((0, X.shape[1]))
        target = np.empty((0,))
        for i in classes:
            class_data = X[(y == i), :]
            if i == majority_class:  # majority class
                # regular bootstrap (i.e. 100% sampling rate)
                idx = np.random.choice(majority_count, (majority_count,))
                data = np.concatenate((data, class_data[idx, :]))
                target = np.concatenate(
                    (target, i * np.ones((majority_count,))))
            else:  # minority classes
                # bootstrap the class data with defined sampling rate
                sample_rate = (majority_count /
                               class_data.shape[0]) * (b / 100)
                idx = np.random.choice(
                    class_data.shape[0], (int(sample_rate * class_data.shape[0]),))  # noqa
                sampled_class_data = class_data[idx, :]
                # run smote on bootstrapped data to obtain synthetic samples;
                # ceil to make sure N_smote is a multiple of 100, and the small
                # value to avoid a zero
                N_smote = int(np.ceil(
                    (majority_count / sampled_class_data.shape[0]) * (1 - b / 100 + 10e-8)) * 100)  # noqa
                synthetic = smote(sampled_class_data, N=N_smote, k=self.k)
                # add synthetic samples to fill the gap up to majority_count
                n_missing = majority_count - sampled_class_data.shape[0]
                idx = np.random.choice(synthetic.shape[0], (n_missing,))
                new_class_data = np.concatenate(
                    (sampled_class_data, synthetic[idx, :]))
                data = np.concatenate((data, new_class_data))
                target = np.concatenate(
                    (target, i * np.ones((new_class_data.shape[0],))))
        return data, target
    def fit(self, X, y):
        """Fit ``n_classifiers`` clones of the base classifier, each on its
        own SMOTE-rebalanced bootstrap sample."""
        self.ensemble = Ensemble()
        # this parameter should change between [10, 100] with
        # increments of 10, for every classifier in the ensemble
        b = 10
        for i in range(self.n_classifiers):
            data, target = self.smote_bootstrap_sample(
                X, y, b=float(b), k=self.k)
            classifier = sklearn.base.clone(self.base_classifier)
            classifier.fit(data, target)
            self.ensemble.add(classifier)
            if b >= 100:
                b = 10
            else:
                b += 10
        return
    def predict(self, X):
        """Predict labels for ``X`` by combining the ensemble outputs."""
        out = self.ensemble.output(X)
        return self.combiner.combine(out)
class SmoteBaggingNew(SmoteBagging):
    """Two-class variant of SmoteBagging: the minority class is rebuilt from
    a resampled part plus a SMOTE-synthesized part, sized by ``b``."""
    def fit(self, X, y):
        """Same sweep of ``b`` as SmoteBagging.fit, using this class's
        two-class smote_bootstrap_sample."""
        self.ensemble = Ensemble()
        # this parameter should change between [10, 100] with
        # increments of 10, for every classifier in the ensemble
        b = 10
        for i in range(self.n_classifiers):
            data, target = self.smote_bootstrap_sample(
                X, y, b=float(b), k=self.k)
            classifier = sklearn.base.clone(self.base_classifier)
            classifier.fit(data, target)
            self.ensemble.add(classifier)
            if b >= 100:
                b = 10
            else:
                b += 10
        return
    def smote_bootstrap_sample(self, X, y, b, k):
        """Return ``(data, target)``: a 100% bootstrap of the majority class
        plus a minority class rebuilt from resampled + synthetic points.

        Assumes ``y`` holds non-negative integer labels; only the argmin
        class is treated as minority. ``k`` is unused (reads ``self.k``).
        """
        count = np.bincount(y)  # number of instances of each class
        majority_class = count.argmax()  # label of the majority class
        majority_count = count.max()  # size of the majority class
        data = np.empty((0, X.shape[1]))
        target = np.empty((0,))
        class_data = X[(y == majority_class), :]
        idx = np.random.choice(majority_count, (majority_count,))
        data = np.concatenate((data, class_data[idx, :]))
        target = np.concatenate(
            (target, majority_class * np.ones((majority_count,))))
        minority_class = count.argmin()
        minority_count = count.min()
        N_syn = int((majority_count) * (b / 100))
        N_res = majority_count - N_syn
        # NOTE(review): this swap makes N_syn = majority_count - b% and
        # N_res = b%, the opposite of what the names suggest — confirm
        # against the intended SMOTEBagging schedule.
        N_syn, N_res = N_res, N_syn
        class_data = X[(y == minority_class), :]
        idx = np.random.choice(class_data.shape[0], (N_res,))
        sampled_min_data = class_data[idx, :]
        if N_syn > 0:
            # round the SMOTE rate up to a positive multiple of 100
            N_smote = np.ceil(N_syn / minority_count) * 100
            N_smote = 100 if N_smote < 100 else int(N_smote - N_smote % 100)
            synthetic = smote(X[y == minority_class], N=int(N_smote), k=self.k)
            idx = np.random.choice(synthetic.shape[0], (N_syn,))
            new_class_data = np.concatenate(
                (sampled_min_data, synthetic[idx, :]))
            data = np.concatenate((data, new_class_data))
            target = np.concatenate(
                (target, minority_class * np.ones((new_class_data.shape[0],))))
        else:
            # nothing synthetic needed; use the resampled minority data as-is
            data = np.concatenate((data, sampled_min_data))
            target = np.concatenate(
                (target, minority_class * np.ones((sampled_min_data.shape[0],))))  # noqa
        return data, target
if __name__ == '__main__':
    # Smoke test on a 4-class imbalanced random dataset (100/60/35/5).
    # class 0
    X0 = np.random.random((100, 2))
    y0 = 0 * np.ones((100,), dtype='int64')
    # class 1
    X1 = np.random.random((60, 2))
    y1 = 1 * np.ones((60,), dtype='int64')
    # class 2
    X2 = np.random.random((35, 2))
    y2 = 2 * np.ones((35,), dtype='int64')
    # class 3
    X3 = np.random.random((5, 2))
    y3 = 3 * np.ones((5,), dtype='int64')
    print('DATASET before:')
    print('class 0 : {}'.format(X0.shape))
    print('class 1 : {}'.format(X1.shape))
    print('class 2 : {}'.format(X2.shape))
    print('class 3 : {}'.format(X3.shape))
    print()
    X = np.concatenate((X0, X1, X2, X3))
    y = np.concatenate((y0, y1, y2, y3))
    # NOTE(review): this passes the KNeighborsClassifier *class*, not an
    # instance; sklearn.base.clone expects an estimator instance — confirm
    # this runs as intended (KNeighborsClassifier() seems more likely).
    knn = KNeighborsClassifier
    pool = SmoteBagging(base_classifier=knn, n_classifiers=5, k=3)
    pool.fit(X, y)
    # training-set accuracy of the combined ensemble
    print(np.sum(pool.predict(X) == y) / y.size)
|
import os.path
from contextlib import contextmanager, suppress
from tempfile import mkstemp
from io import UnsupportedOperation
class SigningService(object):
    """ A generic interface for various file signing services. """

    # service_name -> subclass, filled in by __init_subclass__
    registry = {}

    def __init_subclass__(cls, service_name, **kwargs):
        SigningService.registry[service_name] = cls
        cls.service_name = service_name
        super().__init_subclass__(**kwargs)

    @staticmethod
    def for_config(config):
        """ Spawns a service instance using the given config. """
        return SigningService.registry[config['name']](
            **config.get('parameters', {})
        )

    def __init__(self, **parameters):
        """ Signing services are initialised with parameters of their choosing.
        Typically those parameters are read from a yaml file::
            name: service_name
            parameters:
                foo: bar
                bar: foo
        This function may be overwritten by the subclass with concrete
        arguments. For example::
            class MyService(SigningService, name='my_service'):
                def __init__(self, user, password):
                    pass
        During initialisation through onegov.file.integration (and only then),
        the current path is set to the path of the config file.
        """
        self.parameters = parameters

    def sign(self, infile, outfile):
        """ Signs the input file and writes it to the given output file.
        Arguments
        =========
        If the input-file exists on disk, its ``file.name`` attribute points
        to an existing path.
        Subclasses may add extra parameters to this signing function, though
        it is expected that they all have a default value set.
        So it is okay to do this::
            def sign(self, infile, outfile, foo)
        But it would be better to do this::
            def sign(self, infile, outfile, foo='bar')
        Return Value
        ============
        The sign function *must* return a unique request id for each signed
        file. This function should be composed of the service name and a
        unique identifier. For example: 'my_service/0b86854'. Using this
        identifier it should be possible to query the signing service backend
        for more information (in case we ever need to investigate).
        It is up to the signing service to know what should be part of this
        unique identifier. The only thing that can't be part of the identifier
        are secrets.
        """
        raise NotImplementedError

    @contextmanager
    def materialise(self, file):
        """ Takes the given file-like object and ensures that it exists
        somewhere on the disk during the lifetime of the context.
        """
        with suppress(UnsupportedOperation):
            file.seek(0)
        if os.path.exists(file.name):
            yield file
        else:
            fd, path = mkstemp()
            try:
                # mkstemp hands back an already-open descriptor; close it
                # (previously leaked) since we re-open the path below.
                os.close(fd)
                with open(path, 'rb+') as output:
                    for chunk in iter(lambda: file.read(4096), b''):
                        output.write(chunk)
                    output.seek(0)
                    yield output
            finally:
                # remove the temp file even if the consumer raised
                os.unlink(path)
|
__author__='thiagocastroferreira'
import json
import nlg.structuring as structuring
import nlg.lexicalization as lexicalization
import nlg.reference as reference
from nlg.realization import Realization
class Generation:
    """End-to-end NLG pipeline: document structuring, lexicalization,
    referring-expression generation and surface realization."""

    def __init__(self, structuring_path, lexicalization_path, references_path, lexicon_path):
        # Load each JSON grammar through a context manager; the previous
        # `json.load(open(path))` form leaked the three file handles.
        with open(structuring_path) as f:
            self.structuring_grammar = json.load(f)
        with open(lexicalization_path) as f:
            self.lexicalization_grammar = json.load(f)
        with open(references_path) as f:
            self.references = json.load(f)
        self.realization = Realization(lexicon_path)

    def generate(self, messages, strategy='random'):
        """Run the full pipeline over ``messages``.

        :param messages: input messages to verbalize
        :param strategy: selection strategy passed to each stage
        :returns: tuple ``(struct, templates, paragraphs)``
        """
        # structuring
        paragraphs = structuring.generate(messages, self.structuring_grammar, strategy)
        # lexicalization
        templates, struct = lexicalization.generate(paragraphs, self.lexicalization_grammar, strategy)
        # referring expression generation
        paragraphs = reference.realize(templates, self.references, strategy)
        # surface realization
        paragraphs = self.realization.realize(paragraphs)
        return struct, templates, paragraphs
from pytest import fixture, raises
from py61850.types import Boolean
@fixture
def true():
    # A Boolean built from the Python value True (not from raw bytes).
    return Boolean(True)
# === DECODE ===
# Constructing Boolean from raw bytes: 0x01 and 0xFF (min/max raw encodings)
# both decode to True, 0x00 decodes to False; raw_value round-trips the byte.
def test_byte_true_min_raw_value():
    assert Boolean(b'\x01').raw_value == b'\x01'
def test_byte_true_min_value():
    assert Boolean(b'\x01').value is True
def test_byte_true_max_raw_value():
    assert Boolean(b'\xFF').raw_value == b'\xFF'
def test_byte_true_max_value():
    assert Boolean(b'\xFF').value is True
def test_byte_false_raw_value():
    assert Boolean(b'\x00').raw_value == b'\x00'
def test_byte_false_value():
    assert Boolean(b'\x00').value is False
# === TRUE ===
# Boolean(True) uses some nonzero raw encoding (anything but 0x00).
def test_true_value(true):
    assert true.value is True
def test_true_raw_value(true):
    assert true.raw_value != b'\x00'
# === FALSE ===
# Boolean(False) encodes as exactly 0x00.
def test_false_value():
    assert Boolean(False).value is False
def test_false_raw_value(true):
    # NOTE(review): the `true` fixture is unused here — could be dropped.
    assert Boolean(False).raw_value == b'\x00'
# === UNCHANGED VALUES ===
# Tag/length metadata is constant for Boolean: tag 0x83, payload length 1,
# so the full TLV encoding is 3 bytes (tag + length + value).
def test_raw_tag(true):
    assert true.raw_tag == b'\x83'
def test_tag(true):
    assert true.tag == 'Boolean'
def test_raw_length(true):
    assert true.raw_length == b'\x01'
def test_length(true):
    assert true.length == 1
def test_bytes():
    assert bytes(Boolean(False)) == b'\x83\x01\x00'
def test_len(true):
    assert len(true) == 3
# === EXCEPTIONS ===
# Invalid constructor arguments: wrong/missing type raises TypeError; raw
# byte strings of any length other than 1 raise ValueError.
def test_encode_decode():
    with raises(TypeError):
        Boolean(1)
def test_decode_below():
    with raises(ValueError):
        Boolean(b'')
def test_decode_above():
    with raises(ValueError):
        Boolean(b'\x00\x00')
def test_none():
    with raises(TypeError):
        Boolean(None)
def test_none_empty():
    with raises(TypeError):
        Boolean()
|
import unittest
import mock
import numpy as np
import pytest
from tensorkit import tensor as T
from tensorkit import *
from tensorkit.distributions import *
from tests.helper import *
class BayesianNetTestCase(TestCase):
    """Unit tests for BayesianNet: construction with/without observations,
    adding stochastic nodes, reparameterization handling, the output /
    log-prob / pair query APIs, and variational chain building."""

    def test_construct(self):
        """Constructing with and without an observation dict."""
        # no observation
        net = BayesianNet()
        self.assertEqual(len(net), 0)
        self.assertEqual(list(net), [])
        self.assertEqual(dict(net.observed), {})
        self.assertEqual(net._original_observed, {})
        self.assertEqual(net._stochastic_tensors, {})
        with pytest.raises(Exception):
            # `net.observed` should be read-only
            net.observed['x'] = T.zeros([])
        # with observation
        normal = UnitNormal([2, 3, 4])
        x = T.as_tensor(np.random.randn(3, 4))
        y = normal.sample()
        net = BayesianNet({'x': x, 'y': y})
        self.assertEqual(len(net), 0)
        self.assertEqual(list(net), [])
        self.assertEqual(list(net.observed), ['x', 'y'])
        # observed values are unwrapped to plain tensors, while the
        # originals (possibly StochasticTensors) are kept separately
        self.assertIs(net.observed['x'], x)
        self.assertIs(net.observed['y'], y.tensor)
        self.assertIs(net._original_observed['x'], x)
        self.assertIs(net._original_observed['y'], y)

    def test_add(self):
        """add() for observed and unobserved nodes, plus duplicate names."""
        x_observed = T.as_tensor(
            np.arange(24, dtype=np.float32).reshape([2, 3, 4]))
        net = BayesianNet({'x': x_observed})
        d = UnitNormal([3, 4])
        self.assertNotIn('x', net)
        self.assertNotIn('y', net)
        # add an observed node
        x = net.add('x', d, n_samples=2, group_ndims=1)
        self.assertIs(net.get('x'), x)
        self.assertIs(net['x'], x)
        self.assertIn('x', net)
        self.assertListEqual(list(net), ['x'])
        self.assertIsInstance(x, StochasticTensor)
        self.assertIs(x.distribution, d)
        self.assertEqual(x.n_samples, 2)
        self.assertEqual(x.group_ndims, 1)
        self.assertEqual(x.reparameterized, True)
        self.assertIs(x.tensor, x_observed)
        self.assertEqual(T.shape(x.tensor), [2, 3, 4])
        # add an unobserved node
        y = net.add('y', d, group_ndims=1, reparameterized=False)
        self.assertIs(net.get('y'), y)
        self.assertIs(net['y'], y)
        self.assertIn('y', net)
        self.assertListEqual(list(net), ['x', 'y'])
        self.assertIsInstance(y, StochasticTensor)
        self.assertIs(y.distribution, d)
        self.assertEqual(y.n_samples, None)
        self.assertEqual(y.group_ndims, 1)
        self.assertEqual(y.reparameterized, False)
        self.assertEqual(T.shape(y.tensor), [3, 4])
        # error adding the same variable
        with pytest.raises(
                ValueError,
                match="Stochastic tensor 'x' already exists."):
            _ = net.add('x', d)

    def test_add_reparameterized_arg(self):
        """How `reparameterized` interacts with the observed tensor."""
        normal = UnitNormal(shape=[2, 3])
        # test reparameterized: False — the observation must pass through
        # stop_grad exactly once
        with mock.patch('tensorkit.tensor.stop_grad',
                        mock.Mock(wraps=T.stop_grad)) as m:
            # TODO: switch to some other namespace when refactored
            x = normal.sample(5, reparameterized=True)
            self.assertTrue(x.reparameterized)
            net = BayesianNet({'x': x.tensor})
            t = net.add('x', normal, n_samples=5, reparameterized=False)
            self.assertFalse(t.reparameterized)
            self.assertTrue(m.call_count, 1)
            self.assertIs(m.call_args[0][0], x.tensor)
        # test inherit reparameterized from `x`
        x = normal.sample(5, reparameterized=True)
        self.assertTrue(x.reparameterized)
        net = BayesianNet({'x': x})
        t = net.add('x', normal, n_samples=5)
        self.assertEqual(t.reparameterized, x.reparameterized)
        assert_allclose(x.tensor, t.tensor)
        x = normal.sample(5, reparameterized=False)
        self.assertFalse(x.reparameterized)
        net = BayesianNet({'x': x})
        t = net.add('x', normal, n_samples=5)
        self.assertEqual(t.reparameterized, x.reparameterized)
        assert_allclose(x.tensor, t.tensor)
        # test override reparameterized: True -> False
        with mock.patch('tensorkit.tensor.stop_grad',
                        mock.Mock(wraps=T.stop_grad)) as m:
            x = normal.sample(5, reparameterized=True)
            self.assertTrue(x.reparameterized)
            net = BayesianNet({'x': x})
            t = net.add('x', normal, n_samples=5, reparameterized=False)
            self.assertFalse(t.reparameterized)
            self.assertTrue(m.call_count, 1)
            self.assertIs(m.call_args[0][0], x.tensor)
        # test cannot override reparameterized: False -> True
        x = normal.sample(5, reparameterized=False)
        self.assertFalse(x.reparameterized)
        net = BayesianNet({'x': x})
        with pytest.raises(ValueError,
                           match="`reparameterized` is True, but the "
                                 "observation for stochastic tensor 'x' is "
                                 "not re-parameterized"):
            _ = net.add('x', normal, n_samples=5, reparameterized=True)

    def test_outputs(self):
        """output()/outputs() return the underlying tensors."""
        x_observed = T.as_tensor(
            np.arange(24, dtype=np.float32).reshape([2, 3, 4]))
        net = BayesianNet({'x': x_observed})
        normal = UnitNormal([3, 4])
        x = net.add('x', normal)
        y = net.add('y', normal)
        # test single query
        x_out = net.output('x')
        self.assertIs(x_out, x.tensor)
        self.assertIsInstance(x_out, T.Tensor)
        assert_equal(x_out, x_observed)
        # test multiple query (accepts any iterable of names)
        x_out, y_out = net.outputs(iter(['x', 'y']))
        self.assertIs(x_out, x.tensor)
        self.assertIs(y_out, y.tensor)
        self.assertIsInstance(x_out, T.Tensor)
        self.assertIsInstance(y_out, T.Tensor)
        assert_equal(x_out, x_observed)

    def test_log_prob(self):
        """log_prob()/log_probs() match the distribution's log_prob."""
        x_observed = T.as_tensor(
            np.arange(24, dtype=np.float32).reshape([2, 3, 4]))
        net = BayesianNet({'x': x_observed})
        normal = UnitNormal([3, 4])
        x = net.add('x', normal)
        y = net.add('y', normal)
        # test single query
        x_log_prob = net.log_prob('x')
        self.assertIsInstance(x_log_prob, T.Tensor)
        assert_allclose(x_log_prob, normal.log_prob(x_observed))
        # test multiple query
        x_log_prob, y_log_prob = net.log_probs(iter(['x', 'y']))
        self.assertIsInstance(x_log_prob, T.Tensor)
        self.assertIsInstance(y_log_prob, T.Tensor)
        assert_allclose(x_log_prob, normal.log_prob(x_observed))
        assert_allclose(x_log_prob, normal.log_prob(x.tensor))
        assert_allclose(y_log_prob, normal.log_prob(y.tensor))

    def test_query_pair(self):
        """query_pair()/query_pairs() return (output, log_prob) tuples."""
        x_observed = T.as_tensor(
            np.arange(24, dtype=np.float32).reshape([2, 3, 4]))
        net = BayesianNet({'x': x_observed})
        normal = UnitNormal([3, 4])
        x = net.add('x', normal)
        y = net.add('y', normal)
        # test single query
        x_out, x_log_prob = net.query_pair('x')
        self.assertIsInstance(x_out, T.Tensor)
        self.assertIsInstance(x_log_prob, T.Tensor)
        self.assertIs(x_out, x.tensor)
        assert_allclose(x_log_prob, normal.log_prob(x_observed))
        # test multiple query
        [(x_out, x_log_prob), (y_out, y_log_prob)] = \
            net.query_pairs(iter(['x', 'y']))
        for o in [x_out, x_log_prob, y_out, y_log_prob]:
            self.assertIsInstance(o, T.Tensor)
        self.assertIs(x_out, x.tensor)
        self.assertIs(y_out, y.tensor)
        assert_allclose(x_log_prob, normal.log_prob(x_observed))
        assert_allclose(x_log_prob, normal.log_prob(x.tensor))
        assert_allclose(y_log_prob, normal.log_prob(y.tensor))

    def test_chain(self):
        """chain() feeds q-net samples (and optional observations) into the
        model builder and tracks latent names/axes."""
        q_net = BayesianNet({'x': T.ones([1])})
        q_net.add('z', Normal(q_net.observed['x'], T.float_scalar(1.)))
        q_net.add('y', Normal(q_net.observed['x'] * 2, T.float_scalar(2.)))
        def net_builder(observed):
            net = BayesianNet(observed)
            z = net.add('z', UnitNormal([1]))
            y = net.add('y', Normal(T.zeros([1]), T.full([1], 2.)))
            x = net.add('x', Normal(z.tensor + y.tensor, T.ones([1])))
            return net
        # wrap so call arguments can be inspected below
        net_builder = mock.Mock(wraps=net_builder)
        # test chain with default parameters
        chain = q_net.chain(net_builder)
        self.assertEqual(
            net_builder.call_args,
            (({'y': q_net['y'], 'z': q_net['z']},),)
        )
        self.assertEqual(chain.latent_names, ['z', 'y'])
        self.assertIsNone(chain.latent_axis)
        # test chain with latent_names
        chain = q_net.chain(net_builder, latent_names=['y'])
        self.assertEqual(
            net_builder.call_args,
            (({'y': q_net['y']},),)
        )
        self.assertEqual(chain.latent_names, ['y'])
        # test chain with latent_axis (scalar is normalized to a list)
        chain = q_net.chain(net_builder, latent_axis=-1)
        self.assertEqual(chain.latent_axis, [-1])
        chain = q_net.chain(net_builder, latent_axis=[-1, 2])
        self.assertEqual(chain.latent_axis, [-1, 2])
        # test chain with observed
        chain = q_net.chain(net_builder, observed=q_net.observed)
        self.assertEqual(
            net_builder.call_args,
            (({'x': q_net.observed['x'], 'y': q_net['y'], 'z': q_net['z']},),)
        )
        self.assertEqual(chain.latent_names, ['z', 'y'])
|
# "key:value" pairs separated by ';'. The previous `dict(ssss)` call raised
# ValueError: dict() iterates a string character by character and cannot
# build key/value pairs from it — the spec must be parsed explicitly.
ssss = "s:3; p:5; m:4"

def parse_pairs(spec):
    """Parse a 'k:v; k:v; ...' spec string into a dict with int values."""
    return {key.strip(): int(value) for key, value in
            (pair.split(':') for pair in spec.split(';'))}

print(parse_pairs(ssss))
from aiogram import types
from aiogram.dispatcher.handler import SkipHandler
from aiogram.utils.markdown import hlink, hbold, hide_link
from core.misc import dp, mp, bot
@dp.message_handler(lambda m: m.chat.username == 'ru2chhw' and m.message_id == 3999999)
async def watch_get(m: types.Message, user: dict, chat: dict):
    """Celebrate message #4,000,000 in @ru2chhw: post the GET banner, try to
    pin it, and send the admin a link to it."""
    get = await bot.send_message(m.chat.id, text=hbold('🎉 GET /hw/ 4️⃣0️⃣0️⃣0️⃣0️⃣0️⃣0️⃣'))
    try:
        await bot.pin_chat_message(m.chat.id, get.message_id, disable_notification=False)
    except Exception:
        # pinning requires admin rights; the announcement alone is fine
        pass
    await bot.send_message(94026383, text='GET @{} {}'.format(m.chat.username, hlink(str(get.message_id),
                                                                                     'https://t.me/{}/{}'.format(
                                                                                         m.chat.username,
                                                                                         get.message_id))))
    # SkipHandler must be *raised* for aiogram to pass the update on to the
    # next handler; the previous code created the exception and discarded it.
    raise SkipHandler()
@dp.message_handler(lambda m: (m.chat.username == 'ru2chhw' and m.message_id == 5999999) or (
        m.from_user.id == 94026383 and m.chat.type == 'private' and m.text == '/get6kk'))
async def watch_get_six(m: types.Message, user: dict, chat: dict):
    """Celebrate message #6,000,000 (or the admin's /get6kk test command):
    post the banner with a preview image, try to pin it, notify the admin."""
    pic_url = 'https://user-images.githubusercontent.com/24507532/97610210-a0decc00-1a25-11eb-9622-7b1e3536dfc0.png'
    get = await bot.send_message(m.chat.id, text=hide_link(pic_url) + hbold(
        '🎉 GET /hw/ 6️⃣0️⃣0️⃣0️⃣0️⃣0️⃣0️⃣') + '\n\n' + "Забавный факт - если поделить гет на 100, то получится цена RTX 3070!")
    try:
        await bot.pin_chat_message(m.chat.id, get.message_id, disable_notification=False)
    except Exception:
        # pinning requires admin rights; the announcement alone is fine
        pass
    await bot.send_message(94026383, text='GET @{} {}'.format(m.chat.username, hlink(str(get.message_id),
                                                                                    'https://t.me/{}/{}'.format(
                                                                                        m.chat.username,
                                                                                        get.message_id))))
    # SkipHandler must be *raised* for aiogram to pass the update on to the
    # next handler; the previous code created the exception and discarded it.
    raise SkipHandler()
|
from setuptools import setup
from setuptools.extension import Extension
setup(
    name='tps',
    version='0.2',
    description='Thin plate spline transformation',
    author='Oliver Tonnhofer',
    author_email='olt@omniscale.de',
    license='MIT',
    packages=['tps', 'tps.test'],
    classifiers=[
        "Development Status :: 4 - Beta",
        # Full registered trove classifier; the bare
        # "License :: OSI Approved :: MIT" is not in the PyPI registry.
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: C",
        "Programming Language :: C++",
        "Programming Language :: Python :: 2.5",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Topic :: Scientific/Engineering",
    ],
    test_suite='tps.test.test_suite',
    ext_modules=[
        # C++ extension wrapping the thin-plate-spline implementation
        Extension("tps._tps", ["tps/_tps.cpp", "tps/thinplatespline.cpp"],
                  libraries=["stdc++"]),
    ],
)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Wprowadzenie do Informatyki (Moduł 1)
# Temat: 9. Zaprogramuj w języku Python (002) program typu przelicznik, który pozwala na przeliczanie
# stopni celsiusa na stopnie Fahrenheita i na odwrót lub kalkulator walutowy, który pozwala
# na przeliczenie cebulionów na v-dolary). W tym ostatnim przypadku można opcjonalnie
# rozszerzyć funkcjonalność programu o pobieranie danych z zewnętrznych źródeł (np.
# aktualny kurs dolara), ale na początek wystarczy wpisać obecną wartość z tabeli kursów NBP).
# Authors: Jakub Sydor <jakusyd988@student.polsl.pl>, Przemysław Nowak <przenow638@student.polsl.pl>
import re
import os
def fahrenheit_to_celsius(value: float) -> float:
    """Convert degrees Fahrenheit to degrees Celsius.

    :param value: temperature in degrees Fahrenheit
    :type value: float
    :returns: the Celsius equivalent, rounded to 2 decimal places
    :rtype: float
    :Example:
    >>> fahrenheit_to_celsius(0)
    -17.78
    """
    celsius = (value - 32) / 1.8
    return round(celsius, 2)
def celsius_to_fahrenheit(value: float) -> float:
    """Convert degrees Celsius to degrees Fahrenheit.

    :param value: temperature in degrees Celsius
    :type value: float
    :returns: the Fahrenheit equivalent, rounded to 2 decimal places
    :rtype: float
    :Example:
    >>> celsius_to_fahrenheit(-17.78)
    -0.0
    """
    fahrenheit = value * 1.8 + 32
    return round(fahrenheit, 2)
def format_temp_unit(value: float, unit: str) -> str:
    """Attach the matching degree symbol to a temperature value.

    :param value: temperature value
    :param unit: 'C'/'c' for Celsius, 'F'/'f' for Fahrenheit; anything
        else yields the bare value
    :type value: float
    :type unit: str
    :returns: value with a degree symbol appended (or plain str fallback)
    :rtype: str
    :Example:
    >>> format_temp_unit(-17.78, 'c')
    '-17.78℃'
    """
    unit_key = unit.lower()
    if unit_key == 'c':
        return f"{value}℃"
    if unit_key == 'f':
        return f"{value}℉"
    return str(value)
def get_user_input() -> float:
    """Prompt on the console until the user types a valid float.

    The input is validated against a float-looking pattern before being
    converted, so ``float()`` can never raise here.

    :returns: the value the user entered
    :rtype: float
    :Example:
    >>> a = get_user_input()
    Podaj wartość: 12.3
    >>> print(a)
    12.3
    """
    float_pattern = re.compile('^[+-]?([0-9]*[.])?[0-9]+$')
    while True:
        val = input("Podaj wartość: ")
        if float_pattern.match(val):
            return float(val)
        print(f"Wartość '{val}', musi byc typu float")
def get_menu_option() -> int:
    """Prompt until the user picks a valid menu option.

    :returns: 1 or 2 for the conversion choices
    :rtype: int
    :raises SystemExit: when the user types 'q'
    :Example:
    >>> a = get_menu_option()
    Twój wybór: 1
    >>> print(a)
    1
    """
    while True:
        val = input("Twój wybór: ").strip().lower()
        if val == 'q':
            # quit() is intended for interactive sessions only; SystemExit
            # works everywhere.  (The original also had an unreachable
            # `return 0` after quit().)
            raise SystemExit(0)
        if val in ('1', '2'):
            return int(val)
        print(f"Wartość '{val}' nie jest opcją menu. Spróbuj raz jeszcze...")
def print_header():
    """Clear the console and print the application banner.

    :Example:
    >>> print_header()
    Wprowadzenie do Informatyki (Moduł 1)
      by Jakub Sydor, Przemysław Nowak
    """
    # Windows uses 'cls'; unix-like systems use 'clear'.
    clear_command = "cls" if os.name == "nt" else "clear"
    os.system(clear_command)
    print("Wprowadzenie do Informatyki (Moduł 1)")
    print(" by Jakub Sydor, Przemysław Nowak")
    print("")
def app_menu():
    """Run the interactive converter menu.

    Bug fix: the original ended with ``return app_menu()`` — each round
    added a stack frame, so a long session would eventually crash with
    RecursionError.  An explicit loop has identical behaviour without
    unbounded recursion.
    """
    while True:
        print_header()
        print("=== Menu kalkulatora ===")
        print(" 1: Celsjusz -> Fahrenheit ")
        print(" 2: Fahrenheit -> Celsjusz ")
        print(" q: Wyjdź ")
        choose = get_menu_option()
        base_value = get_user_input()
        print_header()
        print('\033[1m', end='')  # start Bold output
        if choose == 1:
            print(f"{format_temp_unit(base_value, 'c')} jest to {format_temp_unit(celsius_to_fahrenheit(base_value), 'f')}")
        elif choose == 2:
            print(f"{format_temp_unit(base_value, 'f')} jest to {format_temp_unit(fahrenheit_to_celsius(base_value), 'c')}")
        print('\033[0m', end='')  # end Bold output
        print()
        print("Kliknij enter, aby zacząć od nowa")
        input()
# Script entry point: start the interactive menu when run directly.
if __name__ == '__main__':
    app_menu()  # Run app menu
|
from abc import ABC, abstractmethod
import tmdbsimple as tmdb
import config
from api.chat_responses.builder import TextMessage, Message
from api.chat_responses.response import Response
from app.models import MovieGenre, Movie
from app.utils import trim_text_by_sentence
class MovieMessage(Message):
    """Response message carrying a list of movie recommendations."""

    def __init__(self, movies):
        self.movies = movies

    def get(self):
        """Return a 'movies'-typed payload dict built from the stored movies."""
        content = [
            {
                'id': movie['id'],
                'title': movie['title'],
                'backdrop': Movie.get_backdrop_url(movie['backdrop_path']),
                'overview': trim_text_by_sentence(
                    movie['overview'], Movie.DESCRIPTION_MAX_LENGTH),
            }
            for movie in self.movies
        ]
        return {'type': 'movies', 'content': content}
class ResponseMovie(Response, ABC):
    """Base response for movie-recommendation requests.

    Subclasses implement :meth:`find_movies`; this class resolves genre
    names, caps the result list and assembles the response messages.
    """

    def __init__(self):
        super(ResponseMovie, self).__init__()
        # tmdbsimple reads the API key from this module-level attribute.
        tmdb.API_KEY = config.TMDB_API_KEY

    @abstractmethod
    def find_movies(self, request):
        """Find movie recommendations for *request* (subclass hook)."""
        raise NotImplementedError

    def get(self, request):
        """Return a JSON representation of recommendation messages.

        :param request: a dictionary representing the user request
        """
        request['genres'] = MovieGenre.get_genre_ids_by_names(request['genres'])
        found_movies = self.find_movies(request)[:9]  # cap at nine picks
        self.response_builder.add(TextMessage('Here are my picks for you'))
        self.response_builder.add(MovieMessage(found_movies))
        return self.response_builder.get_response()
class ResponseRecommendMovie(ResponseMovie):
    """Response recommending movies filtered by genre and release window."""

    def find_movies(self, request):
        """Query the TMDb discover endpoint with the request's filters."""
        discover_args = {}
        genres = request.get('genres')
        if genres:
            discover_args['with_genres'] = ','.join(str(genre) for genre in genres)
        dates = request.get('dates')
        if dates:
            discover_args['release_date_gte'] = dates['from']
            discover_args['release_date_lte'] = dates['to']
        return tmdb.Discover().movie(**discover_args)['results']
class ResponsePopularMovie(ResponseMovie):
    """Response recommending currently popular movies."""

    def find_movies(self, request):
        # The popularity feed is global: the request's filters are unused.
        popular = tmdb.Movies().popular()
        return popular['results']
|
# Leo colorizer control file for omnimark mode.
# This file is in the public domain.
# Properties for omnimark mode.
properties = {
    # Regex of OmniMark structural keywords after which the next line is indented.
    "indentNextLines": "\\s*((PROCESS|MARKUP|EXTERNAL|DOCUMENT|DTD|ELEMENT|FIND|TRANSLATE)((-|\\s).*|\\s*)|(DO|ELSE|REPEAT|MATCH|CASE|USING|GROUP|DEFINE|MACRO)(\\s+.*|\\s*))",
    # Line comments start with ';'.
    "lineComment": ";",
    # Characters that do not terminate a word for selection purposes.
    "noWordSep": ".-_",
}
# Attributes dict for omnimark_main ruleset.
omnimark_main_attributes_dict = {
    "default": "null",
    "digit_re": "",
    # '%' escapes the following character.
    "escape": "%",
    "highlight_digits": "true",
    # OmniMark keywords are case-insensitive.
    "ignore_case": "true",
    "no_word_sep": "",
}
# Dictionary of attributes dictionaries for omnimark mode, keyed by ruleset name.
attributesDictDict = {
    "omnimark_main": omnimark_main_attributes_dict,
}
# Keywords dict for omnimark_main ruleset.
# Keyword -> syntax-kind table, rebuilt from per-kind word lists instead of
# one giant literal.  Lookup behaviour is identical to the original dict.
omnimark_main_keywords_dict = {}
for _kind, _words in (
    ("keyword2",
     "#! #additional-info #appinfo #args #capacity #charset #class "
     "#command-line-names #console #current-input #current-output #data "
     "#doctype #document #dtd #empty #error #error-code #external-exception "
     "#file-name #first #group #implied #item #language-version #last "
     "#libpath #library #libvalue #line-number #main-input #main-output "
     "#markup-error-count #markup-error-total #markup-parser "
     "#markup-warning-count #markup-warning-total #message #none #output "
     "#platform-info #process-input #process-output #program-error "
     "#recovery-info #sgml #sgml-error-count #sgml-error-total "
     "#sgml-warning-count #sgml-warning-total #suppress #syntax "
     "after ancestor another arg attached attribute attributes bcd before "
     "binary-input binary-mode binary-output break-width buffer buffered by "
     "catchable cdata-entity children closed conref content context-translate "
     "counter created cross-translate current data-attribute data-attributes "
     "data-content data-letters declare declared-conref declared-current "
     "declared-defaulted declared-fixed declared-implied declared-required "
     "default-entity defaulted defaulting define delimiter directory doctype "
     "document document-element document-end document-start domain-free "
     "down-translate dtd dtd-end dtd-start dtds element elements elsewhere "
     "empty entities entity epilog-start escape external external-data-entity "
     "external-entity external-function external-output-function "
     "external-text-entity false find find-end find-start function "
     "function-library general global group groups heralded-names id "
     "id-checking idref idrefs ignore implied in in-library include "
     "include-end include-guard include-start inclusion initial initial-size "
     "insertion-break instance integer internal invalid-data keyed letters "
     "library local macro macro-end marked-section markup-comment "
     "markup-error markup-parser markup-wrapper mixed modifiable name-letters "
     "namecase named names ndata-entity nested-referents newline nmtoken "
     "nmtokens no no-default-io non-implied notation number numbers nutoken "
     "nutokens opaque optional parameter parent past pattern plus preparent "
     "previous process process-end process-start processing-instruction "
     "prolog-end prolog-in-error proper public read-only readable referent "
     "referents referents-allowed referents-displayed referents-not-allowed "
     "remainder repeated replacement-break reversed sdata-entity sgml-comment "
     "sgml-declaration-end sgml-dtd sgml-dtds sgml-error silent-referent size "
     "source specified stream subdoc-entity subdocument subdocuments "
     "subelement switch symbol system text-mode thrown times token translate "
     "true unattached unbuffered up-translate value valued variable writable "
     "xml xml-dtd xml-dtds yes"),
    ("keyword1",
     "activate again always assert case catch clear close copy copy-clear "
     "deactivate decrement discard do done else except exit flush for format "
     "halt halt-everything increment input join log match new next "
     "not-reached open output output-to over put remove reopen repeat reset "
     "rethrow return save save-clear scan select set sgml sgml-in sgml-out "
     "sgml-parse sgml-parser skip submit succeed suppress system-call "
     "test-system throw to unless using when xml-parse"),
    ("keyword3",
     "any any-text blank cdata content-end content-start digit lc letter "
     "line-end line-start non-cdata non-sdata null pcdata rcdata sdata space "
     "text uc value-end value-start white-space word-end word-start"),
    ("operator",
     "abs active and as base binary ceiling compiled-date complement "
     "creating creator date difference divide drop equal equals exists file "
     "floor greater-equal greater-than has hasnt is isnt item key last "
     "lastmost length less-equal less-than literal ln log10 lookahead mask "
     "matches minus modulo name negate not number-of occurrence of or round "
     "shift sqrt status take this truncate ul unanchored union usemap with"),
):
    for _word in _words.split():
        omnimark_main_keywords_dict[_word] = _kind
# Dictionary of keywords dictionaries for omnimark mode, keyed by ruleset name.
keywordsDictDict = {
    "omnimark_main": omnimark_main_keywords_dict,
}
# Rules for omnimark_main ruleset.
# Each function wraps one jEdit-style matcher: the colorizer calls it with
# the current line `s` and offset `i`, and the rule returns the match length.

# "#!" starts a comment running to end of line.
def omnimark_rule0(colorer, s, i):
    return colorer.match_eol_span(s, i, kind="comment1", seq="#!",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="", exclude_match=False)

# ";" starts a comment running to end of line.
def omnimark_rule1(colorer, s, i):
    return colorer.match_eol_span(s, i, kind="comment1", seq=";",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="", exclude_match=False)

# An unterminated double-quoted string is flagged as invalid.
def omnimark_rule2(colorer, s, i):
    return colorer.match_span_regexp(s, i, kind="invalid", begin="\"((?!$)[^\"])*$", end="$",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="",exclude_match=False,
        no_escape=False, no_line_break=True, no_word_break=False)

# A complete double-quoted string literal.
def omnimark_rule3(colorer, s, i):
    return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="",exclude_match=False,
        no_escape=False, no_line_break=True, no_word_break=False)

# An unterminated single-quoted string is flagged as invalid.
def omnimark_rule4(colorer, s, i):
    return colorer.match_span_regexp(s, i, kind="invalid", begin="'((?!$)[^'])*$", end="$",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="",exclude_match=False,
        no_escape=False, no_line_break=True, no_word_break=False)

# A complete single-quoted string literal.
def omnimark_rule5(colorer, s, i):
    return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="",exclude_match=False,
        no_escape=False, no_line_break=True, no_word_break=False)

# Rules 6-20 each colour one single-character operator.
def omnimark_rule6(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="&",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule7(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="|",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule8(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="+",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule9(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="=",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule10(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="/",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule11(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="<",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule12(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=">",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule13(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="~",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule14(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="@",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule15(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="$",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule16(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="%",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule17(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="^",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule18(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="*",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule19(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="?",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def omnimark_rule20(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="!",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

# Fallback: match against the keyword table (omnimark_main_keywords_dict).
def omnimark_rule21(colorer, s, i):
    return colorer.match_keywords(s, i)
# Rules dict for omnimark_main ruleset.
# Trigger-character table: maps a character to the candidate rules to try,
# in order.  Built programmatically; contents match the original literal.
rulesDict1 = {}

# Word characters (identifier letters, digits, '-') all feed the keyword
# matcher (rule 21).
for _ch in "-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz":
    rulesDict1[_ch] = [omnimark_rule21]

# Punctuation characters with a single operator rule.
for _ch, _rule in (
    ("$", omnimark_rule15), ("%", omnimark_rule16), ("&", omnimark_rule6),
    ("*", omnimark_rule18), ("+", omnimark_rule8), ("/", omnimark_rule10),
    (";", omnimark_rule1), ("<", omnimark_rule11), ("=", omnimark_rule9),
    (">", omnimark_rule12), ("?", omnimark_rule19), ("^", omnimark_rule17),
    ("|", omnimark_rule7), ("~", omnimark_rule13),
):
    rulesDict1[_ch] = [_rule]

# Characters with two candidate rules (quotes, comments, '#'/'@' keywords).
rulesDict1["!"] = [omnimark_rule20, omnimark_rule21]
rulesDict1["\""] = [omnimark_rule2, omnimark_rule3]
rulesDict1["#"] = [omnimark_rule0, omnimark_rule21]
rulesDict1["'"] = [omnimark_rule4, omnimark_rule5]
rulesDict1["@"] = [omnimark_rule14, omnimark_rule21]
# x.rulesDictDict for omnimark mode: ruleset name -> trigger-character table.
rulesDictDict = {
    "omnimark_main": rulesDict1,
}
# Import dict for omnimark mode (no delegated rulesets are imported).
importDict = {}
|
# -*- coding: utf-8 -*-
import collections
_fields = ['text', 'attachments', 'channel', 'username', 'icon_emoji']


class SlackMessage(collections.namedtuple('SlackMessage', _fields)):
    """Immutable Slack message; the helpers return modified copies."""

    def __new__(cls, text, attachments=None, channel=None, username=None, icon_emoji=None):
        # namedtuple has no __init__; defaults are supplied here instead.
        return super(SlackMessage, cls).__new__(
            cls, text, attachments, channel, username, icon_emoji)

    def add_mention(self, to):
        """Return a copy whose text is prefixed with "@to: "."""
        return self._replace(text='@{}: {}'.format(to, self.text))

    def replace_text(self, text):
        """Return a copy with *text* substituted for the current text."""
        return self._replace(text=text)
_attachment_fields = [
    'fallback', 'color', 'pretext',
    'author_name', 'author_link', 'author_icon',
    'title', 'title_link', 'text', 'fields', 'image_url', 'thumb_url', 'footer', 'footer_icon', 'ts']


class Attachment(collections.namedtuple('Attachment', _attachment_fields)):
    """Slack attachment; any subset of fields may be passed as keywords,
    the rest default to None."""

    def __new__(cls, **kwargs):
        default = {f: None for f in _attachment_fields}
        default.update(kwargs)
        # Bug fix: super() takes (class, cls) — the original passed
        # super(cls, Attachment), which only worked because cls was
        # Attachment itself and raised TypeError for any subclass.
        return super(Attachment, cls).__new__(cls, **default)
|
#!/usr/bin/env python3
from netmiko import ConnectHandler
import random
import json
# Netmiko connection profile for the HP ProCurve switch.
# SECURITY(review): credentials are hard-coded in source — move them to
# environment variables or a secrets store before sharing this script.
hp_procurve = {
    'device_type': 'hp_procurve',  # netmiko driver name
    'ip': '10.0.1.11',
    'username': 'manager',
    'password': 'Aruba123',
    'port': 22,
    'verbose': False,
}
# Netmiko connection profile for the Aruba controller.
aruba_os = {
    'device_type': 'aruba_os',  # netmiko driver name
    'ip': '10.0.1.5',
    'username': 'admin',
    'password': 'Aruba123',
    'port': 22,
    'verbose': False,
}
def hpswitch():
net_connect = ConnectHandler(**hp_procurve)
output = net_connect.send_command("show arp")
print(output)
name = None
for i, line in enumerate(output):
r = random.randint(0,i)
if not r and line.strip():
name = line.split()[0]
print(name)
def show_arp():
net_connect = ConnectHandler(**aruba_os)
output = net_connect.send_command("show arp")
#print(output)
output = output.split("\n",3)[3];
output = output[:output.rfind('\n')]
data = output.splitlines(True)
for line in data:
columns = line.split()
arp = {}
arp['ip'] = columns[1]
arp['mac'] = columns[2]
arp['int'] = columns[3]
print(json.dumps(arp))
def show_dhcp_stats():
#
net_connect = ConnectHandler(**aruba_os)
output = net_connect.send_command("show ip dhcp statistics")
output = output.split("\n",7)[7];
output = output[:-2]
#print(output)
data = output.splitlines(True)
for line in data:
if (line.startswith('C') or line.startswith('Total') or line.startswith('NOTE') or line.isspace()):
pass
else:
# print(line)
dhcp = {}
columns = line.split()
dhcp['network'] = columns[0]
dhcp['type'] = columns[1]
dhcp['active'] = columns[2]
dhcp['total'] = columns[3]
dhcp['used'] = columns[4]
dhcp['free'] = columns[5]
print(json.dumps(dhcp))
def get_user_ap_diff():
    """Print the controller's AP-associated client count next to the
    user-table entry count so the two can be compared.

    All values are scraped from CLI output; the index slicing below is
    tied to the exact AOS output format — TODO confirm against a live box.
    """
    net_connect = ConnectHandler(**aruba_os)
    show_ap_assoc= "show ap association | include Num"
    show_usertable="show user-table | include Entries"
    # [:-1] drops the trailing newline of the association summary line.
    ap_assoc_num_clients = net_connect.send_command(show_ap_assoc)[:-1]
    user_table_count = net_connect.send_command(show_usertable)
    # [1:-1] trims first/last char — assumes leading/trailing newlines; verify.
    user_table_count = user_table_count[1:-1]
    print("show ap association command: ", show_ap_assoc)
    # print("response: ", ap_assoc_num_clients)
    print("user-table command: ", show_usertable)
    # print("response: ", user_table_count)
    # Expected shape: "User Entries: connected/total" -> split on ':' then '/'.
    usertable = user_table_count.split(":")
    #
    # May need to use usertable[0] instead of 1 based on n/n clients. Do you want connected or total?
    #
    usertable_count = usertable[1].split("/")
    print("Number of users in table: ", usertable_count[1])
    showap_count = ap_assoc_num_clients.split(":")
    print("Number of client associated to APs: ", showap_count[1])
# Only the user/AP comparison runs by default; the other helpers are
# available for interactive use.
if __name__ == "__main__":
    get_user_ap_diff()
|
import numpy as np
from interpreter.definitions.tokens import TokenType, Token
########### STYLES ###########
# Standard (plain-ASCII) style: maps each TokenType to its printable form.
# PROPVAR maps to a callable that extracts the variable's own name.
style_1 = {TokenType.TRUE: "T", TokenType.FALSE: "F", TokenType.PROPVAR: lambda x: x.value, TokenType.NEGATION: "~", TokenType.DISJUNCTION: "|",
           TokenType.CONJUNCTION: "&", TokenType.IMPLICATION: ">", TokenType.CONVERSEIMPLICATION: "<",
           TokenType.BICONDITIONAL: "<>", TokenType.LPAREN: "(", TokenType.RPAREN: ")"}
# LaTeX style: same mapping rendered as inline math symbols.
# NOTE(review): "$\leftarrow$" uses a single backslash; it renders the same
# here because \l is not a Python escape, but "\\leftarrow" would be consistent.
style_1_ltx = {TokenType.TRUE: "T", TokenType.FALSE: "F", TokenType.PROPVAR: lambda x: x.value, TokenType.NEGATION: "$\\neg$", TokenType.DISJUNCTION: "$\\lor$",
               TokenType.CONJUNCTION: "$\\land$", TokenType.IMPLICATION: "$\\rightarrow$", TokenType.CONVERSEIMPLICATION: "$\leftarrow$",
               TokenType.BICONDITIONAL: "$\\leftrightarrow$", TokenType.LPAREN: "$($", TokenType.RPAREN: "$)$"}
# Registry of available styles, keyed by the names accepted as style_str.
style_dict = {"style_1": style_1, "style_1_ltx": style_1_ltx}
########### STYLE CONVERTERS ###########
# Convert token styles
def style_converter_token(inp:Token, style_str:str="style_1"):
    """Render a single token in the requested output style.

    Args:
        inp (Token): the token whose style is to be converted
        style_str (str): key into ``style_dict`` selecting the style

    Returns (str):
        The token rendered in the style of 'style_str'.  When the style
        entry is neither a string nor a callable, an error is printed and
        None is returned implicitly (kept for backward compatibility).
    """
    style = style_dict[style_str]
    converted = style[inp.type]
    # isinstance instead of type(...) == str: correct for str subclasses
    # and the idiomatic type check.
    if isinstance(converted, str):
        return converted
    if callable(converted):
        return converted(inp)
    print(f"Error, could not style '{inp}' using style: '{style_str}'")
# Convert truth table styles
def style_converter_truth_table(truth_table:list, style_str:str="style_1"):
    """Render every cell of *truth_table* as a string in the given style.

    NOTE(review): ``n_truth_table`` aliases ``truth_table``, so the
    conversion mutates the caller's table in place and returns the same
    object.  The trailing print is debug output.
    """
    n_truth_table = truth_table
    # Recursively flatten a nested header (list of tokens / sub-lists) into
    # one string, parenthesising every nesting level below the top.
    def handle_header(elem:(list or str), depth=0):
        ret_str = ""
        if isinstance(elem, (list, tuple)):
            if depth > 0: ret_str += "(";
            for el in elem:
                ret_str += handle_header(el, depth=(depth + 1))
            if depth > 0: ret_str += ")"
        else:
            ret_str += style_converter_token(inp=elem, style_str=style_str)
        return ret_str
    for i, column in enumerate(truth_table):
        for j, elem in enumerate(column):
            # List cells are column headers (token sequences); scalar cells
            # are single tokens.
            if type(elem) == list:
                header = handle_header(elem)
                # header += style_converter_token(inp=f, style_str=style_str)
                n_truth_table[i][j] = header
            else:
                n_truth_table[i][j] = style_converter_token(inp=elem, style_str=style_str)
    print(f"Truth table: \n{truth_table}")
    return n_truth_table
# Convert tableaux style
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, HttpResponseRedirect
from django.shortcuts import get_object_or_404

from notification.models import notification
from notification.views import notifications
from tweet.models import Tweet
from twitteruser.views import user_detail, following

from .forms import TweetForm
# Create your views here.
@login_required
def add_tweet(request):
    """Create a tweet for the logged-in user.

    GET shows an empty form.  POST validates; on success the tweet is
    stored and the user is redirected home.  Bug fix: an invalid POST now
    re-renders the *bound* form so field errors are displayed, instead of
    silently replacing it with a fresh empty form.
    """
    if request.method == 'POST':
        form = TweetForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            Tweet.objects.create(
                text=data['text'],
                user=request.user
            )
            return HttpResponseRedirect('/')
    else:
        form = TweetForm()
    return render(request, 'tweet.html', {'form': form})
def tweet_detail(request, post_id):
    """Render a single tweet's detail page.

    Uses get_object_or_404 so an unknown id yields HTTP 404 instead of an
    unhandled Tweet.DoesNotExist (HTTP 500).
    """
    tweet = get_object_or_404(Tweet, id=post_id)
    return render(request, 'detail/tweet.html', {'tweet': tweet})
def users_tweets(request, user_id):
    """Render a user's profile page with their tweets and follow state.

    NOTE(review): user_detail() and following() are imported from
    twitteruser.views — confirm they return template-ready data here
    rather than HttpResponse objects, as their names suggest views.
    """
    user = user_detail(request, user_id)
    follow = following(request, user_id)
    users_tweets = Tweet.objects.filter(user=user_id)
    # len() evaluates the queryset; the count is shown on the page anyway.
    num_tweets = len(users_tweets)
    return render(request, 'detail/user.html', {
        'tweets': users_tweets,
        'num': num_tweets,
        'id': user_id,
        'requser': user,
        'following': follow
    })
def newsfeed(request):
    """Render the home page: newest tweets first, plus notifications.

    Fixes: removed a leftover debug ``print(notifications)`` (it printed
    the imported view *function* object), and replaced
    ``order_by('id')`` + ``reversed()`` with the equivalent
    ``order_by('-id')``.
    """
    # NOTE(review): reading `.notifications` off an unsaved notification
    # model instance looks suspect — confirm this is the intended source.
    notify = notification().notifications
    ordered_tweets = Tweet.objects.order_by('-id')  # newest first
    return render(request, 'index.html', {
        'news': ordered_tweets,
        'notifications': notify
    })
|
"""
Functions for rasterising based on the image formed from the squared distance transform of the object being rasterised.
"""
import torch
# from torch.types import Number
def compute_nearest_neighbour_sigma(grid: torch.Tensor) -> float:
    """Compute the sigma2 value required for `nearest_neighbour` to rasterise
    objects exactly one pixel wide.

    This variant selects any pixel the line passes through: it returns half
    the diagonal length of a grid cell.

    Args:
        grid: the rasterisation grid coordinates. For 1D data this will be
            shape [W, 1]; for 2D [H, W, 2], 3D [D, H, W, 3].

    Returns:
        the value of sigma2 to use in `nearest_neighbour`
    """
    ndims = grid.shape[-1]
    # Squared per-axis step between two diagonally adjacent grid points.
    step2 = (grid[(1,) * ndims] - grid[(0,) * ndims]) ** 2
    half_diagonal = step2.sum().sqrt() / 2
    return half_diagonal
def compute_nearest_neighbour_sigma_bres(grid: torch.Tensor) -> float:
    """Compute the sigma2 value required for `nearest_neighbour` to rasterise
    objects exactly one pixel wide.

    This variant mimics Bresenham's algorithm: pixels a line merely grazes
    at an angle are not selected.  It returns half the (square) grid
    spacing along the first axis.

    Args:
        grid: the rasterisation grid coordinates. For 1D data this will be
            shape [W, 1]; for 2D [H, W, 2], 3D [D, H, W, 3]. The grid
            sampling is assumed to be square!

    Returns:
        the value of sigma2 to use in `nearest_neighbour`
    """
    ndims = grid.shape[-1]
    step = grid[(1,) * ndims] - grid[(0,) * ndims]
    half_step = step.abs() / 2
    return half_step[0]
def nearest_neighbour(dt2: torch.Tensor, sigma2: float = 1) -> torch.Tensor:
    """Nearest-neighbour rasterisation function.

    Pixels whose squared distance is at most sigma2**2 become 1, all
    others 0.  Note this hard threshold has no usable gradients!

    Args:
        dt2: the squared distance transform
        sigma2: the threshold distance (Default value = 1)

    Returns:
        the rasterised image
    """
    threshold = sigma2 ** 2
    within = dt2 <= threshold
    return within * 1.
def sigmoid(dt2: torch.Tensor, sigma2: float) -> torch.Tensor:
    """Sigmoidal rasterisation function.

    Evaluates $$2 \\times \\sigmoid(-dt^2 / \\sigma)$$: near-zero
    distances map to values close to 1.0, decaying smoothly as distance
    grows.

    Args:
        dt2: the squared distance transform
        sigma2: the rate of fall-off. Larger values result in greater line
            width, but also larger gradient flow across the raster

    Returns:
        the rasterised image
    """
    scaled = dt2 / sigma2
    return 2. * torch.sigmoid(-scaled)
def exp(dt2: torch.Tensor, sigma2: float) -> torch.Tensor:
    """Exponentiated rasterisation function.

    Computes $$\\exp(-dt^2 / \\sigma)$$, which equals 1.0 where the squared
    distance is zero and decays towards 0.0 with increasing distance.

    Args:
        dt2: the squared distance transform
        sigma2: the rate of fall-off. Larger values result in greater line
            width, but also larger gradient flow across the raster

    Returns:
        the rasterised image
    """
    exponent = -1 * dt2 / sigma2
    return torch.exp(exponent)
|
from __future__ import division, print_function, absolute_import
'''
Author : Lyubimov, A.Y.
Created : 04/02/2019
Last Changed: 12/02/2019
Description : IOTA GUI controls for PHIL-formatted settings
'''
import os
import sys
import wx
from wx.lib.scrolledpanel import ScrolledPanel
from wxtbx import bitmaps
from libtbx.utils import Sorry
from libtbx import Auto
from iota.components import gui
from iota.components.gui import base
from iota.components.gui import controls as ct
from iota.components.iota_utils import InputFinder, makenone
from iota.components.gui.dialogs import DirView
# Module-level InputFinder instance (shared helper for locating input files)
ginp = InputFinder()
def get_test_phil():
    """Return a parsed PHIL scope that exercises the GUI control types.

    The embedded PHIL string covers str/path/int/float/bool/choice/
    space_group/unit_cell definitions, multiple definitions, input lists,
    nested scopes, scope switches, and a multiple grandchild scope; it is
    used to test the PHIL-driven controls in this module.
    """
    from iotbx.phil import parse
    test_phil = """
string_definition = None
.help = A string definition in the main scope
.optional = False
.type = str
.alias = String Definition
path_to_folder = $PWD
.help = A path to a folder in the main scope
.optional = False
.type = path
.multiple = False
.alias = Main Folder
.style = path:folder
input_file = None
.help = A path to a file (multiple)
.optional = True
.type = path
.multiple = True
.alias = Input File
.style = input_list
multi_string = None
.help = A string definition that is multiple
.type = str
.multiple = True
.alias = Multi-string
child_scope_alpha
.help = The first child scope
.alias = Child Alpha
{
flag_on = False
.help = toggle this scope on and off
.type = bool
.alias = Use Child Scope Alpha
.style = scope_switch
float_1_def = None
.help = A float definition in the child scope alpha
.type = float
.alias = First Float
.optional = True
integer_2_def = None
.help = An integer definition in the child scope alpha
.type = int
.alias = First Integer
.optional = False
bool_3_def = True
.help = A boolean definition in the child scope alpha
.type = bool
.alias = True or False in Alpha
choice_4_def = one two three *four five
.help = A choice definition in the child scope alpha
.type = choice
.alias = Number of Items
}
child_scope_beta
.help = The second child scope
.alias = Child Beta
{
string_1_def = None
.help = A string definition in the child scope beta
.type = str
.alias = String Beta
space_group_def = None
.help = A space group definition in the child scope beta
.type = space_group
.alias = Space Group
unit_cell_def = None
.help = A unit cell definition in the child scope beta
.type = unit_cell
.alias = Unit Cell
grandchild_scope_beta
.help = a multiple grandchild scope in child scope beta
.alias = Grandchild Beta
.multiple = True
{
flag_on = False
.help = a flag_on checkbox (should turn whole scope on/off)
.type = bool
.alias = Activate Grandchild Beta
.style = scope_switch
string_1b_beta = None
.help = A string in grandchild beta
.type = str
.alias = String 1B Beta
float_2b_beta = None
.help = A float in grandchild beta
.type = float
.optional = False
.alias = Float 1B Beta
choice_3b_beta = one *several many
.help = A choice in grandchild beta
.type = choice
.optional = True
.alias = Choice 3B Beta
}
}
child_scope_gamma
.help = Third child scope (test multiple panel)
.multiple = False
.alias = Child Gamma
{
apply_gamma = False
.help = Boolean definition for child scope gamma
.type = bool
.alias = Apply
string_1_gamma = one some many
.help = A string definition in child scope gamma
.type = choice
.alias = String One Gamma
string_2_gamma = None
.help = A string definition in child scope gamma
.type = str
.alias = String Two Gamma
string_3_gamma = None
.help = A string definition in child scope gamma
.type = str
.alias = String Three Gamma
}
"""
    return parse(test_phil)
# Platform-specific stuff
# TODO: Will need to test this on Windows at some point
# Font and label point sizes per wx windowing toolkit.
# NOTE(review): there is no fallback branch -- on any other wx platform these
# names remain undefined and any later use would raise NameError; confirm
# whether a default branch is intended.
if wx.Platform == '__WXGTK__':
    norm_font_size = 10
    button_font_size = 12
    LABEL_SIZE = 14
    CAPTION_SIZE = 12
elif wx.Platform == '__WXMAC__':
    norm_font_size = 12
    button_font_size = 14
    LABEL_SIZE = 14
    CAPTION_SIZE = 12
elif (wx.Platform == '__WXMSW__'):
    norm_font_size = 9
    button_font_size = 11
    LABEL_SIZE = 11
    CAPTION_SIZE = 9

# Metallicbutton globals
GRADIENT_NORMAL = 0
GRADIENT_PRESSED = 1
GRADIENT_HIGHLIGHT = 2
MB_STYLE_DEFAULT = 1
MB_STYLE_BOLD_LABEL = 2
MB_STYLE_DROPARROW = 4
# --------------------------- PHIL-specific Mixins --------------------------- #
class PHILPanelMixin(object):
    """ PHIL-handling mixin for PHIL panels.

    Provides the machinery shared by all PHIL panels: building controls from
    PHIL scopes/definitions, expert-level show/hide, default-value
    highlighting, scope-switch enable/disable, and error collection.
    Host classes must also provide a `phil_index` (gui.PHILIndex) and the
    wx window methods this mixin calls (GetFont, GetTopLevelParent, ...).
    """

    def initialize_phil_panel(self, parent, box=None, direction=wx.VERTICAL):
        """Set up bookkeeping dicts and the panel's main sizer (boxed or plain)."""
        # Establish the top parent window
        # self.window = getattr(parent, 'window', None)
        # if not self.window:
        #   self.window = self.GetTopLevelParent()
        self.window = self.GetTopLevelParent()
        self.parent = parent
        self.multi_scope = False
        self.multi_definition = False
        self._multiples = []        # full paths of .multiple objects already handled
        self._input_lists = {}      # full path -> input-list widget
        self._toggled_scopes = {}
        self.scope_switch = None    # widget that toggles this whole scope on/off
        self.control_index = {}     # full path -> control widget
        # Create a box around panel
        if box is not None:
            assert type(box) == str
            panel_box = wx.StaticBox(self, label=box)
            self.main_sizer = PHILBoxSizer(self, panel_box, direction)
        else:
            self.main_sizer = PHILSizer(self, direction)
        self.SetSizer(self.main_sizer)

    def set_phil_index(self, master_phil, working_phil=None, fetch_new=True):
        """Create this panel's PHILIndex from master (and optional working) PHIL."""
        self.phil_index = gui.PHILIndex(master_phil=master_phil,
                                        working_phil=working_phil,
                                        fetch_new=fetch_new)

    def redraw_by_expert_level(self, expert_level=0):
        """Record the new expert level and show/hide controls accordingly."""
        self.expert_level = expert_level
        self.show_hide_controls(expert_level=self.expert_level)

    def show_hide_controls(self, expert_level=0):
        """Hide controls above the given expert level; recurse into scopes."""
        if hasattr(self, 'controls') and self.controls:
            for idx, ctrl in self.controls.items():
                if ctrl.expert_level > expert_level:
                    ctrl.Hide()
                else:
                    ctrl.Show()
                    if ctrl.is_scope:
                        try:
                            ctrl.show_hide_controls(expert_level=expert_level)
                        except Exception as e:
                            raise e
        else:
            pass
            # raise Sorry('IOTA PHIL error: no controls in {}'.format(self))

    def flatten_scope(self, scope):
        """Recursively flatten a scope (or list of objects) into {full_path: str(value)}."""
        saved_values = {}
        if isinstance(scope, list):
            objects = scope
        else:
            objects = scope.active_objects()
        for obj in objects:
            if obj.is_definition:
                path = obj.full_path()
                value = obj.type.from_words(obj.words, obj)
                saved_values[path] = str(value)
            elif obj.is_scope:
                try:
                    from_scope = self.flatten_scope(scope=obj)
                except RuntimeError as e:
                    raise e
                saved_values.update(from_scope)
        return saved_values

    def check_full_path(self, full_path=None):
        """Return the given path, or fall back to this panel's own full_path."""
        if not full_path:
            if hasattr(self, 'full_path'):
                return self.full_path
            else:
                return
        else:
            return full_path

    def mark_non_defaults(self, full_path=None):
        """ Loop through controls and for text controls set background to amber
        if the value is not a default """
        full_path = self.check_full_path(full_path)
        if full_path is None:
            return
        # Create a dictionary of default values
        defaults = self.get_default_values(full_path)
        if not defaults:
            return
        # Iterate through controls and set background as appropriate
        for idx, ctrl in self.controls.items():
            if ctrl.is_scope:
                ctrl.mark_non_defaults(full_path=ctrl.full_path)
            elif ctrl.is_definition:
                if ctrl.multi_definition:
                    for i, c in ctrl.controls.items():
                        path = c.full_path
                        if path and path in defaults:
                            # c.default_value = defaults[path]
                            c.set_background()
                else:
                    path = ctrl.full_path
                    if path and path in defaults:
                        # ctrl.default_value = defaults[path]
                        ctrl.set_background()

    def get_default_scope(self, full_path=None):
        """ Extract the master version of the given scope (the full scope
        associated with this panel, or any specified scope) """
        full_path = self.check_full_path(full_path)
        if full_path is not None:
            master_scopes = self.phil_index.get_master_scope(full_path)
            if isinstance(master_scopes, list):
                return master_scopes[0]
            else:
                return master_scopes

    def get_default_values(self, full_path=None):
        """ Obtain a dictionary of path and value from default scope """
        full_path = self.check_full_path(full_path)
        if full_path is not None:
            scope = self.get_default_scope(full_path=full_path)
            if scope:
                dv = self.flatten_scope(scope=[scope])
                return dv

    def get_max_label_size(self, scope=None, scopes=None):
        """Return the pixel width of the longest label among the given objects."""
        # Get font info for string-to-pixels conversion
        panel_font = self.GetFont()
        dc = wx.WindowDC(self)
        dc.SetFont(panel_font)
        # Identify the longest label
        max_label_size = 0
        source = scope if scope else scopes
        assert source
        if type(source).__name__ == 'scope':
            active_objects = source.active_objects()
        else:
            try:
                active_objects = list(source)
            except TypeError:
                raise Sorry('IOTA PHIL Error: input is {}, must be scope, list, '
                            'or tuple'.format(type(source).__name__))
            except Exception as e:
                raise e
        for o in active_objects:
            # bools render as checkboxes (no text label); grid scopes get labels
            if (
                    (o.is_definition and o.type.phil_type != 'bool') or
                    (o.is_scope and (o.style and 'grid' in o.style))
            ):
                alias = o.alias_path()
                label = alias if alias else o.full_path().split('.')[-1] + ': '
                label_size = dc.GetTextExtent(label)[0]
                if label_size > max_label_size:
                    max_label_size = label_size
        # Pad max label size
        max_label_size += dc.GetTextExtent(' ')[0]
        if max_label_size > 0:
            return wx.Size(max_label_size, -1)
        else:
            return wx.DefaultSize

    # Scope widget tools ------------------------------------------------------- #

    def add_scope_box(self, scope, sizer=None, label=None,
                      index=None, border=10, flag=wx.EXPAND | wx.BOTTOM,
                      ignore_multiple=False):
        """Create a boxed panel for a scope and populate it with controls."""
        btn_scopes = []
        if index is None:
            index = len(self.controls)
        if sizer is None:
            sizer = self.main_sizer
        # Generate name for the scope box
        if not label:
            obj_name = scope.full_path().split('.')[-1]
            box_label = scope.alias_path() if scope.alias_path() else obj_name
        else:
            box_label = label
        # Make scope box, add to sizer, and include in controls dictionary
        if scope.multiple and not ignore_multiple:
            # a .multiple scope is rendered once, as a multi-scope panel
            if scope.full_path() not in self._multiples:
                multi_scopes = self.phil_index.get_scopes(scope.full_path())
                panel = PHILMultiScopePanel(self, multi_scopes, box=box_label)
                sizer.Add(panel, 1, flag=flag, border=border)
                self.controls.update({index: panel})
                self.control_index.update({scope.full_path(): panel})
                self._multiples.append(scope.full_path())
            return
        else:
            panel = PHILScopePanel(self, scope, box=box_label)
            sizer.Add(panel, flag=flag, border=border)
            self.controls.update({index: panel})
            self.control_index.update({scope.full_path(): panel})
        # Iterate through PHIL objects; if scope is found, it's added to the list
        # of scopes that will become buttons
        for obj in scope.active_objects():
            max_label_size = self.get_max_label_size(scopes=scope)
            if obj.is_scope:
                # only collect a single example of a full path to a multiple object
                if obj.full_path() not in self._multiples:
                    # check if this is a grid-style scope
                    style = self.phil_index.get_scope_style(obj.full_path())
                    if style.grid or len(obj.objects) <= 3:
                        self.add_scope_grid(parent=panel, scope=obj, style=style,
                                            label_size=max_label_size)
                    else:
                        btn_scopes.append(obj)
                    if obj.multiple:
                        self._multiples.append(obj.full_path())
            elif obj.is_definition:
                self.add_definition_control(parent=panel, obj=obj,
                                            label_size=max_label_size)
        # If any scopes were found, generate buttons all at once, and place at
        # bottom of the scope box
        if btn_scopes:
            self.add_scope_buttons(parent=panel, scopes=btn_scopes)

    def add_definition_control(self, parent, obj,
                               sizer=None,
                               proportion=0,
                               border=5,
                               in_grid=False,
                               ignore_multiple=False,
                               label=None,
                               label_size=wx.DefaultSize):
        """Create the widget for a single PHIL definition and register it."""
        sizer = sizer if sizer else parent.main_sizer
        value = obj.type.from_words(obj.words, obj)
        style = self.phil_index.style[obj.full_path()]
        idx = len(parent.controls)
        # Handle multiple definitions
        if obj.multiple:
            if ignore_multiple:
                sizer = parent.scope_sizer
            elif style.input_list:
                if obj.full_path() not in self._input_lists:
                    sizer = parent.main_sizer
                else:
                    # input-list widget already exists; just append the value
                    self._input_lists[obj.full_path()].add_item(path=value)
                    return
            else:
                # render all copies of a plain .multiple definition once
                if obj.full_path() not in self._multiples:
                    try:
                        multi_defs = self.phil_index.get_scopes(obj.full_path())
                        multi_panel = PHILMultiDefPanel(parent, multi_defs, style=style)
                        sizer.Add(multi_panel, flag=wx.EXPAND | wx.BOTTOM, border=10)
                        parent.controls[idx] = multi_panel
                        parent.control_index[obj.full_path()] = multi_panel
                    except RuntimeError:
                        pass
                    self._multiples.append(obj.full_path())
                return
        # Create widget and add to sizer / controls dictionary
        extras = getattr(self, '_{}_extras'.format(obj.type.phil_type), None)
        wdg = WidgetFactory.make_widget(parent, obj, label_size, value=value,
                                        border=border, style=style, label=label,
                                        extras=extras)
        # Set widget_specific formatting
        if obj.type.phil_type in ('str', 'strings', 'unit_cell', 'space_group',
                                  'path'):
            expand = True
            if style.input_list:
                proportion = 1
        else:
            expand = False
        # Add widget to sizer and dictionaries
        parent.controls[idx] = wdg
        parent.control_index[obj.full_path()] = wdg
        if style.input_list:
            self._input_lists[obj.full_path()] = wdg
        if style.scope_switch:
            setattr(parent, 'scope_switch', wdg)
            # inside a grid, the switch is placed specially by the grid panel
            if in_grid:
                parent.setup_switch(switch_ctrl=wdg)
                return
        sizer.add_widget(widget=wdg, expand=expand, proportion=proportion,
                         border=border)

    def add_scope_buttons(self, parent, scopes):
        """Create a row of dialog-launching buttons for the given scopes."""
        parent.main_sizer.Add((15, 0))
        btn_sizer = PHILSizer(parent=parent, direction=wx.HORIZONTAL)
        for scope in scopes:
            name = scope.full_path().split('.')[-1]
            btn = PHILDialogButton(parent=parent,
                                   scope=scope,
                                   phil_index=self.phil_index,
                                   name=name,
                                   expert_level=scope.expert_level)
            parent.controls.update({len(parent.controls): btn})
            parent.control_index.update({scope.full_path(): btn})
            btn_sizer.add_widget(btn)
        parent.main_sizer.add_widget(btn_sizer, expand=True)

    def add_scope_grid(self, parent, scope, style, label_size=wx.DefaultSize):
        """Lay out a small scope as a grid panel rather than a boxed panel."""
        one_line = (len(scope.objects) == 2 and style.has_scope_switch) or \
                   (len(scope.objects) == 1)
        if one_line:
            grid_label_size = label_size
        else:
            grid_label_size = self.get_max_label_size(scopes=scope)
        panel = PHILGridScopePanel(parent, scope, style, label_size=label_size)
        for obj in scope.active_objects():
            if obj.is_definition:
                obj_style = self.phil_index.get_scope_style(obj.full_path())
                if obj_style.scope_switch:
                    # switch checkbox gets no label of its own
                    label = ''
                    dfn_label_size = (0, -1)
                else:
                    label = None
                    dfn_label_size = grid_label_size
                self.add_definition_control(panel, obj, label=label,
                                            label_size=dfn_label_size,
                                            sizer=panel.main_sizer,
                                            in_grid=True)
            else:
                # todo: handle scopes here!
                pass
        parent.main_sizer.Add(panel, flag=wx.EXPAND)
        idx = len(parent.controls)
        parent.controls[idx] = panel
        parent._toggled_scopes.update(self._toggled_scopes)

    def rebuild_panel(self, scope=None):
        """Destroy all controls and rebuild the panel from the scope."""
        if not scope:
            scope = self.scope
        self.clear_panel()
        self.construct_panel(scope=scope)

    def redraw_panel(self, panel=None, reset=False, exempt=None):
        """Push current (or default, if reset) PHIL values back into widgets."""
        if not panel:
            panel = self
        if exempt is None:
            exempt = []
        for idx, ctrl in panel.controls.items():
            style = self.phil_index.get_scope_style(scope_name=ctrl.full_path)
            if ctrl.is_definition:
                if reset and ctrl.full_path not in exempt:
                    reset_ctrl = True
                else:
                    reset_ctrl = False
                if ctrl.multi_definition:
                    ctrl.redraw_dialog(reset_to_default=reset_ctrl)
                else:
                    if reset_ctrl:
                        value = ctrl.default_value
                    else:
                        value = self.phil_index.get_value(path=ctrl.full_path)
                    if style.input_list:
                        ctrl.ResetValue(value)
                    else:
                        ctrl.SetValue(value)
                    if style.scope_switch:
                        value = bool(value)
                        ctrl.ctr.SetValue(value)
                        ctrl.parent.check_scope_switches()
            else:
                if ctrl.full_path in exempt:
                    continue
                if ctrl.multi_scope:
                    ctrl.redraw_dialog(reset_to_default=reset)
                ctrl.redraw_panel(reset=reset)
        self.mark_non_defaults()

    def construct_panel(self, scope=None):
        """Build controls for every object in the scope (or list of objects)."""
        if not scope:
            scope = self.scope
        if isinstance(scope, list):
            phil_objects = scope
        else:
            try:
                assert scope.is_scope
            except AssertionError:
                raise Sorry('IOTA PHIL ERROR: Scope object required, {} received'
                            ''.format(type(scope).__name__))
            else:
                phil_objects = scope.objects
        for obj in phil_objects:
            max_label_size = self.get_max_label_size(scopes=phil_objects)
            if obj.is_scope:
                style = self.phil_index.get_scope_style(obj.full_path())
                if style.grid:
                    self.add_scope_grid(self, scope=obj, style=style,
                                        label_size=max_label_size)
                else:
                    self.add_scope_box(scope=obj)
            elif obj.is_definition:
                self.add_definition_control(parent=self, obj=obj,
                                            label_size=max_label_size)
        self.check_scope_switches()
        self.Layout()
        # Mark widgets with non-default values
        self.mark_non_defaults(self.full_path)

    def check_scope_switches(self, force_switch=False):
        # Go through all scope boxes and disables ones that are turned off
        # NOTE(review): when force_switch is True but self.scope_switch is
        # None this would raise AttributeError -- presumably callers only
        # force panels that have a switch; confirm.
        if self.scope_switch is not None or force_switch:
            for idx, ctrl in self.controls.items():
                enable = self.scope_switch.ctr.GetValue()
                if ctrl != self.scope_switch:
                    if ctrl.is_scope:
                        ctrl.check_scope_switches(force_switch=True)
                    else:
                        ctrl.enable_panel(enable=enable)
        else:
            for idx, ctrl in self.controls.items():
                if ctrl.is_scope:
                    ctrl.check_scope_switches()

    def clear_panel(self, panel=None):
        """Reset bookkeeping and destroy all windows in the panel's sizer."""
        if panel is None:
            panel = self
        self._multiples = []
        self._input_lists = {}
        self._toggled_scopes = {}
        self.controls = {}
        panel.main_sizer.DeleteWindows()

    def enable_panel(self, enable=True, children=None):
        """Recursively enable/disable this panel's child windows."""
        if not children:
            if hasattr(self, 'GetChildren'):
                children = self.GetChildren()
            if not children:
                return
        for child in children:
            # recurse into PHIL-derived children before toggling them
            if 'PHIL' in child.__class__.__name__:
                self.enable_panel(enable=enable, children=child.GetChildren())
            child.Enable(enable=enable)

    def collect_errors(self, panel=None):
        """ Go through all controls recursively and collect any format errors """
        if panel is None:
            panel = self
        errors = {}
        for idx, ctrl in panel.controls.items():
            if ctrl.is_definition:
                if ctrl.multi_definition:
                    multi_def_errors = ctrl.collect_errors()
                    if multi_def_errors:
                        errors.update(multi_def_errors)
                else:
                    if hasattr(ctrl.ctr, 'error_msg') and ctrl.ctr.error_msg:
                        errors[ctrl.name] = ctrl.ctr.error_msg
            elif ctrl.is_scope:
                scope_errors = ctrl.collect_errors()
                if scope_errors:
                    errors.update(scope_errors)
            else:
                # control is neither definition nor scope
                return None
        return errors

    def change_value(self, full_path, value):
        # NOTE: won't work with multiples!
        if full_path in self.control_index:
            if self.control_index[full_path].is_definition:
                self.control_index[full_path].SetValue(value)

    def get_value(self, full_path):
        """Return the string value of the definition control at full_path, or None."""
        if full_path in self.control_index and \
                self.control_index[full_path].is_definition:
            return self.control_index[full_path].GetStringValue()
        return None
class MultiObjectPanelMixin(object):
    """ Control-handling mixin for multi-scope and multi-definition panels.

    Host classes are expected to provide `self.controls` (index -> control),
    `self._current_objects` (index -> PHIL object), `self.main_sizer`,
    `self.scope_sizer`, `self.add_default()` and `self.dialog_layout()`.
    """

    def redraw_dialog(self, remove_controls=None, reset_to_default=False):
        """Refresh the dialog after boxes are added, removed, or reset.

        Args:
            remove_controls: controls to detach from the sizer and destroy
            reset_to_default: if True, wipe everything and re-add the default box
        """
        if reset_to_default:
            self.clear_boxes()
            self.add_default()
            return
        elif remove_controls:
            for ctrl in remove_controls:
                self.main_sizer.Detach(ctrl)
                ctrl.Destroy()
        # hide the selection checkboxes on the surviving controls
        for idx, ctrl in self.controls.items():
            ctrl.toggle.Hide()
        self.dialog_layout()

    def clear_boxes(self):
        """Destroy all scope boxes currently in the multi-object sizer."""
        self.scope_sizer.DeleteWindows()

    def reset_boxes(self):
        """Drop all tracked objects/controls and restore the default box."""
        self._current_objects = {}
        self.controls = {}
        self.redraw_dialog(reset_to_default=True)

    def delete_boxes(self, last_only=False, idx=None):
        """Remove boxes: the last one, one by index, or all user-selected ones.

        Args:
            last_only: if True, remove the box with the highest index
            idx: remove the box at this index (takes effect when last_only
                is False; index 0 is a valid value)
        """
        if last_only:
            # NOTE: raises ValueError if there are no objects left
            last_index = max([i for i in self._current_objects])
            self._current_objects.pop(last_index)
            selected_controls = [self.controls.pop(last_index)]
        elif idx is not None:
            # was `elif idx:`, which silently ignored a legitimate idx of 0
            self._current_objects.pop(idx)
            selected_controls = [self.controls.pop(idx)]
        else:
            selected_controls = []
            # iterate over a snapshot: popping from self.controls while
            # iterating items() raises RuntimeError (dict changed size)
            for i, ctrl in list(self.controls.items()):
                if ctrl.selected:
                    self._current_objects.pop(i)
                    selected_controls.append(self.controls.pop(i))
        self.redraw_dialog(remove_controls=selected_controls,
                           reset_to_default=(not self.controls))
# -------------------------- PHIL Sizers and Panels -------------------------- #
class PHILSizer(wx.BoxSizer, gui.WidgetHandlerMixin):
    """Plain box sizer with the PHIL widget-handler convenience methods."""

    def __init__(self, parent, direction=wx.VERTICAL):
        super(PHILSizer, self).__init__(direction)
        self.parent = parent  # kept for WidgetHandlerMixin helpers
class PHILBoxSizer(wx.StaticBoxSizer, gui.WidgetHandlerMixin):
    """Static-box sizer (labelled frame) with PHIL widget-handler helpers."""

    def __init__(self, parent, box, direction=wx.VERTICAL):
        super(PHILBoxSizer, self).__init__(box, direction)
        self.parent = parent  # kept for WidgetHandlerMixin helpers
class PHILFlexGridSizer(wx.FlexGridSizer, gui.WidgetHandlerMixin):
    """Flex-grid sizer with PHIL widget-handler helpers and growable shortcuts."""

    def __init__(self, parent, rows, cols, vgap, hgap):
        super(PHILFlexGridSizer, self).__init__(rows, cols, vgap, hgap)
        self.parent = parent
        self.label = None  # set by widget factories when a text label is added

    def add_growable(self, cols=None, rows=None, proportion=0):
        """Mark the given column/row indices as growable."""
        if cols:
            for col in cols:
                self.AddGrowableCol(idx=col, proportion=proportion)
        if rows:
            for row in rows:
                self.AddGrowableRow(idx=row, proportion=proportion)
class PHILBaseScrolledPanel(ScrolledPanel, PHILPanelMixin):
    """Scrolled panel base for PHIL controls; inherits panel setup from the mixin."""

    def __init__(self, parent, box=None, direction=wx.VERTICAL, *args, **kwargs):
        # take an explicit phil_index if supplied, else inherit the parent's
        if 'phil_index' in kwargs:
            phil_index = kwargs.pop('phil_index')
            self.phil_index = phil_index
        else:
            self.phil_index = getattr(parent, 'phil_index', None)
        ScrolledPanel.__init__(self, parent, *args, **kwargs)
        self.initialize_phil_panel(parent, box=box, direction=direction)
class PHILBaseFixedPanel(wx.Panel, PHILPanelMixin):
    """Non-scrolled panel base for PHIL controls; setup mirrors the scrolled base."""

    def __init__(self, parent, box=None, direction=wx.VERTICAL, *args, **kwargs):
        # take an explicit phil_index if supplied, else inherit the parent's
        if 'phil_index' in kwargs:
            phil_index = kwargs.pop('phil_index')
            self.phil_index = phil_index
        else:
            self.phil_index = getattr(parent, 'phil_index', None)
        wx.Panel.__init__(self, parent, *args, **kwargs)
        self.initialize_phil_panel(parent, box=box, direction=direction)
class PHILBaseDialogPanel(PHILBaseScrolledPanel, gui.IOTAScopeCtrl):
    """Scrolled scope panel used as the body of PHIL settings dialogs."""

    def __init__(self, parent, scope, box=None, direction=wx.VERTICAL,
                 *args, **kwargs):
        super(PHILBaseDialogPanel, self).__init__(parent, box=box,
                                                  direction=direction,
                                                  *args, **kwargs)
        gui.IOTAScopeCtrl.__init__(self, scope)
        # Set expert level (default to 0 when the scope declares none)
        self.expert_level = self.phil_index.get_min_expert_level(scope)
        if self.expert_level is None:
            self.expert_level = 0
class PHILBaseScopePanel(PHILBaseFixedPanel, gui.IOTAScopeCtrl):
    """Fixed (non-scrolled) scope panel base for embedded scope boxes."""

    def __init__(self, parent, scope, box=None, direction=wx.VERTICAL,
                 *args, **kwargs):
        super(PHILBaseScopePanel, self).__init__(parent, box=box,
                                                 direction=direction,
                                                 *args, **kwargs)
        gui.IOTAScopeCtrl.__init__(self, scope)
        if not self.phil_index:
            self.phil_index = self.parent.phil_index
        # Set expert level (default to 0 when the scope declares none)
        self.expert_level = self.phil_index.get_min_expert_level(scope)
        if self.expert_level is None:
            self.expert_level = 0
class PHILBaseDefPanel(wx.Panel, gui.IOTADefinitionCtrl, PHILPanelMixin):
    """Panel base for a single PHIL definition control.

    Subclasses must implement GetStringValue/SetValue for their widget type.
    """

    def __init__(self, parent, phil_object, box=None, direction=wx.VERTICAL,
                 *args, **kwargs):
        wx.Panel.__init__(self, parent, *args, **kwargs)
        gui.IOTADefinitionCtrl.__init__(self, phil_object)
        self.multi_definition = False
        self.color_change = True  # allow non-default background highlighting
        # Initialize panel
        self.initialize_phil_panel(parent, box=box, direction=direction)

    def GetStringValue(self):
        """ Override in subclasses with subclass-specific function """
        raise NotImplementedError()

    def SetValue(self, value):
        """ Overwrite in subclasses with subclass-specific function """
        raise NotImplementedError()
class PHILBaseDialog(base.FormattedDialog):
    """ Base dialog class for PHIL-formatted settings """

    def __init__(self, parent, *args, **kwargs):
        style = wx.CAPTION | wx.CLOSE_BOX | wx.RESIZE_BORDER | wx.STAY_ON_TOP
        super(PHILBaseDialog, self).__init__(parent, style=style, *args, **kwargs)
        self.phil_sizer = PHILSizer(self)
        self.envelope.Add(self.phil_sizer, 1, flag=wx.EXPAND | wx.ALL, border=5)

    def _set_default_size(self, disp_size):
        """Size the dialog to soft maxima of 2/3 display height, 1/3 width."""
        # Get x, y of Display as well as of Dialog
        sw, sh = disp_size
        dw, dh = self.GetSize()
        # Set dialog height (soft maximum to 2/3 of display height)
        soft_max_height = sh * (2/3)
        dlg_height = dh if dh <= soft_max_height else soft_max_height
        # Set dialog width (soft maximum to 1/3 of display width, pad if height
        # has been truncated, to accommodate the scroll bar)
        soft_max_width = sw / 3
        dw = dw + 20 if dlg_height < dh else dw
        dlg_width = dw if dw <= soft_max_width else soft_max_width
        # Apply size restrictions (wide enough to accommodate the button bar) and
        # set dialog size
        if sw > 500 and sh > 300:
            self.SetMinSize((500, 300))
        self.SetSize(wx.Size(dlg_width, dlg_height))

    def _place_near_parent_window(self, disp_geom):
        """Position the dialog slightly offset from its parent, clamped to the display."""
        # initially set coordinates to be slightly offset from the top window
        # NOTE(review): the local `wx` here shadows the wx module for the rest
        # of this method; harmless as written, but worth renaming
        wx, wy = self.parent.GetTopLevelParent().GetPosition()
        dx, dy, dw, dh = disp_geom
        x = wx + int(0.025 * dw)
        y = wy + int(0.025 * dh)
        # check for boundary situations and adjust
        w, h = self.GetSize()
        if x + w > dx + dw:
            x = dx + dw - w
        if y + h > dy + dh:
            y = dy + dh - h
        self.SetPosition((x, y))

    def size_and_place(self):
        """Apply dialog sizing, then position relative to the parent."""
        self.set_size(set_size='dialog')
        x, y = self.set_relative_position()
        self.SetPosition((x, y))

    def OnCancel(self, event):
        """Dismiss the dialog with a cancel result."""
        self.EndModal(wx.ID_CANCEL)

    def OnExpertLevel(self, event):
        """Re-filter visible controls when the expert-level choice changes."""
        expert_level = self.dlg_ctr.choice.GetSelection()
        self.phil_panel.redraw_by_expert_level(expert_level=expert_level)
        self.Layout()
        self.phil_panel.SetupScrolling()
# ------------------------------- PHIL Widgets ------------------------------- #
class PHILDefPanel(PHILBaseDefPanel):
    """ Base class for the PHIL control, subclassed from wx.Panel and
    IOTADefinitionCtrl. The panel's main sizer is a FlexGridSizer, with a
    customizable grid depending on which kind of control is desired. """

    def __init__(self, parent, phil_object, rows=1, cols=2, vgap=10, hgap=10,
                 *args, **kwargs):
        """ Constructor
        :param parent: parent object, typically another panel
        :param rows: Number of rows in the FlexGridSizer
        :param cols: Number of columns in the FlexGridSizer
        :param vgap: gap (in pixels) between rows (set to zero if rows=1)
        :param hgap: gap (in pixels) between columns (set to zero if cols=1)
        :param size: size of the panel
        """
        super(PHILDefPanel, self).__init__(parent, phil_object=phil_object)
        self.error_btn = None
        self.ctr = None
        self.selected = False
        # Set grid (extra columns hold the toggle checkbox and error button)
        vgap = vgap if rows > 1 else 0
        hgap = hgap if cols > 1 else 0
        self.ctrl_sizer = PHILFlexGridSizer(self, rows, cols + 3, vgap, hgap)
        self.SetSizer(self.ctrl_sizer)
        # Attach and hide a checkbox for use with multi-definition widgets
        self.toggle = wx.CheckBox(self, label='')
        self.ctrl_sizer.Add(self.toggle, flag=wx.ALIGN_CENTER)
        self.Bind(wx.EVT_CHECKBOX, self.onToggle, self.toggle)
        self.toggle.Hide()
        # Attach and hide a small button that would show the format error message
        err_bmp = bitmaps.fetch_icon_bitmap('actions', 'status_unknown', size=16)
        self.error_btn = ct.GradButton(parent=self, bmp=err_bmp, button_margin=1,
                                       size=(22, 22), gradient_percent=0)
        self.ctrl_sizer.add_widget(self.error_btn)
        self.Bind(wx.EVT_BUTTON, self.ShowError, self.error_btn)
        self.error_btn.Hide()

    def onToggle(self, e):
        """Track the selection checkbox state (used for multi-def deletion)."""
        self.selected = self.toggle.GetValue()

    def GetLabelSize(self):
        """Return the pixel size of this control's label (0, 0 if none)."""
        size = self.ctrl_sizer.label.GetSize() if self.ctrl_sizer.label else (0, 0)
        return size

    def SetLabelSize(self, size=wx.DefaultSize):
        """Resize this control's label, if one exists."""
        if self.ctrl_sizer.label:
            self.ctrl_sizer.label.SetSize(size)

    def GetStringValue(self):
        """ Override in subclasses with subclass-specific function """
        raise NotImplementedError()

    def SetValue(self, value):
        """ Overwrite in subclasses with subclass-specific function """
        raise NotImplementedError()

    def SetError(self, err):
        """Show the error button (with message attached) and mark the background."""
        self.error_btn.user_data = err
        self.error_btn.Show()
        self.set_background(is_error=True)
        self.parent.Layout()
        self.Refresh()

    def RemoveError(self):
        """Hide the error button and restore normal background marking."""
        self.error_btn.Hide()
        self.set_background(is_error=False)
        self.parent.Layout()
        self.Refresh()

    def ShowError(self, e):
        """Pop up the stored format-error message."""
        err = self.error_btn.user_data
        wx.MessageBox(caption='Format Error!',
                      message=str(err),
                      style=wx.ICON_EXCLAMATION | wx.STAY_ON_TOP)

    def set_background(self, is_error=False, force_null=False):
        """Colour the panel: red on error, amber on non-default value, else none."""
        c_err = (215, 48, 31)
        c_new = (254, 240, 217)
        if force_null:
            color = wx.NullColour
        elif is_error:
            color = c_err
        else:
            if self.is_default():
                color = wx.NullColour
            else:
                color = c_new
        self.SetBackgroundColour(color)
        self.Refresh()

    def is_default(self):
        """True if the control's current string value matches its default."""
        default_value = str(self.default_value)
        control_value = str(self.GetStringValue())
        return control_value == default_value
class PHILScopePanel(PHILBaseScopePanel):
    """ Based class for a non-scrolled panel to display PHIL scopes; to be
    instantiated for scope boxes """

    def __init__(self, parent, scope, direction=wx.VERTICAL, box=None,
                 *args, **kwargs):
        super(PHILScopePanel, self).__init__(parent, scope, box=box,
                                             direction=direction, *args, **kwargs)
        self.multi_scope = False
        self.window = getattr(parent, 'window', None)
        if not self.window:
            self.window = self.GetTopLevelParent()
        self.parent = parent
        self.selected = False
        # Set up panel sizer (checkbox column + growable content column)
        self.panel_sizer = PHILFlexGridSizer(self, 1, 2, 0, 10)
        self.panel_sizer.AddGrowableCol(1)
        self.panel_sizer.AddGrowableRow(0)
        # Set checkbox (hidden for most PHIL Scope Panels)
        self.toggle = wx.CheckBox(self, label='')
        self.panel_sizer.Add(self.toggle, flag=wx.ALIGN_CENTER)
        self.Bind(wx.EVT_CHECKBOX, self.onToggle, self.toggle)
        self.toggle.Hide()
        # Set up main PHIL sizer
        if box:
            assert type(box) == str
            panel_box = wx.StaticBox(self, label=box)
            self.main_sizer = PHILBoxSizer(self, panel_box, direction)
        else:
            self.main_sizer = PHILSizer(self, direction)
        self.panel_sizer.Add(self.main_sizer, 1, flag=wx.EXPAND)
        self.SetSizer(self.panel_sizer)
        self.Layout()

    def onToggle(self, e):
        """Track the selection checkbox state (used when deleting multi-boxes)."""
        self.selected = self.toggle.GetValue()
class PHILGridScopePanel(PHILBaseScopePanel):
    """Scope panel laid out as a grid (optionally with an on/off switch and label)."""

    def __init__(self, parent, scope, style,
                 label_size=wx.DefaultSize,
                 *args, **kwargs):
        super(PHILGridScopePanel, self).__init__(parent, scope, *args, **kwargs)
        self.multi_scope = False
        self.window = getattr(parent, 'window', None)
        if not self.window:
            self.window = self.GetTopLevelParent()
        self.parent = parent
        self.selected = False
        self.switch_ctrl = None
        n_items = len(scope.objects)
        has_switch = style.has_scope_switch
        if not has_switch:
            # fall back to scanning definitions for a scope_switch style flag
            bool_objects = [(o.style and 'scope_switch' in o.style) for o in
                            scope.objects if o.is_definition]
            has_switch = True in bool_objects
        if not style.grid:
            grid_style = 'auto'
        else:
            grid_style = style.grid
        if grid_style in ('none', 'auto'):
            rows = n_items
            cols = 1
        else:
            # explicit grid spec, e.g. "2x3" / "2:3" / "2-3" / "2,3"
            delimiters = ['x', ':', '-', ',']
            bool_d = [(d in grid_style) for d in delimiters]
            d_idx = bool_d.index(True)
            delimiter = delimiters[d_idx]
            rows, cols = grid_style.split(delimiter)
        # Populate grid
        self.main_sizer = PHILFlexGridSizer(self, int(rows), int(cols), 0, 10)
        self.main_sizer.add_growable(cols=range(int(cols)))
        if (has_switch and n_items == 2) or n_items == 1:  # No label
            self.panel_sizer = wx.GridBagSizer(0, 0)
            self.panel_sizer.Add(self.main_sizer, flag=wx.EXPAND, pos=(0, 1))
            self.panel_sizer.AddGrowableCol(1)
        else:  # Apply label
            self.panel_sizer = wx.GridBagSizer(10, 10)
            label = scope.alias
            if not label:
                if scope.name:
                    label = scope.name.capitalize()
                elif scope.full_path():
                    label = scope.full_path().split('.')[-1].replace('_',
                                                                     ' ').capitalize()
                else:
                    label = 'Options'
            txt_label = wx.StaticText(self, label=label, size=label_size)
            if has_switch:
                # column 0 is reserved for the switch checkbox (see setup_switch)
                self.panel_sizer.Add(txt_label, pos=(0, 1))
            else:
                self.panel_sizer.Add(txt_label, pos=(0, 0), span=(1, 2))
            self.panel_sizer.Add((25, 0), pos=(1, 0))
            self.panel_sizer.Add(self.main_sizer, flag=wx.EXPAND, pos=(1, 1))
            self.panel_sizer.AddGrowableCol(1)
        self.SetSizer(self.panel_sizer)

    def setup_switch(self, switch_ctrl, value=None):
        """Place the scope on/off switch at grid position (0, 0) and set its state."""
        self.switch_ctrl = switch_ctrl
        self.panel_sizer.Add(self.switch_ctrl, pos=(0, 0))
        if not value:
            value = self.switch_ctrl.ctr.GetValue()
        self.switch_ctrl.ctr.SetValue(state=value)
class PHILMultiScopePanel(PHILBaseDialogPanel, MultiObjectPanelMixin):
    """Panel for a .multiple PHIL scope: a stack of scope boxes plus
    add/delete/reset buttons."""

    def __init__(self, parent, scope, box=None, direction=wx.VERTICAL,
                 *args, **kwargs):
        super(PHILMultiScopePanel, self).__init__(parent, scope, box=box,
                                                  direction=direction,
                                                  *args, **kwargs)
        self.multi_scope = True
        self._current_objects = {}
        # Add a sizer for multiple scopes, add existing scopes to it
        self.scope_sizer = PHILSizer(self)
        # Create a dictionary of PHIL objects
        for obj in scope:
            idx = scope.index(obj)
            self._current_objects[idx] = obj
        self.add_boxes()
        self.main_sizer.Add(self.scope_sizer, flag=wx.EXPAND)
        # Add button box
        self.btn_box = ct.AddDeleteButtonBox(self, reset_button=True)
        self.main_sizer.Add(self.btn_box, flag=wx.EXPAND | wx.LEFT, border=15)
        # Bindings to buttons
        self.Bind(wx.EVT_BUTTON, self.onAdd, self.btn_box.btn_add)
        self.Bind(wx.EVT_BUTTON, self.onDelete, self.btn_box.btn_del)
        self.Bind(wx.EVT_BUTTON, self.onReset, self.btn_box.btn_rst)
        self.Bind(wx.EVT_BUTTON, self.onDeleteLast, self.btn_box.btn_del_lst)
        self.Bind(wx.EVT_BUTTON, self.onDeleteSelected, self.btn_box.btn_del_sel)
        self.Bind(wx.EVT_BUTTON, self.onCancelDelete, self.btn_box.btn_del_not)
        self.check_scope_switches()

    def add_boxes(self):
        """Create a numbered scope box for every tracked PHIL object."""
        for idx, obj in self._current_objects.items():
            label = '{} {}'.format(obj.full_path().split('.')[-1].replace('_', ' '),
                                   idx + 1)
            self.add_scope_box(scope=obj, sizer=self.scope_sizer, index=idx,
                               label=label, ignore_multiple=True)

    def add_default(self, idx=None):
        """Append a fresh scope box built from the master (default) scope."""
        if not idx:
            # NOTE(review): `if not idx` also triggers for an explicit idx of
            # 0, which would then be recomputed -- confirm intended
            indices = [i for i in self._current_objects]
            if indices:
                idx = max(indices) + 1
            else:
                idx = 0
        master_object = self.phil_index.get_master_scope(self.full_path)[0]
        self._current_objects[idx] = master_object
        label = '{} {}'.format(
            master_object.full_path().split('.')[-1].replace('_', ' '),
            idx + 1
        )
        self.add_scope_box(scope=master_object, sizer=self.scope_sizer, index=idx,
                           label=label, ignore_multiple=True)
        self.dialog_layout()

    def dialog_layout(self):
        """Re-run switch checks and relayout this panel and its top-level window."""
        self.check_scope_switches()
        # self.SetupScrolling()
        self.Layout()
        self.GetTopLevelParent().Layout()

    def onAdd(self, e):
        """Button handler: add a default scope box."""
        self.add_default()

    def onDelete(self, e):
        """Button handler: reveal the per-box selection checkboxes."""
        for idx, ctrl in self.controls.items():
            ctrl.toggle.Show()
        self.Layout()

    def onReset(self, e):
        """Button handler: restore the single default box."""
        self.reset_boxes()

    def onDeleteLast(self, e):
        """Button handler: delete the most recently added box."""
        self.delete_boxes(last_only=True)

    def onDeleteSelected(self, e):
        """Button handler: delete all checked boxes."""
        self.delete_boxes()

    def onCancelDelete(self, e):
        """Button handler: hide selection checkboxes without deleting."""
        for idx, ctrl in self.controls.items():
            ctrl.toggle.Hide()
        self.dialog_layout()
class PHILMultiDefPanel(PHILBaseFixedPanel, gui.IOTADefinitionCtrl,
                        MultiObjectPanelMixin):
  """ Panel for a PHIL *definition* that occurs multiple times.

  Mirrors PHILMultiScopePanel, but manages one definition control per copy
  instead of one scope box per copy.
  """

  def __init__(self, parent, scope, *args, **kwargs):
    """ Constructor
    :param parent: parent widget
    :param scope: iterable of PHIL definition objects (one per copy)
    """
    # Strip an unsupported 'style' kwarg before it reaches the base classes
    # NOTE(review): the popped value is never used — confirm that dropping
    # it (rather than forwarding it) is intentional
    style = kwargs.pop('style', None)
    super(PHILMultiDefPanel, self).__init__(parent, box='', *args, **kwargs)
    gui.IOTADefinitionCtrl.__init__(self, scope)
    self.multi_definition = True
    self._current_objects = {}
    self.controls = {}

    # Add a sizer for multiple scopes, add existing scopes to it
    self.scope_sizer = PHILSizer(self)

    # Create a dictionary of PHIL objects, keyed by position in the scope
    # NOTE(review): scope.index(obj) returns the first equal element; see
    # PHILMultiScopePanel for the same caveat about duplicate-equal copies
    for obj in scope:
      idx = scope.index(obj)
      self._current_objects[idx] = obj
    self.add_definitions()
    self.main_sizer.Add(self.scope_sizer, flag=wx.EXPAND)

    # Add button box
    self.btn_box = ct.AddDeleteButtonBox(self, reset_button=True)
    self.main_sizer.Add(self.btn_box, flag=wx.EXPAND | wx.LEFT, border=15)

    # Bindings to buttons
    self.Bind(wx.EVT_BUTTON, self.onAdd, self.btn_box.btn_add)
    self.Bind(wx.EVT_BUTTON, self.onDelete, self.btn_box.btn_del)
    self.Bind(wx.EVT_BUTTON, self.onReset, self.btn_box.btn_rst)
    self.Bind(wx.EVT_BUTTON, self.onDeleteLast, self.btn_box.btn_del_lst)
    self.Bind(wx.EVT_BUTTON, self.onDeleteSelected, self.btn_box.btn_del_sel)
    self.Bind(wx.EVT_BUTTON, self.onCancelDelete, self.btn_box.btn_del_not)

    self.Fit()

  def add_definitions(self):
    """ Create one definition control per tracked PHIL object """
    for idx, obj in self._current_objects.items():
      self.add_definition_control(self, obj=obj, ignore_multiple=True)

  def add_default(self, idx=None):
    """ Append a new copy of the definition, populated with master defaults

    :param idx: explicit index for the new copy; if None, the next free
                index after the current maximum is used
    """
    if not idx:
      indices = [i for i in self._current_objects]
      if indices:
        idx = max(indices) + 1
      else:
        idx = 0
    # Pull a pristine copy of the definition from the master PHIL
    master_def = self.phil_index.get_master_scope(self.full_path)[0]
    self._current_objects[idx] = master_def
    self.add_definition_control(self, obj=master_def, ignore_multiple=True)
    self.dialog_layout()

  def dialog_layout(self):
    """ Re-layout the enclosing top-level window """
    self.GetTopLevelParent().Layout()

  def onAdd(self, e):
    self.add_default()

  def onDelete(self, e):
    # Reveal per-control delete toggles so the user can pick items to remove
    for idx, ctrl in self.controls.items():
      ctrl.toggle.Show()
    self.Layout()

  def onReset(self, e):
    self.reset_boxes()

  def onDeleteLast(self, e):
    self.delete_boxes(last_only=True)

  def onDeleteSelected(self, e):
    self.delete_boxes()

  def onCancelDelete(self, e):
    # Hide the delete toggles without removing anything
    for idx, ctrl in self.controls.items():
      ctrl.toggle.Hide()
    self.dialog_layout()

  def GetStringValue(self):
    """ Override in subclasses with subclass-specific function """
    raise NotImplementedError()

  def SetValue(self, value):
    """ Overwrite in subclasses with subclass-specific function """
    raise NotImplementedError()
class ValidatedTextCtrl(wx.TextCtrl):
  ''' Base class for a wx.TextCtrl that performs PHIL-specific validation and
  format checking (sub-classes will customize those functions) '''

  def __init__(self, *args, **kwargs):
    """ Constructor. Accepts an optional 'value' kwarg which is intercepted,
    reformatted via the parent panel, and applied only after the control is
    fully initialized. """
    self.error_msg = None

    # Intercept a specified value to be set after initialization
    if 'value' in kwargs:
      saved_value = kwargs['value']
      kwargs['value'] = ""
    else:
      saved_value = None

    # Initialize base class
    super(ValidatedTextCtrl, self).__init__(*args, **kwargs)
    self.parent = self.GetParent()

    # Set font style for text control
    font = wx.Font(norm_font_size, wx.FONTFAMILY_MODERN,
                   wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
    self.SetFont(font)

    # Enforce "process ENTER" option so EVT_TEXT_ENTER fires
    style = self.GetWindowStyle()
    if not style & wx.TE_PROCESS_ENTER:
      style |= wx.TE_PROCESS_ENTER
      self.SetWindowStyle(style)

    # Create appropriate validator (done in subclasses)
    self.SetValidator(self.CreateValidator())

    # Bindings
    self.Bind(wx.EVT_TEXT_ENTER, self.OnEnter, self)
    self.Bind(wx.EVT_KILL_FOCUS, self.OnFocusLost, self)
    self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus, self)

    # Apply value if one was passed to this control
    saved_value = self.parent.ReformatValue(value=saved_value,
                                            raise_error=False)
    self.SetStringValue(value=saved_value)

  def SetStringValue(self, value=None):
    """ Set the control's text; iterables are space-joined first """
    if type(value) in (list, tuple):
      value = ' '.join([str(v) for v in value])
    self.SetValue(str(value))

  def GetStringValue(self):
    """ Return the control's current text as a plain string """
    return str(self.GetValue())

  def Validate(self):
    """ Run the attached validator and set/clear the parent's error state """
    is_good = self.GetValidator().Validate(parent=self.parent)
    if is_good:
      self.parent.RemoveError()
      self.Refresh()
    else:
      if not self.error_msg:
        self.error_msg = 'Unknown format error, please double-check your entry.'
      self.parent.SetError(self.error_msg)
      self.Refresh()

  def OnEnter(self, e=None):
    self.Validate()
    # Bugfix: the handler may be invoked directly with e=None, in which case
    # the old unconditional e.Skip() raised AttributeError
    if e is not None:
      e.Skip()

  def OnFocusLost(self, e=None):
    self.Validate()
    # Bugfix: guard e.Skip() for direct calls with e=None (see OnEnter)
    if e is not None:
      e.Skip()

  def OnSetFocus(self, e=None):
    if e is not None:
      e.Skip()

  def CreateValidator(self):
    """ Default validator; subclasses may override """
    return gui.TextCtrlValidator().Clone()
class ValidatedStringCtrl(ValidatedTextCtrl):
  """ Validated text control for PHIL str definitions, with optional
  minimum/maximum length constraints (unbounded by default). """

  def __init__(self, *args, **kwargs):
    super(ValidatedStringCtrl, self).__init__(*args, **kwargs)
    # No length restrictions until a caller sets them explicitly
    self._min_len = 0
    self._max_len = sys.maxint

  def SetMinLength(self, n):
    assert (n >= 0)
    self._min_len = n

  def SetMaxLength(self, n):
    assert (n >= 1)
    self._max_len = n

  def GetMinLength(self):
    return self._min_len

  def GetMaxLength(self):
    return self._max_len

  def CheckFormat(self, value):
    """ Reject strings containing '$' or violating the length limits;
    return the value unchanged otherwise. """
    if "$" in value:
      raise ValueError("The dollar symbol ($) may not be used here.")
    max_len = self.GetMaxLength()
    if len(value) > max_len:
      raise ValueError("Value must be {} characters or less."
                       "".format(max_len))
    min_len = self.GetMinLength()
    if len(value) < min_len:
      raise ValueError("Value must be at least {} characters."
                       "".format(min_len))
    return value
class ValidatedMultiStringCtrl(ValidatedStringCtrl):
  """ A subclass of ValidatedStringCtrl for multi-string PHIL objects """

  def __init__(self, *args, **kwargs):
    super(ValidatedMultiStringCtrl, self).__init__(*args, **kwargs)

  def CheckFormat(self, value):
    """ Validate each item in a multi-string value against the '$' and
    length rules; collect all item errors into one ValueError.

    Bugfixes: length checks previously measured len(value) (the whole
    entry) instead of each item; None/Auto items fell through to the
    '$' containment test (TypeError on None); duplicate items were
    mis-numbered via list.index().

    :param value: string (space-delimited) or list/tuple of items
    :return: value unchanged if every item validates
    """
    if value in (None, Auto):
      return value
    if isinstance(value, str):
      values = value.split(' ')
    elif type(value) in (list, tuple):
      values = value
    else:
      raise ValueError('Unrecognized format: expecting string or iterable')

    # Iterate through values and check format for each
    errors = []
    for i, v in enumerate(values):
      idx = i + 1  # 1-based item number for error messages
      if v in (None, Auto):
        continue
      err = None
      if "$" in v:
        err = "Item #{}: The dollar symbol ($) may not be used here." \
              "".format(idx)
      elif len(v) > self.GetMaxLength():
        err = "Item #{}: Value must be {} characters or less." \
              "".format(idx, self.GetMaxLength())
      elif len(v) < self.GetMinLength():
        err = "Item #{}: Value must be {} characters or more." \
              "".format(idx, self.GetMinLength())
      if err:
        errors.append(err)

    # Raise Value Error if any errors are found
    if errors:
      error_msg = 'Error(s) found!\n{}'.format('\n'.join(errors))
      raise ValueError(error_msg)
    return value
class ValidatedPathCtrl(ValidatedTextCtrl):
  """ Validated text control for PHIL path definitions. Checks path syntax
  in a platform-independent way plus existence/permission constraints
  derived from the parent panel's read/write/file/folder flags. """

  def __init__(self, *args, **kwargs):
    super(ValidatedPathCtrl, self).__init__(*args, **kwargs)
    # Mirror the parent panel's path-style flags locally
    self.read = self.GetParent().read
    self.write = self.GetParent().write
    self.is_file = self.GetParent().is_file
    self.is_folder = self.GetParent().is_folder

  def CheckFormat(self, value=None):
    """ A hacky way to validate path syntax in a platform-independent way
    Args:
      value: path as string
    Returns: value if validated, raises ValueError if not
    """
    import errno

    # Just in case, check that entered path is string or unicode
    if not isinstance(value, str) and not isinstance(value, unicode):
      raise ValueError('Path must be a string!')

    # Check for None or blank space
    if value.lower() == 'none' or value.isspace() or not value:
      if self.parent.IsOptional():
        return value
      else:
        raise ValueError('Path is not optional! Please provide a valid path.')

    # Check each path component for OS errors
    # Strip Windows-specific drive specifier (e.g., `C:\`) if it exists
    _, pathname = os.path.splitdrive(value)

    # Define directory guaranteed to exist
    root_dirname = os.environ.get('HOMEDRIVE', 'C:') \
      if sys.platform == 'win32' else os.path.sep
    assert os.path.isdir(root_dirname)

    # Append a path separator to this directory if needed
    root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep

    # Validate each path component by probing it under the known-good root;
    # lstat raises OSError with platform-specific codes for illegal names
    for pathname_part in pathname.split(os.path.sep):
      try:
        os.lstat(root_dirname + pathname_part)
      except OSError as e:
        # winerror 123 = ERROR_INVALID_NAME on Windows
        if hasattr(e, 'winerror'):
          if e.winerror == 123:
            raise ValueError("{} is not a valid Windows path!"
                             "".format(value))
        elif e.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
          raise ValueError("{} is not a valid Posix path!"
                           "".format(value))

    # Check path existence and read/write permissions
    permission_errors = []
    if os.path.isdir(value):
      dirname = value
    else:
      dirname = os.path.dirname(value)
    if self.read:
      # Read-only paths must already exist; read/write paths may be created
      if not self.write and not os.path.exists(value):
        raise ValueError('Path {} not found!'.format(value))
      if not os.access(dirname, os.R_OK):
        permission_errors.append('Read permission for {} denied!'
                                 ''.format(dirname))
      elif self.is_file and not os.access(value, os.R_OK):
        permission_errors.append('Read permission for {} denied!'
                                 ''.format(value))
    if self.write:
      if not os.access(dirname, os.W_OK):
        permission_errors.append('Write permission to {} denied!'
                                 ''.format(dirname))
      elif self.is_file and not os.access(value, os.W_OK):
        permission_errors.append('Write permission to {} denied!'
                                 ''.format(value))
    if permission_errors:
      raise ValueError('Permission errors for {}:\n{}'
                       ''.format(dirname, '\n'.join(permission_errors)))
    return value
class ValidatedNumberCtrl(ValidatedTextCtrl):
  """ Validated text control for PHIL int/float definitions, with optional
  min/max bounds and None/Auto acceptance. """

  def __init__(self, *args, **kwargs):
    """ Constructor
    :param as_type: 'int' or 'float' (kwarg; default 'float')
    :param min: minimum allowed value (kwarg; default unbounded)
    :param max: maximum allowed value (kwarg; default unbounded)
    :param allow_none: accept None as a valid entry (kwarg; default True)
    """
    # Check for "type" kwarg, for int or float
    self._num_type = kwargs.pop('as_type', 'float')

    # Check for 'min' kwarg to set min value for control
    vmin = kwargs.pop('min', -sys.maxint)
    if vmin is None:
      vmin = -sys.maxint
    self._value_min = int(vmin) if self._num_type == 'int' else float(vmin)

    # Check for 'max' kwarg to set max value for control
    vmax = kwargs.pop('max', sys.maxint)
    if vmax is None:
      vmax = sys.maxint
    self._value_max = int(vmax) if self._num_type == 'int' else float(vmax)

    # Check for 'allow_none' option (won't be used in subclassed multinumber
    # control)
    self._allow_none = kwargs.pop('allow_none', True)

    super(ValidatedNumberCtrl, self).__init__(*args, **kwargs)

  def SetMinValue(self, n):
    assert (n >= 0)
    self._value_min = n

  def SetMaxValue(self, n):
    assert (n >= 1)
    self._value_max = n

  def GetMinValue(self):
    return self._value_min

  def GetMaxValue(self):
    return self._value_max

  def determine_type(self, value, is_none=True, is_auto=True):
    """ Convert value to the control's numeric type if possible and build a
    human-readable description of what entries are acceptable.

    :param value: raw entry
    :param is_none: whether None is an acceptable entry
    :param is_auto: whether Auto is an acceptable entry
    :return: (suggested_type message, converted value)
    """
    suggested_type = 'a number'
    try:
      if 'int' in self._num_type:
        value = int(value)
        suggested_type = 'an integer'
      else:
        value = float(value)
        suggested_type = 'a float'
    except ValueError:
      # Not a number; normalize 'none'/'auto' strings to their singletons
      value = str(value)
      if value.lower() == 'none':
        value = None
      elif value.lower() == 'auto':
        value = Auto
    if isinstance(value, str):
      # Bugfix: the message suffixes were garbled — the None-only branch
      # produced e.g. "an integeror None" and the Auto-only branch wrongly
      # mentioned None
      if is_auto and is_none:
        suggested_type += ', None, or Auto'
      elif is_none:
        suggested_type += ', or None'
      elif is_auto:
        suggested_type += ', or Auto'
    return suggested_type, value

  def CheckFormat(self, value):
    """ Checks that the format of the value is numerical; if string is found,
    only 'none' and 'auto' can be accepted as valid
    :param value: entered value
    :return: checked value or error
    """
    is_none = self._allow_none
    is_auto = self.parent.UseAuto()
    suggested_type, value = self.determine_type(value,
                                                is_none=is_none,
                                                is_auto=is_auto)
    if (value is None and is_none) or (value is Auto and is_auto):
      pass
    elif isinstance(value, str):
      if (value.lower() == 'none' and is_none) or \
              (value.lower() == 'auto' and is_auto):
        pass
      else:
        raise ValueError("String entries are not allowed! Enter {}."
                         "".format(suggested_type))
    else:
      if value > self.GetMaxValue():
        raise ValueError("Value ({}) must be less than the maximum of {}."
                         "".format(value, self.GetMaxValue()))
      elif value < self.GetMinValue():
        raise ValueError("Value ({}) must be more than the minimum of {}."
                         "".format(value, self.GetMinValue()))
    return value
class ValidatedMultiNumberCtrl(ValidatedNumberCtrl):
  """ A subclass of ValidatedNumberCtrl for multi-number PHIL objects """

  def __init__(self, *args, **kwargs):
    """ Constructor
    :param size_min: minimum number of items (kwarg; default 0)
    :param size_max: maximum number of items (kwarg; default unbounded)
    :param allow_none_elements: accept None items (kwarg; default True)
    :param allow_auto_elements: accept Auto items (kwarg; default True)
    """
    self._size_min = kwargs.pop('size_min', 0)
    self._size_max = kwargs.pop('size_max', sys.maxint)
    self._allow_none_elements = kwargs.pop('allow_none_elements', True)
    self._allow_auto_elements = kwargs.pop('allow_auto_elements', True)
    super(ValidatedMultiNumberCtrl, self).__init__(*args, **kwargs)

  def CheckFormat(self, value):
    """ Checks that the value list is the right size; that each of the items
    is numerical; and if string values are found, only 'none' and 'auto' can
    be accepted as valid.

    Bugfixes: the min/max error messages were missing line-continuation
    backslashes, so err was assigned the *unformatted* template while the
    formatted string was discarded; allowed None items fell through to the
    numeric comparison (missing elif) and produced a spurious minimum-value
    error; duplicate items were mis-numbered via list.index(). Item numbers
    are now 1-based, matching ValidatedMultiStringCtrl.

    :param value: string or list containing entered value(s)
    :return: checked values or error
    """
    is_none = self._allow_none_elements
    is_auto = self._allow_auto_elements

    if (str(value).lower() == 'none' and is_none) or \
            (str(value).lower() == 'auto' and is_auto):
      return value

    if isinstance(value, str):
      values = value.strip().split(' ')
    else:
      assert type(value) in (list, tuple)
      values = value

    if len(values) < self._size_min:
      raise ValueError("Need a minimum of {} values in this field!"
                       "".format(self._size_min))
    if len(values) > self._size_max:
      raise ValueError("Cannot have more than {} values in this field!"
                       "".format(self._size_max))

    # Iterate through values and check format for each; error message will be
    # a summary of all found errors
    errors = []
    new_values = []
    for i, item in enumerate(values):
      idx = i + 1  # 1-based item number for error messages
      err = None
      suggested_type, item = self.determine_type(item,
                                                 is_none=is_none,
                                                 is_auto=is_auto)
      new_values.append(item)
      if (item is None and is_none) or (item is Auto and is_auto):
        pass
      elif isinstance(item, str):
        if (item.lower() == 'none' and is_none) or \
                (item.lower() == 'auto' and is_auto):
          pass
        else:
          err = "String entries are not allowed! Enter {}." \
                "".format(suggested_type)
      else:
        if item > self.GetMaxValue():
          err = "Value ({}) must be less than the maximum of {}." \
                "".format(item, self.GetMaxValue())
        elif item < self.GetMinValue():
          err = "Value ({}) must be more than the minimum of {}." \
                "".format(item, self.GetMinValue())
      if err:
        errors.append(' Item #{}: {}'.format(idx, err))

    # Raise Value Error if any errors are found
    if errors:
      error_msg = 'Error(s) found!\n{}'.format('\n'.join(errors))
      raise ValueError(error_msg)
    return new_values
class ValidatedUnitCellCtrl(ValidatedTextCtrl):
  """ Validated text control for unit cell entries """

  def __init__(self, *args, **kwargs):
    super(ValidatedUnitCellCtrl, self).__init__(*args, **kwargs)

  def CheckFormat(self, value):
    """ Check that the entry is a valid unit cell notation
    Args:
      value: Unit cell parameters
    Returns: value if validated, raises ValueError if not
    """
    value = makenone(value)
    if not value:
      return str(None)

    # Normalize every supported delimiter to a space, so mixed-delimiter
    # entries are handled uniformly
    cell_string = value
    for delimiter in (',', ';', '|', '-', '/'):
      cell_string = cell_string.replace(delimiter, ' ')
    params = cell_string.rsplit()

    error_msg = 'Invalid unit cell entry {}'.format(value)

    # Validation only checks for six parameters; the correctness of the
    # unit cell itself is up to the user
    if len(params) != 6:
      raise ValueError('{}: should be six parameters!'.format(error_msg))

    # Each parameter must be convertible to a float (i.e. all parameters
    # are actually numbers)
    try:
      params = [float(p) for p in params]
    except ValueError as e:
      if 'invalid literal' in e.message.lower():
        raise ValueError('{}: unit cell should only contain '
                         'numbers'.format(error_msg))
      raise ValueError('{}: {}'.format(error_msg, e.message))

    return str(' '.join([str(p) for p in params]))
class ValidatedSpaceGroupCtrl(ValidatedTextCtrl):
  """ Validated text control for space group entries """

  def __init__(self, *args, **kwargs):
    super(ValidatedSpaceGroupCtrl, self).__init__(*args, **kwargs)

  def CheckFormat(self, value):
    """ Check that the entry is a valid space group notation
    Args:
      value: Space group symbol and/or number
    Returns: value if validated, raises ValueError if not
    """
    # Validity is established by attempting to build a symmetry object;
    # an invalid symbol or number makes the constructor fail
    from cctbx import crystal
    value = makenone(value)  # Convert to Nonetype if None or Null string
    if not value:
      return str(None)
    try:
      sym = crystal.symmetry(space_group_symbol=str(value))
    except RuntimeError as e:
      raise ValueError('Invalid space group entry:\n{}'.format(e))
    # Return the space group symbol even if a number was entered
    return str(sym.space_group_info())
# ------------------------------- PHIL Buttons ------------------------------- #
class PHILDialogButton(ct.IOTAButton):
  """ Button that launches a wx.Dialog auto-populated with PHIL settings.
  Will also take out a specified scope from the PHILIndex that resides in
  the master window. A selection of scopes to show (while hiding all the
  rest) can also be provided. """

  def __init__(self, parent, scope, phil_index, name=None,
               expert_level=None, title=None, *args, **kwargs):
    """ Constructor
    :param parent: parent control (here, likely a panel)
    :param scope: PHIL scope (or list of scopes if the scope is multiple)
                  shown in the dialog
    :param phil_index: PHILIndex used to look up master scopes
    :param name: name for the scope; if None, derived from the scope
    :param expert_level: expert level cutoff for the dialog
    :param title: custom button/dialog title; if None, derived from the
                  scope's caption, alias, or name
    :param args: Any other arguments
    :param kwargs: Any other keyword arguments
    """
    self.parent = parent
    self.is_dlg_button = True
    self.is_scope = False
    self.is_definition = False
    self.scope = scope
    self.phil_index = phil_index
    # NOTE(review): _set_attributes() below overwrites self.name with the
    # scope's own name, so the name argument is effectively ignored —
    # confirm intent
    self.name = name
    self.expert_level = expert_level
    self.title = title

    ct.IOTAButton.__init__(self, parent, handler_function=self.open_phil_dialog,
                           *args, **kwargs)

    # Set attributes from scope(s) and set the button label
    self._set_attributes()
    # NOTE(review): if none of the title fallbacks in _set_attributes()
    # applied, self.title is still None and this concatenation would raise —
    # confirm upstream always provides a usable caption/alias/name
    label = self.title + '...'
    self.SetLabel(label)

  def _set_attributes(self):
    """ Derive full_path, name, and title from the (first) scope """
    if isinstance(self.scope, list):  # means it's a multiple!
      scp = self.scope[0]
    else:
      scp = self.scope

    # Determine overall expert level
    self.full_path = scp.full_path()
    self.name = scp.name
    if not self.name:
      if self.full_path:
        self.name = self.full_path.split('.')[-1]
      else:
        self.name = 'phil_options'

    # Create button and dialog title from variable name or alias
    if not self.title:
      if scp.short_caption:
        self.title = scp.short_caption
      elif scp.alias:
        self.title = scp.alias.capitalize()
      elif self.name and not self.name.isspace():
        self.title = self.name.replace('_', ' ').capitalize()

  def get_phil_strings(self):
    """ Return the scope(s) as a flat list of PHIL-formatted lines """
    if isinstance(self.scope, list):
      phil_strings = []
      for scp in self.scope:
        scp_strings = scp.as_str().split('\n')
        phil_strings.extend(scp_strings)
    else:
      phil_strings = self.scope.as_str().split('\n')
    return phil_strings

  def open_phil_dialog(self, e):
    """ Generate a PHIL Dialog from scope; on_OK, the dialog will generate a
    PHIL object that can be used to update the button scope """
    with PHILDialog(parent=self.parent,
                    name=self.name,
                    scope=self.scope,
                    phil_index=self.phil_index,
                    title=self.title) as phil_dlg:
      if phil_dlg.ShowModal() == wx.ID_OK:
        self.scope = phil_dlg.scope
# ------------------------------ PHIL Controls ------------------------------- #
class PHILFileListCtrl(ct.FileListCtrl, gui.IOTADefinitionCtrl):
  """ File list control for multi-path PHIL definitions (e.g. input files).

  Wraps ct.FileListCtrl and renders its items as PHIL 'name = path' lines.
  """

  def __init__(self, parent, phil_object, value=None, *args, **kwargs):
    """ Constructor
    :param parent: parent widget
    :param phil_object: PHIL definition backing this control
    :param value: initial path or list of paths
    :param extras: optional dict kwarg with file_types, folder_types,
                   data_types, and input_filter settings
    """
    extras = kwargs.pop('extras', None)
    if extras:
      file_types = extras.get('file_types', None)
      folder_types = extras.get('folder_types', None)
      data_types = extras.get('data_types', None)
      input_filter = extras.get('input_filter', None)
    else:
      # Bugfix: input_filter was previously left unassigned on this branch,
      # causing a NameError in the FileListCtrl constructor call below
      file_types = folder_types = data_types = input_filter = None
    ct.FileListCtrl.__init__(self, parent=parent,
                             size=(600, 300),
                             file_types=file_types,
                             folder_types=folder_types,
                             data_types=data_types,
                             input_filter=input_filter
                             )
    gui.IOTADefinitionCtrl.__init__(self, phil_object=phil_object)
    self.multi_definition = False
    self.SetValue(value)

  def update_from_phil(self):
    """ Reload this control's paths from the current PHIL index """
    new_values = self.parent.phil_index.get_value(path=self.full_path)
    self.ResetValue(value=new_values)

  def ResetValue(self, value=None):
    """ Clear all items and repopulate from value """
    self.delete_all()
    self.SetValue(value)

  def SetValue(self, value=None):
    """ Add one list item per non-empty path in value (string or list) """
    if isinstance(value, list):
      values = [v for v in value if v]
      if values:
        for v in values:
          self.add_item(path=v)
    else:
      if value:
        self.add_item(path=value)

  def set_background(self):
    # No background highlighting for this control
    pass

  def GetPHIL(self, full_path=False, indent_length=2):
    """ Overridden because FileListCtrl is tricky """
    idxs = self.ctr.GetItemCount()
    inputs = [self.ctr.GetItemData(i).path for i in range(idxs)]
    phil_strings = []
    # Indent each line according to the definition's depth in the PHIL tree
    indent = len(self.full_path.split('.')) * indent_length
    for inp in inputs:
      value_string = '{} = {}'.format(self.name, inp)
      phil_string = '{:{ind}}{}'.format(' ', value_string, ind=indent)
      phil_strings.append(phil_string)
    return '\n'.join(phil_strings)
class PHILPathCtrl(PHILDefPanel):
  """ Control for the PHIL path type """

  def __init__(self, parent, phil_object, label='', style=None, value=None,
               label_size=wx.DefaultSize, *args, **kwargs):
    """ Constructor
    :param parent: parent widget
    :param phil_object: PHIL definition backing this control
    :param label: control label
    :param style: PHIL style object carrying path/permissions/defaultfile/
                  wildcard attributes; may be None
    :param value: initial path value
    :param label_size: size of the label widget
    """
    cols = kwargs.pop('cols', 4)
    vgap = kwargs.pop('vgap', 0)
    PHILDefPanel.__init__(self, parent=parent, phil_object=phil_object,
                          label_size=label_size, cols=cols, vgap=vgap,
                          *args, **kwargs)

    # Extract relevant styles. getattr guards against style=None (the
    # signature default), which previously crashed on style.path.
    path_style = getattr(style, 'path', None)
    permissions = getattr(style, 'permissions', None)
    self.is_file = 'file' in path_style if path_style else False
    self.is_folder = 'folder' in path_style if path_style else False
    self.read = 'read' in permissions if permissions else True
    self.write = 'write' in permissions if permissions else True

    # If neither read-only nor write-only is specified, read/write is enabled
    if not self.read and not self.write:
      self.read = self.write = True

    # If file *and* folder are set to True (unlikely), self.is_folder = True
    if not self.is_file and not self.is_folder:
      self.is_folder = True
    elif self.is_file and self.is_folder:
      print ('IOTA PHIL DEBUG: {} specified as both file and folder! Setting '
             'to folder...'.format(phil_object.full_path()))
      self.is_file = False

    # Set defaultfile and wildcard parameters for file dialog
    if self.is_file:
      self.defaultfile = getattr(style, 'defaultfile', None) or '*'
      self.wildcard = getattr(style, 'wildcard', None) or '*'
    else:
      self.defaultfile = '*'
      self.wildcard = '*'

    # Create path control
    self.ctr = ValidatedPathCtrl(self, value=value)
    self.SetValue(value=phil_object)
    self.ctrl_sizer.add_widget_and_label(widget=self.ctr, label=label,
                                         expand=True, label_size=label_size)
    self.ctrl_sizer.add_growable(cols=[3])

    # Create browse and view buttons
    self.btn_browse = wx.Button(self, label='...', style=wx.BU_EXACTFIT)
    viewmag_bmp = bitmaps.fetch_icon_bitmap('actions', 'viewmag', size=16)
    self.btn_mag = wx.BitmapButton(self, bitmap=viewmag_bmp)
    self.ctrl_sizer.add_widget(self.btn_browse)
    self.ctrl_sizer.add_widget(self.btn_mag)

    # Bindings
    self.Bind(wx.EVT_BUTTON, self.OnBrowse, self.btn_browse)
    # Bugfix: the magnifier button previously had no binding, so it did
    # nothing when clicked
    self.Bind(wx.EVT_BUTTON, self.onMagButton, self.btn_mag)

  def SetValue(self, value):
    """ Accept either a PHIL object or a plain path string """
    try:
      self.SetStringValue(phil_object=value)
    except AttributeError:
      self.ctr.SetValue(str(value))

  def SetStringValue(self, phil_object):
    """ Extract the path from a PHIL object and show it in the control """
    value = self.value_from_words(phil_object=phil_object)[0]
    self.ctr.SetValue(str(value))

  def GetStringValue(self):
    return self.ctr.GetValue()

  def OnBrowse(self, e):
    """ Open the appropriate browse dialog; when the path may be either a
    file or a folder, offer a popup menu with both options """
    if self.is_file:
      self._open_file_dialog()
    elif self.is_folder:
      self._open_folder_dialog()
    else:
      command_list = [('Browse files...',
                       lambda evt: self._open_file_dialog()),
                      ('Browse folders...',
                       lambda evt: self._open_folder_dialog())]
      browse_menu = ct.Menu(self)
      browse_menu.add_commands(command_list)
      self.PopupMenu(browse_menu)
      browse_menu.Destroy()
    self.ctr.Validate()
    e.Skip()

  def onMagButton(self, e):
    """ Show a read-only view of the current folder """
    dirview = DirView(self, title='Current Folder')
    if dirview.ShowModal() == wx.ID_OK:
      dirview.Destroy()
    e.Skip()

  def _open_folder_dialog(self):
    dlg = wx.DirDialog(self, "Choose folder:", style=wx.DD_DEFAULT_STYLE)
    if dlg.ShowModal() == wx.ID_OK:
      self.ctr.SetValue(dlg.GetPath())
    dlg.Destroy()

  def _open_file_dialog(self):
    dlg = wx.FileDialog(
      self, message="Choose file",
      defaultDir=os.curdir,
      defaultFile=self.defaultfile,
      wildcard=self.wildcard,
      style=wx.FD_OPEN | wx.FD_CHANGE_DIR
    )
    if dlg.ShowModal() == wx.ID_OK:
      filepath = dlg.GetPaths()[0]
      self.ctr.SetValue(filepath)
class PHILStringCtrl(PHILDefPanel):
  """ Control for the PHIL string type """

  def __init__(self, parent, phil_object, control=None, label='', value=None,
               label_size=wx.DefaultSize, *args, **kwargs):
    """ Constructor
    :param parent: parent widget
    :param phil_object: PHIL definition backing this control
    :param control: text control class to instantiate; defaults to
                    ValidatedStringCtrl
    :param label: control label
    :param value: initial value
    :param label_size: size of the label widget
    """
    vgap = kwargs.pop('vgap', 0)
    PHILDefPanel.__init__(self,
                          parent=parent,
                          phil_object=phil_object,
                          label_size=label_size,
                          vgap=vgap,
                          *args, **kwargs)
    ctrl_class = control if control else ValidatedStringCtrl
    self._create_control(control=ctrl_class, value=value)
    self._place_control(label, label_size)

  def _create_control(self, control, value=None):
    # Instantiate the text control and validate the starting value
    self.ctr = control(self, value=value)
    self.ctr.Validate()

  def _place_control(self, label, label_size):
    # Lay out the control next to its label and let it stretch horizontally
    self.ctrl_sizer.add_widget_and_label(widget=self.ctr, label=label,
                                         expand=True, label_size=label_size)
    self.ctrl_sizer.add_growable(cols=[3])

  def GetStringValue(self):
    return self.ctr.GetValue()

  def SetValue(self, value=None):
    reformatted = self.ReformatValue(value, raise_error=False)
    self.ctr.SetStringValue(value=reformatted)
    self.ctr.Validate()
class PHILMultiStringCtrl(PHILDefPanel):
  """ Control for the PHIL ints and floats (multiple numbers) types """

  def __init__(self, parent, phil_object, label='', value=None,
               label_size=wx.DefaultSize, *args, **kwargs):
    """ Constructor
    :param parent: parent widget
    :param phil_object: PHIL definition backing this control
    :param label: control label
    :param value: initial value (string or iterable)
    :param label_size: size of the label widget
    """
    vgap = kwargs.pop('vgap', 0)
    PHILDefPanel.__init__(self, parent=parent, phil_object=phil_object,
                          vgap=vgap, *args, **kwargs)

    # Build the multi-string text control and validate the starting value
    self.ctr = ValidatedMultiStringCtrl(self, value=value)
    self.ctr.Validate()

    # Lay out the control next to its label and let it stretch horizontally
    self.ctrl_sizer.add_widget_and_label(self.ctr, label=label,
                                         label_size=label_size, expand=True)
    self.ctrl_sizer.add_growable(cols=[3])

  def SetValue(self, value):
    if isinstance(value, str):
      value = value.split()
    elif type(value) not in (list, tuple):
      raise ValueError('IOTA GUI Error: Multi-string Control: Value should be a '
                       'string or an iterable!')
    reformatted = self.ReformatValue(value, raise_error=False)
    self.ctr.SetStringValue(value=reformatted)
    self.ctr.Validate()

  def GetStringValue(self):
    return self.ctr.GetValue()

  def is_default(self):
    """ Compare the control's contents to the PHIL default,
    case-insensitively and with iterables space-joined """
    default = self.default_value
    if type(default) in (list, tuple):
      default_value = ' '.join([str(v).lower() for v in default])
    else:
      default_value = str(default).lower()
    return default_value == self.GetStringValue().lower()
class PHILSpaceGroupCtrl(PHILStringCtrl):
  """ Control for the PHIL Space Group, subclassed from PHILStringCtrl """

  def __init__(self, parent, phil_object, label='', value=None,
               label_size=wx.DefaultSize, *args, **kwargs):
    # Same as the base string control, but with space-group validation
    PHILStringCtrl.__init__(self, parent,
                            phil_object=phil_object,
                            control=ValidatedSpaceGroupCtrl,
                            label=label,
                            label_size=label_size,
                            value=value,
                            *args, **kwargs)
class PHILUnitCellCtrl(PHILStringCtrl):
  """ Control for the PHIL Unit Cell, subclassed from PHILStringCtrl """

  def __init__(self, parent, phil_object, label='', value=None,
               label_size=wx.DefaultSize, *args, **kwargs):
    # Same as the base string control, but with unit-cell validation
    PHILStringCtrl.__init__(self, parent,
                            control=ValidatedUnitCellCtrl,
                            phil_object=phil_object,
                            label=label,
                            label_size=label_size,
                            value=value,
                            *args, **kwargs)
class PHILBaseChoiceCtrl(PHILDefPanel):
  """ Choice control for PHIL choice item, with label """

  def __init__(self, parent,
               phil_object,
               label='',
               label_size=wx.DefaultSize,
               ctrl_size=wx.DefaultSize,
               *args, **kwargs):
    """ Constructor
    :param parent: parent object
    :param label: choice control label
    :param label_size: size of choice control label
    :param label_style: normal, bold, italic, or italic_bold
    :param ctrl_size: size of choice control
    """
    # Initialize the base class
    PHILDefPanel.__init__(self, parent=parent, phil_object=phil_object,
                          label_size=label_size, *args, **kwargs)
    # Populated by SetChoices(): the real choice values (None for '---')
    self._options = None

    # Set choice control
    self.ctr = wx.Choice(self, size=ctrl_size)
    self.ctrl_sizer.add_widget_and_label(widget=self.ctr, label=label,
                                         label_size=label_size)

    self.Bind(wx.EVT_CHOICE, self.onChoice, self.ctr)

  def is_default(self):
    """ True if the current selection equals the PHIL default """
    default_value = self.default_value
    control_value = self.GetPHILValue()
    # Sometimes None is replaced with '---' in PHIL choice controls
    if control_value == '---':
      control_value = None
    return str(control_value) == str(default_value)

  def onChoice(self, e):
    self.set_background()

  def SetChoices(self, choices, captions=None, value=None, allow_none=True):
    ''' Insert choices into the control
    :param choices: list of choices (must be list or tuple)
    :param value: definition value from PHIL object
    :param captions: list of captions (optional)
    :param allow_none: allow no selection
    '''
    # Determine selection: either match the supplied value against the
    # asterisk-stripped choices, or fall back to the PHIL-style '*' marker
    selection = None
    if isinstance(value, list):
      value = value[0]
    if value:
      is_selected = [(choice.replace('*', '') == value) for choice in choices]
    else:
      is_selected = [("*" in choice) for choice in choices]
    if True in is_selected:
      selection = is_selected.index(True)

    # Strip asterisk(s) from list of choices
    choices = [choice.replace("*", "") for choice in choices]

    # Apply or create captions and set selection
    if captions is None:
      captions = list(choices)
    if len(captions) != len(choices):
      raise RuntimeError("Wrong number of caption items for {}\n"
                         "Choices: {}\n"
                         "Captions: {}"
                         "".format(self.name,
                                   '\n'.join(choices),
                                   '\n'.join(captions)))

    # Add a dashed line if parameter is optional (or None is an option)
    if allow_none:
      if choices[0] is None or choices[0].lower() == 'none':
        # Reuse the existing 'none' entry as the dashed placeholder
        captions[0] = '---'
        choices[0] = None
      elif self.IsOptional():
        captions.insert(0, "---")
        choices.insert(0, None)
        # Increment selection to account for item insertion
        if selection is not None:
          selection += 1

    # Sometimes selection may be None; if so, set it to zero
    if selection is None:
      selection = 0

    # Set options, captions, and selection
    self._options = choices
    self.ctr.SetItems(captions)
    self.ctr.SetSelection(selection)

  def SetValue(self, value):
    ''' Set choice selection to specific value '''
    if value not in self._options:
      raise Sorry('Value {} not found! Available choices are:\n{}'
                  ''.format(value, '\n'.join(self._options)))
    selection = self._options.index(value)
    self.ctr.SetSelection(selection)

  def GetValue(self):
    # Deliberately disabled; the PHIL value accessor must be used instead
    raise NotImplementedError("Please use GetPhilValue()")

  def GetPHILValue(self):
    """Returns a single string."""
    return self._options[self.ctr.GetSelection()]

  def GetStringValue(self):
    """Returns the long format (all choices, '*' denotes selected)."""
    selection = self.ctr.GetSelection()
    choices_out = []
    for i, choice in enumerate(self._options):
      if choice is None:
        continue
      elif i == selection:
        choices_out.append("*" + choice)
      else:
        choices_out.append(choice)
    return " ".join(choices_out)
class PHILChoiceCtrl(PHILBaseChoiceCtrl):
  """ Standard dropdown for PHIL choice definitions, with choices taken
  directly from the definition's words """

  def __init__(self, parent, phil_object, label='', captions=None, value=None,
               *args, **kwargs):
    super(PHILChoiceCtrl, self).__init__(parent=parent,
                                         phil_object=phil_object,
                                         label=label,
                                         *args, **kwargs)
    self.SetChoices(choices=[str(word) for word in phil_object.words],
                    captions=captions, value=value)

  def GetValue(self):
    raise NotImplementedError("Please use GetPhilValue()")
class PHILTriBoolCtrl(PHILChoiceCtrl):
  """ Three-way boolean control: returns True, False, or None. Currently used as
  the option if a boolean PHIL definition has a default value of None.
  PHIL definitions with default values of Auto or True/False are
  automatically made as wx.CheckBox controls. That can be overridden by
  specifying 'tribool' in a definition's style card.
  """

  def __init__(self, parent, phil_object, label='', value=None,
               *args, **kwargs):
    super(PHILTriBoolCtrl, self).__init__(parent=parent,
                                          phil_object=phil_object,
                                          label=label,
                                          value=value,
                                          *args, **kwargs)
    # Replace whatever choices the base class installed with the fixed
    # tri-state set (index 0=None, 1=True, 2=False)
    self._options = None
    self.SetOptional(True)
    self.SetChoices(choices=['None', 'Yes', 'No'], value=value,
                    allow_none=False)

  def SetValue(self, value):
    if value is True:
      selection = 1
    elif value is False:
      selection = 2
    else:
      # None and Auto both map to the 'None' entry
      assert value in [None, Auto]
      selection = 0
    self.ctr.SetSelection(selection)

  def GetValue(self):
    return self.GetPhilValue()

  def GetPhilValue(self):
    # Selection index maps directly onto the tri-state values
    return [None, True, False][self.ctr.GetSelection()]

  def GetStringValue(self):
    return str(self.GetPhilValue())
class PHILNumberCtrl(PHILDefPanel):
  """ Control for the PHIL int and float types """

  def __init__(self, parent, phil_object, label='', value=None,
               label_size=wx.DefaultSize, *args, **kwargs):
    """ Constructor

    :param parent: parent GUI element
    :param phil_object: PHIL definition (int or float type)
    :param label: control label
    :param value: initial value
    :param label_size: size of the label portion of the control
    """
    vgap = kwargs.pop('vgap', 0)
    PHILDefPanel.__init__(self, parent=parent, phil_object=phil_object,
                          vgap=vgap, *args, **kwargs)
    self.ctr = ValidatedNumberCtrl(self,
                                   as_type=phil_object.type.phil_type,
                                   min=phil_object.type.value_min,
                                   max=phil_object.type.value_max,
                                   value=value)
    self.ctr.Validate()  # Validate to make sure the input value is legit
    if hasattr(phil_object.type, 'allow_none'):
      self.SetOptional(optional=phil_object.type.allow_none)
    self.ctrl_sizer.add_widget_and_label(self.ctr, label=label,
                                         label_size=label_size, expand=True)
    self.ctrl_sizer.add_growable(cols=[2])

  def GetStringValue(self):
    """ Extract value as a string """
    return self.ctr.GetValue()

  def SetValue(self, value):
    """ Reformat and push *value* into the control, then re-validate """
    value = self.ReformatValue(value, raise_error=False)
    self.ctr.SetStringValue(value=value)
    self.ctr.Validate()

  @staticmethod
  def _as_number(text):
    """ Convert a string to float when possible; otherwise return it
        unchanged (e.g. 'None', 'Auto').

        The original used str.isdigit(), which is False for decimals
        ('1.5'), negatives ('-1') and exponents ('1e3'), so most numeric
        values fell back to an inexact string comparison. """
    try:
      return float(text)
    except (TypeError, ValueError):
      return text

  def is_default(self):
    """ Compare the control value against the PHIL default numerically,
        so that e.g. '1.50' and '1.5' compare equal (None/Auto still
        compare as strings) """
    default_value = self._as_number(str(self.default_value))
    control_value = self._as_number(str(self.GetStringValue()))
    return control_value == default_value
class PHILMultiNumberCtrl(PHILDefPanel):
  """ Control for the PHIL ints and floats (multiple numbers) types """

  def __init__(self, parent, phil_object, label='', value=None,
               label_size=wx.DefaultSize, *args, **kwargs):
    """ Constructor

    :param parent: parent GUI element
    :param phil_object: PHIL definition (ints or floats type)
    :param label: control label
    :param value: initial value(s)
    :param label_size: size of the label portion of the control
    """
    vgap = kwargs.pop('vgap', 0)
    PHILDefPanel.__init__(self, parent=parent, phil_object=phil_object,
                          vgap=vgap, *args, **kwargs)
    # make control
    self.ctr = ValidatedMultiNumberCtrl(
      self,
      as_type=phil_object.type.phil_type,
      min=phil_object.type.value_min,
      max=phil_object.type.value_max,
      size_min=phil_object.type.size_min,
      size_max=phil_object.type.size_max,
      value=value
    )
    # Validate to make sure the input value is legit
    self.ctr.Validate()
    # place control in sizer
    self.ctrl_sizer.add_widget_and_label(self.ctr, label=label,
                                         label_size=label_size, expand=True)
    self.ctrl_sizer.add_growable(cols=[2])

  def GetStringValue(self):
    """ Extract value as a string """
    return self.ctr.GetValue()

  def SetValue(self, value):
    """ Reformat and push value(s) into the control, then re-validate """
    if isinstance(value, str):
      value = value.split()
    value = self.ReformatValue(value, raise_error=False)
    self.ctr.SetStringValue(value=value)
    self.ctr.Validate()

  @staticmethod
  def _as_number(text):
    """ Convert a string to float when possible; otherwise return it
        unchanged (e.g. 'None', 'Auto'). Handles decimals, negatives and
        exponents, which the original str.isdigit() test did not. """
    try:
      return float(text)
    except (TypeError, ValueError):
      return text

  def is_default(self):
    """ Compare control values against the PHIL defaults element-wise """
    # Convert strings into lists for comparison
    if isinstance(self.default_value, str):
      default_values = self.default_value.split()
    else:
      default_values = self.default_value
    control_values = self.GetStringValue().split()
    if default_values is not None:
      # Positional comparison. The original looked each default up with
      # list.index(), which returns the *first* occurrence, so duplicate
      # default values were compared against the wrong control slot (and
      # mismatches were missed); it also raised IndexError when the
      # control held fewer values than the default.
      if len(control_values) != len(default_values):
        return False
      for dv, cv in zip(default_values, control_values):
        if self._as_number(str(dv)) != self._as_number(str(cv)):
          return False
      return True
    else:
      # No default: a single control token compares as a scalar and an
      # empty control compares as None
      if len(control_values) == 1:
        control_values = control_values[0]
      elif len(control_values) == 0:
        control_values = None
      return str(default_values) == str(control_values)
class PHILCheckBoxCtrl(PHILDefPanel):
  """ Checkbox control for PHIL bool item (with tribool option as default;
      PHIL style has to be set to noauto in order to make this a regular
      bool) """

  def __init__(self, parent, phil_object, label='', style=None,
               label_size=wx.DefaultSize, value=False, *args, **kwargs):
    """ Constructor

    :param parent: parent GUI element
    :param phil_object: PHIL definition (bool type)
    :param label: checkbox label
    :param style: PHIL style object (may be None)
    :param label_size: size of the label portion of the control
    :param value: initial checkbox state
    """
    vgap = kwargs.pop('vgap', 0)
    PHILDefPanel.__init__(self, parent=parent, phil_object=phil_object,
                          label_size=label_size, vgap=vgap, *args, **kwargs)
    self.style = style
    # Guard against style=None (the parameter default); the original read
    # style.scope_switch unconditionally and raised AttributeError
    self.scope_switch = self.style.scope_switch if self.style else False
    # A PHIL default of Auto gets a three-state checkbox (checked /
    # unchecked / undetermined); otherwise a plain two-state one
    if self.default_value is Auto:
      self.ctr = wx.CheckBox(self, label=label,
                             style=wx.CHK_ALLOW_3RD_STATE_FOR_USER |
                                   wx.CHK_3STATE)
    else:
      self.ctr = wx.CheckBox(self, label=label)
    self.SetValue(value)
    if label == '':
      border = 0
    else:
      border = 5
    self.ctrl_sizer.add_widget(widget=self.ctr, border=border)
    self.Bind(wx.EVT_CHECKBOX, self.onChangeValue, self.ctr)

  def GetStringValue(self):
    """ Extract value as a string """
    return str(self.GetValue())

  def onChangeValue(self, e):
    """ Event handler: refresh background and propagate scope switches """
    self.set_background()
    if self.scope_switch:
      self.parent.check_scope_switches()

  def SetValue(self, value=None):
    """ Set checkbox state, None is interpreted as Auto if either is allowed """
    if value in (None, Auto):
      assert (self.ctr.Is3State())
      self.ctr.Set3StateValue(wx.CHK_UNDETERMINED)
    else:
      value = bool(value)
      if self.ctr.Is3State():
        if value:
          self.ctr.Set3StateValue(wx.CHK_CHECKED)
        else:
          self.ctr.Set3StateValue(wx.CHK_UNCHECKED)
      else:
        self.ctr.SetValue(value)

  def GetValue(self):
    """ Get checkbox state: True/False, or Auto for the undetermined state """
    if self.ctr.Is3State():
      value = self.ctr.Get3StateValue()
      if value == wx.CHK_UNDETERMINED:
        return Auto
      else:
        return value == wx.CHK_CHECKED
    else:
      return self.ctr.GetValue()
class WidgetFactory(object):
  ''' Class that will automatically make widgets for automated dialog making '''

  # Mapping of PHIL type string -> widget class used to render it
  widget_types = {
    'path'        : PHILPathCtrl        ,
    'str'         : PHILStringCtrl      ,
    'strings'     : PHILMultiStringCtrl ,
    'choice'      : PHILChoiceCtrl      ,
    'number'      : PHILNumberCtrl      ,
    'numbers'     : PHILMultiNumberCtrl ,
    'bool'        : PHILCheckBoxCtrl    ,
    'tribool'     : PHILTriBoolCtrl     ,
    'space_group' : PHILSpaceGroupCtrl  ,
    'unit_cell'   : PHILUnitCellCtrl
  }

  def __init__(self):
    pass

  @staticmethod
  def make_widget(parent,
                  phil_object,
                  label_size=wx.DefaultSize,
                  widget_types=widget_types,
                  *args, **kwargs):
    """ Create the appropriate control for a single PHIL definition

    :param parent: parent GUI element
    :param phil_object: PHIL definition to render
    :param label_size: size of the label portion of the control
    :param widget_types: mapping of PHIL type -> widget class
    :return: the instantiated widget
    """
    style = kwargs.pop('style', None)
    value = kwargs.pop('value', None)
    label = kwargs.pop('label', None)
    wtype = phil_object.type.phil_type
    # Auto-generate a user-facing label from the PHIL alias or full path
    # when one was not supplied
    if label is None:
      alias = phil_object.alias_path()
      if alias:
        label = alias
      else:
        label = phil_object.full_path().split('.')[-1]
      label = label.replace('_', ' ').capitalize()
      if wtype == 'bool':
        label_size = wx.DefaultSize
      else:
        label += ": "
    # Collapse numeric PHIL types onto the generic number controls; promote
    # bools to tri-state when requested by style or implied by a None value
    if wtype in ('int', 'float'):
      wtype = 'number'
    elif wtype in ('ints', 'floats'):
      wtype = 'numbers'
    elif (style and style.tribool) or (wtype == 'bool' and value is None):
      # Guard against style=None (popped default above); the original read
      # style.tribool unconditionally and raised AttributeError
      wtype = 'tribool'
    if wtype == 'path' and (style and style.input_list):
      widget = PHILFileListCtrl(parent=parent,
                                phil_object=phil_object,
                                value=value,
                                *args, **kwargs)
    else:
      # Unknown PHIL types fall back to a plain string control
      widget_ctrl = widget_types.get(wtype, PHILStringCtrl)
      widget = widget_ctrl(parent=parent,
                           phil_object=phil_object,
                           label=label,
                           label_size=label_size,
                           value=value,
                           style=style,
                           *args, **kwargs)
    return widget
# ----------------------------- PHIL Controls -------------------------------- #
class PHILDialogPanel(PHILBaseDialogPanel):
  """ Panel automatically created from PHIL settings """

  # Class-level defaults for per-widget-type extra settings; copied to the
  # instance in __init__ so per-dialog updates do not leak across instances
  _str_extras = {}
  _path_extras = {}
  _number_extras = {}
  _unit_cell_extras = {}
  _space_group_extras = {}
  _choice_extras = {}
  _checkbox_extras = {}

  def __init__(self, parent, scope, *args, **kwargs):
    """ Constructor

    :param parent: parent GUI element
    :param scope: PHIL scope from which to build the panel
    """
    # Pop the per-widget-type extras from kwargs and merge them into
    # instance-level copies. The original called update() on the class-level
    # dicts directly, which permanently mutated shared class state and
    # leaked extras into every other panel instance.
    for key in ('str', 'path', 'number', 'unit_cell', 'space_group',
                'choice', 'checkbox'):
      attr_name = '_{}_extras'.format(key)
      merged = dict(getattr(self, attr_name))
      extras = kwargs.pop('{}_extras'.format(key), None)
      if extras:
        merged.update(extras)
      setattr(self, attr_name, merged)
    super(PHILDialogPanel, self).__init__(parent, scope, *args, **kwargs)
    self.scope = scope
    # Fall back to the window's PHILIndex if one was not set by the base class
    if not self.phil_index:
      if hasattr(self.window, 'phil_index'):
        self.phil_index = self.window.phil_index
      else:
        raise Sorry('IOTA PHIL ERROR: PHILIndex not found!')
    # Recurse through scope and create controls and widgets
    self.construct_panel()
    # Redraw the window to show/hide panels based on expert level
    self.redraw_by_expert_level()
class PHILDialog(PHILBaseDialog):
  """ Dialog auto-populated with PHIL settings """
  def __init__(self, parent, scope, phil_index, name=None, title=None,
               *args, **kwargs):
    """ Constructor
    :param parent: parent GUI element
    :param name: name of the PHIL scope rendered by this dialog
    :param scope: PHIL scope from which to build the dialog
    :param phil_index: index object used to look up and update PHIL settings
    :param title: dialog title; defaults to a prettified version of name
    """
    self.phil_index = phil_index
    self.parent = parent
    self.name = name
    # Fall back to the parent's name when no name is supplied
    if not self.name:
      self.name = parent.name
    if not title:
      title = self.name.replace('_', ' ').capitalize()
    super(PHILDialog, self).__init__(parent, title=title, *args, **kwargs)
    # A multiple-capable scope is wrapped in a list so it is rendered by the
    # multi-scope panel below
    if not isinstance(scope, list) and scope.multiple:
      self.scope = [scope]
    else:
      self.scope = scope
    if isinstance(self.scope, list):
      self._scope_paths = list(set([s.full_path() for s in self.scope]))
      self.phil_panel = PHILMultiScopePanel(self,
                                            scope=self.scope,
                                            phil_index=self.phil_index)
    else:
      self._scope_paths = [self.scope.full_path()]
      self.phil_panel = PHILDialogPanel(self,
                                        scope=self.scope,
                                        phil_index=self.phil_index)
    self.phil_sizer.add_panel(self.phil_panel)
    # Dialog control
    self.dlg_ctr = ct.DialogButtonsCtrl(self, preset='PHIL_DIALOG')
    # NOTE(review): wx warns when wx.EXPAND is combined with wx.ALIGN_RIGHT;
    # confirm the intended alignment
    self.envelope.Add(self.dlg_ctr,
                      flag=wx.EXPAND | wx.ALIGN_RIGHT | wx.ALL,
                      border=10)
    # Set up size and scrolling (adjust size if auto-fit dialog is too big
    # for screen)
    self.Fit()
    self.phil_panel.SetupScrolling()
    self.size_and_place()
    self.Layout()
    # Bindings
    self.Bind(wx.EVT_BUTTON, self.OnOkay, id=wx.ID_OK)
    self.Bind(wx.EVT_BUTTON, self.OnCancel, id=wx.ID_CANCEL)
    self.Bind(wx.EVT_CHOICE, self.OnExpertLevel, self.dlg_ctr.choice)
  def OnOkay (self, event):
    """ Check for saved errors and pop a warning if any are found (user
    cannot continue if any errors are present) """
    all_errors = self.phil_panel.collect_errors()
    if all_errors:
      # Check for errors and pop up a message if any are present
      wx.MessageBox(caption='Errors in Settings!',
                    message='Correct all errors to accept the changes.',
                    style=wx.OK|wx.ICON_EXCLAMATION)
    else:
      # Apply the collected PHIL string to the index and refresh the scopes
      # before closing with OK
      phil_string = self.phil_panel.GetPHIL(expand=True)
      if phil_string:
        self.phil_index.update_phil(phil_string=phil_string)
      self.scope = self.phil_index.get_scopes(include=self._scope_paths)
      self.EndModal(wx.ID_OK)
# -- end
|
__author__ = 'Tanim Islam'
__email__ = 'tanim.islam@gmail.com'

import sys, os, logging, datetime, numpy, pandas
from itertools import chain

# Root directory of this package (the directory containing this module)
_mainDir = os.path.dirname( os.path.abspath( __file__ ) )
#
## resources directory and covid-19 git submodule
resourceDir = os.path.join( _mainDir, 'resources' )
covid19ResDir = os.path.join( _mainDir, 'covid-19-data' )
#
## must both exist; raise explicitly instead of assert so the check still
## runs under "python -O" (assertions are stripped by optimization)
for _dirname in ( resourceDir, covid19ResDir ):
    if not os.path.isdir( _dirname ):
        raise RuntimeError( 'Required directory does not exist: %s' % _dirname )
from covid19_stats.engine import gis
class COVID19Database( object ):
    """
    This class implements a `singleton pattern`_ with static access methods to US GIS_ data and COVID-19 cumulative summary case and death data, for territorial units within the United States. It lazily instantiates itself via GIS_ loading functionality.

    The main data this `singleton <singleton pattern_>`_ object contains is the cumulative COVID-19 cumulative cases and deaths, for US counties, produced by the `NY Times COVID-19 database <https://github.com/nytimes/covid-19-data>`_.

    This creates a custom `FIPS code`_, with number `00001`, by melding together the five New York City boroughs (`Manhattan <https://en.wikipedia.org/wiki/Manhattan>`_, `Brooklyn <https://en.wikipedia.org/wiki/Brooklyn>`_, `Queens <https://en.wikipedia.org/wiki/Queens>`_, `The Bronx <https://en.wikipedia.org/wiki/The_Bronx>`_, and `Staten Island <https://en.wikipedia.org/wiki/Staten_Island>`_). This makes the COVID-19 geographic data set consistent with the COVID-19 cumulative cases and deaths data sets of the `NY Times COVID-19 database <https://github.com/nytimes/covid-19-data>`_.

    In addition to a :py:class:`dict` of MSA_ regions created or loaded by :py:meth:`create_and_store_msas_and_fips_2019 <covid19_stats.engine.gis.create_and_store_msas_and_fips_2019>`, this class also contains CONUS_ and state and territory regions dictionaries.

    .. _`singleton pattern`: https://en.wikipedia.org/wiki/Singleton_pattern
    .. _GIS: https://en.wikipedia.org/wiki/Geographic_information_system
    .. _`FIPS code`: https://en.wikipedia.org/wiki/FIPS_county_code
    .. _MSA: https://en.wikipedia.org/wiki/Metropolitan_statistical_area
    .. _CONUS: https://en.wikipedia.org/wiki/Contiguous_United_States
    """
    class __COVID19Database( object ):
        def get_stat_line( self, line ):
            """
            Parse one CSV line of the NY Times ``us-counties.csv`` data into a
            :py:class:`dict` of date, county, state, FIPS, and cumulative
            cases/deaths; return ``None`` when the line carries no FIPS code.
            """
            line_split = list(map(lambda tok: tok.strip(), line.split(',')))
            dstring = line_split[0]
            county_name = line_split[1].strip( )
            state_name = line_split[2].strip( )
            fips = line_split[-3].strip( )
            #
            ## NYC IS SPECIAL!!!
            if county_name == 'New York City': fips = '00001'
            if fips == '': return None
            cases_cumulative = int( line_split[-2] )
            ## a missing/invalid death count becomes 0 (narrowed from a bare
            ## except, which also swallowed unrelated errors)
            try: death_cumulative = int( line_split[-1] )
            except ValueError: death_cumulative = 0
            return {
                'date' : datetime.datetime.strptime(
                    dstring, '%Y-%m-%d' ).date( ),
                'county' : county_name,
                'state' : state_name,
                'fips' : fips,
                'cumulative cases' : cases_cumulative,
                'cumulative death' : death_cumulative }

        def create_nyc_custom_fips( self, bdict ):
            """
            create a custom FIPS dataset for NYC alone, FIPS #00001
            """
            from shapely.geometry import Polygon, MultiPolygon
            from shapely.ops import unary_union
            def _create_poly( shape ):
                lngs = shape[:,0]
                lats = shape[:,1]
                p = Polygon( list(zip( lngs, lats ) ) )
                return p
            #
            ## these are the FIPS for the FIVE NYC BOROUGHS
            #fips_five_boroughs = fips_missing_2019 & data_nyc['fips']
            #
            ## first get a boundary dict: fips -> points
            #bdict = get_boundary_dict( fips_five_boroughs )
            #
            ## second, construct a list of all the Polygons corresponding to these boroughs
            all_polys = list(map(_create_poly, chain.from_iterable( bdict.values( ) ) ) )
            #
            ## third, make a collection of MultiPolygon from the unary_union of these Polygons
            newpolys = unary_union( MultiPolygon( all_polys ) )
            #
            ## fourth, get the new shapes, ordered by area from smallest to largest
            newshapes = list(map(lambda poly: numpy.array( poly.exterior.coords.xy ).T, # take a Polygon, convert it into shape format we understand
                                 sorted( newpolys.geoms, key = lambda poly: poly.area )[::-1] # sort by maximum to minimum area
                                 ) )
            #
            ## fifth (and finally), return this new FIPS data structure: { 'bbox' : bbox, 'points' : list-of-shapes }
            ## FIPS # is 00001
            bbox = gis.calculate_total_bbox( newshapes ) # bbox
            geom_nyc = { 'bbox' : bbox, 'points' : newshapes }
            return geom_nyc

        def __init__( self ):
            """
            Load the NY Times COVID-19 case/death data and all GIS data sets,
            then perform the NYC custom-FIPS merging described in the outer
            class docstring.
            """
            #
            ## all COVID-19 data; close the CSV file deterministically (the
            ## original left the handle open)
            with open( os.path.join( covid19ResDir, "us-counties.csv" ), "r" ) as csvfile:
                raw_lines = csvfile.readlines( )
            all_counties_nytimes_covid19_data = list(
                filter(None,
                       map(self.get_stat_line,
                           list(
                               map(lambda line: line.strip(), filter(
                                   lambda line: len( line.strip( ) ) != 0,
                                   raw_lines)))[1:])))
            self.all_counties_nytimes_covid19_data = pandas.DataFrame(
                dict(map(lambda key: ( key, list(map(lambda entry: entry[key], all_counties_nytimes_covid19_data ) ) ),
                         { 'date', 'county', 'state', 'fips', 'cumulative cases', 'cumulative death' } ) ) )
            #
            ## FIPS data for county shapes 2019
            self.fips_data_2019 = gis.create_and_store_fips_2019( )
            #
            ## now population data for fips found from MSAs
            self.fips_popdict_2019 = gis.create_fips_popmap_2019( )
            #
            ## FIPS data for county adjacency 2019
            self.fips_adj_2019 = gis.construct_adjacency( self.fips_data_2019 )
            #
            ## CENSUS dictionary of FIPS to COUNTY/STATE
            self.fips_countystate_dict, self.cs_fips_dict = gis.create_and_store_fips_counties_2019( )
            self.data_msas_2019 = gis.create_and_store_msas_and_fips_2019( )
            self.fips_msas_2019 = dict(chain.from_iterable(
                map(lambda entry: map(lambda fips: ( fips, entry['prefix'] ), entry['fips']),
                    self.data_msas_2019.values( ) ) ) )
            #
            ## these are the FIPS missing, highlighting NYC
            ## include FIPS = 00001 EXCEPT for fips_adj_2018
            ## SHOULD WE ALSO DELETE THE FIVE BOROUGHS FIPS??
            def _get_boundary_dict( fips_collection ):
                boundary_dict = dict(map(lambda fips: (
                    fips, self.fips_data_2019[ fips ][ 'points' ] ), fips_collection ) )
                return boundary_dict
            _fips_missing_2019 = set( self.fips_msas_2019 ) - set( self.all_counties_nytimes_covid19_data.fips )
            # map(lambda entry: entry['fips'], all_counties_nytimes_covid19_data ) )
            _fips_five_boroughs = _fips_missing_2019 & self.data_msas_2019['nyc']['fips']
            #
            nyc_fips = '00001'
            self.fips_data_2019[ nyc_fips ] = self.create_nyc_custom_fips(
                _get_boundary_dict( _fips_five_boroughs ) )
            ## DELETE
            for fips in _fips_five_boroughs: self.fips_data_2019.pop( fips )
            #
            self.fips_countystate_dict[ nyc_fips ] = { 'county' : 'New York City', 'state' : 'New York' }
            #
            self.cs_fips_dict[ ( 'New York City', 'New York' ) ] = nyc_fips
            ## DELETE FIRST THEN SECOND
            for cs_found in map(lambda fips: self.fips_countystate_dict[ fips ], _fips_five_boroughs ):
                tup = ( cs_found[ 'county' ], cs_found[ 'state' ] )
                self.cs_fips_dict.pop( tup )
            for fips in _fips_five_boroughs: self.fips_countystate_dict.pop( fips )
            #
            ## AND DELETE??
            oldfips = self.data_msas_2019[ 'nyc' ][ 'fips' ].copy( )
            self.data_msas_2019[ 'nyc' ][ 'fips' ] = set(list( oldfips ) + [ nyc_fips ] ) - _fips_five_boroughs
            #
            self.fips_msas_2019[ nyc_fips ] = 'nyc'
            ## DELETE
            for fips in _fips_five_boroughs: self.fips_msas_2019.pop( fips )
            #
            ## now do the same thing for the five boroughs
            ## remove data for 5 boroughs, replace with fake NYC FIPS
            _fips_popdict_remove = set( _fips_five_boroughs ) & set( self.fips_popdict_2019 )
            logging.debug( 'REMOVING THESE FIPS: %s.' % _fips_popdict_remove )
            _pop_five_boroughs = sum(map(lambda fips: self.fips_popdict_2019[ fips ],
                                         _fips_popdict_remove ) )
            for fips in _fips_popdict_remove:
                if fips in self.fips_popdict_2019: self.fips_popdict_2019.pop( fips )
            self.fips_popdict_2019[ nyc_fips ] = _pop_five_boroughs
            #
            ## now data by states and by CONUS (continental US)
            ## will refactor so that later on it will live in engine.gis
            ## however, because right now because of NYC definition,
            ## and violence done to LOTS of GIS data, move it here AFTER violence
            _conus_states = set( map(lambda elem: elem['state'], self.fips_countystate_dict.values( ) ) ) - set([
                'Alaska', 'Hawaii', 'Puerto Rico' ] )
            self.data_conus = {
                'RNAME' : 'CONUS',
                'region name' : 'CONUS',
                'prefix' : 'conus',
                'fips' : list(filter(lambda fips: self.fips_countystate_dict[ fips ][ 'state' ] in
                                     _conus_states, self.fips_countystate_dict)) }
            self.data_conus['population'] = sum(list(map(
                lambda fips: self.fips_popdict_2019[fips],
                set( self.fips_popdict_2019 ) & set( self.data_conus['fips'] ) ) ) )
            #
            ## now do data for all states
            self.data_states = dict(map(lambda state: (
                '_'.join( state.lower( ).split()), {
                    'RNAME' : state,
                    'region name' : state,
                    'prefix' : '_'.join( state.lower().split()),
                    'fips' : list(filter(lambda fips: self.fips_countystate_dict[ fips ][ 'state' ] == state,
                                         self.fips_countystate_dict)) } ), _conus_states ) )
            for prefix in sorted( self.data_states ):
                self.data_states[ prefix ][ 'population' ] = sum(list(map(
                    lambda fips: self.fips_popdict_2019[ fips ],
                    set( self.fips_popdict_2019 ) & set( self.data_states[ prefix ][ 'fips' ] ) ) ) )
            self.mapping_state_rname_conus = dict(map(lambda rname: (
                self.data_states[ rname ][ 'region name' ], rname ), self.data_states ) )
            #
            ## data for non-CONUS states and territories
            self.data_nonconus_states_territories = dict(
                map(lambda state:
                    ( '_'.join( state.lower( ).split()), {
                        'RNAME' : state,
                        'region name' : state,
                        'prefix' : '_'.join( state.lower().split()),
                        'fips' : list(filter(lambda fips: self.fips_countystate_dict[ fips ][ 'state' ] == state,
                                             self.fips_countystate_dict)) } ),
                    ( 'Alaska', 'Hawaii', 'Puerto Rico' ) ) )
            for prefix in sorted( self.data_nonconus_states_territories ):
                self.data_nonconus_states_territories[ prefix ][ 'population' ] = sum(
                    list(map(lambda fips: self.fips_popdict_2019[ fips ],
                             set( self.fips_popdict_2019 ) &
                             set( self.data_nonconus_states_territories[ prefix ][ 'fips' ] ) ) ) )
            self.mapping_state_rname_nonconus = dict(
                map(lambda rname: ( self.data_nonconus_states_territories[ rname ][ 'region name' ], rname ),
                    self.data_nonconus_states_territories ) )
            #
            ## now modify the fips_popdict_2019. Any county NOT in there has a population of zero!
            remaining_fips = set(self.fips_countystate_dict) - set(self.fips_popdict_2019)
            for fips in remaining_fips:
                self.fips_popdict_2019[ fips ] = 0

    #
    ## hidden singleton instance
    __instance = None

    @classmethod
    def _getInstance( cls ):
        """ Lazily create and return the hidden singleton instance. """
        if COVID19Database.__instance is None:
            COVID19Database.__instance = COVID19Database.__COVID19Database( )
        return COVID19Database.__instance

    @classmethod
    def fips_data_2019( cls ):
        """
        :returns: the :py:class:`dict` of county geographical information. It returns the *same* data structure as what :py:meth:`create_and_store_fips_2019 <covid19_stats.engine.gis.create_and_store_fips_2019>` returns.
        :rtype: dict
        """
        inst = COVID19Database._getInstance( )
        return inst.fips_data_2019

    @classmethod
    def fips_popdict_2019( cls ):
        """
        :returns: the :py:class:`dict` of county population data. It returns the *same* data structure as what :py:meth:`create_and_store_fips_2019 <covid19_stats.engine.gis.create_fips_popmap_2019>` returns.
        :rtype: dict
        """
        inst = COVID19Database._getInstance( )
        return inst.fips_popdict_2019

    @classmethod
    def fips_dataframe_2019( cls ):
        """
        Returns a :py:class:`DataFrame <pandas.DataFrame>` with following columns: FIPS, county name, state or territory, and population. Data comes from :py:class:`dict` vomited out by :py:meth:`fips_popdict_2019 <covid19_stats.COVID19Database.fips_popdict_2019>` and :py:meth:`fips_countystate_dict <covid19_stats.COVID19Database.fips_countystate_dict>`.
        """
        inst = COVID19Database._getInstance( )
        fips_in_order = sorted( inst.fips_countystate_dict )
        county_in_order = list(map(lambda fips: inst.fips_countystate_dict[ fips ][ 'county' ], fips_in_order ) )
        state_in_order = list(map(lambda fips: inst.fips_countystate_dict[ fips ][ 'state' ], fips_in_order ) )
        pop_in_order = numpy.array(list(map(lambda fips: inst.fips_popdict_2019[ fips ], fips_in_order ) ), dtype=int )
        return pandas.DataFrame({
            'fips' : fips_in_order, 'county' : county_in_order, 'state_or_territory' : state_in_order, 'population' : pop_in_order })

    @classmethod
    def fips_adj_2018( cls ):
        """
        :returns: the :py:class:`dict` of adjacency information for US counties and territories. It returns the *same* data structure as what :py:meth:`construct_adjacency <covid19_stats.engine.gis.construct_adjacency>` returns.
        :rtype: dict
        """
        inst = COVID19Database._getInstance( )
        # the adjacency data lives on the instance as fips_adj_2019; the
        # original returned inst.fips_adj_2018, which does not exist and
        # raised AttributeError on every call
        return inst.fips_adj_2019

    @classmethod
    def fips_countystate_dict( cls ):
        """
        :returns: the :py:class:`dict` of county `FIPS code`_ to a :py:class:`dict` of ``county`` and ``state``. It returns one of the :py:class:`dict`\ s (mapping of county `FIPS code`_ to county and state name) that :py:meth:`create_and_store_fips_counties_2019 <covid19_stats.engine.gis.create_and_store_fips_counties_2019>` returns.
        :rtype: dict
        """
        inst = COVID19Database._getInstance( )
        return inst.fips_countystate_dict

    @classmethod
    def fips_msas_2019( cls ):
        """
        :returns: the :py:class:`dict` of county `FIPS code`_ to the MSA_, identified by its prefix (for example, ``nyc`` is the New York City metropolitan area). *Implictly*, this :py:class:`dict` only contains the counties that lie within an MSA_.
        :rtype: dict
        """
        inst = COVID19Database._getInstance( )
        return inst.fips_msas_2019

    @classmethod
    def data_msas_2019( cls ):
        """
        :returns: the :py:class:`dict` of MSA_ region information. It returns the *same* data structure as what :py:meth:`create_and_store_msas_and_fips_2019 <covid19_stats.engine.gis.create_and_store_msas_and_fips_2019>` returns.
        :rtype: dict
        """
        inst = COVID19Database._getInstance( )
        return inst.data_msas_2019

    @classmethod
    def all_counties_nytimes_covid19_data( cls ):
        """
        :returns: a :py:class:`DataFrame <pandas.DataFrame>` of the big shebang, the *reason behind the reason*, for the whole data set of COVID-19 cumulative cases and deaths. *It is unordered*. Here are the keys in this :py:class:`DataFrame <pandas.DataFrame>`: ``date`` (type :py:class:`date <datetime.date>`), ``county`` (of type :py:class:`string <str>`), ``state`` (of type :py:class:`string <str>`), ``fips`` (the FIPS code of type :py:class:`string <str>`), ``cumulative cases`` (of type :py:class:`int`), and ``cumulative death`` (of type :py:class:`int`).

        As of 25 February 2021, there are almost :math:`10^6` records in this :py:class:`list`.
        :rtype: :py:class:`DataFrame <pandas.DataFrame>`
        """
        inst = COVID19Database._getInstance( )
        return inst.all_counties_nytimes_covid19_data

    @classmethod
    def data_conus( cls ):
        """
        :returns: the same type of region data structure for a specific MSA_. Easier to show rather than describe in words this :py:class:`dict`.

        .. _conus_example_data:

        .. code-block:: python

           {'RNAME': 'CONUS',
            'region name': 'CONUS',
            'prefix': 'conus',
            'fips': ['48059',
             '48253',
             '48441',
             '39133',
             '39153',
             '13095',
             '13177',
             '13273',
             '13321',
             '41043',
             '36001',
             '36083',
             '36091',
             '36093',
             ...],
            'population': 308126624}

        :rtype: dict
        """
        inst = COVID19Database._getInstance( )
        return inst.data_conus

    @classmethod
    def mapping_state_rname_conus( cls ):
        """
        :returns: a :py:class:`dict` of state names, for states in the CONUS_, to the region identifying name. Here is what it returns,

        .. code-block:: python

           {'New Mexico': 'new_mexico',
            'Minnesota': 'minnesota',
            'Maryland': 'maryland',
            'Tennessee': 'tennessee',
            'Oregon': 'oregon',
            'New Hampshire': 'new_hampshire',
            'Ohio': 'ohio',
            'Maine': 'maine',
            'Utah': 'utah',
            'Alabama': 'alabama',
            'Michigan': 'michigan',
            'Iowa': 'iowa',
            'New York': 'new_york',
            'South Carolina': 'south_carolina',
            'Nebraska': 'nebraska',
            'Vermont': 'vermont',
            'Arizona': 'arizona',
            'California': 'california',
            'Virginia': 'virginia',
            'North Dakota': 'north_dakota',
            'Kansas': 'kansas',
            'District of Columbia': 'district_of_columbia',
            'North Carolina': 'north_carolina',
            'Delaware': 'delaware',
            'Massachusetts': 'massachusetts',
            'Oklahoma': 'oklahoma',
            'Florida': 'florida',
            'Montana': 'montana',
            'Idaho': 'idaho',
            'Pennsylvania': 'pennsylvania',
            'Texas': 'texas',
            'Illinois': 'illinois',
            'Kentucky': 'kentucky',
            'Mississippi': 'mississippi',
            'Wyoming': 'wyoming',
            'Colorado': 'colorado',
            'Arkansas': 'arkansas',
            'Indiana': 'indiana',
            'Nevada': 'nevada',
            'Georgia': 'georgia',
            'New Jersey': 'new_jersey',
            'Connecticut': 'connecticut',
            'West Virginia': 'west_virginia',
            'Louisiana': 'louisiana',
            'Rhode Island': 'rhode_island',
            'Wisconsin': 'wisconsin',
            'Missouri': 'missouri',
            'Washington': 'washington',
            'South Dakota': 'south_dakota'}

        :rtype: dict
        """
        inst = COVID19Database._getInstance( )
        return inst.mapping_state_rname_conus

    @classmethod
    def mapping_state_rname_nonconus( cls ):
        """
        :returns: a :py:class:`dict` of state names, for states and territories outside the CONUS_, to the region identifying name. Here is what it returns,

        .. code-block:: python

           {'Alaska': 'alaska', 'Hawaii': 'hawaii', 'Puerto Rico': 'puerto_rico'}

        :rtype: dict
        """
        inst = COVID19Database._getInstance( )
        return inst.mapping_state_rname_nonconus

    @classmethod
    def data_states( cls ):
        """
        :returns: the :py:class:`dict` of US state information, for states in the CONUS_. It returns the *same* type of data structure as what :py:meth:`create_and_store_msas_and_fips_2019 <covid19_stats.engine.gis.create_and_store_msas_and_fips_2019>` returns. But better show-than-tell, here is the data for the state of `Rhode Island <https://en.wikipedia.org/wiki/Rhode_Island>`_.

        .. _rhode_island_state_example_data:

        .. code-block:: python

           {'rhode_island': {'RNAME': 'Rhode Island',
             'region name': 'Rhode Island',
             'prefix': 'rhode_island',
             'fips': ['44001', '44003', '44005', '44007', '44009'],
             'population': 1059361}}

        The identifying key is the lowercase, no-spaced version of the state's name. Look at the values of the :py:class:`dict` that :py:meth:`mapping_state_rname_conus <covid19_stats.COVID19Database.mapping_state_rname_conus>` returns.
        :rtype: dict
        """
        inst = COVID19Database._getInstance( )
        return inst.data_states

    @classmethod
    def data_nonconus_states_territories( cls ):
        """
        :returns: the :py:class:`dict` of US state and territory information, for states and territories *not* in the CONUS_. It returns the *same* type of data structure as what :py:meth:`create_and_store_msas_and_fips_2019 <covid19_stats.engine.gis.create_and_store_msas_and_fips_2019>` returns. But better show-than-tell, here is the data for the state of `Hawaii <https://en.wikipedia.org/wiki/Hawaii>`_.

        .. code-block:: python

           {'hawaii': {'RNAME': 'Hawaii',
             'region name': 'Hawaii',
             'prefix': 'hawaii',
             'fips': ['15009', '15003', '15001', '15007', '15005'],
             'population': 1415786}}

        The identifying key is the lowercase, no-spaced version of the state's name. Look at the values of the :py:class:`dict` that :py:meth:`mapping_state_rname_nonconus <covid19_stats.COVID19Database.mapping_state_rname_nonconus>` returns.
        :rtype: dict
        """
        inst = COVID19Database._getInstance( )
        return inst.data_nonconus_states_territories
|
from benchlingapi.models import schema
def test_field_schema():
    """Round-trip a dropdown field payload through FieldSchema load/dump."""
    field_schema = schema.FieldSchema()
    payload = {"isMulti": True, "textValue": None, "type": "dropdown", "value": []}
    loaded = field_schema.load(payload)
    print(field_schema.dump(loaded))
|
from copy import deepcopy
import os

# Must be set before TensorFlow is first imported (which happens inside the
# `optimize` import below) for the flag to take effect
os.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"
from optimize import optimize

# Per-optimizer hyperparameter presets, keyed by optimizer name
opt_args = {}

# Graph-based genetic algorithm settings
opt_args['graph_ga'] = dict(
    smi_file='./data/guacamol_v1_train.smiles',
    population_size=100,
    offspring_size=200,
    generations=150,
    mutation_rate=0.01,
    n_jobs=-1,
    patience=150,
    canonicalize=False)

# SMILES LSTM hill-climbing settings
opt_args['lstm_hc'] = dict(
    pretrained_model_path='./guacamol_baselines/smiles_lstm_hc/pretrained_model/model_final_0.473.pt',
    n_epochs=151,
    mols_to_sample=1028,
    keep_top=512,
    optimize_n_epochs=1,
    max_len=100,
    optimize_batch_size=64,
    number_final_samples=1028,
    sample_final_model_only=False,
    smi_file='./data/guacamol_v1_train.smiles',
    n_jobs=-1,
    canonicalize=False)

# Molecular swarm optimization settings
opt_args['mso'] = dict(
    smi_file='./data/guacamol_v1_valid.smiles',
    num_part=200,
    num_iter=150)

# Set everything that varies in the loop to None
base_config = dict(
    chid=None,
    n_estimators=100,
    n_jobs=8,
    external_file='./data/guacamol_v1_test.smiles',
    n_external=3000,
    seed=None,
    opt_name=None,
    optimizer_args=None)
if __name__ == '__main__':
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    from parametersearch import ParameterSearch  # NOTE(review): imported but unused here

    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("--host", type=str, help='host address', default="localhost")
    # Bug fix: default was the string "7532" while type=int; use an int default.
    parser.add_argument("--port", type=int, help='host port', default=7532)
    # Bug fix: help text said "run as client process" for both --server and --work.
    parser.add_argument("--server", help="run as server process", action="store_true")
    parser.add_argument("--work", help="run as client process", action="store_true")
    parser.add_argument("--nruns", type=int, help='How many runs to perform per task', default=3)
    parser.add_argument("--random_seed_0", type=int, help='Random seed to use when splitting in train/test', default=0)
    parser.add_argument("--random_seed_1", type=int, help='Random seed to use when splitting in split1/split2', default=0)
    parser.add_argument("--min_samples_leaf", type=int, help='min_samples_leaf parameter for the RF', default=1)
    parser.add_argument("--max_depth", type=int, help='max_depth parameter for the RF', default=None)
    parser.add_argument("--random_start", action="store_true")
    parser.add_argument("--return_training_set", action="store_true")
    parser.add_argument("--use_max_score", action="store_true")
    parser.add_argument("--log_base", help='', default='results/test')
    parser.add_argument("--chids_set", help='', default='classic')
    parser.add_argument("--n_estimators", type=int, help='number of trees to use in the RF', default=100)
    args = parser.parse_args()

    # Run every optimizer on every target, args.nruns times each.
    for opt_name in ['mso', 'graph_ga', 'lstm_hc']:
        optimizer_args = opt_args[opt_name]
        if args.random_start and opt_name in ['graph_ga', 'lstm_hc']:
            optimizer_args['random_start'] = True
        if args.chids_set == 'classic':
            chids = ['CHEMBL1909203', 'CHEMBL1909140', 'CHEMBL3888429']
        else:
            chids = ['CHEMBL3888429', 'ALDH1']
        for chid in chids:
            for i in range(0, args.nruns):
                # Start from the shared template; fill in the per-run fields.
                config = deepcopy(base_config)
                config['chid'] = chid
                config['seed'] = i
                config['opt_name'] = opt_name
                config['optimizer_args'] = optimizer_args
                config['random_seed_0'] = args.random_seed_0
                config['random_seed_1'] = args.random_seed_1
                config['min_samples_leaf'] = args.min_samples_leaf
                config['max_depth'] = args.max_depth
                config['random_start'] = args.random_start
                config['return_training_set'] = args.return_training_set
                config['use_max_score'] = args.use_max_score
                config['log_base'] = args.log_base
                config['n_estimators'] = args.n_estimators
                print(f'Run {i+1}/{args.nruns}, {opt_name}, {chid}')
                optimize(**config)
|
import os
from . import tmp_uuid_and_uri # NOQA
from . import TEST_SAMPLE_DATA
from . import tmp_env_var
def test_http_manifest(tmp_uuid_and_uri):  # NOQA
    """The generated HTTP manifest exposes every expected key and item URL."""
    from dtoolcore import ProtoDataSet, generate_admin_metadata
    from dtoolcore import DataSet

    uuid, dest_uri = tmp_uuid_and_uri
    admin_metadata = generate_admin_metadata("my_dataset")
    admin_metadata["uuid"] = uuid
    png_path = os.path.join(os.path.join(TEST_SAMPLE_DATA), 'tiny.png')

    # Build and freeze a minimal dataset with one item and a readme.
    proto = ProtoDataSet(
        uri=dest_uri,
        admin_metadata=admin_metadata,
        config_path=None)
    proto.create()
    proto.put_item(png_path, 'tiny.png')
    proto.put_readme("---\nproject: testing\n")
    proto.freeze()

    dataset = DataSet.from_uri(dest_uri)

    # Test HTTP manifest.
    manifest = dataset._storage_broker._generate_http_manifest(expiry=None)  # NOQA
    for key in ("admin_metadata", "overlays", "readme_url", "manifest_url",
                "item_urls", "annotations", "tags"):
        assert key in manifest
    assert manifest["admin_metadata"] == dataset._admin_metadata
    assert set(manifest["item_urls"].keys()) == set(dataset.identifiers)
def test_http_enable(tmp_uuid_and_uri):  # NOQA
    """http_enable publishes the dataset at a plain (non-presigned) URL and
    annotations/tags/content survive the round trip over HTTP."""
    uuid, dest_uri = tmp_uuid_and_uri
    from dtoolcore import ProtoDataSet, generate_admin_metadata
    from dtoolcore import DataSet
    name = "my_dataset"
    admin_metadata = generate_admin_metadata(name)
    admin_metadata["uuid"] = uuid
    sample_data_path = os.path.join(TEST_SAMPLE_DATA)
    local_file_path = os.path.join(sample_data_path, 'tiny.png')
    # Create a minimal dataset
    proto_dataset = ProtoDataSet(
        uri=dest_uri,
        admin_metadata=admin_metadata,
        config_path=None)
    proto_dataset.create()
    proto_dataset.put_item(local_file_path, 'tiny.png')
    proto_dataset.put_readme("---\nproject: testing\n")
    proto_dataset.freeze()
    dataset = DataSet.from_uri(dest_uri)
    # Add an annotation.
    dataset.put_annotation("project", "dtool-testing")
    # Add tags.
    dataset.put_tag("amazing")
    dataset.put_tag("stuff")
    access_url = dataset._storage_broker.http_enable()
    assert access_url.find("?") == -1  # This is not a presigned URL dataset.
    assert access_url.startswith("https://")
    dataset_from_http = DataSet.from_uri(access_url)
    # Assert that the annotation has been copied across.
    assert dataset_from_http.get_annotation("project") == "dtool-testing"
    # Assert that the tags are available.
    assert dataset_from_http.list_tags() == ["amazing", "stuff"]
    from dtoolcore.compare import (
        diff_identifiers,
        diff_sizes,
        diff_content
    )
    # The published dataset is content-identical to the local one.
    assert len(diff_identifiers(dataset, dataset_from_http)) == 0
    assert len(diff_sizes(dataset, dataset_from_http)) == 0
    assert len(diff_content(dataset_from_http, dataset)) == 0
    # Make sure that none of the URLs in the manifest are presigned.
    http_manifest = dataset_from_http._storage_broker.http_manifest
    assert http_manifest["manifest_url"].find("?") == -1
    assert http_manifest["readme_url"].find("?") == -1
    for url in http_manifest["item_urls"].values():
        assert url.find("?") == -1
    for url in http_manifest["annotations"].values():
        assert url.find("?") == -1
def test_http_enable_with_presigned_url(tmp_uuid_and_uri):  # NOQA
    """With DTOOL_S3_PUBLISH_EXPIRY set, http_enable emits presigned URLs
    (query strings present) and the dataset still round-trips over HTTP."""
    uuid, dest_uri = tmp_uuid_and_uri
    from dtoolcore import ProtoDataSet, generate_admin_metadata
    from dtoolcore import DataSet
    name = "my_dataset"
    admin_metadata = generate_admin_metadata(name)
    admin_metadata["uuid"] = uuid
    sample_data_path = os.path.join(TEST_SAMPLE_DATA)
    local_file_path = os.path.join(sample_data_path, 'tiny.png')
    # Create a minimal dataset
    proto_dataset = ProtoDataSet(
        uri=dest_uri,
        admin_metadata=admin_metadata,
        config_path=None)
    proto_dataset.create()
    proto_dataset.put_item(local_file_path, 'tiny.png')
    proto_dataset.put_readme("---\nproject: testing\n")
    proto_dataset.freeze()
    dataset = DataSet.from_uri(dest_uri)
    # Add an annotation.
    dataset.put_annotation("project", "dtool-testing")
    # Add tags.
    dataset.put_tag("amazing")
    dataset.put_tag("stuff")
    # Publish while the expiry env var is set so presigning kicks in.
    with tmp_env_var("DTOOL_S3_PUBLISH_EXPIRY", "120"):
        access_url = dataset._storage_broker.http_enable()
    assert access_url.find("?") != -1  # This is a presigned URL dataset.
    assert access_url.startswith("https://")
    dataset_from_http = DataSet.from_uri(access_url)
    # Assert that the annotation has been copied across.
    assert dataset_from_http.get_annotation("project") == "dtool-testing"
    # Assert that the tags are available.
    assert dataset_from_http.list_tags() == ["amazing", "stuff"]
    from dtoolcore.compare import (
        diff_identifiers,
        diff_sizes,
        diff_content
    )
    # The published dataset is content-identical to the local one.
    assert len(diff_identifiers(dataset, dataset_from_http)) == 0
    assert len(diff_sizes(dataset, dataset_from_http)) == 0
    assert len(diff_content(dataset_from_http, dataset)) == 0
    # Make sure that all the URLs in the manifest are presigned.
    http_manifest = dataset_from_http._storage_broker.http_manifest
    assert http_manifest["manifest_url"].find("?") != -1
    assert http_manifest["readme_url"].find("?") != -1
    for url in http_manifest["item_urls"].values():
        assert url.find("?") != -1
    for url in http_manifest["annotations"].values():
        assert url.find("?") != -1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractBaseUser
from django.db import models
# Create your models here.
class CWUser(models.Model):
    """A tracked user: a CodeWars-style username plus profile URL."""
    # Username and profile URL (plain CharFields; no uniqueness enforced).
    cw_username = models.CharField(max_length=100)
    cw_url = models.CharField(max_length=100)

    def __str__(self):
        return self.cw_username

    @property
    def last_task(self):
        """Name of the most recently created related task.

        NOTE(review): returns a str when tasks exist but an empty *list*
        otherwise -- callers receive inconsistent types; confirm whether
        None/"" was intended. Also issues two queries (first() + latest()).
        """
        if self.tasks.first():
            return self.tasks.latest('created').name
        else:
            return []
class Tasks(models.Model):
    """A task belonging to a CWUser (reverse accessor: user.tasks)."""
    name = models.CharField(max_length=100)
    # SET_NULL keeps tasks around when their user is deleted.
    user = models.ForeignKey('users.CWUser', null=True, blank=True, related_name='tasks', on_delete=models.SET_NULL)
    # auto_now_add stamps creation time; used by CWUser.last_task ordering.
    created = models.DateTimeField(auto_created=True, auto_now_add=True)

    def __str__(self):
        return self.name
|
"""
C code printer
"""
from str import StrPrinter
from sympy.printing.precedence import precedence, PRECEDENCE
from sympy.core.basic import S
class CCodePrinter(StrPrinter):
    """A printer to convert python expressions to strings of c code"""
    printmethod = "_ccode_"

    def _print_Pow(self, expr):
        """Render x**-1 as 1/x; every other power via C's pow()."""
        PREC = precedence(expr)
        if expr.exp is S.NegativeOne:
            return '1/%s' % (self.parenthesize(expr.base, PREC))
        else:
            return 'pow(%s,%s)' % (self.parenthesize(expr.base, PREC),
                                   self.parenthesize(expr.exp, PREC))

    def _print_Exp1(self, expr):
        # Euler's number as a C expression.
        return "exp(1)"

    def _print_Piecewise(self, expr):
        """Render a Piecewise as an if / else if / else chain."""
        ecpairs = ["(%s) {\n%s\n}\n" % (self._print(c), self._print(e))
                   for e, c in expr.args[:-1]]
        last_line = ""
        if expr.args[-1].cond is S.One:
            last_line = "else {\n%s\n}" % self._print(expr.args[-1].expr)
        else:
            # Bug fix: the trailing pair was emitted without its closing
            # brace ("(%s) {\n%s\n"), producing unbalanced C code; use the
            # same format as the pairs above.
            ecpairs.append("(%s) {\n%s\n}\n" %
                           (self._print(expr.args[-1].cond),
                            self._print(expr.args[-1].expr)))
        code = "if %s" + last_line
        return code % "else if ".join(ecpairs)
def ccode(expr):
    r"""Convert *expr* to a string of C code.

    Works for simple expressions using math.h functions.

    >>> from sympy import *
    >>> from sympy.abc import *
    >>> ccode((2*tau)**Rational(7,2))
    '8*pow(2,(1/2))*pow(tau,(7/2))'
    """
    printer = CCodePrinter()
    return printer.doprint(expr)
def print_ccode(expr):
    """Prints C representation of the given expression."""
    # Python 2 print statement -- kept as-is to match this file's py2 syntax.
    print ccode(expr)
|
# Copyright (c) 2021, Summayya and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.utils import flt
import json
from ..gl_entry.gl_entry import create_gl_entry
class SalesInvoice(Document):
    """Sales invoice doctype: totals its child items and posts GL entries
    on submit/cancel."""

    def validate(self):
        """Frappe hook: runs before every save."""
        self.set_status()
        self.set_total_amount()

    def set_total_amount(self):
        """Recompute amount and total_quantity from the child `item` rows."""
        # sum() returns 0 for an empty child table, so no pre-zeroing needed.
        self.amount = sum(flt(item.amount, 3) for item in self.item)
        self.total_quantity = sum(item.quantity for item in self.item)

    def set_status(self):
        '''
        Draft: 0
        Submitted: 1, Paid or Unpaid or Overdue
        Cancelled: 2
        '''
        if self.is_new():
            # Bug fix: the standard Frappe field is 'amended_from' (was
            # misspelled 'amended_form'), so amended documents never got
            # reset to Draft.
            if self.get('amended_from'):
                self.status = 'Draft'
            return
        if self.docstatus == 1:
            self.status = 'Unpaid'

    def on_submit(self):
        # Debit the customer, credit sales.
        create_gl_entry(self, 'Sales Invoice',
                        self.credit_account, self.debit_account)

    def on_cancel(self):
        # Reverse the submit-time GL entry.
        create_gl_entry(self, 'Sales Invoice',
                        self.debit_account, self.credit_account)
@frappe.whitelist(allow_guest=True)
def generate_sales_invoice(data, company):
    """Create and submit a Sales Invoice from e-commerce cart data.

    data: JSON string -- a list of {name, quantity, price, total_price} dicts.
    company: company the invoice is booked against.
    Returns the name (ID) of the submitted invoice document.

    NOTE(review): allow_guest=True lets unauthenticated callers create and
    submit invoices -- confirm this endpoint is meant to be public.
    """
    data = json.loads(data)
    items = []
    for item in data:
        items.append(frappe._dict(
            {"item_name": item['name'], "quantity": item['quantity'], "rate": item['price'], "amount": item['total_price']}
        ))
    # NOTE(review): customer and accounts are hard-coded ('Ecom customer',
    # 'Debitors - GE', 'Sales - GE') -- verify these exist on every site.
    doc = frappe.get_doc({
        'doctype': 'Sales Invoice',
        'customer': 'Ecom customer',
        'company': company,
        'item': items,
        'debit_account': 'Debitors - GE',
        'credit_account': 'Sales - GE'
    })
    doc.submit()
    return doc.name
|
"""empty message
Revision ID: cfb3cf41a07a
Revises: c070976dc8fb
Create Date: 2016-08-30 16:21:04.035292
"""
# revision identifiers, used by Alembic.
revision = 'cfb3cf41a07a'        # this migration
down_revision = 'c070976dc8fb'   # parent migration
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
    """Backfill email from username where missing, then drop username."""
    # for any users without an email, bring over the username if avail
    conn = op.get_bind()
    rows = conn.execute(text("""SELECT id, username FROM users WHERE email is
    null AND username != 'Anonymous'""")).fetchall()
    for user_id, username in ((r[0], r[1]) for r in rows):
        conn.execute(text("""UPDATE users SET email=:username WHERE id=:id"""),
                     username=username, id=user_id)
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'username')
    ### end Alembic commands ###
def downgrade():
    """Re-add the (now empty) username column."""
    ### commands auto generated by Alembic - please adjust! ###
    column = sa.Column('username', sa.VARCHAR(length=64),
                       autoincrement=False, nullable=True)
    op.add_column('users', column)
    ### end Alembic commands ###
|
import pandas as pd
import numpy as np
import math
import matplotlib
matplotlib.use('TkAgg')
from matplotlib_venn import venn2, venn3
import mca
import matplotlib.pyplot as plt
from collections import defaultdict
import json
from adjustText import adjust_text as AT
class compare:
    """
    Compare -- plot collections for comparison purposes.
    Description:
    Compare is a set of tools intended for comparing collections of Web Archives based
    on derivative sets created by Warcbase (https://github.com/lintool/warcbase).
    Args:
    @collectionset (list): A list of lists or a dict() of size 2 or greater for comparison
    purposes.
    @names (list): An optional list of names for the collections. Must be equal in size to
    collections. If collections is a dict, this parameter will be overwritten.
    @exclude (list): A list of collection names or index keys to exclude from the analysis.
    @REMOVE_SINGLES (Bool): (Default:True) For 4 collections or more, remove from the analysis
    any data points that are
    members of only one collection. This reduces the chance that a disproportionately
    large collection
    will be seen as an outlier merely because it is disproportionately large.
    Example:
    $ data = [["happy", "excited", "content", "nostalgic"],
    ["sad", "unhappy", "nostalgic", "melancholic"],
    ["reminiscing", "remembering", "nostalgic"],
    ["happy", "get lucky", "money maker"],
    ["get lucky", "money maker"],
    ["excited", "love actually", "get lucky"]]
    $ names = ["happy words", "sad words", "memory words", "pharrel williams songs", "dancehall slang", "rom coms"]
    $ comp = compare.Compare(data, names)
    $ comp.plot_ca()
    """
    def __init__ (self, collectionset, names=[], exclude=[], REMOVE_SINGLES=True):
        # NOTE(review): mutable default arguments (names=[], exclude=[]) are
        # shared across calls; only safe because they are never mutated here.
        self.collection_names = names
        self.exclude = exclude
        self.collectionset = collectionset
        self.REMOVE_SINGLES = REMOVE_SINGLES
        self.DIMS = 2                     # number of factors extracted by (m)ca
        self.LABEL_BOTH_FACTORS = False   # also label row points in plot_ca
        self.adjust = False               # use adjustText to de-overlap labels
        self.dimensions = None            # eigenvalues of the last (m)ca run
        self.counts = None                # mca result object (ca path only)
        self.result = {}                  # factor scores keyed "rows"/"columns"/"df"
        self.clabels = []
        self.rlabels = []
        self.plabels = []
        if isinstance(self.collectionset, dict):
            # NOTE(review): .strip() on the *values* assumes they are strings,
            # but downstream code treats them as lists -- confirm input shape.
            self.collection_names = [x.strip() for x in self.collectionset.keys()]
            self.collectionset = [x.strip() for x in self.collectionset.values()]
        if type([y[0] for y in self.collectionset][0]) is tuple: #will need to include checks for size of sample
            print ("yay mca")
            self.collection_names = list(set([x[0] for y in self.collectionset for x in y]))
            # NOTE(review): self.index is never assigned anywhere in this
            # class -- this branch raises AttributeError; possibly meant
            # self.exclude. TODO confirm.
            if self.index:
                self.collectionset = self.sublist(self.collectionset, self.index)
                self.collection_names = self.sublist(self.collection_names, self.index)
            self.mca(self.collectionset, self.collection_names)
        else:
            #self.collectionset = dict([(x[0], x[1]) for y in self.collectionset for x in y])
            if not self.collection_names:
                self.collection_names = range(1, len(self.collectionset)+1)
            # if index var is provided, use index to filter collection list
            if self.exclude:
                # NOTE(review): uses self.index (undefined) rather than
                # self.exclude -- raises AttributeError when exclude is given.
                self.collectionset = self.sublist(self.collectionset, self.index)
                self.collection_names = self.sublist(self.collection_names, self.index)
            #two sample venn
            if len(self.collectionset) == 2:
                self.response = self.two_venn(self.collectionset)
            #three sample venn
            elif len(self.collectionset) == 3:
                self.response = self.three_venn(self.collectionset)
            #use mca for greater than three
            elif len(self.collectionset) >3:
                # NOTE(review): rebinding self.ca shadows the ca() method
                # with its return value (True) after first use.
                self.ca = self.ca(self.collectionset, self.collection_names)
            else:
                self.no_compare()
    def excluded (self):
        # Filter out excluded collections, by index or by name.
        if all(isinstance(item, int) for item in self.exclude):
            self.collectionset = self.sublist(self.collectionset, self.exclude)
            self.collection_names = self.sublist(self.collection_names, self.exclude)
        else:
            # NOTE(review): `PC` and self.collection are undefined in this
            # file -- this branch raises NameError/AttributeError as written.
            self.collection, self.collection_names = PC.process_collection(self.collection, self.collection_names, self.exclude)
    def handle_json (self, input):
        # NOTE(review): unimplemented stub.
        pass
    def examine_input (self, input):
        if isinstance(self.collectionset, dict): #passed a plain dictionary
            # NOTE(review): reads self.collection, which is never set --
            # likely meant self.collectionset.
            self.collection_names = [x for x in self.collection.keys()]
            self.collectionset=[x for x in self.collection.values()]
    def recur_len(self, L):
        # NOTE(review): calls bare `recur_len` (NameError -- should be
        # self.recur_len), and `L + ...` adds the whole list; expression
        # looks wrong as well. Unused elsewhere in this class.
        return sum(L + recur_len(item) if isinstance(item, list) else L for item in L)
    def no_compare(self):
        return ("Need at least two collectionset to compare results.")
    #get a sublist from a list of indices
    def sublist (self, list1, list2):
        return([list1[x] for x in list2])
    def two_venn (self, collectionset):
        """ Return a two-way venn diagram of two sets """
        self.V2_AB = set(collectionset[0]).intersection(set(collectionset[1]))
        return (venn2([set(x) for x in collectionset], set_labels=self.collection_names))
    def three_venn (self, collectionset):
        """ Return a three-way venn diagram of three sets """
        # Region sets: triple overlap, each pairwise overlap minus it, then
        # each exclusive region.
        self.V3_ABC = set(collectionset[0]) & set(collectionset[1]) & set(collectionset[2])
        self.V3_AB = set(collectionset[0]) & set(collectionset[1]) - self.V3_ABC
        self.V3_BC = set(collectionset[1]) & set(collectionset[2]) - self.V3_ABC
        self.V3_AC = set(collectionset[0]) & set(collectionset[2]) - self.V3_ABC
        self.V3_A = set(collectionset[0]) - (self.V3_ABC | self.V3_AB | self.V3_AC )
        self.V3_B = set(collectionset[1]) - (self.V3_ABC | self.V3_AB | self.V3_BC )
        self.V3_C = set(collectionset[2]) - (self.V3_ABC | self.V3_BC | self.V3_AC )
        return (venn3([set(x) for x in collectionset], set_labels=self.collection_names))
    #get set of all items (unduplicated)
    def unionize (self, sets_list):
        """ Take a list of sets and return a set with all duplicates removed """
        return (set().union(*sets_list))
    def create_matrix (self, dd, collectionset):
        # Boolean membership matrix: one row per collection, one column per item.
        d = []
        for y in collectionset:
            d.append({x:x in y for x in dd})
        return (pd.DataFrame(d, index=self.collection_names))
    def remove_singles (self, df):
        # Drop columns (items) that occur in at most one collection.
        return (df.loc[:, df.sum(0) >1].fillna(False))
    def fill_vars (self, df):
        # Run correspondence analysis and stash scores/labels on self.
        self.response = df
        self.counts = mca.mca(df)
        # NOTE(review): `len(self.counts.L >1)` takes len() of a boolean
        # array -- almost certainly meant `len(self.counts.L) > 1`.
        if len(self.counts.L >1):
            self.dimensions = self.counts.L
        else:
            self.dimensions = np.append(self.counts.L, 0.0)
        self.result["rows"] = self.counts.fs_r(N=self.DIMS)
        self.result["columns"] = self.counts.fs_c(N=self.DIMS)
        self.rlabels = df.columns.values
        self.clabels = self.collection_names
    def plot3d (self):
        # NOTE(review): incomplete -- sets up the 3D axes and returns nothing.
        if self.DIMS != 3:
            print ("There was a problem, Trying to do a 3D plot for a non-3D data.")
        clabels = self.collection_names
        fig = plt.figure(figsize=(10,10))
        ax = fig.add_subplot(111, projection='3d')
    def ca(self, collectionset, names):
        """Correspondence analysis over a plain list-of-lists collectionset."""
        # use dd to create a list of all websites in the collectionset
        dd = self.unionize(collectionset)
        #populate table with matches for actors (weblists)
        df = self.create_matrix(dd, collectionset)
        if self.REMOVE_SINGLES:
            df = self.remove_singles(df)
        self.fill_vars(df)
        return(True)
    def mca(self, collectionset, names):
        """Multiple correspondence analysis over (name, year, value) tuples,
        including an inline scatter plot of the factor scores."""
        #print ([x[2] for y in collectionset for x in y][0:3])
        default = defaultdict(list)
        coll = defaultdict(list)
        src_index, var_index, d = [], [], []
        for x in collectionset:
            for y,k,v in x:
                default[y+'%'+k].append(v)
        #print(list(default)[0:3])
        dd = self.unionize([j for y, j in default.items()])
        #print (dd)
        for key, val in default.items():
            #print (key)
            keypair = key.split("%")
            collect, year = keypair[0], keypair[1]
            coll[collect].append(year)
            d.append({url: url in val for url in dd})
        for happy, sad in coll.items():
            src_index = (src_index + [happy] * len(sad))
            #src_index = (happy * len(sad) for happy, sad in coll.items())
            var_index = (var_index + sad)
        col_index = pd.MultiIndex.from_arrays([src_index, var_index], names=["Collection", "Date"])
        #X = {x for x in (self.unionize(collectionset))}
        table1 = pd.DataFrame(data=d, index=col_index, columns=dd)
        if self.REMOVE_SINGLES:
            table1 = table1.loc[:, table1.sum(0) >1 ]
        table2 = mca.mca(table1)
        #print (table2.index)
        self.response = table1
        self.dimensions = table2.L
        #print(table2.inertia)
        fs, cos, cont = 'Factor score','Squared cosines', 'Contributions x 1000'
        data = pd.DataFrame(columns=table1.index, index=pd.MultiIndex
                            .from_product([[fs, cos, cont], range(1, self.DIMS+1)]))
        #print(data)
        # Small jitter so coincident points remain visible in the plot.
        noise = 0.07 * (np.random.rand(*data.T[fs].shape) - 0.5)
        if self.DIMS > 2:
            data.loc[fs, :] = table2.fs_r(N=self.DIMS).T
            self.result["rows"] = table2.fs_r(N=self.DIMS).T
            self.result["columns"] = table2.fs_c(N=self.DIMS).T
            self.result["df"] = data.T[fs].add(noise).groupby(level=['Collection'])
        # NOTE(review): duplicate assignment -- also done in the branch above.
        data.loc[fs, :] = table2.fs_r(N=self.DIMS).T
        # print(data.loc[fs, :])
        #print(points)
        urls = table2.fs_c(N=self.DIMS).T
        self.plabels = var_index
        fs_by_source = data.T[fs].add(noise).groupby(level=['Collection'])
        fs_by_date = data.T[fs]
        self.dpoints = data.loc[fs].values
        print(self.dpoints[1:3])
        fig, ax = plt.subplots(figsize=(10,10))
        plt.margins(0.1)
        plt.axhline(0, color='gray')
        plt.axvline(0, color='gray')
        plt.xlabel('Factor 1 (' + str(round(float(self.dimensions[0]), 3)*100) + '%)')
        plt.ylabel('Factor 2 (' + str(round(float(self.dimensions[1]), 3)*100) + '%)')
        ax.margins(0.1)
        markers = '^', 's', 'o', 'o', 'v', "<", ">", "p", "8", "h"
        colors = 'r', 'g', 'b', 'y', 'orange', 'peachpuff', 'm', 'c', 'k', 'navy'
        for fscore, marker, color in zip(fs_by_source, markers, colors):
            #print(type(fscore))
            label, points = fscore
            ax.plot(*points.T.values[0:1], marker=marker, color=color, label=label, linestyle='', alpha=.5, mew=0, ms=12)
        for plabel, x, y in zip(self.plabels, *self.dpoints[1:3]):
            plt.annotate(plabel, xy=(x, y), xytext=(x + .15, y + .15))
        ax.legend(numpoints=1, loc=4)
        plt.show()
    def duplicates (self):
        # NOTE(review): `l` is undefined here (NameError) -- the method body
        # looks like it was meant to take a list argument.
        return(set([x for x in l if l.count(x) > 1]) == set())
    def plot_ca (self, asfile=""):
        """Scatter-plot the 2D CA factor scores; optionally save to *asfile*."""
        texts = []
        ctexts = []
        plt.figure(figsize=(10,10))
        plt.margins(0.1)
        plt.axhline(0, color='gray')
        plt.axvline(0, color='gray')
        plt.xlabel('Factor 1 (' + str(round(float(self.dimensions[0]), 3)*100) + '%)')
        plt.ylabel('Factor 2 (' + str(round(float(self.dimensions[1]), 3)*100) + '%)')
        plt.scatter(*self.result['columns'].T, s=120, marker='o', c='r', alpha=.5, linewidths=0)
        plt.scatter(*self.result['rows'].T, s=120, marker='s', c='blue', alpha=.5, linewidths=0)
        for clabel, x, y in zip(self.rlabels, *self.result['columns'].T):
            ctexts.append(plt.text(x, y, clabel))
        if self.LABEL_BOTH_FACTORS:
            for label, x, y in zip(self.clabels, *self.result['rows'].T):
                texts.append(plt.text(x, y, label))
        if self.adjust:
            # De-overlap labels via adjustText.
            AT(texts,arrowprops=dict(arrowstyle="-", color='k', lw=0.5))
            AT(ctexts, arrowprops=dict(arrowstyle="-", color='k', lw=0.5))
        if asfile:
            plt.savefig(asfile, bbox_inches='tight')
        plt.show()
    def plot_ca_3d(self):
        """3D scatter of the first three CA factors (requires DIMS == 3)."""
        fig = plt.figure(figsize=(10,10))
        ax = fig.add_subplot(111, projection='3d')
        plt.margins(0.1)
        plt.axhline(0, color='gray')
        plt.axvline(0, color='gray')
        ax.set_xlabel('Factor 1 (' + str(round(float(self.dimensions[0]), 3)*100) + '%)')
        ax.set_ylabel('Factor 2 (' + str(round(float(self.dimensions[1]), 3)*100) + '%)')
        ax.set_zlabel('Factor 3 (' + str(round(float(self.dimensions[2]), 3)*100) + '%)')
        ax.scatter(*self.result['columns'], s=120, marker='o', c='r', alpha=.5, linewidths=0)
        ax.scatter(*self.result['rows'], s=120, marker='s', c='whitesmoke', alpha=.5, linewidths=0)
        for clabel, x, y, z in zip(self.clabels, *self.result['rows']):
            ax.text(x,y,z, '%s' % (clabel), size=20, zorder=1, color='k')
if __name__ == "__main__":
    # Demo: compare parliamentary committees by their memberships.
    with open('./data/parliamentary_committees.json') as f:
        data = json.load(f)
    membership_lists = [entry['membership'] for entry in data.values()]
    committee_names = [name for name in data.keys()]
    print(committee_names)
    comp = compare(membership_lists, committee_names)
    comp.LABEL_BOTH_FACTORS = True
    comp.adjust = True
    comp.plot_ca()
|
def sort_array(a):
    """Sort *a* so odd values appear in ascending order and even values in
    descending order, each group keeping to the positions its parity
    originally occupied.

    E.g. [5, 3, 2, 8, 1, 4] -> [1, 3, 8, 4, 5, 2].

    Works for negative numbers too (Python's % keeps x % 2 in {0, 1}).
    Replaces the original run-chunking bookkeeping and its
    conditional-expression-for-side-effect final line with two iterators:
    each position draws the next value of its own parity, which is exactly
    what the run-based version produced.
    """
    if not a:
        return []
    odds = iter(sorted(x for x in a if x % 2))
    evens = iter(sorted((x for x in a if x % 2 == 0), reverse=True))
    return [next(odds) if x % 2 else next(evens) for x in a]
import os
import h5py
import click
import torch
import copy
from tqdm import tqdm
from global_constants import flickr_paths
import utils.io as io
from utils.html_writer import HtmlWriter
from .models.cap_encoder import CapEncoderConstants, CapEncoder
from .cache_neg_features import convert_tokens_to_ids, remove_padding
@click.command()
@click.option(
    '--subset',
    type=click.Choice(['train', 'val', 'test']),
    required=True,
    # Bug fix: this text was passed as default= (an invalid choice that led
    # to a KeyError in flickr_paths); it is the option's help string.
    help='flickr subset to identify nouns for')
def main(**kwargs):
    """Cache BERT features for positive and negative noun substitutions in
    Flickr captions into an HDF5 file, one dataset per substitution."""
    model_const = CapEncoderConstants()
    cap_encoder = CapEncoder(model_const).cuda()
    subset = kwargs['subset']

    # Negative samples produced by the sampling stage.
    filename = os.path.join(
        flickr_paths['proc_dir'],
        flickr_paths['noun_negatives']['samples'][subset])
    neg_samples = io.load_json_object(filename)

    # Output HDF5 file for the encoded features.
    filename = os.path.join(
        flickr_paths['proc_dir'],
        flickr_paths['noun_negatives']['feats'][subset])
    feats_f = h5py.File(filename, 'w')

    total_count = 0       # NOTE(review): kept from original; never updated
    diff_len_count = 0    # NOTE(review): kept from original; never updated
    for image_id in tqdm(neg_samples.keys()):
        for cap_id in neg_samples[image_id].keys():
            sample = neg_samples[image_id][cap_id]
            for str_neg_idx in sample['negs'].keys():
                neg_idx = int(str_neg_idx)
                # Encode the ground-truth caption; take the feature at the
                # substituted-noun position.
                pos_tokens = remove_padding(sample['gt'])
                pos_token_ids = convert_tokens_to_ids([pos_tokens], cap_encoder)
                pos_token_ids = torch.LongTensor(pos_token_ids).cuda()
                pos_feats = cap_encoder(pos_token_ids)[:, neg_idx, :]
                # Encode the negative captions at the same position.
                neg_batch_tokens = sample['negs'][str_neg_idx]
                neg_token_ids = convert_tokens_to_ids(neg_batch_tokens, cap_encoder)
                neg_token_ids = torch.LongTensor(neg_token_ids).cuda()
                neg_feats = cap_encoder(neg_token_ids)[:, neg_idx, :]
                # Row 0 is the positive feature, the rest are negatives.
                feats = torch.cat((pos_feats, neg_feats), 0)
                feats = feats.cpu().detach().numpy()
                feats_f.create_dataset(
                    f'{image_id}_{cap_id}_{str_neg_idx}',
                    data=feats)
    feats_f.close()
if __name__=='__main__':
    # Features are extracted for caching only -- no gradients needed.
    with torch.no_grad():
        main()
from http.server import BaseHTTPRequestHandler, HTTPServer
import urllib.parse
import os
from urllib.parse import urlparse, parse_qs
from bunny_lab.logic import BunnyLab
# Directory containing this module; static assets are served from ./static.
BUNNY_LAB_DIR = os.path.dirname(__file__)
# Module-level shared lab state used by the handler functions below.
bunny_lab = BunnyLab()
def handle_reset():
    """Replace the shared BunnyLab instance, discarding all state.

    Bug fix: the original assigned to a *local* ``bunny_lab``, leaving the
    module-level instance untouched, so reset was a silent no-op.
    """
    global bunny_lab
    bunny_lab = BunnyLab()
    return ""
def handle_register_user():
    """Register a new user with the lab and return the assigned name."""
    return bunny_lab.register_user()
def handle_unregister_user(name):
    """Remove *name* from the lab; the HTTP response body is empty."""
    bunny_lab.unregister_user(name)
    return ""
def handle_bunnies_saved(name, number):
    """Record *number* saved bunnies for *name*; return the total as text."""
    return str(bunny_lab.bunnies_saved(name, number))
def handle_bunny_saved(name):
    """Record a single saved bunny for *name*; return the total as text.

    Bug fix: the original passed an undefined ``number`` to
    ``bunny_saved`` (NameError on every call); the single-bunny call takes
    only the user name, matching how do_POST invokes this handler.
    """
    return str(bunny_lab.bunny_saved(name))
def handle_file_request(path):
    """Return the text content of *path* under the static directory.

    Bug fix: the original leaked the file handle; use a ``with`` block so
    the file is closed promptly. Raises OSError if the file is missing.
    """
    full_path = os.path.join(BUNNY_LAB_DIR, "static", path)
    with open(full_path) as f:
        return f.read()
class RequestHandler(BaseHTTPRequestHandler):
    """Serves static files and results pages (GET) and lab actions (POST)."""

    def confirm_post(self):
        # Send a bare 200 OK with an HTML content type (no body).
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def try_serve_file(self):
        # NOTE(review): urlparse(self.requestline) parses the whole request
        # line ("GET /x HTTP/1.1"), not just the URL -- confirm the path
        # component is what is expected here.
        parsed_url = urlparse(self.requestline)
        path = parsed_url.path
        try:
            path = os.path.join(BUNNY_LAB_DIR, "static") + path
            response = open(path).read()
            # NOTE(review): the file is opened and read a second time here,
            # and neither handle is closed.
            print("READING FILE: ", open(path).read())
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            # NOTE(review): `results` is unused in this method.
            results = bunny_lab.print_results()
            self.wfile.write(response.encode())
        except:
            # NOTE(review): bare except also hides errors unrelated to a
            # missing file (encoding issues, broken pipe, ...).
            print("FAILED READING FILE: ", path)
            self.send_response(400)
            self.end_headers()

    def generate_and_send_results(self):
        # Render the results page; "notebook" in the query string requests
        # a fragment rather than a full HTML document.
        parsed_url = urlparse(self.requestline)
        path = parsed_url.path
        qs = parse_qs(path)
        print(path, qs)
        if "notebook" in qs:
            full_html = False
        else:
            full_html = True
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        results = bunny_lab.print_results(full_html)
        self.wfile.write(results.encode())

    def do_GET(self):
        self.protocol_version = "HTTP/1.1"
        # Paths that do not start with "/?" are treated as static files.
        if self.path[:2] != "/?"[:len(self.path)]:
            self.try_serve_file()
        else:
            self.generate_and_send_results()

    def do_POST(self):
        # Dispatch form-encoded actions to the module-level handlers.
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length).decode('utf-8')
        post_data = parse_qs(post_data)
        print("Incoming post: ", post_data)
        action = post_data["action"][0]
        # NOTE(review): `message` is unbound (NameError) when action is
        # unknown, or when action == "reset" with a name other than
        # "b.diddy" -- consider initializing message = "" first.
        if action == "reset":
            # Reset is gated on a hard-coded admin name.
            name = post_data["name"][0]
            if name == "b.diddy":
                handle_reset()
            message = ""
        elif action == "register":
            message = handle_register_user()
        elif action == "unregister":
            name = post_data["name"][0]
            message = handle_unregister_user(name)
        elif action == "saved":
            name = post_data["name"][0]
            number = int(post_data["number"][0])
            message = handle_bunnies_saved(name, number)
        elif action == "bunny_saved":
            name = post_data["name"][0]
            message = handle_bunny_saved(name)
        self.send_response(200, message)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
|
"""
数据库写操作示例 2
"""
import pymysql
# Connection parameters for the local MySQL server.
args = {
    "host": "localhost",
    "port": 3306,
    "user": "root",
    "password": "123456",
    "database": "stu",
    "charset": "utf8"
}
# Connect to the database
db = pymysql.connect(**args)
# Create a cursor -- the object that executes SQL and holds the results
cur = db.cursor()
# Batch write operations: insert / delete / update
stu_list = [
    ("张三",18,'m',90),
    ("李四",19,'w',91),
    ("王五",20,'m',92)
]
try:
    # Parameterized statement executed once per tuple in stu_list.
    sql = "insert into cls (name,age,sex,score) values (%s,%s,%s,%s);"
    cur.executemany(sql,stu_list)
    db.commit()
except Exception as e:
    # On any failure, report it and undo the partial batch.
    print(e)
    db.rollback()
# Close the cursor and the database connection
cur.close()
db.close()
|
# Read two whitespace-separated tokens, reverse each, and print the
# lexicographically larger reversal.
# Fixes: map(str, ...) was redundant (str.split() already yields strings),
# and slicing replaces the ''.join(reversed(...)) round trip.
a, b = input().split()
print(max(a[::-1], b[::-1]))
import dask.dataframe as dd
import dask_searchcv as dcv
import numpy as np
import pandas as pd
import pytest
from dask.dataframe.utils import assert_eq
from dask_glm.regularizers import Regularizer
from sklearn.pipeline import make_pipeline
from dask_ml.datasets import make_classification, make_counts, make_regression
from dask_ml.linear_model import (LinearRegression, LogisticRegression,
PoissonRegression)
from dask_ml.linear_model.utils import add_intercept
@pytest.fixture(params=[r() for r in Regularizer.__subclasses__()])
def solver(request):
    """Parametrized fixture for all the solver names.

    NOTE(review): the params are Regularizer *instances*, not solver name
    strings -- the fixture name/docstring may be misleading; confirm
    against how LogisticRegression(solver=...) consumes it in these tests.
    """
    return request.param
@pytest.fixture(params=[r() for r in Regularizer.__subclasses__()])
def regularizer(request):
    """Parametrized fixture yielding one instance of every Regularizer
    subclass (one test invocation per regularizer)."""
    return request.param
class DoNothingTransformer(object):
    """Identity transformer: satisfies the sklearn transformer API while
    leaving the data untouched."""

    def fit(self, X, y=None):
        # Nothing to learn; return self for chaining, as sklearn expects.
        return self

    def transform(self, X, y=None):
        return X

    def fit_transform(self, X, y=None):
        # fit() is a no-op, so this is still the identity.
        return self.fit(X, y).transform(X, y)

    def get_params(self, deep=True):
        # No hyper-parameters to report.
        return {}
# Module-level sample problem. NOTE(review): most tests below create their
# own local X, y -- confirm anything still reads these module globals.
X, y = make_classification(chunks=50)
def test_lr_init(solver):
    """Constructing LogisticRegression with each solver must not raise."""
    LogisticRegression(solver=solver)
def test_pr_init(solver):
    """Constructing PoissonRegression with each solver must not raise."""
    PoissonRegression(solver=solver)
@pytest.mark.parametrize('fit_intercept', [True, False])
def test_fit(fit_intercept, solver):
    """LogisticRegression fit/predict/predict_proba run end-to-end."""
    features, target = make_classification(n_samples=100, n_features=5, chunks=50)
    model = LogisticRegression(fit_intercept=fit_intercept)
    model.fit(features, target)
    model.predict(features)
    model.predict_proba(features)
@pytest.mark.parametrize('solver', ['admm', 'newton', 'lbfgs',
                                    'proximal_grad', 'gradient_descent'])
def test_fit_solver(solver):
    """Every named solver can fit a small classification problem."""
    features, target = make_classification(n_samples=100, n_features=5, chunks=50)
    LogisticRegression(solver=solver).fit(features, target)
@pytest.mark.parametrize('fit_intercept', [True, False])
def test_lm(fit_intercept):
    """LinearRegression fits and predicts; intercept_ set when requested."""
    features, target = make_regression(n_samples=100, n_features=5, chunks=50)
    model = LinearRegression(fit_intercept=fit_intercept)
    model.fit(features, target)
    model.predict(features)
    if fit_intercept:
        assert model.intercept_ is not None
@pytest.mark.parametrize('fit_intercept', [True, False])
def test_big(fit_intercept):
    """LogisticRegression handles the default-sized classification problem."""
    features, target = make_classification(chunks=50)
    model = LogisticRegression(fit_intercept=fit_intercept)
    model.fit(features, target)
    model.predict(features)
    model.predict_proba(features)
    if fit_intercept:
        assert model.intercept_ is not None
@pytest.mark.parametrize('fit_intercept', [True, False])
def test_poisson_fit(fit_intercept):
    """PoissonRegression fits count data; deviance and intercept available."""
    features, target = make_counts(n_samples=100, chunks=500)
    model = PoissonRegression(fit_intercept=fit_intercept)
    model.fit(features, target)
    model.predict(features)
    model.get_deviance(features, target)
    if fit_intercept:
        assert model.intercept_ is not None
def test_in_pipeline():
    """LogisticRegression composes with a pass-through step in a Pipeline."""
    features, target = make_classification(n_samples=100, n_features=5, chunks=50)
    make_pipeline(DoNothingTransformer(), LogisticRegression()).fit(features, target)
def test_gridsearch():
    """GridSearchCV over C works on a pipeline wrapping LogisticRegression."""
    features, target = make_classification(n_samples=100, n_features=5, chunks=50)
    pipeline = make_pipeline(DoNothingTransformer(), LogisticRegression())
    param_grid = {
        'logisticregression__C': [1000, 100, 10, 2]
    }
    dcv.GridSearchCV(pipeline, param_grid, cv=3).fit(features, target)
def test_add_intercept_dask_dataframe():
    """add_intercept prepends an 'intercept' column and rejects frames that already have one."""
    frame = dd.from_pandas(pd.DataFrame({"A": [1, 2, 3]}), npartitions=2)
    expected = dd.from_pandas(
        pd.DataFrame({"intercept": [1, 1, 1], "A": [1, 2, 3]},
                     columns=['intercept', 'A']),
        npartitions=2)
    assert_eq(add_intercept(frame), expected)
    # A pre-existing 'intercept' column must be reported, not silently overwritten.
    clashing = dd.from_pandas(pd.DataFrame({"intercept": [1, 2, 3]}), npartitions=2)
    with pytest.raises(ValueError):
        add_intercept(clashing)
def test_unknown_chunks_ok():
    """Regression test for https://github.com/dask/dask-ml/issues/145:
    fitting must tolerate dask arrays with unknown chunk sizes (from .values)."""
    features = dd.from_pandas(pd.DataFrame(np.random.uniform(size=(10, 5))), 2).values
    target = dd.from_pandas(pd.Series(np.random.uniform(size=(10,))), 2).values
    model = LinearRegression(fit_intercept=False)
    model.fit(features, target)
|
# -*- coding: utf-8; -*-
import argparse
from customs import Agency
from customs.utils import logger
from .cli_base import CliBase
class CatalogCommand(CliBase):
    """`catalog` CLI command.

    Registers the `catalog` sub-parser and attaches the `data-centers` and
    `services` sub-commands to it.
    """

    def __init__(self, sub_parser):
        logger.setup_logging('cli')
        if not isinstance(sub_parser, argparse._SubParsersAction):
            # Fixed error-message grammar ("should of an" -> "should be an").
            raise TypeError(logger.error("parser should be an instance of argparse._SubParsersAction"))
        # Set up rules commands.
        self._parser = sub_parser.add_parser('catalog')
        super(CatalogCommand, self).__init__()
        self._add_sub_commands()

    def _add_sub_commands(self):
        """Attach the catalog sub-commands to this command's parser."""
        sub_parser = self._parser.add_subparsers(
            title='catalog',
            description='command to search agency metadata.',
            help='additional help'
        )
        # add data-centers sub command.
        CatalogDataCenters(sub_parser)
        # add services sub command.
        CatalogServices(sub_parser)
class CatalogDataCenters(object):
    """`catalog data-centers` sub-command: lists the agency's data centers."""

    def __init__(self, sub_parser):
        if not isinstance(sub_parser, argparse._SubParsersAction):
            # TypeError (not bare Exception), and the message now names the type
            # actually checked — consistent with CatalogCommand.
            raise TypeError("parser should be an instance of argparse._SubParsersAction")
        # Set up data-centers catalog sub command.
        self._parser = sub_parser.add_parser('data-centers')
        self._parser.set_defaults(func=self._list)

    def _list(self, args, **extra_args):
        """Fetch the data centers once and log them (one per line when a list)."""
        if not isinstance(args, argparse.Namespace):
            raise TypeError(logger.error("args should be an instance of argparse.Namespace"))
        agency_host = '{0}:{1}'.format(args.host, args.agency_port)
        agency = Agency(host=agency_host, token=args.token)
        data_centers = agency.catalog.datacenters()
        if isinstance(data_centers, list):
            # Bug fix: iterate the already-fetched result instead of calling the
            # remote API a second time.
            for data_center in data_centers:
                logger.info(data_center)
        else:
            logger.info(data_centers)
        # (Removed the dead `exit_code = 0; if exit_code != 0: exit(...)` block —
        # it could never trigger.)
class CatalogServices(object):
    """`catalog services` sub-command: lists the services known to the agency."""

    def __init__(self, sub_parser):
        if not isinstance(sub_parser, argparse._SubParsersAction):
            # TypeError with a message naming the type actually checked,
            # consistent with CatalogCommand.
            raise TypeError("parser should be an instance of argparse._SubParsersAction")
        # Set up services catalog sub command.
        self._parser = sub_parser.add_parser('services')
        self._parser.set_defaults(func=self._list)

    def _list(self, args, **extra_args):
        """Log every service name returned by the agency catalog."""
        if not isinstance(args, argparse.Namespace):
            raise TypeError(logger.error("args should be an instance of argparse.Namespace"))
        agency_host = '{0}:{1}'.format(args.host, args.agency_port)
        agency = Agency(host=agency_host, token=args.token)
        for services in agency.catalog.services():
            if isinstance(services, dict):
                # Only the service names are reported; the tag values were never used.
                for service in services:
                    logger.info(service)
            else:
                logger.info(services)
        # (Removed the dead `exit_code = 0; if exit_code != 0: exit(...)` block.)
|
import prog
import tensorflow as tf
# Test different models in this file
if __name__ == "__main__":
    model = tf.keras.Sequential()
    # Bug fix: Conv2D requires `filters` and `kernel_size`; calling it with no
    # arguments raises a TypeError. Use a minimal valid configuration
    # (32 filters, 3x3 kernel) with an explicit input shape for the first layer.
    model.add(tf.keras.layers.Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
import pandas as pd
from numpy.random import RandomState
import sys
# Split the CSV named on the command line into an 80/20 train/test pair,
# written next to the input as "<name>train.csv" / "<name>test.csv"
# (the input path's last three characters — "csv" — are replaced).
source_path = sys.argv[1]
frame = pd.read_csv(source_path)
stem = source_path[:-3]
rng = RandomState()
train = frame.sample(frac=0.8, random_state=rng)
# Everything not sampled into the training set becomes the test set.
test = frame.loc[~frame.index.isin(train.index)]
train.to_csv(stem + "train.csv", index=False)
test.to_csv(stem + "test.csv", index=False)
|
import pytest
from more_itertools import side_effect
from intervaltree import Interval
from evaluate.aligned_pairs import AlignedPair, AlignmentType, AlignedPairs
from unittest.mock import patch, PropertyMock
class TestAlignedPair:
    """Unit tests for AlignedPair.get_alignment_type classification."""

    def test_getAlignmentType_pairIsInsertion(self):
        # No query position -> the reference base is an insertion w.r.t. the query.
        pair = AlignedPair(query_pos=None, ref_pos=10, ref_base="A")
        assert pair.get_alignment_type() == AlignmentType.INSERTION

    def test_getAlignmentType_pairIsDeletion(self):
        # No reference position -> classified as a deletion.
        pair = AlignedPair(query_pos=10, ref_pos=None, ref_base=None)
        assert pair.get_alignment_type() == AlignmentType.DELETION

    def test_getAlignmentType_pairIsMatch(self):
        # Upper-case reference base -> match (presumably the pysam case
        # convention; TODO confirm in AlignedPair).
        pair = AlignedPair(query_pos=10, ref_pos=15, ref_base="A")
        assert pair.get_alignment_type() == AlignmentType.MATCH

    def test_getAlignmentType_pairIsMismatch(self):
        # Lower-case reference base -> mismatch.
        pair = AlignedPair(query_pos=10, ref_pos=15, ref_base="a")
        assert pair.get_alignment_type() == AlignmentType.MISMATCH
class TestAlignedPairs:
    """Tests for the AlignedPairs collection.

    Covers: construction, alignment-type mapping (with the per-pair method
    mocked), query/ref position extraction with optional None-to-halfway
    transformation, the halfway transform itself, and interval slicing.
    NOTE: several tests patch `get_alignment_type` /
    `transform_Nones_to_halfway_positions` with ordered side_effects, so the
    iteration order of AlignedPairs matters to these tests.
    """

    # --- construction -----------------------------------------------------

    def test_initAlignedPairsNoArguments(self):
        actual = AlignedPairs()
        expected = []
        assert actual == expected

    def test_initAlignedPairsEmptyList(self):
        actual = AlignedPairs([])
        expected = []
        assert actual == expected

    def test_initAlignedPairsListWith4AlignedPairs(self):
        # Plain (query_pos, ref_pos, ref_base) tuples must be promoted to AlignedPair.
        aligned_pairs_as_tuples = [
            (0, 34, "A"),
            (1, None, None),
            (2, 35, "A"),
            (None, 36, "A"),
        ]
        actual = AlignedPairs(aligned_pairs_as_tuples)
        expected = [
            AlignedPair(*aligned_pair_as_tuple)
            for aligned_pair_as_tuple in aligned_pairs_as_tuples
        ]
        assert actual == expected

    # --- alignment types (per-pair method mocked, one value per pair) ----

    @patch.object(
        AlignedPair,
        "get_alignment_type",
        side_effect=[
            AlignmentType.MATCH,
            AlignmentType.MISMATCH,
            AlignmentType.DELETION,
            AlignmentType.INSERTION,
        ],
    )
    def test_getAlignmentTypes(self, alignedPairMocked):
        aligned_pairs = AlignedPairs([AlignedPair()] * 4)
        actual = aligned_pairs.get_alignment_types()
        expected = [
            AlignmentType.MATCH,
            AlignmentType.MISMATCH,
            AlignmentType.DELETION,
            AlignmentType.INSERTION,
        ]
        assert actual == expected

    # --- query positions --------------------------------------------------

    def test_getQueryPositions_emptyAlignedPairsReturnsEmptyList(self, *mocks):
        aligned_pairs = AlignedPairs()
        actual = aligned_pairs.get_query_positions()
        expected = []
        assert actual == expected

    def test_getQueryPositions_allAlignedPairsAreNoneNoTransformReturnsNone(self):
        aligned_pairs = AlignedPairs([AlignedPair(None, 10, "A")])
        actual = aligned_pairs.get_query_positions()
        expected = [None]
        assert actual == expected

    @patch.object(
        AlignedPairs, "transform_Nones_to_halfway_positions", side_effect=ValueError
    )
    def test_getQueryPositions_allAlignedPairsAreNoneAndTransformRaisesException(
        self, *mocks
    ):
        # The ValueError from the transform must propagate out.
        aligned_pairs = AlignedPairs([AlignedPair(None, 10, "A")])
        with pytest.raises(ValueError):
            aligned_pairs.get_query_positions(
                transform_Nones_into_halfway_positions=True
            )

    def test_getQueryPositions_twoPairsOneNoneNoTransformReturnsTwoPositionsOneNone(
        self
    ):
        aligned_pairs = AlignedPairs(
            [AlignedPair(None, 10, "A"), AlignedPair(5, 11, "C")]
        )
        actual = aligned_pairs.get_query_positions()
        expected = [None, 5]
        assert actual == expected

    @patch.object(
        AlignedPairs, "transform_Nones_to_halfway_positions", return_value=[4.5, 5.0]
    )
    def test_getQueryPositions_twoPairsOneNoneTransformReturnsTwoPositionsOneHalfway(
        self, *mocks
    ):
        aligned_pairs = AlignedPairs(
            [AlignedPair(None, 10, "A"), AlignedPair(5, 11, "C")]
        )
        actual = aligned_pairs.get_query_positions(
            transform_Nones_into_halfway_positions=True
        )
        expected = [4.5, 5.0]
        assert actual == expected

    # --- reference positions (mirrors the query-position tests) ----------

    def test_getRefPositions_emptyAlignedPairsReturnsEmptyList(self, *mocks):
        aligned_pairs = AlignedPairs()
        actual = aligned_pairs.get_ref_positions()
        expected = []
        assert actual == expected

    def test_getRefPositions_allAlignedPairsAreNoneNoTransformReturnsNone(self):
        aligned_pairs = AlignedPairs([AlignedPair(1, None, None)])
        actual = aligned_pairs.get_ref_positions()
        expected = [None]
        assert actual == expected

    @patch.object(
        AlignedPairs, "transform_Nones_to_halfway_positions", side_effect=ValueError
    )
    def test_getRefPositions_allAlignedPairsAreNoneAndTransformRaisesException(
        self, *mocks
    ):
        aligned_pairs = AlignedPairs([AlignedPair(10, None, None)])
        with pytest.raises(ValueError):
            aligned_pairs.get_ref_positions(transform_Nones_into_halfway_positions=True)

    def test_getRefPositions_twoPairsOneNoneNoTransformReturnsTwoPositionsOneNone(self):
        aligned_pairs = AlignedPairs(
            [AlignedPair(4, None, None), AlignedPair(5, 11, "C")]
        )
        actual = aligned_pairs.get_ref_positions()
        expected = [None, 11]
        assert actual == expected

    @patch.object(
        AlignedPairs, "transform_Nones_to_halfway_positions", return_value=[4.5, 5.0]
    )
    def test_getRefPositions_twoPairsOneNoneTransformReturnsTwoPositionsOneHalfway(
        self, *mocks
    ):
        aligned_pairs = AlignedPairs(
            [AlignedPair(1, None, None), AlignedPair(2, 5, "C")]
        )
        actual = aligned_pairs.get_ref_positions(
            transform_Nones_into_halfway_positions=True
        )
        expected = [4.5, 5.0]
        assert actual == expected

    # --- halfway-position transform --------------------------------------

    def test_transformNonesToHalfwayPositions_emptyReturnsEmpty(self):
        actual = AlignedPairs.transform_Nones_to_halfway_positions([])
        expected = []
        assert actual == expected

    def test_transformNonesToHalfwayPositions_noNonesReturnsInput(self):
        array = [1, 2]
        actual = AlignedPairs.transform_Nones_to_halfway_positions(array)
        expected = array
        assert actual == expected

    def test_transformNonesToHalfwayPositions_severalNonesReturnsTransformedArray(self):
        # Nones adjacent to a known position become that position +/- 0.5.
        array = [None, 5, None, None, 6, None, None]
        actual = AlignedPairs.transform_Nones_to_halfway_positions(array)
        expected = [4.5, 5, 5.5, 5.5, 6, 6.5, 6.5]
        assert actual == expected

    def test_transformNonesToHalfwayPositions_allNonesButOneReturnsTransformedArray(
        self
    ):
        array = [None, None, 0, None, None]
        actual = AlignedPairs.transform_Nones_to_halfway_positions(array)
        expected = [-0.5, -0.5, 0, 0.5, 0.5]
        assert actual == expected

    def test_transformNonesToHalfwayPositions_allNonesRaisesException(self):
        # With no anchor position at all the transform is undefined.
        with pytest.raises(ValueError):
            AlignedPairs.transform_Nones_to_halfway_positions([None, None])

    # --- slicing by query interval ----------------------------------------

    def test_getPairsInQueryInterval_nullIntervalReturnsEmpty(self):
        interval = Interval(2, 2)
        aligned_pairs = AlignedPairs(
            [AlignedPair(1, 30, "A"), AlignedPair(2, 31, "C"), AlignedPair(3, 32, "T")]
        )
        actual = aligned_pairs.get_pairs_in_query_interval(interval)
        expected = AlignedPairs()
        assert actual == expected

    def test_getPairsInQueryInterval_intervalNotInPairsReturnsEmpty(self):
        interval = Interval(5, 10)
        aligned_pairs = AlignedPairs(
            [AlignedPair(1, 30, "A"), AlignedPair(2, 31, "C"), AlignedPair(3, 32, "T")]
        )
        actual = aligned_pairs.get_pairs_in_query_interval(interval)
        expected = AlignedPairs()
        assert actual == expected

    def test_getPairsInQueryInterval_intervalOverlapsLeftOfPairs(self):
        interval = Interval(5, 12)
        aligned_pairs = AlignedPairs(
            [
                AlignedPair(None, 30, "A"),
                AlignedPair(11, 31, "C"),
                AlignedPair(12, 32, "T"),
            ]
        )
        actual = aligned_pairs.get_pairs_in_query_interval(interval)
        expected = aligned_pairs[:2]
        assert actual == expected

    def test_getPairsInQueryInterval_intervalOverlapsRightOfPairs(self):
        interval = Interval(4, 12)
        aligned_pairs = AlignedPairs(
            [
                AlignedPair(3, 30, "A"),
                AlignedPair(4, 31, "C"),
                AlignedPair(None, 32, "T"),
            ]
        )
        actual = aligned_pairs.get_pairs_in_query_interval(interval)
        expected = aligned_pairs[1:]
        assert actual == expected

    def test_getPairsInQueryInterval_intervalSpansPairs(self):
        interval = Interval(1, 10)
        aligned_pairs = AlignedPairs(
            [
                AlignedPair(4, 30, "A"),
                AlignedPair(None, 31, "A"),
                AlignedPair(5, 32, "A"),
                AlignedPair(None, 33, "A"),
                AlignedPair(6, 34, "A"),
                AlignedPair(None, 35, "A"),
            ]
        )
        actual = aligned_pairs.get_pairs_in_query_interval(interval)
        expected = aligned_pairs
        assert actual == expected

    def test_getPairsInQueryInterval_intervalEnvelopedInPairs(self):
        interval = Interval(5, 7)
        aligned_pairs = AlignedPairs(
            [
                AlignedPair(4, 30, "A"),
                AlignedPair(None, 31, "A"),
                AlignedPair(5, 32, "A"),
                AlignedPair(None, 33, "A"),
                AlignedPair(6, 34, "A"),
                AlignedPair(None, 35, "A"),
            ]
        )
        actual = aligned_pairs.get_pairs_in_query_interval(interval)
        expected = aligned_pairs[2:5]
        assert actual == expected

    # --- index lookup by query interval (returns (start, stop) indices) --

    def test_getIndexOfQueryInterval_nullIntervalReturnsEmpty(self):
        interval = Interval(2, 2)
        aligned_pairs = AlignedPairs(
            [AlignedPair(1, 30, "A"), AlignedPair(2, 31, "C"), AlignedPair(3, 32, "T")]
        )
        actual = aligned_pairs.get_index_of_query_interval(interval)
        expected = (1, 1)
        assert actual == expected

    def test_getIndexOfQueryInterval_intervalNotInPairsReturnsEmpty(self):
        interval = Interval(5, 10)
        aligned_pairs = AlignedPairs(
            [AlignedPair(1, 30, "A"), AlignedPair(2, 31, "C"), AlignedPair(3, 32, "T")]
        )
        actual = aligned_pairs.get_index_of_query_interval(interval)
        expected = (3, 3)
        assert actual == expected

    def test_getIndexOfQueryInterval_intervalOverlapsLeftOfPairs(self):
        interval = Interval(5, 12)
        aligned_pairs = AlignedPairs(
            [
                AlignedPair(None, 30, "A"),
                AlignedPair(11, 31, "C"),
                AlignedPair(12, 32, "T"),
            ]
        )
        actual = aligned_pairs.get_index_of_query_interval(interval)
        expected = (0, 2)
        assert actual == expected

    def test_getIndexOfQueryInterval_intervalOverlapsRightOfPairs(self):
        interval = Interval(4, 12)
        aligned_pairs = AlignedPairs(
            [
                AlignedPair(3, 30, "A"),
                AlignedPair(4, 31, "C"),
                AlignedPair(None, 32, "T"),
            ]
        )
        actual = aligned_pairs.get_index_of_query_interval(interval)
        expected = (1, 3)
        assert actual == expected

    def test_getIndexOfQueryInterval_intervalSpansPairs(self):
        interval = Interval(1, 10)
        aligned_pairs = AlignedPairs(
            [
                AlignedPair(4, 30, "A"),
                AlignedPair(None, 31, "A"),
                AlignedPair(5, 32, "A"),
                AlignedPair(None, 33, "A"),
                AlignedPair(6, 34, "A"),
                AlignedPair(None, 35, "A"),
            ]
        )
        actual = aligned_pairs.get_index_of_query_interval(interval)
        expected = (0, 6)
        assert actual == expected

    def test_getIndexOfQueryInterval_intervalEnvelopedInPairs(self):
        interval = Interval(5, 7)
        aligned_pairs = AlignedPairs(
            [
                AlignedPair(4, 30, "A"),
                AlignedPair(None, 31, "A"),
                AlignedPair(5, 32, "A"),
                AlignedPair(None, 33, "A"),
                AlignedPair(6, 34, "A"),
                AlignedPair(None, 35, "A"),
            ]
        )
        actual = aligned_pairs.get_index_of_query_interval(interval)
        expected = (2, 5)
        assert actual == expected
|
#coding:utf-8
#
# id: bugs.core_1935
# title: SIMILAR TO character classes are incorrectly recognized
# description:
# Checked on:
# 2.5.9.27107: OK, 0.406s.
# 3.0.4.32924: OK, 2.250s.
# 4.0.0.916: OK, 1.562s.
#
# tracker_id: CORE-1935
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
# Fixtures for CORE-1935: SIMILAR TO character classes.
substitutions_1 = []
# No schema is needed; all SELECTs run against rdb$database.
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# Each SELECT emits a row only when the SIMILAR TO pattern matches; the
# trailing SQL comments state the expected per-statement output.
test_script_1 = """
set list on;
-- two subsequent classes in double braces, concatenated together:
select 11 as r from rdb$database where '1a' similar to '[[:DIGIT:]][[:ALPHA:]]'; -- output: 11
-- comparison with SINGLE class of ONE character length: either digit or whitespace or alpha:
select 12 as r from rdb$database where '1a' similar to '[[:DIGIT:][:WHITESPACE:][:ALPHA:]]'; -- no output, no error
-- comparison with character '2' followed by either digit or whitespace or alpha (should produce non-empty result):
select 21 as r from rdb$database where '2a' similar to '2[[:DIGIT:][:WHITESPACE:][:ALPHA:]]'; -- output: 21
-- comparison with SINGLE class of ONE character length: digit either alnum either alpha
select 22 as r from rdb$database where '2a' similar to '[[:DIGIT:][:ALNUM:][:ALPHA:]]'; -- no output, no error
-- comparison with TWO classes: 1st is result of concatenation alnum and whitespace, 2ns is alnum:
select 31 as r from rdb$database where '3a' similar to '[[:ALNUM:][:WHITESPACE:]][[:ALNUM:]]'; -- 31
-- comparison with TWO classes: 1st alnum, 2nd is result of concatenation whitespace and digit:
select 32 as r from rdb$database where '32' similar to '[[:ALNUM:]][[:WHITESPACE:][:DIGIT:]]'; -- 32
select 41 as r from rdb$database where '4a' SIMILAR TO '[%[:DIGIT:][:ALNUM:]]%';
select 42 as r from rdb$database where '4b' SIMILAR TO '[[:DIGIT:][:ALNUM:]]'; -- no output, no error
select 51 as r from rdb$database where '5a' SIMILAR TO '%[[:DIGIT:][:ALNUM:]%]'; -- 51
select 52 as r from rdb$database where '5a' similar to '[%[:DIGIT:][:ALPHA:]][[:ALNUM:]%]'; -- 52
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
# Expected isql output: the R values of the statements that must match.
expected_stdout_1 = """
R 11
R 21
R 31
R 32
R 41
R 51
R 52
"""
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
    # Execute the script and compare the whitespace-normalized output
    # against the expected stdout.
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
|
__author__ = 'iriyadays@gmail.com'
import settings
import clanapi
import tornado.ioloop
import tornado.options
import tornado.web
import json
import helper
from tornado.options import define, options
# CLI option: --port (default 8888).
define("port", default=8888, help="run on the given port", type=int)
# Shared API client used by all request handlers.
clan_api = clanapi.ClanApi()
class IndexHandler(tornado.web.RequestHandler):
    """Landing page: renders the clan found by tag 'E7VMK' with its player roster."""

    def get(self):
        clan = clan_api.search("E7VMK")[0]
        roster = clan_api.clan(clan['ClanID'])['Players']
        self.render('index.html', clan=clan, players=roster)
class PlayHandler(tornado.web.RequestHandler):
    """Player detail page for /player/<player_id>."""

    def get(self, player_id):
        player = clan_api.player(player_id)
        try:
            # Printing forces the payload through the console encoding; if that
            # fails the name is unrepresentable, so substitute a placeholder.
            print(player)
        except UnicodeEncodeError:
            player['Name'] = 'Error resolve response'
        self.render('player.html', player=player,
                    find_hero_level=helper.find_hero_level,
                    find_army_level=helper.find_army_level,
                    find_spell_level=helper.find_spell_level,
                    get_if_hide=helper.get_if_hide,
                    summary_build_level=helper.summary_build_level)
# Tornado application configuration: template/static paths come from settings.
app_settings = {
    "template_path": settings.TEMPLATE_PATH,
    "static_path": settings.STATIC_PATH,
}
# URL routing: index page plus per-player detail pages.
application = tornado.web.Application([
    (r"/", IndexHandler),
    (r"/player/(.+)", PlayHandler)
], **app_settings)
if __name__ == "__main__":
    tornado.options.parse_command_line()
    application.listen(options.port)
    tornado.ioloop.IOLoop.current().start()
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bounding box tracker implementation."""
from typing import List
from data import Person
from tracker.tracker import Track
from tracker.tracker import Tracker
class BoundingBoxTracker(Tracker):
  """Tracks objects based on bounding box similarity.

  Similarity is currently defined as intersection-over-union (IoU).
  """

  def _compute_similarity(self, persons: List[Person]) -> List[List[float]]:
    """Computes similarity based on intersection-over-union (IoU).

    Args:
      persons: An array of detected `Person`s.

    Returns:
      A 2D array of shape [num_det, num_tracks] with pairwise similarity
      scores between detections and tracks.
    """
    if (not persons) or (not self._tracks):
      return [[]]
    return [[self._iou(person, track) for track in self._tracks]
            for person in persons]

  def _iou(self, person: Person, track: Track) -> float:
    """Computes the intersection-over-union (IoU) between a pose and a track.

    Args:
      person: A `Person`.
      track: A `Track`.

    Returns:
      The IoU between the person and the track, in [0, 1]; larger values
      indicate more box similarity.
    """
    det_box = person.bounding_box
    trk_box = track.person.bounding_box
    # Intersection rectangle (empty if the boxes do not overlap).
    left = max(det_box.start_point.x, trk_box.start_point.x)
    top = max(det_box.start_point.y, trk_box.start_point.y)
    right = min(det_box.end_point.x, trk_box.end_point.x)
    bottom = min(det_box.end_point.y, trk_box.end_point.y)
    if left >= right or top >= bottom:
      return 0.0
    intersection = (right - left) * (bottom - top)
    det_area = ((det_box.end_point.x - det_box.start_point.x) *
                (det_box.end_point.y - det_box.start_point.y))
    trk_area = ((trk_box.end_point.x - trk_box.start_point.x) *
                (trk_box.end_point.y - trk_box.start_point.y))
    return float(intersection) / (det_area + trk_area - intersection)
|
from utils import *
OPERATORS = {'+': operator.add, '*': operator.mul}
def eval_op(ops, args):
    """Pop one operator and its two operands; push the result back onto `args`."""
    op = ops.pop()
    rhs = args.pop()
    lhs = args.pop()
    args.append(OPERATORS[op](lhs, rhs))
def eval(exp, precedence):
    """Evaluate an arithmetic expression (digits, '+', '*', parentheses, no spaces)
    with a shunting-yard-style operator stack.

    `precedence` maps operator character -> precedence level (higher binds
    tighter). NOTE: this function shadows the builtin `eval`; callers in this
    file rely on the name, so it is kept.
    """
    ops, args, i = [], [], 0
    while i < len(exp):
        c = exp[i]
        if c.isdigit():
            # Consume the full multi-digit number starting at i.
            j = i + 1
            while j < len(exp) and exp[j].isdigit(): j += 1
            args.append(int(exp[i:j]))
            i = j - 1
        elif c in OPERATORS:
            # Reduce the pending operator first when it binds at least as
            # tightly (left associativity). NOTE(review): a single `if` (rather
            # than a `while`) is sufficient only because '+' and '*' are
            # associative, so reduction order cannot change the result.
            if ops and ops[-1] != '(' and precedence[c] <= precedence[ops[-1]]:
                eval_op(ops, args)
            ops.append(c)
        elif c == '(':
            ops.append('(')
        else: # ')'
            # Reduce everything back to the matching open parenthesis.
            while ops[-1] != '(': eval_op(ops, args)
            ops.pop() # pop '('
        i += 1
    # Apply any remaining operators; the single leftover value is the result.
    while ops: eval_op(ops, args)
    return args[0]
def day18_1(exps):
    """Part 1: '+' and '*' share one precedence level (pure left-to-right)."""
    flat = {'+': 0, '*': 0}
    return sum(eval(expression, flat) for expression in exps)
def day18_2(exps):
    """Part 2: '+' binds tighter than '*'."""
    plus_first = {'+': 1, '*': 0}
    return sum(eval(expression, plus_first) for expression in exps)
if __name__ == "__main__":
    # Puzzle input: one expression per line; spaces are stripped so every
    # token is a single character (multi-digit numbers are re-assembled in eval).
    exps = data(18, lambda x: x.replace(' ', ''))
    print(f'day18_1: {day18_1(exps)}')
    print(f'day18_2: {day18_2(exps)}')
# day18_1: 30753705453324
# day18_2: 244817530095503
# python3 day18.py 0.05s user 0.01s system 93% cpu 0.068 total
|
import logging
from functools import partial
import wandb
import torch
from torch.utils.data import DataLoader
from climart.data_wrangling.constants import TEST_YEARS, LAYERS, OOD_PRESENT_YEARS, TRAIN_YEARS, get_flux_mean, \
get_data_dims, OOD_FUTURE_YEARS, OOD_HISTORIC_YEARS
from climart.data_wrangling.h5_dataset import ClimART_HdF5_Dataset
from climart.models.column_handler import ColumnPreprocesser
from climart.models.interface import get_trainer, is_gnn, is_graph_net, get_model, get_input_transform
from climart.utils.hyperparams_and_args import get_argparser
from climart.utils.preprocessing import Normalizer
from climart.utils.utils import set_seed, year_string_to_list, get_logger, get_target_variable, get_target_types
# Disable scientific notation in printed tensors for readable logs.
torch.set_printoptions(sci_mode=False)
# Module-level logger (climart's wrapper around stdlib logging).
log = get_logger(__name__)
def main(params, net_params, other_args, only_final_eval=False, *args, **kwargs):
    """Train and evaluate a ClimART model end-to-end.

    Sets up W&B logging, builds the train/val/test (and optional OOD) datasets,
    constructs the trainer, runs training (unless `only_final_eval`), and logs
    final test statistics.

    Args:
        params: experiment/training hyperparameters (dict from the argparser).
        net_params: model architecture hyperparameters.
        other_args: remaining CLI namespace (wandb mode, OOD flags, ...).
        only_final_eval: skip training and only run the final test passes.
        *args, **kwargs: forwarded to `trainer.fit` (e.g. a checkpoint path).
    """
    set_seed(params['seed']) # for reproducibility
    # If you don't want to use Wandb (Weights&Biases) logging you may remove all wandb related code.
    # wandb_mode=disabled will suppress logging (but will throw errors if wandb is not installed in the environment).
    # wandb.login()
    project = "ClimART"
    run = wandb.init(
        project=project,
        settings=wandb.Settings(start_method='fork'),
        tags=[],
        # entity="",
        name=params['wandb_name'],
        group=params['ID'],
        mode=other_args.wandb_mode,
        id=params['wandb_ID'], resume="allow", reinit=True
    )
    spatial_dim, in_dim = get_data_dims(params['exp_type'])
    if is_gnn(params['model']) or is_graph_net(params['model']):
        # cp maps the data to a graph structure needed for a GCN or GraphNet
        cp = ColumnPreprocesser(
            n_layers=spatial_dim[LAYERS], input_dims=in_dim, **params['preprocessing_dict']
        )
        input_transform = cp.get_preprocesser
    else:
        cp = None
        input_transform = partial(get_input_transform, model_class=get_model(params['model'], only_class=True))
    # Keyword arguments shared by every dataset split below.
    dataset_kwargs = dict(
        exp_type=params['exp_type'],
        target_type=params['target_type'],
        target_variable=params['target_variable'],
        input_transform=input_transform,
        input_normalization=params['in_normalize'],
        spatial_normalization_in=params['spatial_normalization_in'],
        log_scaling=params['log_scaling'],
    )
    # Training set:
    train_years = year_string_to_list(params['train_years'])
    assert all([y in TRAIN_YEARS for y in train_years]), f"All years in --train_years must be in {TRAIN_YEARS}!"
    train_set = ClimART_HdF5_Dataset(years=train_years, name='Train',
                                     output_normalization=params['out_normalize'],
                                     spatial_normalization_out=params['spatial_normalization_out'],
                                     load_h5_into_mem=params['load_train_into_mem'],
                                     **dataset_kwargs)
    # Validation set:
    val_set = ClimART_HdF5_Dataset(years=year_string_to_list(params['validation_years']), name='Val',
                                   output_normalization=None,
                                   load_h5_into_mem=params['load_val_into_mem'],
                                   **dataset_kwargs)
    # Main Present-day Test Set(s):
    # To compute metrics for each test year, we will have a separate dataloader for each of the test years (2007-14).
    test_names = [f'Test_{test_year}' for test_year in TEST_YEARS]
    test_sets = [
        ClimART_HdF5_Dataset(years=[test_year], name=test_name, output_normalization=None, **dataset_kwargs)
        for test_year, test_name in zip(TEST_YEARS, test_names)
    ]
    # OOD Test Sets:
    # This will load the 1991 OOD test year, that accounts for Mt. Pinatubo eruptions.
    # It is challenging for clear-sky conditions in particular
    # --> To load the future or historic OOD test sets, use years=OOD_FUTURE_YEARS or OOD_HISTORIC_YEARS
    ood_test_sets, ood_testloader_names = [], []
    if other_args.test_ood_1991:
        ood_test_sets += [ClimART_HdF5_Dataset(years=OOD_PRESENT_YEARS, name='OOD Test', **dataset_kwargs)]
        ood_testloader_names += ['Test_OOD']
    if other_args.test_ood_historic:
        ood_test_sets += [ClimART_HdF5_Dataset(years=OOD_HISTORIC_YEARS, name='Historic Test', **dataset_kwargs)]
        ood_testloader_names += ['Historic']
    if other_args.test_ood_future:
        ood_test_sets += [ClimART_HdF5_Dataset(years=OOD_FUTURE_YEARS, name='Future Test', **dataset_kwargs)]
        ood_testloader_names += ['Future']
    # Derive model I/O dimensions from the training data.
    net_params['input_dim'] = train_set.input_dim
    net_params['spatial_dim'] = train_set.spatial_dim
    net_params['out_dim'] = train_set.output_dim
    params['target_type'] = get_target_types(params.pop('target_type'))
    log.info(f" {'Targets are' if len(params['target_type']) > 1 else 'Target is'} {' '.join(params['target_type'])}")
    params['target_variable'] = get_target_variable(params.pop('target_variable'))
    params['training_set_size'] = len(train_set)
    output_normalizer = train_set.output_normalizer
    output_postprocesser = train_set.output_variable_splitter
    if not isinstance(output_normalizer, Normalizer):
        # Without output normalization, initialize the output bias to the
        # training-set flux mean for faster convergence.
        log.info('Initializing out layer bias to output train dataset mean!')
        params['output_bias_mean_init'] = True
        out_layer_bias = get_flux_mean()
    else:
        params['output_bias_mean_init'] = False
        out_layer_bias = None
    trainer_kwargs = dict(
        model_name=params['model'], model_params=net_params,
        device=params['device'], seed=params['seed'],
        model_dir=params['model_dir'],
        out_layer_bias=out_layer_bias,
        output_postprocesser=output_postprocesser,
        output_normalizer=output_normalizer,
    )
    if cp is not None:
        trainer_kwargs['column_preprocesser'] = cp
    trainer = get_trainer(**trainer_kwargs)
    dataloader_kwargs = {'pin_memory': True, 'num_workers': params['workers']}
    eval_batch_size = 512
    trainloader = DataLoader(train_set, batch_size=params['batch_size'], shuffle=True, **dataloader_kwargs)
    valloader = DataLoader(val_set, batch_size=eval_batch_size, **dataloader_kwargs)
    testloaders = [
        DataLoader(test_set, batch_size=eval_batch_size, **dataloader_kwargs) for test_set in test_sets
    ]
    ood_testloaders = [
        DataLoader(test_set, batch_size=eval_batch_size, **dataloader_kwargs) for test_set in ood_test_sets
    ]
    wandb.config.update({**net_params, **params})
    if not only_final_eval:
        best_valid = trainer.fit(trainloader, valloader,
                                 hyper_params=params,
                                 testloader=testloaders,
                                 testloader_names=test_names,
                                 ood_testloader=ood_testloaders,
                                 *args, **kwargs)
        wandb.log({'Final/Best_Val_MAE': best_valid})
        log.info(f" Testing the best model as measured by validation performance (best={best_valid:.3f})")
        # Free training data before the (memory-hungry) final evaluation.
        del train_set, trainloader, valloader
    if other_args.save_model_to_wandb in [True, 'true', 'True'] and not only_final_eval:
        wandb.save(trainer.save_model_filepath)
    final_test_kwargs = dict(use_best_model=True, verbose=True, model_verbose=False)
    if only_final_eval:
        final_test_kwargs = {**kwargs, **final_test_kwargs}
    final_test_stats = trainer.test(
        testloaders=testloaders,
        testloader_names=[f'Final_yearly/{name}' for name in test_names],
        aggregated_test_name='Final/Test', **final_test_kwargs
    )
    wandb.log(final_test_stats)
    final_ood_stats = trainer.test(
        testloaders=ood_testloaders,
        testloader_names=[f'Final/{name}' for name in ood_testloader_names], **final_test_kwargs
    )
    wandb.log(final_ood_stats)
    run.finish()
if __name__ == '__main__':
    logging.basicConfig()
    params, net_params, other_args = get_argparser()
    if other_args.resume_training_file is None and other_args.resume_ID is None:
        # Fresh run.
        main(
            params, net_params, other_args
        )
    else:
        # Resume training from a model checkpoint.
        # Bug fix: logging uses printf-style arguments, not print-style ones;
        # the old call passed the filename as a format argument to a message
        # with no '%s' placeholder, which makes logging raise/swallow a
        # formatting error instead of printing the filename.
        log.info(' --------------------> Resuming training of %s', other_args.resume_training_file)
        saved_model = torch.load(other_args.resume_training_file)
        params_resume = saved_model['hyper_params']
        net_params_resume = saved_model['model_params']
        params_resume['epochs'] += other_args.additional_epochs
        # Backfill any newly-added CLI options missing from the checkpointed params.
        for k, v in params.items():
            if k not in params_resume:
                params_resume[k] = v
        main(
            params_resume, net_params_resume, other_args,
            checkpoint=other_args.resume_training_file
        )
|
import time
import sys
import math
import numpy as np
import torch
import torch.nn as nn
from onmt.Utils import use_gpu
from cocoa.neural.trainer import Statistics
from cocoa.neural.trainer import Trainer as BaseTrainer
from cocoa.io.utils import create_path
class Trainer(BaseTrainer):
    ''' Class that controls the training process which inherits from Cocoa '''

    def validate(self, valid_iter):
        """ Validate model.
        valid_iter: validate data iterator
        Returns:
            :obj:`onmt.Statistics`: validation loss statistics
        """
        # Set model in validating mode.
        self.model.eval()
        stats = Statistics()
        # NOTE(review): `.next()` is the Python 2 iterator protocol; under
        # Python 3 this requires valid_iter to define a `next()` method of its
        # own — confirm. The returned batch count is not used afterwards.
        num_val_batches = valid_iter.next()
        dec_state = None
        for batch in valid_iter:
            if batch is None:
                # A `None` batch marks a boundary: reset the decoder state.
                dec_state = None
                continue
            elif not self.model.stateful:
                dec_state = None
            enc_state = dec_state.hidden if dec_state is not None else None
            outputs, attns, dec_state = self._run_batch(batch, None, enc_state)
            _, batch_stats = self.valid_loss.compute_loss(batch.targets, outputs)
            stats.update(batch_stats)
        # Set model back to training mode
        self.model.train()
        return stats

    def _run_batch(self, batch, dec_state=None, enc_state=None):
        # Thin dispatch: every batch is currently handled as a seq2seq batch.
        return self._run_seq2seq_batch(batch, dec_state, enc_state)

    def _run_seq2seq_batch(self, batch, dec_state=None, enc_state=None):
        """Run the model forward on one batch; returns (outputs, attns, dec_state)."""
        encoder_inputs = batch.encoder_inputs
        decoder_inputs = batch.decoder_inputs
        targets = batch.targets
        lengths = batch.lengths
        #tgt_lengths = batch.tgt_lengths
        context_inputs = batch.context_inputs
        scene_inputs = batch.scene_inputs
        outputs, attns, dec_state = self.model(encoder_inputs,
                decoder_inputs, context_inputs, scene_inputs,
                lengths, dec_state, enc_state)
        return outputs, attns, dec_state

    def _gradient_accumulation(self, true_batchs, total_stats, report_stats):
        """One optimization pass over `true_batchs`, updating both statistics objects."""
        if self.grad_accum_count > 1:
            self.model.zero_grad()
        dec_state = None
        for batch in true_batchs:
            if batch is None:
                dec_state = None
                continue
            elif not self.model.stateful:
                dec_state = None
            enc_state = dec_state.hidden if dec_state is not None else None
            outputs, attns, dec_state = self._run_batch(batch, None, enc_state)
            loss, batch_stats = self.train_loss.compute_loss(batch.targets, outputs)
            # NOTE(review): zero_grad + optimizer step happen per batch, so
            # gradients are never actually accumulated across batches even when
            # grad_accum_count > 1 (the zero_grad above the loop is then
            # redundant) — confirm whether accumulation was intended.
            self.model.zero_grad()
            loss.backward()
            self.optim.step()
            total_stats.update(batch_stats)
            report_stats.update(batch_stats)
            # Don't backprop fully.
            if dec_state is not None:
                dec_state.detach()
|
def compute_factorial(n):
    """Return n! for a non-negative integer n.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError('Please, provide a natural number')
    result = 1
    # Multiplying from 2 upward is equivalent (the factor 1 is a no-op).
    for factor in range(2, n + 1):
        result *= factor
    return result
import numpy as np
import io
# Load fasttext vectors into embedding matrix
# Load fasttext vectors into embedding matrix
def load_fasttext_vectors(fname, tokenizer):
    """Build a (vocab_size, 300) embedding matrix from a FastText text-format file.

    Only words present in `tokenizer.word_index` are filled; all other rows
    stay zero. Row 0 is reserved (Keras-style 1-based word indices).

    Args:
        fname: path to the FastText `.vec` file (one word + 300 floats per line).
        tokenizer: object with a `word_index` dict mapping word -> row index.

    Returns:
        numpy array of shape (len(tokenizer.word_index) + 1, 300).
    """
    vocab_size = len(tokenizer.word_index) + 1
    print('Loading FastText Model and building embedding matrix with vocabulary...')
    embedding_matrix = np.zeros((vocab_size, 300))
    vocabulary = dict(tokenizer.word_index.items())
    keys = vocabulary.keys()
    # Bug fix: use a context manager so the file handle is closed even on
    # error (the original never closed it).
    with io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
        for line in fin:
            tokens = line.rstrip().split(' ')
            if tokens[0] in keys:
                # numpy converts the string tokens to floats on assignment.
                embedding_matrix[vocabulary[tokens[0]]] = tokens[1:]
    print('DONE')
    return embedding_matrix
# Load glove vectors into embedding matrix
def load_glove_vectors(fname, tokenizer):
    """Build a (vocab_size, 200) embedding matrix from a GloVe text file.

    Args:
        fname: path to the GloVe text file (word followed by 200 floats per line).
        tokenizer: Keras-style tokenizer exposing `word_index` (word -> 1-based id).

    Returns:
        numpy array of shape (len(word_index) + 1, 200). Row 0 (padding) and
        rows for out-of-file words remain zero.
    """
    vocab_size = len(tokenizer.word_index) + 1
    print('Loading Glove Model...')
    glove_model = {}
    # Fix: the file was opened but never closed; also read with an explicit
    # encoding so behavior does not depend on the platform default.
    with open(fname, 'r', encoding='utf-8') as f:
        for line in f:
            parts = line.split()
            glove_model[parts[0]] = np.array([float(value) for value in parts[1:]])
    print(len(glove_model), ' words loaded!')
    print('Building embedding matrix with vocabulary...')
    embedding_matrix = np.zeros((vocab_size, 200))
    # (Removed a dead `i` hit-counter that was incremented but never used.)
    for word, index in tokenizer.word_index.items():
        if word in glove_model:
            embedding_matrix[index] = glove_model[word]
    print('DONE')
    del glove_model
    return embedding_matrix
from collections import namedtuple, defaultdict
import sys,re
import pickle
def getData(fileName):
    """Parse a DBLP-style citation dump into lookup tables.

    Records are separated by blank lines; each field line starts with a
    '#'-tag ('#*' title, '#@' authors, '#t' year, '#c' venue, '#index' id,
    '#%' reference id, '#!' abstract).

    Returns:
        papers: dict mapping index -> (title, authors, year, venue, refs, abstract)
        authorToPapers: defaultdict(set) mapping author name -> paper indices
        venueToPapers: defaultdict(set) mapping venue -> paper indices

    Fixes: converted the Python 2 `print` statement (a SyntaxError on
    Python 3) to the print() function, and materialized the `map()` result
    which is a one-shot iterator on Python 3 but was stored and re-iterated.
    """
    papers = dict()
    authorToPapers = defaultdict(set)
    venueToPapers = defaultdict(set)
    with open(fileName, 'r') as f:
        l = f.readline()
        count = 1
        while l:
            # Get data of a single paper.
            paperTitle = ''
            authors = []
            year = -1
            venue = ''
            index = ''
            refs = set()
            abstract = ''
            while l and l != '\n':
                tmp = l
                l = f.readline()
                # A field may continue over several lines until the next '#' tag.
                while l and l != '\n' and (not l.startswith('#')):
                    tmp += (' ' + l.strip())
                    l = f.readline()
                # Remove non-ASCII characters.
                tmp = re.sub(r'[^\x00-\x7F]+', ' ', tmp)
                if tmp.startswith('#*'):        # paper title
                    paperTitle = tmp[2:].strip()
                elif tmp.startswith('#@'):      # authors (comma-separated)
                    authors = [a.strip() for a in tmp[2:].split(',')]
                elif tmp.startswith('#t'):      # year
                    year = int(tmp[2:])
                elif tmp.startswith('#c'):      # publication venue
                    venue = tmp[2:].strip()
                elif tmp.startswith('#index'):  # index id of this paper
                    index = tmp[6:].strip()
                elif tmp.startswith('#%'):      # id of a reference of this paper
                    refs.add(tmp[2:].strip())
                elif tmp.startswith('#!'):      # abstract
                    abstract = tmp[2:].strip()
            if count % 100000 == 0:
                print('Parsed', count, 'papers')
            count += 1
            # Reasonable assumption: paper MUST have an index, title and authors!
            if paperTitle != '' and authors != [] and index != '':
                papers[index] = (paperTitle, authors, year, venue, refs, abstract)
                for a in authors:
                    authorToPapers[a].add(index)
                venueToPapers[venue].add(index)
            l = f.readline()
    return papers, authorToPapers, venueToPapers
if __name__ == '__main__':
    # Parse the dump named on the command line and pickle each lookup table.
    papers, authors, venues = getData(sys.argv[1])
    outputs = (
        ('paperData.pkl', papers),
        ('authorData.pkl', authors),
        ('venueData.pkl', venues),
    )
    for out_path, payload in outputs:
        with open(out_path, 'wb') as f:
            pickle.dump(payload, f)
|
# First Pre-process the image
import torch
from torch import nn, optim
from torchvision import datasets, transforms, models
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from PIL import Image
import time
import json
from collections import OrderedDict
import seaborn as sns
import argparse
import load
import img
# Command-line interface: the image path and checkpoint path are positional;
# the category-name mapping, top-k count and GPU flag are optional.
parser = argparse.ArgumentParser(description='This is predict function')
parser.add_argument('inputDirectory_image', help=' Enter directory path of the image', action='store', default='/home/workspace/ImageClassifier/flowers/test/1/image_06743.jpg')
parser.add_argument('checkpoint_path', help='Enter the checkpoint path', action='store', default='/home/workspace/ImageClassifier/checkpoint.pth')
parser.add_argument('--category_names', dest='category', default='cat_to_name.json')
parser.add_argument('--top_k', dest='topk', type=int, default=5)
parser.add_argument('--gpu', dest='gpu_use', action='store_true')
args = parser.parse_args()
# Unpack into module-level names used by the rest of the script.
checkpoint = args.checkpoint_path
inputdir = args.inputDirectory_image
category_name = args.category
topk_value = args.topk
gpu_u = args.gpu_use
def imshow(image, ax=None, title=None):
    """Display a normalized image tensor on a matplotlib axis and return the axis."""
    if ax is None:
        _, ax = plt.subplots()
    # PyTorch tensors are C x H x W; matplotlib expects H x W x C.
    array = image.numpy().transpose((1, 2, 0))
    # Undo the ImageNet normalization applied during preprocessing, then clip
    # to [0, 1] so the result doesn't render as noise.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    array = np.clip(std * array + mean, 0, 1)
    ax.imshow(array)
    return ax
# PREDICTION TIME !
# Load the category-id -> flower-name mapping used to label predictions.
with open(category_name) as f:
    flower_to_name = json.load(f)
def predict(image, pretrained_model, flower_to_name, topk_value):
    """Return a DataFrame of the top-k flower names with their probabilities."""
    tensor = img.process_image(image)
    pretrained_model.to('cpu')
    tensor.unsqueeze_(0)  # add the batch dimension
    probabilities = torch.exp(pretrained_model.forward(tensor))
    top_probs, top_classes = probabilities.topk(topk_value)
    prob_list = top_probs.detach().numpy().tolist()
    class_list = top_classes.tolist()
    frame = pd.DataFrame({
        'class': pd.Series(pretrained_model.class_to_idx),
        'flower_name': pd.Series(flower_to_name),
    })
    frame = frame.set_index('class')
    frame = frame.iloc[class_list[0]]
    frame['predictions'] = prob_list[0]
    return frame
# img = (data_dir +'/test' + '/27/' + 'image_06864.jpg')
# val = predict(img, pretrained_model)
# print(val)
def sanity(img):
    """Show the image next to a bar chart of its top predicted flower classes.

    NOTE(review): the original body was broken — `img - process_image(img)`
    discarded its result, `sns.subplot` does not exist, `predict` was called
    with the wrong arity, and `plt.xlabel` was set twice. Rewritten to the
    apparent intent; confirm against the original notebook.
    """
    labels = predict(img, pretrained_model, flower_to_name, topk_value)
    plt.figure(figsize=(5, 10))
    ax = plt.subplot(2, 1, 1)
    # The `img` parameter shadows the module imported at the top of the file,
    # so bind the module under a different local name.
    import img as _img_module
    image_tensor = _img_module.process_image(img)
    imshow(image_tensor, ax)
    sns.set_style("whitegrid")
    plt.subplot(2, 1, 2)
    sns.barplot(x=labels['predictions'], y=labels['flower_name'], color='#047495')
    plt.xlabel("Probability of Prediction")
    plt.ylabel("")
    plt.show()
    sns.set_style("white")
# model = load.model()
# print(model)
if gpu_u:
    checkpoint = load.load_model(checkpoint)
    arch_name = checkpoint['structure']
    # NOTE(review): `pretrained_model` is never assigned in this branch, so
    # the call below raises NameError at runtime — presumably the model
    # should be rebuilt from `arch_name` / the loaded checkpoint first.
    # TODO confirm against load.load_model's contract.
    labels = predict(inputdir, pretrained_model, flower_to_name, topk_value)
    # print(labels)
else:
    # CPU path: the checkpoint loader returns the ready-to-use model.
    pretrained_model = load.load_model_without_cpu(checkpoint)
    # pretrained_model = train.pretrained_model
    labels = predict(inputdir, pretrained_model, flower_to_name, topk_value)
    print(labels)
|
#
# Alberto MH 2021
#
from typing import (
List,
Dict,
Union,
)
from datetime import datetime, date
from sqlalchemy.orm.session import Session
import conf
from fetch.symbol_fetcher import SymbolFetcher
from fetch.data_fetcher import DataFetcher
from db.connection import DatabaseConnection
class SqueezeCompass:
    """Orchestrator: wires the database, symbol list and data fetcher together."""

    def __init__(self) -> None:
        #
        # 0. Instantiate structures needed to connect to the database, and
        # create tables if they don't yet exist.
        self.db = DatabaseConnection()
        self.db_session: Session = self.db.get_db_session()
        # 1. Load a list of symbols into memory (will carry out checks on
        # freshness of constituents file and re-fetch if stale).
        self.symbol_fetcher = SymbolFetcher()
        symbols: List[str] = self.symbol_fetcher.get_symbols()
        # 2. Initialise the fetcher that will collect data for every symbol in the S&P500.
        self.data_fetcher = DataFetcher(symbols, self.db_session)
        # 3. Fetch data for only the first symbol to check whether `key_stats__date_short_interest` has
        # changed relative to the last recorded fetch. If it has, all data will be fetched again.
        # None on the very first run (no snapshot recorded yet).
        self.date_of_last_snapshot: date = self.data_fetcher.get_date_of_last_snapshot()
        if self.date_of_last_snapshot is not None:
            # NOTE(review): indentation was lost in this copy; the lines below
            # are grouped under the guard because prev/cur dates are only
            # consumed by __main__ when a previous snapshot exists — confirm
            # against the original file.
            self.prev_short_interest_date: date = self.data_fetcher.get_date_short_interest_for_last_snapshot()
            single_symbol_data: Dict[str, Union[str, dict, datetime]] = DataFetcher.fetch_data_for_single_symbol(symbols[0])
            self.cur_short_interest_date: date = single_symbol_data['key_stats']['date_short_interest']
if __name__ == '__main__':
    squeeze_compass = SqueezeCompass()
    if squeeze_compass.date_of_last_snapshot is not None:
        # A snapshot exists: re-fetch everything only if the reported short
        # interest date has moved forward since the last snapshot.
        if squeeze_compass.cur_short_interest_date > squeeze_compass.prev_short_interest_date:
            print(f"{conf.TIMESTAMP()} | There has been an update to the short interest figures since last ",
                  f"snapshot on {squeeze_compass.date_of_last_snapshot}. Re-scraping data now.")
            squeeze_compass.data_fetcher.fetch_new_daily_snapshot()
            squeeze_compass.data_fetcher.save_snapshot_as_json()
        else:
            print(f"{conf.TIMESTAMP()} | prev_short_interest_date {squeeze_compass.prev_short_interest_date} | cur_short_interest_date {squeeze_compass.cur_short_interest_date}.")
            print(f"{conf.TIMESTAMP()} | Data has not changed since the last snapshot on {squeeze_compass.date_of_last_snapshot}.")
    else:
        # First run: nothing recorded yet, take the initial snapshot.
        print(f"{conf.TIMESTAMP()} | Performing first data snapshot.")
        squeeze_compass.data_fetcher.fetch_new_daily_snapshot()
        squeeze_compass.data_fetcher.save_snapshot_as_json()
|
# https://www.interviewbit.com/problems/max-sum-contiguous-subarray/
# Max Sum Contiguous Subarray
class Solution:
    """Find the contiguous run of non-negative numbers with the largest sum.

    (Despite the link in the header comment, this implements InterviewBit's
    MAXSET problem rather than Kadane's max-subarray.)
    """

    # @param A : list of integers
    # @return a list of integers
    @staticmethod
    def maxset(A):
        """Return the first maximal run of non-negative values in A whose sum
        is the greatest; [] when A is empty or entirely negative.

        Fixes: the original method had no `self` parameter, making it
        uncallable on an instance; `@staticmethod` keeps Solution.maxset(A)
        working while also allowing Solution().maxset(A). Also hoists the
        doubled `sum(l)` computation.
        """
        best = []
        best_sum = -1  # sentinel: any non-negative run beats this
        i = 0
        while i < len(A):
            # Skip the negative gap before the next run.
            while i < len(A) and A[i] < 0:
                i += 1
            run = []
            while i < len(A) and A[i] >= 0:
                run.append(A[i])
                i += 1
            run_sum = sum(run)
            # Strict '>' keeps the earliest run on ties, as the original did.
            if run_sum > best_sum:
                best = run
                best_sum = run_sum
        return best
import re
# Advent of Code 2018, day 9: the elves' marble game.
data = re.compile(r"([0-9]+) players; last marble is worth ([0-9]+) points")
players = []
circle = [0, 1]  # marbles 0 and 1 are pre-placed
with open("input.txt", "r") as f:
    line = f.readline().strip()
    n_players = int(data.match(line).group(1))
    n_marbles = int(data.match(line).group(2))
for _ in range(n_players):
    players.append(0)
current = 1  # index of the current marble within `circle`
for i in range(2, n_marbles + 1):
    if i % 23 == 0:
        # `i % len(players) - 1` parses as `(i % len(players)) - 1`, which is
        # equivalent to `(i - 1) % len(players)` thanks to Python's negative
        # indexing — the player who placed marble i scores.
        players[i % len(players) - 1] += i
        players[i % len(players) - 1] += circle.pop(current - 7)
        # NOTE(review): `current` can go negative here; pop/insert then count
        # from the end of the list. This appears intended but relies on
        # Python's negative-index semantics — verify against puzzle rules.
        current -= 7
    else:
        current = (current + 2) % len(circle)
        circle.insert(current, i)
print(max(players))
# Copyright (c) 2004-2021 Primate Labs Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import errno
import glob
import hashlib
import os
from pathlib import Path
import plistlib
import sys
import hkdf
import hashlib
import hmac
from math import ceil
import secrets
import uuid
import vpenc
from Crypto.Cipher import AES
def main():
    """Decrypt a VoodooPad document and dump every page's plist and payload."""
    if len(sys.argv) != 3:
        print('Usage: <password> <encrypted VP document>')
        return
    password = sys.argv[1]
    vp_path = sys.argv[2]
    context = vpenc.VPEncryptionContext()
    context.load(vp_path, password)
    # Loading tags primes the context; the return value is not used further.
    tags = context.load_plist('tags.plist')
    pages_dir = Path(vp_path, 'pages')
    for plist_path in pages_dir.rglob('*.plist'):
        print(plist_path)
        rel_path = os.path.relpath(plist_path, vp_path)
        payload_path = os.path.splitext(rel_path)[0]
        info = context.load_plist(rel_path)
        print(info)
        payload = context.load_file(payload_path)
        print(payload)
# Script entry point.
if __name__ == '__main__':
    main()
|
"""Support for controlling GPIO pins of a Raspberry Pi."""
import logging
from RPi import GPIO # pylint: disable=import-error
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
# Module-level logger for this integration.
_LOGGER = logging.getLogger(__name__)
# Domain under which this integration appears in Home Assistant configuration.
DOMAIN = "rpi_gpio"
def setup(hass, config):
    """Set up the Raspberry PI GPIO component.

    Registers a start listener which, once Home Assistant is running,
    installs the stop listener that releases all GPIO resources.
    """

    def cleanup_gpio(event):
        """Stuff to do before stopping."""
        GPIO.cleanup()

    def prepare_gpio(event):
        """Stuff to do when home assistant starts."""
        # Only register the cleanup once startup has actually happened.
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_gpio)

    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, prepare_gpio)
    # BCM numbering: port numbers refer to Broadcom SoC channels, not board pins.
    GPIO.setmode(GPIO.BCM)
    return True
def setup_output(port):
    """Set up a GPIO as output.

    Args:
        port: BCM channel number to configure.
    """
    GPIO.setup(port, GPIO.OUT)
def setup_input(port, pull_mode):
    """Configure a GPIO channel as input with the requested pull resistor.

    `pull_mode` == "DOWN" selects a pull-down; anything else selects pull-up.
    """
    if pull_mode == "DOWN":
        pull = GPIO.PUD_DOWN
    else:
        pull = GPIO.PUD_UP
    GPIO.setup(port, GPIO.IN, pull)
def write_output(port, value):
    """Write a value to a GPIO.

    Args:
        port: BCM channel number.
        value: logic level to drive (truthy -> high).
    """
    GPIO.output(port, value)
def read_input(port):
    """Read a value from a GPIO.

    Returns the current logic level of the BCM channel `port`.
    """
    return GPIO.input(port)
def edge_detect(port, event_callback, bounce):
    """Add detection for RISING and FALLING events.

    Args:
        port: BCM channel number to watch.
        event_callback: callable invoked on each edge.
        bounce: debounce time in milliseconds.
    """
    GPIO.add_event_detect(port, GPIO.BOTH, callback=event_callback, bouncetime=bounce)
|
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
import numpy as np
from scipy.spatial import distance
import pickle
import torch
from torch.autograd import Variable
class Node:
    """A node of the cluster tree, linked to its parent, siblings and first child."""

    def __init__(self, parent, previous, n, c, centroid):
        self.parent = parent
        self.previous = previous  # previous sibling (same parent, same depth)
        self.next = n             # next sibling
        # Fix: the `c` argument was silently ignored and `child` was always
        # forced to None. Honor the argument — every existing call site passes
        # None, so current callers are unaffected.
        self.child = c            # first child
        self.centroid = centroid  # cluster center
        self.density = 0          # number of samples assigned to this node
class Stack:
    """A work-queue entry: a chunk of data plus its depth and parent node id."""

    def __init__(self, depth, parent, data):
        self.depth = depth    # tree depth at which this chunk will be clustered
        self.parent = parent  # id of the node that produced this chunk
        self.data = data      # samples belonging to the parent's cluster
def addNode(centroids, tree, parent, depth, node_id, stack, data, labels):
    """Append one Node per centroid to `tree` as a linked sibling group.

    Each new node is appended at index `node_id`; siblings reference each
    other by these integer ids. The data rows of each cluster are pushed on
    `stack` for possible further splitting, and the node's `density` records
    how many rows it received.

    Returns the updated (tree, stack, node_id).
    """
    n = np.shape(centroids)[0]
    for i in range(0, n):
        if i == 0:  # first node: no previous sibling
            # NOTE(review): when n == 1 this single node still gets
            # next = node_id + 1, pointing at a node that is never created —
            # confirm callers never use `next` on singleton groups.
            tree.append(Node(parent, None, (node_id + 1), None, centroids[i]))
        elif i == n - 1:  # last node: no next sibling
            tree.append(Node(parent, node_id - 1, None, None, centroids[i]))
        else:
            tree.append(Node(parent, node_id - 1, node_id + 1, None, centroids[i]))
        stack, density = splitData(stack, data, labels, depth, node_id, i)
        tree[node_id].density = density
        print('Node id ' + str(node_id) + ' Density: ' + str(density))
        node_id = node_id + 1
    return tree, stack, node_id
def splitData(stack, data, labels, depth, parent, i):
    """Push the rows of `data` labeled `i` onto the work stack.

    Returns the stack and the number of rows in the selected cluster.
    """
    subset = data[labels == i]
    size = np.shape(subset)[0]
    stack.append(Stack(depth + 1, parent, subset))
    return stack, size
def closest_to_centroids(data, y, centroids, n_cl, num_dim):
    """Snap each centroid to the actual data point nearest to it.

    For every cluster id, finds the member of `data` (per labels `y`) closest
    to that cluster's centroid and returns those points as the new centroids.
    """
    best_dist = [555555555] * n_cl  # sentinel larger than any expected distance
    snapped = np.zeros((n_cl, num_dim))
    for row_idx, point in enumerate(data):
        cluster = y[row_idx]
        d = euclidean(point, centroids[cluster])
        if best_dist[cluster] > d:
            best_dist[cluster] = d
            snapped[cluster] = point
    return snapped
def construct(data, n_cl, density, max_depth):
    """Build a cluster tree by recursively k-means-splitting dense subsets.

    A subset is split further only while it holds more than `density` samples
    and its depth is below `max_depth`. Every node's centroid is snapped to a
    real data point. Returns the flat list of Nodes forming the tree.
    """
    tree = []
    stack = []
    node_id = 0  # to keep track of nodes in tree
    depth = 0    # to keep track of depth in the tree
    parent = None
    # 1. generate n root nodes by k-means clustering
    kmeans = MiniBatchKMeans(n_clusters=n_cl, batch_size=3000)
    kmeans.fit(data)
    y = kmeans.labels_
    centroids = kmeans.cluster_centers_
    new_centroids = closest_to_centroids(data, y, centroids, n_cl, data.shape[1])
    # 2. Add root nodes to the tree
    tree, stack, node_id = addNode(new_centroids, tree, parent, depth, node_id, stack, data, y)
    # 3. Keep splitting queued subsets until they are small or deep enough.
    while stack:
        data_subset = stack.pop()
        depth = data_subset.depth
        if np.shape(data_subset.data)[0] > density and depth < max_depth:
            parent = data_subset.parent
            #kmeans = KMeans(n_clusters=n_cl_2, n_jobs=-1)
            kmeans = MiniBatchKMeans(n_clusters=n_cl, batch_size=3000)
            kmeans.fit(data_subset.data)
            y = kmeans.labels_
            centroids = kmeans.cluster_centers_
            # NOTE(review): this passes the FULL `data` while the labels `y`
            # come from `data_subset.data`; the lengths differ, so centroids
            # may be snapped to points outside the subset. Looks like it
            # should be `data_subset.data` — confirm before changing.
            new_centroids = closest_to_centroids(data, y, centroids, n_cl, data_subset.data.shape[1])
            temp = node_id  # id of the first child about to be added
            tree, stack, node_id = addNode(new_centroids, tree, parent, depth, node_id, stack, data_subset.data, y)
            tree[parent].child = temp
        else:
            continue
    return tree
def euclidian(a, b):
    """Euclidean distance along the last dimension (torch tensors)."""
    diff = a - b
    return (diff * diff).sum(-1).sqrt()
    # return (torch.abs(a - b)).sum(-1)  # Manhattan variant, kept for reference
def euclidean(a, b):
    """Euclidean distance along the last axis (numpy arrays)."""
    diff = a - b
    return np.sqrt((diff * diff).sum(-1))
def convolution(a, b):
    """2-D convolution of input `a` with kernel `b` (thin F.conv2d wrapper)."""
    result = torch.nn.functional.conv2d(a, b)
    return result
def cosine(a, b):
    """Cosine similarity along the last dimension (torch tensors)."""
    similarity = torch.nn.CosineSimilarity(dim=-1, eps=1e-6)
    return similarity(a, b)
def measure_distance(current_node, data, tree, smallest_distance):
    """Return, for each data row, the index of the nearest node centroid.

    `current_node` is unused and `smallest_distance` is passed straight
    through — both are kept only for interface compatibility.
    """
    nodes = tree
    distances = euclidian(data, nodes)
    # _, nearest = torch.max(distances, -1)  # use max for the cosine variant
    _, nearest = torch.min(distances, -1)
    nearest = nearest.type(torch.IntTensor).cpu().numpy()
    return nearest, smallest_distance, nodes
def predict(tree, dataset):
    """Return (nearest centroids, node ids) for every row of `dataset`."""
    centroids, node_ids = predict_image(tree, dataset)
    return centroids, node_ids
def predict_image(tree, data):
    """Match every row of `data` to its nearest node in `tree`.

    Returns (stacked nearest centroids, array of winning node indices).
    NOTE(review): `tree` is assumed to be a 3-D tensor of centroids,
    (1, n_nodes, dim) — it is flattened to (n_nodes, dim) below. Confirm
    against the caller.
    """
    current_node = 0
    best_node = current_node
    smallest_distance = 555555555  # sentinel larger than any expected distance
    best_node, smallest_distance, nodes = measure_distance(current_node, data, tree, smallest_distance)
    nodes = nodes.view(nodes.size()[1], nodes.size()[2])
    # Gather the centroid that won for each input row.
    result = torch.stack([nodes[best_node[i]] for i in range(0, np.shape(best_node)[0])])
    return result, best_node
def save_tree(tree, filename):
    """Pickle `tree` to `filename`.

    Uses a context manager so the file handle is closed even if pickling
    raises (the original closed it only on the success path).
    """
    with open(filename, 'wb') as handle:
        pickle.dump(tree, handle)
def load_tree(filename):
    """Unpickle and return a tree previously written by save_tree.

    Fixes two defects: the file was opened in text mode ('r'), which makes
    pickle.load fail on Python 3 (pickle requires binary mode), and the
    handle was never closed.
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.