import datetime
from peewee import CharField, DateTimeField, SqliteDatabase, TextField, Model
db = SqliteDatabase('info.db')
class BaseModel(Model):
class Meta:
database = db
indexes = (
# create a non-unique index on sex and email
(('sex', 'email'), False),
)
@classmethod
def filters(cls, sex=None, email=None, page_number=1, items_per_page=20):
"""this filter code for example demo"""
if not sex and not email:
qs = cls.select()
elif sex and email:
qs = cls.select().where(cls.sex == sex, cls.email.contains(email))
elif sex:
qs = cls.select().where(cls.sex == sex)
elif email:
qs = cls.select().where(cls.email.contains(email))
cls.result = qs.order_by(cls.id).paginate(page_number, items_per_page)
return cls
@classmethod
def counts(cls):
return cls.result.count()
@classmethod
def values_list(cls, *args, **kwargs):
result = []
for arg in args:
qs_expression = "{0}.select({0}.{1}).iterator()".format(cls.__name__, arg)
for row in eval(qs_expression):
result.append(eval('row.{0}'.format(arg)))
return result
class ShanghaiPersonInfo(BaseModel):
create_datetime = DateTimeField(default=datetime.datetime.utcnow, null=True)  # pass the callable, not its result, so each row gets its own timestamp
username = CharField()
email = CharField()
phone = CharField()
sex = CharField()
zone = TextField()
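# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal, hypothetical demo of the model above: create the table, insert
# one row, then page through results with the `filters` helper. All field
# values here are made up.
if __name__ == "__main__":
    db.connect()
    db.create_tables([ShanghaiPersonInfo])
    ShanghaiPersonInfo.create(
        username="demo",
        email="demo@example.com",
        phone="021-0000-0000",
        sex="female",
        zone="Pudong",
    )
    page = ShanghaiPersonInfo.filters(sex="female", page_number=1, items_per_page=20)
    print(page.counts())                          # number of rows on this page
    print([p.username for p in page.result])      # the paginated rows themselves
    print(ShanghaiPersonInfo.values_list("email"))  # flat list of one column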
|
# File: proofpoint_consts.py
# Copyright (c) 2017-2020 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
PP_API_BASE_URL = "https://tap-api-v2.proofpoint.com"
PP_API_PATH_CLICKS_BLOCKED = "/v2/siem/clicks/blocked"
PP_API_PATH_CLICKS_PERMITTED = "/v2/siem/clicks/permitted"
PP_API_PATH_MESSAGES_BLOCKED = "/v2/siem/messages/blocked"
PP_API_PATH_MESSAGES_DELIVERED = "/v2/siem/messages/delivered"
PP_API_PATH_ISSUES = "/v2/siem/issues"
PP_API_PATH_ALL = "/v2/siem/all"
PP_API_PATH_CAMPAIGN = "/v2/campaign/{}"
PP_API_PATH_FORENSICS = "/v2/forensics"
PP_API_PATH_DECODE = "/v2/url/decode"
# Constants relating to 'get_error_message_from_exception'
ERR_CODE_MSG = "Error code unavailable"
ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters"
PARSE_ERR_MSG = "Unable to parse the error message. Please check the asset configuration and|or action parameters"
TYPE_ERR_MSG = "Error occurred while connecting to the Proofpoint TAP Server. Please check the asset configuration and|or action parameters."
ERR_MSG_FORMAT_WITH_CODE = "Error Code: {}. Error Message: {}"
ERR_MSG_FORMAT_WITHOUT_CODE = "Error Message: {}"
# Constants relating to 'validate_integer'
INVALID_INTEGER_ERR_MSG = "Please provide a valid integer value in the {}"
INVALID_NON_NEGATIVE_INTEGER_ERR_MSG = "Please provide a valid non-negative integer value in the {}"
INITIAL_INGESTION_WINDOW_KEY = "'initial_ingestion_window' configuration parameter"
# Constant relating to 'handle_py_ver_compat_for_input_str'
PY_2TO3_ERR_MSG = "Error occurred while handling python 2to3 compatibility for the input string"
# Constant relating to fetching the python major version
ERR_FETCHING_PYTHON_VERSION = "Error occurred while fetching the Phantom server's Python major version"
# Constants relating to error messages while processing response from server
EMPTY_RESPONSE_MSG = "Status code: {}. Empty response and no information in the header"
HTML_RESPONSE_PARSE_ERR_MSG = "Cannot parse error details"
JSON_PARSE_ERR_MSG = 'Unable to parse JSON response. Error: {}'
SERVER_ERR_MSG = 'Error from server. Status Code: {} Data from server: {}'
SERVER_ERR_CANT_PROCESS_RESPONSE_MSG = "Can't process response from server. Status Code: {} Data from server: {}"
CONNECTION_REFUSED_ERR_MSG = "Error Details: Connection Refused from the Server"
SERVER_CONNECTION_ERR_MSG = "Error Connecting to server. Details: {}"
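# --- Illustrative sketch (not part of the original constants module) ---
# A hypothetical helper showing how the ERR_MSG_FORMAT_* constants above can
# be combined; the connector's real get_error_message_from_exception may
# differ in how it extracts the code and message from an exception.
def format_error_message(error_code=None, error_msg=ERR_MSG_UNAVAILABLE):
    if error_code is None:
        return ERR_MSG_FORMAT_WITHOUT_CODE.format(error_msg)
    return ERR_MSG_FORMAT_WITH_CODE.format(error_code, error_msg)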
|
# Copyright 2020 Alexander Polishchuk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import asyncio
import functools
import json
from decimal import Decimal
from typing import List
from typing import Optional
from typing import Union
import aiohttp
from obm import exceptions
DEFAULT_TIMEOUT = 5 * 60
def _catch_network_errors(func):
@functools.wraps(func)
async def wrapper(*args, **kwargs):
try:
return await func(*args, **kwargs)
except aiohttp.ServerTimeoutError:
self = args[0]
raise exceptions.NetworkTimeoutError(
f"The request to node was longer "
f"than timeout: {self.timeout}"
)
except aiohttp.ClientError as exc:
raise exceptions.NetworkError(exc)
return wrapper
class _DecimalEncoder(json.JSONEncoder):
def default(self, obj): # pylint: disable=method-hidden, arguments-differ
if isinstance(obj, Decimal):
return str(obj)
return super().default(obj)
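# Quick illustration (not part of the original module): with _DecimalEncoder,
# Decimal amounts serialize as strings instead of raising TypeError.
def _decimal_encoder_demo() -> str:
    # Returns '{"amount": "0.0001"}'
    return json.dumps({"amount": Decimal("0.0001")}, cls=_DecimalEncoder)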
class Connector(abc.ABC):
def __init__(
self,
rpc_host: str,
rpc_port: int,
loop: Optional[asyncio.AbstractEventLoop] = None,
session: Optional[aiohttp.ClientSession] = None,
timeout: Union[int, float] = DEFAULT_TIMEOUT,
):
if not isinstance(rpc_host, str):
raise TypeError(
f"PRC host must be a string, not '{type(rpc_host).__name__}'"
)
if not isinstance(rpc_port, int):
raise TypeError(
f"PRC port must be an integer, not '{type(rpc_port).__name__}'"
)
if session is not None:
if not isinstance(session, aiohttp.ClientSession):
raise TypeError(
f"Session must be a aiohttp.ClientSession, "
f"not '{type(session).__name__}'"
)
if timeout is not None:
if not isinstance(timeout, (float, int)):
raise TypeError(
f"Timeout must be a number, not '{type(timeout).__name__}'"
)
if timeout <= 0:
raise ValueError("Timeout must be greater than zero")
# TODO: Create auth here
url = f"{rpc_host}:{rpc_port}"
self.rpc_host = rpc_host
self.rpc_port = rpc_port
self.timeout = timeout
self.session = session
self.url = url if url.startswith("http") else "http://" + url
self.loop = loop or asyncio.get_event_loop()
def __getattribute__(self, item):
if item != "METHODS" and item in self.METHODS:
return functools.partial(self.wrapper, method=self.METHODS[item])
return super().__getattribute__(item)
async def __aenter__(self):
await self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
async def open(self):
if self.session is None:
self.session = aiohttp.ClientSession(
loop=self.loop,
headers=self.headers,
auth=self.auth,
timeout=aiohttp.ClientTimeout(total=self.timeout),
json_serialize=functools.partial(
json.dumps, cls=_DecimalEncoder
),
)
async def close(self):
if self.session is not None:
await self.session.close()
self.session = None
@_catch_network_errors
async def call(self, payload: dict) -> dict:
await self.open()
async with self.session.post(url=self.url, json=payload) as response:
return await response.json(
loads=functools.partial(json.loads, parse_float=Decimal)
)
@staticmethod
async def validate(response: dict) -> Union[dict, list]:
try:
if error := response.get("error"):
raise exceptions.NodeError(error)
return response["result"]
except KeyError:
raise exceptions.NodeInvalidResponceError(response)
@property
@abc.abstractmethod
def node(self) -> str:
...
@property
@abc.abstractmethod
def currency(self) -> str:
...
@abc.abstractmethod
async def wrapper(self, *args, method: str = None) -> Union[dict, list]:
...
# Unified interface
@property
@abc.abstractmethod
async def latest_block_number(self) -> int:
...
@abc.abstractmethod
async def create_address(self, password: str = "") -> str:
return await self.rpc_personal_new_account(password)
@abc.abstractmethod
async def estimate_fee(
self,
from_address: str = None,
to_address: str = None,
amount: str = None,
fee: Union[dict, Decimal] = None,
data: str = None,
conf_target: int = 1,
) -> Decimal:
...
@abc.abstractmethod
async def send_transaction(
self,
amount: Union[Decimal, float],
to_address: str,
from_address: str = None,
fee: Union[dict, Decimal] = None,
password: str = "",
subtract_fee_from_amount: bool = False,
) -> dict:
...
@abc.abstractmethod
async def fetch_recent_transactions(
self, limit: int = 10, **kwargs,
) -> List[dict]:
"""Fetches most recent transactions from a blockchain.
Args:
limit: The number of transactions to return. Defaults to 10.
Returns:
Most recent transactions list.
"""
@abc.abstractmethod
async def fetch_in_wallet_transaction(self, txid: str) -> dict:
"""Fetches the transaction by txid from a blockchain.
Args:
txid: Transaction ID to return.
Returns:
Dict that represents the transaction.
"""
async def fetch_in_wallet_transactions(
self, txids: List[str],
) -> List[dict]:
"""Fetches the transactions by txids from a blockchain.
Args:
txids: Transaction IDs to return.
Returns:
List of dicts that represent the transactions.
"""
batch = [self.fetch_in_wallet_transaction(txid) for txid in txids]
return await asyncio.gather(*batch)
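# --- Usage sketch (illustrative; not part of the original module) ---
# A hypothetical helper showing the call/validate pattern the Connector above
# is built around. `connector` stands for any concrete Connector subclass
# (one that defines METHODS, headers, auth and the abstract methods); the
# JSON-RPC 2.0 envelope below is an assumption about the node's protocol.
async def rpc_call(connector: Connector, method: str, *params):
    payload = {"jsonrpc": "2.0", "id": 1, "method": method, "params": list(params)}
    async with connector:  # __aenter__/__aexit__ open and close the session
        response = await connector.call(payload)
    return await connector.validate(response)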
|
from collections.abc import Sequence
from itertools import islice
import nltk
def batches(it, k):
"""Split iterable it into size-k batches.
Returns
-------
batches : iterable
Iterator over lists.
"""
it = iter(it)
while True:
batch = list(islice(it, k))
if not batch:
break
yield batch
def nltk_download(package):
# XXX we could set the download_dir to download to xtas_data/nltk_data
# and have everything in one place.
nltk.download(package, raise_on_error=True, quiet=False)
def tosequence(it):
"""Convert iterable it to a sequence if it isn't already one."""
return it if isinstance(it, Sequence) else list(it)
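# --- Usage sketch (illustrative; not part of the original module) ---
# batches() lazily yields fixed-size chunks; tosequence() materializes an
# iterable only when it is not already a Sequence.
if __name__ == "__main__":
    print(list(batches(range(7), 3)))            # [[0, 1, 2], [3, 4, 5], [6]]
    print(tosequence("abc"))                     # str is a Sequence: returned as-is
    print(tosequence(x * x for x in range(4)))   # generator -> [0, 1, 4, 9]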
|
# encoding: utf-8
import json
import os
import random
from datetime import datetime, timedelta
from io import BytesIO, StringIO
from unittest import mock
from unittest.mock import MagicMock
import pytest
from pymarc import parse_xml_to_array
from pymarc.record import Record
from api.authenticator import BasicAuthenticationProvider
from api.bibliotheca import (
BibliothecaAPI,
BibliothecaBibliographicCoverageProvider,
BibliothecaCirculationSweep,
BibliothecaEventMonitor,
BibliothecaParser,
BibliothecaPurchaseMonitor,
CheckoutResponseParser,
ErrorParser,
EventParser,
ItemListParser,
MockBibliothecaAPI,
PatronCirculationParser,
)
from api.circulation import CirculationAPI, FulfillmentInfo, HoldInfo, LoanInfo
from api.circulation_exceptions import *
from api.web_publication_manifest import FindawayManifest
from core.metadata_layer import ReplacementPolicy, TimestampData
from core.mock_analytics_provider import MockAnalyticsProvider
from core.model import (
CirculationEvent,
Contributor,
DataSource,
DeliveryMechanism,
Edition,
ExternalIntegration,
Hyperlink,
Identifier,
LicensePool,
Measurement,
Representation,
Subject,
Timestamp,
Work,
WorkCoverageRecord,
create,
)
from core.scripts import RunCollectionCoverageProviderScript
from core.testing import DatabaseTest
from core.util.datetime_helpers import datetime_utc, utc_now
from core.util.http import BadResponseException
from core.util.web_publication_manifest import AudiobookManifest
from . import sample_data
class BibliothecaAPITest(DatabaseTest):
def setup_method(self):
super(BibliothecaAPITest, self).setup_method()
self.collection = MockBibliothecaAPI.mock_collection(self._db)
self.api = MockBibliothecaAPI(self._db, self.collection)
base_path = os.path.split(__file__)[0]
resource_path = os.path.join(base_path, "files", "bibliotheca")
@classmethod
def sample_data(cls, filename):
return sample_data(filename, "bibliotheca")
class TestBibliothecaAPI(BibliothecaAPITest):
def setup_method(self):
super(TestBibliothecaAPI, self).setup_method()
self.collection = MockBibliothecaAPI.mock_collection(self._db)
self.api = MockBibliothecaAPI(self._db, self.collection)
def test_external_integration(self):
assert self.collection.external_integration == self.api.external_integration(
object()
)
def test__run_self_tests(self):
# Verify that BibliothecaAPI._run_self_tests() calls the right
# methods.
class Mock(MockBibliothecaAPI):
"Mock every method used by BibliothecaAPI._run_self_tests."
# First we will count the circulation events that happened in the
# last five minutes.
def get_events_between(self, start, finish):
self.get_events_between_called_with = (start, finish)
return [1, 2, 3]
# Then we will count the loans and holds for the default
# patron.
def patron_activity(self, patron, pin):
self.patron_activity_called_with = (patron, pin)
return ["loan", "hold"]
# Now let's make sure two Libraries have access to this
# Collection -- one library with a default patron and one
# without.
no_default_patron = self._library()
self.collection.libraries.append(no_default_patron)
with_default_patron = self._default_library
integration = self._external_integration(
"api.simple_authentication",
ExternalIntegration.PATRON_AUTH_GOAL,
libraries=[with_default_patron],
)
p = BasicAuthenticationProvider
integration.setting(p.TEST_IDENTIFIER).value = "username1"
integration.setting(p.TEST_PASSWORD).value = "password1"
# Now that everything is set up, run the self-test.
api = Mock(self._db, self.collection)
now = utc_now()
[no_patron_credential, recent_circulation_events, patron_activity] = sorted(
api._run_self_tests(self._db), key=lambda x: x.name
)
assert (
"Acquiring test patron credentials for library %s" % no_default_patron.name
== no_patron_credential.name
)
assert False == no_patron_credential.success
assert "Library has no test patron configured." == str(
no_patron_credential.exception
)
assert (
"Asking for circulation events for the last five minutes"
== recent_circulation_events.name
)
assert True == recent_circulation_events.success
assert "Found 3 event(s)" == recent_circulation_events.result
start, end = api.get_events_between_called_with
assert 5 * 60 == (end - start).total_seconds()
assert (end - now).total_seconds() < 2
assert (
"Checking activity for test patron for library %s"
% with_default_patron.name
== patron_activity.name
)
assert "Found 2 loans/holds" == patron_activity.result
patron, pin = api.patron_activity_called_with
assert "username1" == patron.authorization_identifier
assert "password1" == pin
def test_full_path(self):
id = self.api.library_id
assert "/cirrus/library/%s/foo" % id == self.api.full_path("foo")
assert "/cirrus/library/%s/foo" % id == self.api.full_path("/foo")
assert "/cirrus/library/%s/foo" % id == self.api.full_path(
"/cirrus/library/%s/foo" % id
)
def test_full_url(self):
id = self.api.library_id
assert (
"http://bibliotheca.test/cirrus/library/%s/foo" % id
== self.api.full_url("foo")
)
assert (
"http://bibliotheca.test/cirrus/library/%s/foo" % id
== self.api.full_url("/foo")
)
def test_request_signing(self):
# Confirm a known correct result for the Bibliotheca request signing
# algorithm.
self.api.queue_response(200)
response = self.api.request("some_url")
[request] = self.api.requests
headers = request[-1]["headers"]
assert "Fri, 01 Jan 2016 00:00:00 GMT" == headers["3mcl-Datetime"]
assert "2.0" == headers["3mcl-Version"]
expect = "3MCLAUTH a:HZHNGfn6WVceakGrwXaJQ9zIY0Ai5opGct38j9/bHrE="
assert expect == headers["3mcl-Authorization"]
# Tweak one of the variables that go into the signature, and
# the signature changes.
self.api.library_id = self.api.library_id + "1"
self.api.queue_response(200)
response = self.api.request("some_url")
request = self.api.requests[-1]
headers = request[-1]["headers"]
assert headers["3mcl-Authorization"] != expect
def test_replacement_policy(self):
mock_analytics = object()
policy = self.api.replacement_policy(self._db, analytics=mock_analytics)
assert isinstance(policy, ReplacementPolicy)
assert mock_analytics == policy.analytics
def test_bibliographic_lookup_request(self):
self.api.queue_response(200, content="some data")
response = self.api.bibliographic_lookup_request(["id1", "id2"])
[request] = self.api.requests
url = request[1]
# The request URL is the /items endpoint with the IDs concatenated.
assert url == self.api.full_url("items") + "/id1,id2"
# The response string is returned directly.
assert b"some data" == response
def test_bibliographic_lookup(self):
class MockItemListParser(object):
def parse(self, data):
self.parse_called_with = data
yield "item1"
yield "item2"
class Mock(MockBibliothecaAPI):
"""Mock the functionality used by bibliographic_lookup_request."""
def __init__(self):
self.item_list_parser = MockItemListParser()
def bibliographic_lookup_request(self, identifier_strings):
self.bibliographic_lookup_request_called_with = identifier_strings
return "parse me"
api = Mock()
identifier = self._identifier()
# We can pass in a list of identifier strings, a list of
# Identifier objects, or a single example of each:
for identifier, identifier_string in (
("id1", "id1"),
(identifier, identifier.identifier),
):
for identifier_list in ([identifier], identifier):
api.item_list_parser.parse_called_with = None
results = list(api.bibliographic_lookup(identifier_list))
# A list of identifier strings is passed into
# bibliographic_lookup_request().
assert [
identifier_string
] == api.bibliographic_lookup_request_called_with
# The response content is passed into parse()
assert "parse me" == api.item_list_parser.parse_called_with
# The results of parse() are yielded.
assert ["item1", "item2"] == results
def test_bad_response_raises_exception(self):
self.api.queue_response(500, content="oops")
identifier = self._identifier()
with pytest.raises(BadResponseException) as excinfo:
self.api.bibliographic_lookup(identifier)
assert "Got status code 500" in str(excinfo.value)
def test_put_request(self):
# This is a basic test to make sure the method calls line up
# right--there are more thorough tests in the circulation
# manager, which actually uses this functionality.
self.api.queue_response(200, content="ok, you put something")
response = self.api.request("checkout", "put this!", method="PUT")
# The PUT request went through to the correct URL and the right
# payload was sent.
[[method, url, args, kwargs]] = self.api.requests
assert "PUT" == method
assert self.api.full_url("checkout") == url
assert "put this!" == kwargs["data"]
# The response is what we'd expect.
assert 200 == response.status_code
assert b"ok, you put something" == response.content
def test_get_events_between_success(self):
data = self.sample_data("empty_end_date_event.xml")
self.api.queue_response(200, content=data)
now = utc_now()
an_hour_ago = now - timedelta(hours=1)
response = self.api.get_events_between(an_hour_ago, now)
[event] = list(response)
assert "d5rf89" == event[0]
def test_get_events_between_failure(self):
self.api.queue_response(500)
now = utc_now()
an_hour_ago = now - timedelta(hours=1)
pytest.raises(
BadResponseException, self.api.get_events_between, an_hour_ago, now
)
def test_update_availability(self):
# Test the Bibliotheca implementation of the update_availability
# method defined by the CirculationAPI interface.
# Create an analytics integration so we can make sure
# events are tracked.
integration, ignore = create(
self._db,
ExternalIntegration,
goal=ExternalIntegration.ANALYTICS_GOAL,
protocol="core.local_analytics_provider",
)
# Create a LicensePool that needs updating.
edition, pool = self._edition(
identifier_type=Identifier.THREEM_ID,
data_source_name=DataSource.THREEM,
with_license_pool=True,
collection=self.collection,
)
# We have never checked the circulation information for this
# LicensePool. Put some random junk in the pool to verify
# that it gets changed.
pool.licenses_owned = 10
pool.licenses_available = 5
pool.patrons_in_hold_queue = 3
assert None == pool.last_checked
# We do have a Work hanging around, but things are about to
# change for it.
work, is_new = pool.calculate_work()
assert any(
x
for x in work.coverage_records
if x.operation == WorkCoverageRecord.CLASSIFY_OPERATION
)
# Prepare availability information.
data = self.sample_data("item_metadata_single.xml")
# Change the ID in the test data so it looks like it's talking
# about the LicensePool we just created.
data = data.replace(b"ddf4gr9", pool.identifier.identifier.encode("utf8"))
# Update availability using that data.
self.api.queue_response(200, content=data)
self.api.update_availability(pool)
# The availability information has been updated, as has the
# date the availability information was last checked.
assert 1 == pool.licenses_owned
assert 1 == pool.licenses_available
assert 0 == pool.patrons_in_hold_queue
circulation_events = (
self._db.query(CirculationEvent)
.join(LicensePool)
.filter(LicensePool.id == pool.id)
)
assert 3 == circulation_events.count()
types = [e.type for e in circulation_events]
assert (
sorted(
[
CirculationEvent.DISTRIBUTOR_LICENSE_REMOVE,
CirculationEvent.DISTRIBUTOR_CHECKOUT,
CirculationEvent.DISTRIBUTOR_HOLD_RELEASE,
]
)
== sorted(types)
)
old_last_checked = pool.last_checked
assert old_last_checked is not None
# The work's CLASSIFY_OPERATION coverage record has been
# removed. In the near future its coverage will be
# recalculated to accommodate the new metadata.
assert not any(
x
for x in work.coverage_records
if x.operation == WorkCoverageRecord.CLASSIFY_OPERATION
)
# Now let's try update_availability again, with a file that
# makes it look like the book has been removed from the
# collection.
data = self.sample_data("empty_item_bibliographic.xml")
self.api.queue_response(200, content=data)
self.api.update_availability(pool)
assert 0 == pool.licenses_owned
assert 0 == pool.licenses_available
assert 0 == pool.patrons_in_hold_queue
assert pool.last_checked is not old_last_checked
circulation_events = (
self._db.query(CirculationEvent)
.join(LicensePool)
.filter(LicensePool.id == pool.id)
)
assert 5 == circulation_events.count()
def test_marc_request(self):
# A request for MARC records between two dates makes an API
# call and yields a sequence of pymarc Record objects.
start = datetime_utc(2012, 1, 2, 3, 4, 5)
end = datetime_utc(2014, 5, 6, 7, 8, 9)
self.api.queue_response(200, content=self.sample_data("marc_records_two.xml"))
records = [x for x in self.api.marc_request(start, end, 10, 20)]
[(method, url, body, headers)] = self.api.requests
# A GET request was sent to the expected endpoint
assert method == "GET"
for expect in (
"/data/marc?" "startdate=2012-01-02T03:04:05",
"enddate=2014-05-06T07:08:09",
"offset=10",
"limit=20",
):
assert expect in url
# The queued response was converted into pymarc Record objects.
assert all(isinstance(x, Record) for x in records)
assert ["Siege and Storm", "Red Island House A Novel/"] == [
x.title() for x in records
]
# If the API returns an error, an appropriate exception is raised.
self.api.queue_response(404, content=self.sample_data("error_unknown.xml"))
with pytest.raises(RemoteInitiatedServerError) as excinfo:
[x for x in self.api.marc_request(start, end, 10, 20)]
def test_sync_bookshelf(self):
patron = self._patron()
circulation = CirculationAPI(
self._db,
self._default_library,
api_map={self.collection.protocol: MockBibliothecaAPI},
)
api = circulation.api_for_collection[self.collection.id]
api.queue_response(200, content=self.sample_data("checkouts.xml"))
circulation.sync_bookshelf(patron, "dummy pin")
# The patron should have two loans and two holds.
l1, l2 = patron.loans
h1, h2 = patron.holds
assert datetime_utc(2015, 3, 20, 18, 50, 22) == l1.start
assert datetime_utc(2015, 4, 10, 18, 50, 22) == l1.end
assert datetime_utc(2015, 3, 13, 13, 38, 19) == l2.start
assert datetime_utc(2015, 4, 3, 13, 38, 19) == l2.end
# The patron is fourth in line. The end date is an estimate
# of when the hold will be available to check out.
assert datetime_utc(2015, 3, 24, 15, 6, 56) == h1.start
assert datetime_utc(2015, 3, 24, 15, 7, 51) == h1.end
assert 4 == h1.position
# The hold has an end date. It's time for the patron to decide
# whether or not to check out this book.
assert datetime_utc(2015, 5, 25, 17, 5, 34) == h2.start
assert datetime_utc(2015, 5, 27, 17, 5, 34) == h2.end
assert 0 == h2.position
def test_place_hold(self):
patron = self._patron()
edition, pool = self._edition(with_license_pool=True)
self.api.queue_response(200, content=self.sample_data("successful_hold.xml"))
response = self.api.place_hold(patron, "pin", pool)
assert pool.identifier.type == response.identifier_type
assert pool.identifier.identifier == response.identifier
def test_place_hold_fails_if_exceeded_hold_limit(self):
patron = self._patron()
edition, pool = self._edition(with_license_pool=True)
self.api.queue_response(
400, content=self.sample_data("error_exceeded_hold_limit.xml")
)
pytest.raises(PatronHoldLimitReached, self.api.place_hold, patron, "pin", pool)
def test_get_audio_fulfillment_file(self):
"""Verify that get_audio_fulfillment_file sends the
request we expect.
"""
self.api.queue_response(200, content="A license")
response = self.api.get_audio_fulfillment_file("patron id", "bib id")
[[method, url, args, kwargs]] = self.api.requests
assert "POST" == method
assert url.endswith("GetItemAudioFulfillment")
assert (
"<AudioFulfillmentRequest><ItemId>bib id</ItemId><PatronId>patron id</PatronId></AudioFulfillmentRequest>"
== kwargs["data"]
)
assert 200 == response.status_code
assert b"A license" == response.content
def test_fulfill(self):
patron = self._patron()
# This miracle book is available either as an audiobook or as
# an EPUB.
work = self._work(
data_source_name=DataSource.BIBLIOTHECA, with_license_pool=True
)
[pool] = work.license_pools
# Let's fulfill the EPUB first.
self.api.queue_response(
200,
headers={"Content-Type": "presumably/an-acsm"},
content="this is an ACSM",
)
fulfillment = self.api.fulfill(patron, "password", pool, internal_format="ePub")
assert isinstance(fulfillment, FulfillmentInfo)
assert b"this is an ACSM" == fulfillment.content
assert pool.identifier.identifier == fulfillment.identifier
assert pool.identifier.type == fulfillment.identifier_type
assert pool.data_source.name == fulfillment.data_source_name
# The media type reported by the server is passed through.
assert "presumably/an-acsm" == fulfillment.content_type
# Now let's try the audio version.
license = self.sample_data("sample_findaway_audiobook_license.json")
self.api.queue_response(
200, headers={"Content-Type": "application/json"}, content=license
)
fulfillment = self.api.fulfill(patron, "password", pool, internal_format="MP3")
assert isinstance(fulfillment, FulfillmentInfo)
# Here, the media type reported by the server is not passed
# through; it's replaced by a more specific media type
assert DeliveryMechanism.FINDAWAY_DRM == fulfillment.content_type
# The document sent by the 'Findaway' server has been
# converted into a web publication manifest.
manifest = json.loads(fulfillment.content)
# The conversion process is tested more fully in
# test_findaway_license_to_webpub_manifest. This just verifies
# that the manifest contains information from the 'Findaway'
# document as well as information from the Work.
metadata = manifest["metadata"]
assert (
"abcdef01234789abcdef0123" == metadata["encrypted"]["findaway:checkoutId"]
)
assert work.title == metadata["title"]
# Now let's see what happens to fulfillment when 'Findaway' or
# 'Bibliotheca' sends bad information.
bad_media_type = "application/error+json"
bad_content = b"This is not my beautiful license document!"
self.api.queue_response(
200, headers={"Content-Type": bad_media_type}, content=bad_content
)
fulfillment = self.api.fulfill(patron, "password", pool, internal_format="MP3")
assert isinstance(fulfillment, FulfillmentInfo)
# The (apparently) bad document is just passed on to the
# client as part of the FulfillmentInfo, in the hopes that the
# client will know what to do with it.
assert bad_media_type == fulfillment.content_type
assert bad_content == fulfillment.content
def test_findaway_license_to_webpub_manifest(self):
work = self._work(with_license_pool=True)
[pool] = work.license_pools
document = self.sample_data("sample_findaway_audiobook_license.json")
# Randomly scramble the Findaway manifest to make sure it gets
# properly sorted when converted to a Webpub-like manifest.
document = json.loads(document)
document["items"].sort(key=lambda x: random.random())
document = json.dumps(document)
m = BibliothecaAPI.findaway_license_to_webpub_manifest
media_type, manifest = m(pool, document)
assert DeliveryMechanism.FINDAWAY_DRM == media_type
manifest = json.loads(manifest)
# We use the default context for Web Publication Manifest
# files, but we also define an extension context called
# 'findaway', which lets us include terms coined by Findaway
# in a normal Web Publication Manifest document.
context = manifest["@context"]
default, findaway = context
assert AudiobookManifest.DEFAULT_CONTEXT == default
assert {"findaway": FindawayManifest.FINDAWAY_EXTENSION_CONTEXT} == findaway
metadata = manifest["metadata"]
# Information about the book has been added to metadata.
# (This is tested more fully in
# core/tests/util/test_util_web_publication_manifest.py.)
assert work.title == metadata["title"]
assert pool.identifier.urn == metadata["identifier"]
assert "en" == metadata["language"]
# Information about the license has been added to an 'encrypted'
# object within metadata.
encrypted = metadata["encrypted"]
assert (
"http://librarysimplified.org/terms/drm/scheme/FAE" == encrypted["scheme"]
)
assert "abcdef01234789abcdef0123" == encrypted["findaway:checkoutId"]
assert "1234567890987654321ababa" == encrypted["findaway:licenseId"]
assert "3M" == encrypted["findaway:accountId"]
assert "123456" == encrypted["findaway:fulfillmentId"]
assert (
"aaaaaaaa-4444-cccc-dddd-666666666666" == encrypted["findaway:sessionKey"]
)
# Every entry in the license document's 'items' list has
# become a readingOrder item in the manifest.
reading_order = manifest["readingOrder"]
assert 79 == len(reading_order)
# The duration of each readingOrder item has been converted to
# seconds.
first = reading_order[0]
assert 16.201 == first["duration"]
assert "Track 1" == first["title"]
# There is no 'href' value for the readingOrder items because the
# files must be obtained through the Findaway SDK rather than
# through regular HTTP requests.
#
# Since this is a relatively small book, it only has one part,
# part #0. Within that part, the items have been sorted by
# their sequence.
for i, item in enumerate(reading_order):
assert None == item.get("href", None)
assert Representation.MP3_MEDIA_TYPE == item["type"]
assert 0 == item["findaway:part"]
assert i + 1 == item["findaway:sequence"]
# The total duration, in seconds, has been added to metadata.
assert 28371 == int(metadata["duration"])
class TestBibliothecaCirculationSweep(BibliothecaAPITest):
def test_circulation_sweep_discovers_work(self):
# Test what happens when BibliothecaCirculationSweep discovers a new
# work.
# Create an analytics integration so we can make sure
# events are tracked.
integration, ignore = create(
self._db,
ExternalIntegration,
goal=ExternalIntegration.ANALYTICS_GOAL,
protocol="core.local_analytics_provider",
)
# We know about an identifier, but nothing else.
identifier = self._identifier(
identifier_type=Identifier.BIBLIOTHECA_ID, foreign_id="ddf4gr9"
)
# We're about to get information about that identifier from
# the API.
data = self.sample_data("item_metadata_single.xml")
# Update availability using that data.
self.api.queue_response(200, content=data)
monitor = BibliothecaCirculationSweep(
self._db, self.collection, api_class=self.api
)
monitor.process_items([identifier])
# Validate that the HTTP request went to the /items endpoint.
request = self.api.requests.pop()
url = request[1]
assert url == self.api.full_url("items") + "/" + identifier.identifier
# A LicensePool has been created for the previously mysterious
# identifier.
[pool] = identifier.licensed_through
assert self.collection == pool.collection
assert False == pool.open_access
# Three circulation events were created for this license pool,
# marking the creation of the license pool, the addition of
# licenses owned, and the making of those licenses available.
circulation_events = (
self._db.query(CirculationEvent)
.join(LicensePool)
.filter(LicensePool.id == pool.id)
)
assert 3 == circulation_events.count()
types = [e.type for e in circulation_events]
assert (
sorted(
[
CirculationEvent.DISTRIBUTOR_LICENSE_ADD,
CirculationEvent.DISTRIBUTOR_TITLE_ADD,
CirculationEvent.DISTRIBUTOR_CHECKIN,
]
)
== sorted(types)
)
# Tests of the various parser classes.
#
class TestBibliothecaParser(BibliothecaAPITest):
def test_parse_date(self):
parser = BibliothecaParser()
v = parser.parse_date("2016-01-02T12:34:56")
assert datetime_utc(2016, 1, 2, 12, 34, 56) == v
assert None == parser.parse_date(None)
assert None == parser.parse_date("Some weird value")
class TestEventParser(BibliothecaAPITest):
def test_parse_empty_list(self):
data = self.sample_data("empty_event_batch.xml")
# By default, an empty batch of events is not treated as an error.
events = list(EventParser().process_all(data))
assert [] == events
# But if the caller treats the absence of events for a certain
# time period as an error, an exception should be raised.
no_events_error = True
with pytest.raises(RemoteInitiatedServerError) as excinfo:
list(EventParser().process_all(data, no_events_error))
assert (
"No events returned from server. This may not be an error, but treating it as one to be safe."
in str(excinfo.value)
)
def test_parse_empty_end_date_event(self):
data = self.sample_data("empty_end_date_event.xml")
[event] = list(EventParser().process_all(data))
(threem_id, isbn, patron_id, start_time, end_time, internal_event_type) = event
assert "d5rf89" == threem_id
assert "9781101190623" == isbn
assert None == patron_id
assert datetime_utc(2016, 4, 28, 11, 4, 6) == start_time
assert None == end_time
assert "distributor_license_add" == internal_event_type
class TestPatronCirculationParser(BibliothecaAPITest):
def test_parse(self):
data = self.sample_data("checkouts.xml")
collection = self.collection
loans_and_holds = PatronCirculationParser(collection).process_all(data)
loans = [x for x in loans_and_holds if isinstance(x, LoanInfo)]
holds = [x for x in loans_and_holds if isinstance(x, HoldInfo)]
assert 2 == len(loans)
assert 2 == len(holds)
[l1, l2] = sorted(loans, key=lambda x: x.identifier)
assert "1ad589" == l1.identifier
assert "cgaxr9" == l2.identifier
expect_loan_start = datetime_utc(2015, 3, 20, 18, 50, 22)
expect_loan_end = datetime_utc(2015, 4, 10, 18, 50, 22)
assert expect_loan_start == l1.start_date
assert expect_loan_end == l1.end_date
[h1, h2] = sorted(holds, key=lambda x: x.identifier)
# This is the book on reserve.
assert collection.id == h1.collection_id
assert DataSource.BIBLIOTHECA == h1.data_source_name
assert "9wd8" == h1.identifier
expect_hold_start = datetime_utc(2015, 5, 25, 17, 5, 34)
expect_hold_end = datetime_utc(2015, 5, 27, 17, 5, 34)
assert expect_hold_start == h1.start_date
assert expect_hold_end == h1.end_date
assert 0 == h1.hold_position
# This is the book on hold.
assert "d4o8r9" == h2.identifier
assert collection.id == h2.collection_id
assert DataSource.BIBLIOTHECA == h2.data_source_name
expect_hold_start = datetime_utc(2015, 3, 24, 15, 6, 56)
expect_hold_end = datetime_utc(2015, 3, 24, 15, 7, 51)
assert expect_hold_start == h2.start_date
assert expect_hold_end == h2.end_date
assert 4 == h2.hold_position
class TestCheckoutResponseParser(BibliothecaAPITest):
def test_parse(self):
data = self.sample_data("successful_checkout.xml")
due_date = CheckoutResponseParser().process_all(data)
assert datetime_utc(2015, 4, 16, 0, 32, 36) == due_date
class TestErrorParser(BibliothecaAPITest):
def test_exceeded_limit(self):
"""The normal case--we get a helpful error message which we turn into
an appropriate circulation exception.
"""
msg = self.sample_data("error_exceeded_limit.xml")
error = ErrorParser().process_all(msg)
assert isinstance(error, PatronLoanLimitReached)
assert "Patron cannot loan more than 12 documents" == error.message
def test_exceeded_hold_limit(self):
msg = self.sample_data("error_exceeded_hold_limit.xml")
error = ErrorParser().process_all(msg)
assert isinstance(error, PatronHoldLimitReached)
assert "Patron cannot have more than 15 holds" == error.message
def test_wrong_status(self):
msg = self.sample_data("error_no_licenses.xml")
error = ErrorParser().process_all(msg)
assert isinstance(error, NoLicenses)
assert (
"the patron document status was CAN_WISH and not one of CAN_LOAN,RESERVATION"
== error.message
)
problem = error.as_problem_detail_document()
assert "The library currently has no licenses for this book." == problem.detail
assert 404 == problem.status_code
def test_internal_server_error_becomes_remote_initiated_server_error(self):
"""Simulate the message we get when the server goes down."""
msg = "The server has encountered an error"
error = ErrorParser().process_all(msg)
assert isinstance(error, RemoteInitiatedServerError)
assert BibliothecaAPI.SERVICE_NAME == error.service_name
assert 502 == error.status_code
assert msg == error.message
doc = error.as_problem_detail_document()
assert 502 == doc.status_code
assert "Integration error communicating with Bibliotheca" == doc.detail
def test_unknown_error_becomes_remote_initiated_server_error(self):
"""Simulate the message we get when the server gives a vague error."""
msg = self.sample_data("error_unknown.xml")
error = ErrorParser().process_all(msg)
assert isinstance(error, RemoteInitiatedServerError)
assert BibliothecaAPI.SERVICE_NAME == error.service_name
assert "Unknown error" == error.message
def test_remote_authentication_failed_becomes_remote_initiated_server_error(self):
"""Simulate the message we get when the error message is
'Authentication failed' but our authentication information is
set up correctly.
"""
msg = self.sample_data("error_authentication_failed.xml")
error = ErrorParser().process_all(msg)
assert isinstance(error, RemoteInitiatedServerError)
assert BibliothecaAPI.SERVICE_NAME == error.service_name
assert "Authentication failed" == error.message
def test_malformed_error_message_becomes_remote_initiated_server_error(self):
msg = """<weird>This error does not follow the standard set out by Bibliotheca.</weird>"""
error = ErrorParser().process_all(msg)
assert isinstance(error, RemoteInitiatedServerError)
assert BibliothecaAPI.SERVICE_NAME == error.service_name
assert "Unknown error" == error.message
def test_blank_error_message_becomes_remote_initiated_server_error(self):
msg = """<Error xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><Message/></Error>"""
error = ErrorParser().process_all(msg)
assert isinstance(error, RemoteInitiatedServerError)
assert BibliothecaAPI.SERVICE_NAME == error.service_name
assert "Unknown error" == error.message
class TestBibliothecaEventParser(object):
# Sample event feed to test out the parser.
TWO_EVENTS = """<LibraryEventBatch xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<PublishId>1b0d6667-a10e-424a-9f73-fb6f6d41308e</PublishId>
<PublishDateTimeInUTC>2014-04-14T13:59:05.6920303Z</PublishDateTimeInUTC>
<LastEventDateTimeInUTC>2014-04-03T00:00:34</LastEventDateTimeInUTC>
<Events>
<CloudLibraryEvent>
<LibraryId>test-library</LibraryId>
<EventId>event-1</EventId>
<EventType>CHECKIN</EventType>
<EventStartDateTimeInUTC>2014-04-03T00:00:23</EventStartDateTimeInUTC>
<EventEndDateTimeInUTC>2014-04-03T00:00:23</EventEndDateTimeInUTC>
<ItemId>theitem1</ItemId>
<ISBN>900isbn1</ISBN>
<PatronId>patronid1</PatronId>
<EventPublishDateTimeInUTC>2014-04-14T13:59:05</EventPublishDateTimeInUTC>
</CloudLibraryEvent>
<CloudLibraryEvent>
<LibraryId>test-library</LibraryId>
<EventId>event-2</EventId>
<EventType>CHECKOUT</EventType>
<EventStartDateTimeInUTC>2014-04-03T00:00:34</EventStartDateTimeInUTC>
<EventEndDateTimeInUTC>2014-04-02T23:57:37</EventEndDateTimeInUTC>
<ItemId>theitem2</ItemId>
<ISBN>900isbn2</ISBN>
<PatronId>patronid2</PatronId>
<EventPublishDateTimeInUTC>2014-04-14T13:59:05</EventPublishDateTimeInUTC>
</CloudLibraryEvent>
</Events>
</LibraryEventBatch>
"""
def test_parse_event_batch(self):
# Parsing the XML gives us two events.
event1, event2 = EventParser().process_all(self.TWO_EVENTS)
(threem_id, isbn, patron_id, start_time, end_time, internal_event_type) = event1
assert "theitem1" == threem_id
assert "900isbn1" == isbn
assert "patronid1" == patron_id
assert CirculationEvent.DISTRIBUTOR_CHECKIN == internal_event_type
assert start_time == end_time
(threem_id, isbn, patron_id, start_time, end_time, internal_event_type) = event2
assert "theitem2" == threem_id
assert "900isbn2" == isbn
assert "patronid2" == patron_id
assert CirculationEvent.DISTRIBUTOR_CHECKOUT == internal_event_type
# Verify that start and end time were parsed correctly.
correct_start = datetime_utc(2014, 4, 3, 0, 0, 34)
correct_end = datetime_utc(2014, 4, 2, 23, 57, 37)
assert correct_start == start_time
assert correct_end == end_time
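# --- Illustrative sketch (not part of the test module) ---
# A rough, hypothetical reading of the event-batch XML shown in
# TestBibliothecaEventParser.TWO_EVENTS using only the standard library; the
# real EventParser in api.bibliotheca also parses dates and maps EventType
# values onto CirculationEvent constants.
import xml.etree.ElementTree as ET


def parse_event_batch_sketch(xml_text):
    root = ET.fromstring(xml_text)
    for event in root.iter("CloudLibraryEvent"):
        yield (
            event.findtext("ItemId"),
            event.findtext("ISBN"),
            event.findtext("PatronId"),
            event.findtext("EventStartDateTimeInUTC"),
            event.findtext("EventEndDateTimeInUTC"),
            event.findtext("EventType"),
        )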
class TestErrorParser(object):
# Some sample error documents.
NOT_LOANABLE = '<Error xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><Code>Gen-001</Code><Message>the patron document status was CAN_HOLD and not one of CAN_LOAN,RESERVATION</Message></Error>'
ALREADY_ON_LOAN = '<Error xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><Code>Gen-001</Code><Message>the patron document status was LOAN and not one of CAN_LOAN,RESERVATION</Message></Error>'
TRIED_TO_RETURN_UNLOANED_BOOK = '<Error xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><Code>Gen-001</Code><Message>The patron has no eBooks checked out</Message></Error>'
TRIED_TO_HOLD_LOANABLE_BOOK = '<Error xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><Code>Gen-001</Code><Message>the patron document status was CAN_LOAN and not one of CAN_HOLD</Message></Error>'
TRIED_TO_HOLD_BOOK_ON_LOAN = '<Error xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><Code>Gen-001</Code><Message>the patron document status was LOAN and not one of CAN_HOLD</Message></Error>'
ALREADY_ON_HOLD = '<Error xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><Code>Gen-001</Code><Message>the patron document status was HOLD and not one of CAN_HOLD</Message></Error>'
TRIED_TO_CANCEL_NONEXISTENT_HOLD = '<Error xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><Code>Gen-001</Code><Message>The patron does not have the book on hold</Message></Error>'
TOO_MANY_LOANS = '<Error xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><Code>Gen-001</Code><Message>Patron cannot loan more than 12 documents</Message></Error>'
def test_exception(self):
parser = ErrorParser()
error = parser.process_all(self.NOT_LOANABLE)
assert isinstance(error, NoAvailableCopies)
error = parser.process_all(self.ALREADY_ON_LOAN)
assert isinstance(error, AlreadyCheckedOut)
error = parser.process_all(self.ALREADY_ON_HOLD)
assert isinstance(error, AlreadyOnHold)
error = parser.process_all(self.TOO_MANY_LOANS)
assert isinstance(error, PatronLoanLimitReached)
error = parser.process_all(self.TRIED_TO_CANCEL_NONEXISTENT_HOLD)
assert isinstance(error, NotOnHold)
error = parser.process_all(self.TRIED_TO_RETURN_UNLOANED_BOOK)
assert isinstance(error, NotCheckedOut)
error = parser.process_all(self.TRIED_TO_HOLD_LOANABLE_BOOK)
assert isinstance(error, CurrentlyAvailable)
# This is such a weird case we don't have a special
# exception for it.
error = parser.process_all(self.TRIED_TO_HOLD_BOOK_ON_LOAN)
assert isinstance(error, CannotHold)
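# --- Illustrative sketch (not part of the test module) ---
# The tests above map Bibliotheca <Error> documents onto circulation
# exceptions by matching phrases in the <Message> element. A minimal,
# hypothetical version of that dispatch, returning exception classes rather
# than instances (the real ErrorParser in api.bibliotheca is more thorough):
def classify_error_message_sketch(message):
    message = message or ""
    if "cannot loan more than" in message:
        return PatronLoanLimitReached
    if "cannot have more than" in message and "holds" in message:
        return PatronHoldLimitReached
    if "has no eBooks checked out" in message:
        return NotCheckedOut
    if "does not have the book on hold" in message:
        return NotOnHold
    if "was LOAN and not one of CAN_LOAN" in message:
        return AlreadyCheckedOut
    if "was HOLD and not one of CAN_HOLD" in message:
        return AlreadyOnHold
    return RemoteInitiatedServerError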
class TestBibliothecaPurchaseMonitor(BibliothecaAPITest):
@pytest.fixture()
def default_monitor(self):
return BibliothecaPurchaseMonitor(
self._db,
self.collection,
api_class=MockBibliothecaAPI,
analytics=MockAnalyticsProvider(),
)
@pytest.fixture()
def initialized_monitor(self):
collection = MockBibliothecaAPI.mock_collection(
self._db, name="Initialized Purchase Monitor Collection"
)
monitor = BibliothecaPurchaseMonitor(
self._db, collection, api_class=MockBibliothecaAPI
)
Timestamp.stamp(
self._db,
service=monitor.service_name,
service_type=Timestamp.MONITOR_TYPE,
collection=collection,
)
return monitor
@pytest.mark.parametrize(
"specified_default_start, expected_default_start",
[
("2011", datetime_utc(year=2011, month=1, day=1)),
("2011-10", datetime_utc(year=2011, month=10, day=1)),
("2011-10-05", datetime_utc(year=2011, month=10, day=5)),
("2011-10-05T15", datetime_utc(year=2011, month=10, day=5, hour=15)),
(
"2011-10-05T15:27",
datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),
),
(
"2011-10-05T15:27:33",
datetime_utc(year=2011, month=10, day=5, hour=15, minute=27, second=33),
),
(
"2011-10-05 15:27:33",
datetime_utc(year=2011, month=10, day=5, hour=15, minute=27, second=33),
),
(
"2011-10-05T15:27:33.123456",
datetime_utc(
year=2011,
month=10,
day=5,
hour=15,
minute=27,
second=33,
microsecond=123456,
),
),
(
datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),
datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),
),
(None, None),
],
)
def test_optional_iso_date_valid_dates(
self, specified_default_start, expected_default_start, default_monitor
):
# ISO 8601 strings, `datetime`s, or None are valid.
actual_default_start = default_monitor._optional_iso_date(
specified_default_start
)
if expected_default_start is not None:
assert isinstance(actual_default_start, datetime)
assert actual_default_start == expected_default_start
def test_monitor_intrinsic_start_time(self, default_monitor, initialized_monitor):
# No `default_start` time is specified for either `default_monitor` or
# `initialized_monitor`, so each monitor's `default_start_time` should
# match the monitor class's intrinsic start time.
for monitor in [default_monitor, initialized_monitor]:
expected_intrinsic_start = BibliothecaPurchaseMonitor.DEFAULT_START_TIME
intrinsic_start = monitor._intrinsic_start_time(self._db)
assert isinstance(intrinsic_start, datetime)
assert intrinsic_start == expected_intrinsic_start
assert intrinsic_start == monitor.default_start_time
@pytest.mark.parametrize(
"specified_default_start, override_timestamp, expected_start",
[
(
"2011-10-05T15:27",
False,
datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),
),
(
"2011-10-05T15:27:33",
False,
datetime_utc(year=2011, month=10, day=5, hour=15, minute=27, second=33),
),
(None, False, None),
(None, True, None),
(
"2011-10-05T15:27",
True,
datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),
),
(
"2011-10-05T15:27:33",
True,
datetime_utc(year=2011, month=10, day=5, hour=15, minute=27, second=33),
),
],
)
def test_specified_start_trumps_intrinsic_default_start(
self, specified_default_start, override_timestamp, expected_start
):
# When a valid `default_start` parameter is specified, it -- not the monitor's
# intrinsic default -- will always become the monitor's `default_start_time`.
monitor = BibliothecaPurchaseMonitor(
self._db,
self.collection,
api_class=MockBibliothecaAPI,
default_start=specified_default_start,
override_timestamp=override_timestamp,
)
monitor_intrinsic_default = monitor._intrinsic_start_time(self._db)
assert isinstance(monitor.default_start_time, datetime)
assert isinstance(monitor_intrinsic_default, datetime)
if specified_default_start:
assert monitor.default_start_time == expected_start
else:
assert (
abs(
(
monitor_intrinsic_default - monitor.default_start_time
).total_seconds()
)
<= 1
)
# If no `default_start` is specified, then `override_timestamp` must be false.
if not specified_default_start:
assert monitor.override_timestamp is False
# For an uninitialized monitor (no timestamp), the monitor's `default_start_time`,
# whether from a specified `default_start` or the monitor's intrinsic start time,
# will be the actual start time. The cut-off will be roughly the current time, in
# either case.
expected_cutoff = utc_now()
with mock.patch.object(
monitor, "catch_up_from", return_value=None
) as catch_up_from:
monitor.run()
actual_start, actual_cutoff, progress = catch_up_from.call_args[0]
assert abs((expected_cutoff - actual_cutoff).total_seconds()) <= 1
assert actual_cutoff == progress.finish
assert actual_start == monitor.default_start_time
assert progress.start == monitor.default_start_time
@pytest.mark.parametrize(
"specified_default_start, override_timestamp, expected_start",
[
(
"2011-10-05T15:27",
False,
datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),
),
(
"2011-10-05T15:27:33",
False,
datetime_utc(year=2011, month=10, day=5, hour=15, minute=27, second=33),
),
(None, False, None),
(None, True, None),
(
"2011-10-05T15:27",
True,
datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),
),
(
"2011-10-05T15:27:33",
True,
datetime_utc(year=2011, month=10, day=5, hour=15, minute=27, second=33),
),
],
)
def test_specified_start_can_override_timestamp(
self, specified_default_start, override_timestamp, expected_start
):
monitor = BibliothecaPurchaseMonitor(
self._db,
self.collection,
api_class=MockBibliothecaAPI,
default_start=specified_default_start,
override_timestamp=override_timestamp,
)
# For an initialized monitor, the `default_start_time` will be derived from
# `timestamp.finish`, unless overridden by a specified `default_start` when
# `override_timestamp` is specified as True.
ts = Timestamp.stamp(
self._db,
service=monitor.service_name,
service_type=Timestamp.MONITOR_TYPE,
collection=monitor.collection,
)
start_time_from_ts = ts.finish - BibliothecaPurchaseMonitor.OVERLAP
expected_actual_start_time = (
expected_start if monitor.override_timestamp else start_time_from_ts
)
expected_cutoff = utc_now()
with mock.patch.object(
monitor, "catch_up_from", return_value=None
) as catch_up_from:
monitor.run()
actual_start, actual_cutoff, progress = catch_up_from.call_args[0]
assert abs((expected_cutoff - actual_cutoff).total_seconds()) <= 1
assert actual_cutoff == progress.finish
assert actual_start == expected_actual_start_time
assert progress.start == expected_actual_start_time
@pytest.mark.parametrize("input", [("invalid"), ("2020/10"), (["2020-10-05"])])
def test_optional_iso_date_invalid_dates(self, input, default_monitor):
with pytest.raises(ValueError) as excinfo:
default_monitor._optional_iso_date(input)
def test_catch_up_from(self, default_monitor):
# catch_up_from() slices up its given timespan, calls
# purchases() to find purchases for each slice, processes each
# purchase using process_record(), and sets a checkpoint for each
# slice that is unambiguously in the past.
today = utc_now().date()
# _checkpoint() will be called after processing this slice
# because it's a full slice that ends before today.
full_slice = [datetime_utc(2014, 1, 1), datetime_utc(2014, 1, 2), True]
# _checkpoint() is not called after processing this slice
# because it's not a full slice.
incomplete_slice = [datetime_utc(2015, 1, 1), datetime_utc(2015, 1, 2), False]
# _checkpoint() is not called after processing this slice,
# even though it's supposedly complete, because today isn't
# over yet.
today_slice = [today - timedelta(days=1), today, True]
# _checkpoint() is not called after processing this slice
# because it doesn't end in the past.
future_slice = [today + timedelta(days=1), today + timedelta(days=2), True]
default_monitor.slice_timespan = MagicMock(
return_value=[full_slice, incomplete_slice, today_slice, future_slice]
)
default_monitor.purchases = MagicMock(return_value=["A record"])
default_monitor.process_record = MagicMock()
default_monitor._checkpoint = MagicMock()
# Execute.
progress = TimestampData()
start = datetime_utc(2019, 1, 1)
cutoff = datetime_utc(2020, 1, 1)
default_monitor.catch_up_from(start, cutoff, progress)
# slice_timespan was called once.
default_monitor.slice_timespan.assert_called_once_with(
start, cutoff, timedelta(days=1)
)
# purchases() was called on each slice it returned.
default_monitor.purchases.assert_has_calls(
[
mock.call(*x[:2])
for x in (full_slice, incomplete_slice, today_slice, future_slice)
]
)
# Each purchases() call returned a single record, which was
# passed into process_record along with the start date of the
# current slice.
default_monitor.process_record.assert_has_calls(
[
mock.call("A record", x[0])
for x in [full_slice, incomplete_slice, today_slice, future_slice]
]
)
# TimestampData.achievements was set to the total number of
# records processed.
assert progress.achievements == "MARC records processed: 4"
# Only one of our contrived time slices -- the first one --
# was a full slice that ended before the current
# date. _checkpoint was called on that slice, and only that
# slice.
default_monitor._checkpoint.assert_called_once_with(
progress, start, full_slice[0], "MARC records processed: 1"
)
def test__checkpoint(self, default_monitor):
# The _checkpoint method allows the BibliothecaPurchaseMonitor
# to preserve its progress in case of a crash.
# The Timestamp for the default monitor shows that it has
# a start date but it's never successfully completed.
timestamp_obj = default_monitor.timestamp()
assert timestamp_obj.achievements is None
assert timestamp_obj.start == BibliothecaPurchaseMonitor.DEFAULT_START_TIME
assert timestamp_obj.finish is None
timestamp_data = TimestampData()
finish = datetime_utc(2020, 1, 1)
achievements = "Some achievements"
default_monitor._checkpoint(
timestamp_data, timestamp_obj.start, finish, achievements
)
# Calling _checkpoint creates the impression that the monitor
# completed at the checkpoint, even though in point of fact
# it's still running.
timestamp_obj = default_monitor.timestamp()
assert timestamp_obj.achievements == achievements
assert timestamp_obj.start == BibliothecaPurchaseMonitor.DEFAULT_START_TIME
assert timestamp_obj.finish == finish
def test_purchases(self, default_monitor):
# The purchases() method calls marc_request repeatedly, handling
# pagination.
# Mock three pages that contain 50, 50, and 49 items.
default_monitor.api.marc_request = MagicMock(
side_effect=[[1] * 50, [2] * 50, [3] * 49]
)
start = datetime_utc(2020, 1, 1)
end = datetime_utc(2020, 1, 2)
records = [x for x in default_monitor.purchases(start, end)]
# marc_request was called repeatedly with increasing offsets
# until it returned fewer than 50 results.
default_monitor.api.marc_request.assert_has_calls(
[mock.call(start, end, offset, 50) for offset in (1, 51, 101)]
)
# Every "record" it returned was yielded as part of a single
# stream.
assert ([1] * 50) + ([2] * 50) + ([3] * 49) == records
def test_process_record(self, default_monitor, caplog):
# process_record may create a LicensePool, trigger the
# bibliographic coverage provider, and/or issue a "license
# added" analytics event, based on the identifier found in a
# MARC record.
purchase_time = utc_now()
analytics = MockAnalyticsProvider()
default_monitor.analytics = analytics
ensure_coverage = MagicMock()
default_monitor.bibliographic_coverage_provider.ensure_coverage = (
ensure_coverage
)
# Try some cases that won't happen in real life.
multiple_control_numbers = b"""<?xml version="1.0" encoding="UTF-8" ?><marc:collection xmlns:marc="http://www.loc.gov/MARC21/slim" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.loc.gov/MARC21/slim http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd"><marc:record><marc:leader>01034nam a22002413a 4500</marc:leader><marc:controlfield tag="001">ehasb89</marc:controlfield><marc:controlfield tag="001">abcde</marc:controlfield></marc:record></marc:collection>"""
no_control_number = b"""<?xml version="1.0" encoding="UTF-8" ?><marc:collection xmlns:marc="http://www.loc.gov/MARC21/slim" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.loc.gov/MARC21/slim http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd"><marc:record><marc:leader>01034nam a22002413a 4500</marc:leader></marc:record></marc:collection>"""
for bad_record, expect_error in (
(
multiple_control_numbers,
"Ignoring MARC record with multiple Bibliotheca control numbers.",
),
(
no_control_number,
"Ignoring MARC record with no Bibliotheca control number.",
),
):
[marc] = parse_xml_to_array(BytesIO(bad_record))
assert default_monitor.process_record(marc, purchase_time) is None
assert expect_error in caplog.messages[-1]
# Now, try the two real cases.
[ehasb89, oock89] = parse_xml_to_array(
StringIO(self.sample_data("marc_records_two.xml").decode("utf8"))
)
# If the book is new to this collection, it's run through
# BibliothecaBibliographicCoverageProvider.ensure_coverage to
# give it initial bibliographic and circulation data.
pool = default_monitor.process_record(ehasb89, purchase_time)
assert pool.identifier.identifier == "ehasb89"
assert pool.identifier.type == Identifier.BIBLIOTHECA_ID
assert pool.data_source.name == DataSource.BIBLIOTHECA
assert self.collection == pool.collection
ensure_coverage.assert_called_once_with(pool.identifier, force=True)
# An analytics event is issued to mark the time at which the
# book was first purchased.
assert analytics.count == 1
assert analytics.event_type == "distributor_title_add"
assert analytics.time == purchase_time
# If the book is already in this collection, ensure_coverage
# is not called.
pool, ignore = LicensePool.for_foreign_id(
self._db,
DataSource.BIBLIOTHECA,
Identifier.BIBLIOTHECA_ID,
"3oock89",
collection=self.collection,
)
pool2 = default_monitor.process_record(oock89, purchase_time)
assert pool == pool2
assert ensure_coverage.call_count == 1 # i.e. was not called again.
# But an analytics event is still issued to mark the purchase.
assert analytics.count == 2
assert analytics.event_type == "distributor_title_add"
assert analytics.time == purchase_time
def test_end_to_end(self, default_monitor):
# Limited end-to-end test of the BibliothecaPurchaseMonitor.
# Set the default start time to one minute in the past, so the
# monitor doesn't feel the need to make more than one call to
# the MARC endpoint.
default_monitor.override_timestamp = True
start_time = utc_now() - timedelta(minutes=1)
default_monitor.default_start_time = start_time
# There will be two calls to the mock API: one to the MARC
# endpoint, which will tell us about the purchase of a single
# book, and one to the metadata endpoint for information about
# that book.
api = default_monitor.api
api.queue_response(200, content=self.sample_data("marc_records_one.xml"))
api.queue_response(200, content=self.sample_data("item_metadata_single.xml"))
default_monitor.run()
# One book was created.
work = self._db.query(Work).one()
# Bibliographic information came from the coverage provider,
# not from our fake MARC record (which is actually for a
# different book).
assert work.title == "The Incense Game"
# Licensing information was also taken from the coverage
# provider.
[lp] = work.license_pools
assert lp.identifier.identifier == "ddf4gr9"
assert default_monitor.collection == lp.collection
assert lp.licenses_owned == 1
assert lp.licenses_available == 1
# An analytics event was issued to commemorate the addition of
# the book to the collection.
assert default_monitor.analytics.event_type == "distributor_title_add"
# The timestamp has been updated; the next time the monitor
# runs it will ask for purchases that haven't happened yet.
default_monitor.override_timestamp = False
timestamp = default_monitor.timestamp()
assert timestamp.achievements == "MARC records processed: 1"
assert timestamp.finish > start_time
class TestBibliothecaEventMonitor(BibliothecaAPITest):
@pytest.fixture()
def default_monitor(self):
return BibliothecaEventMonitor(
self._db, self.collection, api_class=MockBibliothecaAPI
)
@pytest.fixture()
def initialized_monitor(self):
collection = MockBibliothecaAPI.mock_collection(
self._db, name="Initialized Monitor Collection"
)
monitor = BibliothecaEventMonitor(
self._db, collection, api_class=MockBibliothecaAPI
)
Timestamp.stamp(
self._db,
service=monitor.service_name,
service_type=Timestamp.MONITOR_TYPE,
collection=collection,
)
return monitor
def test_run_once(self):
# run_once() slices the time between its start date
# and the current time into five-minute intervals, and asks for
# data about one interval at a time.
now = utc_now()
one_hour_ago = now - timedelta(hours=1)
two_hours_ago = now - timedelta(hours=2)
        # Simulate a previous run that covered the period from two
        # hours ago to one hour ago.
before_timestamp = TimestampData(start=two_hours_ago, finish=one_hour_ago)
api = MockBibliothecaAPI(self._db, self.collection)
api.queue_response(200, content=self.sample_data("item_metadata_single.xml"))
        # Queue responses for the requests made at 5-minute intervals
        # across the hour-long slice.
for i in range(1, 15):
api.queue_response(
200, content=self.sample_data("empty_end_date_event.xml")
)
monitor = BibliothecaEventMonitor(self._db, self.collection, api_class=api)
after_timestamp = monitor.run_once(before_timestamp)
# Fifteen requests were made to the API:
#
# 1. Looking up detailed information about the single book
# whose event we found.
#
# 2. Retrieving the 'slices' of events between 2 hours ago and
# 1 hour ago in 5 minute intervals.
assert 15 == len(api.requests)
# There is no second 'detailed information' lookup because both events
# relate to the same book.
# A LicensePool was created for the identifier referred to
# in empty_end_date_event.xml.
[pool] = self.collection.licensepools
assert "d5rf89" == pool.identifier.identifier
# But since the metadata retrieved in the follow-up request
# was for a different book, no Work and no Edition have been
# created. (See test_handle_event for what happens when the
# API cooperates.)
assert None == pool.work
assert None == pool.presentation_edition
# The timeframe covered by that run starts a little before the
# 'finish' date associated with the old timestamp, and ends
# around the time run_once() was called.
#
# The events we found were both from 2016, but that's not
# considered when setting the timestamp.
assert one_hour_ago - monitor.OVERLAP == after_timestamp.start
self.time_eq(after_timestamp.finish, now)
        # The timestamp's achievements have been updated.
assert "Events handled: 13." == after_timestamp.achievements
# In earlier versions, the progress timestamp's `counter`
# property was manipulated to put the monitor in different
# states that would improve its reliability in different
# failure scenarios. With the addition of the
# BibliothecaPurchaseMonitor, the reliability of
# BibliothecaEventMonitor became much less important, so the
# complex code has been removed.
assert None == after_timestamp.counter
# To prove this, run the monitor again, catching up between
# after_timestamp.start (the current time, minus 5 minutes and
# a little bit), and the current time.
#
# This is going to result in two more API calls, one for the
# "5 minutes" and one for the "little bit".
api.queue_response(200, content=self.sample_data("empty_event_batch.xml"))
api.queue_response(200, content=self.sample_data("empty_event_batch.xml"))
monitor.run_once(after_timestamp)
# Two more requests were made, but no events were found for the
# corresponding time slices, so nothing happened.
#
# Previously the lack of any events would have been treated as
# an error.
assert 17 == len(api.requests)
assert "Events handled: 0." == after_timestamp.achievements
def test_handle_event(self):
api = MockBibliothecaAPI(self._db, self.collection)
api.queue_response(200, content=self.sample_data("item_metadata_single.xml"))
analytics = MockAnalyticsProvider()
monitor = BibliothecaEventMonitor(
self._db, self.collection, api_class=api, analytics=analytics
)
now = utc_now()
monitor.handle_event(
"ddf4gr9",
"9781250015280",
None,
now,
None,
CirculationEvent.DISTRIBUTOR_LICENSE_ADD,
)
# The collection now has a LicensePool corresponding to the book
# we just loaded.
[pool] = self.collection.licensepools
assert "ddf4gr9" == pool.identifier.identifier
# The book has a presentation-ready work and we know its
# bibliographic metadata.
assert True == pool.work.presentation_ready
assert "The Incense Game" == pool.work.title
# The LicensePool's circulation information has been changed
# to reflect what we know about the book -- that we have one
# license which (as of the instant the event happened) is
# available.
assert 1 == pool.licenses_owned
assert 1 == pool.licenses_available
# Three analytics events were collected: one for the license
# add event itself, one for the 'checkin' that made the new
# license available, and a redundant 'license add' event which
# was registered with analytics but which did not affect the
# counts.
#
# In earlier versions a fourth analytics event would have been
# issued, for the creation of a new LicensePool, but that is now
        # solely the job of the BibliothecaPurchaseMonitor.
assert 3 == analytics.count
class TestBibliothecaPurchaseMonitorWhenMultipleCollections(BibliothecaAPITest):
def test_multiple_service_type_timestamps_with_start_date(self):
# Start with multiple collections that have timestamps
# because they've run before.
collections = [
MockBibliothecaAPI.mock_collection(self._db, name="Collection 1"),
MockBibliothecaAPI.mock_collection(self._db, name="Collection 2"),
]
for c in collections:
Timestamp.stamp(
self._db,
service=BibliothecaPurchaseMonitor.SERVICE_NAME,
service_type=Timestamp.MONITOR_TYPE,
collection=c,
)
# Instantiate the associated monitors with a start date.
monitors = [
BibliothecaPurchaseMonitor(
self._db, c, api_class=BibliothecaAPI, default_start="2011-02-03"
)
for c in collections
]
assert len(monitors) == len(collections)
# Ensure that we get monitors and not an exception.
for m in monitors:
assert isinstance(m, BibliothecaPurchaseMonitor)
class TestItemListParser(BibliothecaAPITest):
def test_contributors_for_string(cls):
authors = list(
ItemListParser.contributors_from_string(
"Walsh, Jill Paton; Sayers, Dorothy L."
)
)
assert [x.sort_name for x in authors] == [
"Walsh, Jill Paton",
"Sayers, Dorothy L.",
]
assert [x.roles for x in authors] == [
[Contributor.AUTHOR_ROLE],
[Contributor.AUTHOR_ROLE],
]
# Parentheticals are stripped.
[author] = ItemListParser.contributors_from_string(
"Baum, Frank L. (Frank Lyell)"
)
assert "Baum, Frank L." == author.sort_name
# Contributors may have two levels of entity reference escaping,
# one of which will have already been handled by the initial parse.
# So, we'll test zero and one escapings here.
authors = list(
ItemListParser.contributors_from_string(
u"Raji Codell, Esmé; Raji Codell, Esmé"
)
)
author_names = [a.sort_name for a in authors]
assert len(authors) == 2
assert len(set(author_names)) == 1
assert all(u"Raji Codell, Esmé" == name for name in author_names)
# It's possible to specify some role other than AUTHOR_ROLE.
narrators = list(
ItemListParser.contributors_from_string(
"Callow, Simon; Mann, Bruce; Hagon, Garrick", Contributor.NARRATOR_ROLE
)
)
for narrator in narrators:
assert [Contributor.NARRATOR_ROLE] == narrator.roles
assert ["Callow, Simon", "Mann, Bruce", "Hagon, Garrick"] == [
narrator.sort_name for narrator in narrators
]
def test_parse_genre_string(self):
def f(genre_string):
genres = ItemListParser.parse_genre_string(genre_string)
assert all([x.type == Subject.BISAC for x in genres])
return [x.name for x in genres]
assert ["Children's Health", "Health"] == f("Children&#39;s Health,Health,")
assert [
"Action & Adventure",
"Science Fiction",
"Fantasy",
"Magic",
"Renaissance",
] == f(
"Action &amp; Adventure,Science Fiction, Fantasy, Magic,Renaissance,"
)
def test_item_list(cls):
data = cls.sample_data("item_metadata_list_mini.xml")
data = list(ItemListParser().parse(data))
# There should be 2 items in the list.
assert 2 == len(data)
cooked = data[0]
assert "The Incense Game" == cooked.title
assert "A Novel of Feudal Japan" == cooked.subtitle
assert Edition.BOOK_MEDIUM == cooked.medium
assert "eng" == cooked.language
assert "St. Martin's Press" == cooked.publisher
assert datetime_utc(year=2012, month=9, day=17) == cooked.published
primary = cooked.primary_identifier
assert "ddf4gr9" == primary.identifier
assert Identifier.THREEM_ID == primary.type
identifiers = sorted(cooked.identifiers, key=lambda x: x.identifier)
assert ["9781250015280", "9781250031112", "ddf4gr9"] == [
x.identifier for x in identifiers
]
[author] = cooked.contributors
assert "Rowland, Laura Joh" == author.sort_name
assert [Contributor.AUTHOR_ROLE] == author.roles
subjects = [x.name for x in cooked.subjects]
assert ["Children's Health", "Mystery & Detective"] == sorted(subjects)
[pages] = cooked.measurements
assert Measurement.PAGE_COUNT == pages.quantity_measured
assert 304 == pages.value
[alternate, image, description] = sorted(cooked.links, key=lambda x: x.rel)
assert "alternate" == alternate.rel
assert alternate.href.startswith("http://ebook.3m.com/library")
# We have a full-size image...
assert Hyperlink.IMAGE == image.rel
assert Representation.JPEG_MEDIA_TYPE == image.media_type
assert image.href.startswith("http://ebook.3m.com/delivery")
assert "documentID=ddf4gr9" in image.href
assert "&size=NORMAL" not in image.href
# ... and a thumbnail, which we obtained by adding an argument
# to the main image URL.
thumbnail = image.thumbnail
assert Hyperlink.THUMBNAIL_IMAGE == thumbnail.rel
assert Representation.JPEG_MEDIA_TYPE == thumbnail.media_type
assert thumbnail.href == image.href + "&size=NORMAL"
# We have a description.
assert Hyperlink.DESCRIPTION == description.rel
assert description.content.startswith("<b>Winner")
def test_multiple_contributor_roles(self):
data = self.sample_data("item_metadata_audio.xml")
[data] = list(ItemListParser().parse(data))
names_and_roles = []
for c in data.contributors:
[role] = c.roles
names_and_roles.append((c.sort_name, role))
# We found one author and three narrators.
assert (
sorted(
[
("Riggs, Ransom", "Author"),
("Callow, Simon", "Narrator"),
("Mann, Bruce", "Narrator"),
("Hagon, Garrick", "Narrator"),
]
)
== sorted(names_and_roles)
)
class TestBibliographicCoverageProvider(TestBibliothecaAPI):
"""Test the code that looks up bibliographic information from Bibliotheca."""
def test_script_instantiation(self):
"""Test that RunCollectionCoverageProviderScript can instantiate
this coverage provider.
"""
script = RunCollectionCoverageProviderScript(
BibliothecaBibliographicCoverageProvider,
self._db,
api_class=MockBibliothecaAPI,
)
[provider] = script.providers
assert isinstance(provider, BibliothecaBibliographicCoverageProvider)
assert isinstance(provider.api, MockBibliothecaAPI)
def test_process_item_creates_presentation_ready_work(self):
# Test the normal workflow where we ask Bibliotheca for data,
# Bibliotheca provides it, and we create a presentation-ready work.
identifier = self._identifier(identifier_type=Identifier.BIBLIOTHECA_ID)
identifier.identifier = "ddf4gr9"
# This book has no LicensePools.
assert [] == identifier.licensed_through
# Run it through the BibliothecaBibliographicCoverageProvider
provider = BibliothecaBibliographicCoverageProvider(
self.collection, api_class=MockBibliothecaAPI
)
data = self.sample_data("item_metadata_single.xml")
# We can't use self.api because that's not the same object
# as the one created by the coverage provider.
provider.api.queue_response(200, content=data)
[result] = provider.process_batch([identifier])
assert identifier == result
# A LicensePool was created and populated with format and availability
# information.
[pool] = identifier.licensed_through
assert 1 == pool.licenses_owned
assert 1 == pool.licenses_available
[lpdm] = pool.delivery_mechanisms
assert (
"application/epub+zip (application/vnd.adobe.adept+xml)"
== lpdm.delivery_mechanism.name
)
# A Work was created and made presentation ready.
assert "The Incense Game" == pool.work.title
assert True == pool.work.presentation_ready
def test_internal_formats(self):
m = ItemListParser.internal_formats
def _check_format(input, expect_medium, expect_format, expect_drm):
medium, formats = m(input)
assert medium == expect_medium
[format] = formats
assert expect_format == format.content_type
assert expect_drm == format.drm_scheme
rep = Representation
adobe = DeliveryMechanism.ADOBE_DRM
findaway = DeliveryMechanism.FINDAWAY_DRM
book = Edition.BOOK_MEDIUM
# Verify that we handle the known strings from Bibliotheca
# appropriately.
_check_format("EPUB", book, rep.EPUB_MEDIA_TYPE, adobe)
_check_format("EPUB3", book, rep.EPUB_MEDIA_TYPE, adobe)
_check_format("PDF", book, rep.PDF_MEDIA_TYPE, adobe)
_check_format("MP3", Edition.AUDIO_MEDIUM, None, findaway)
        # Now try a string we don't recognize from Bibliotheca.
medium, formats = m("Unknown")
# We assume it's a book.
assert Edition.BOOK_MEDIUM == medium
# But we don't know which format.
assert [] == formats
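# Hedged illustration (not part of the library under test): test_purchases()
# earlier in this module exercises an offset-based pagination pattern --
# request pages of up to 50 records, bump the offset by the page size each
# time, and stop once a short page comes back. A standalone sketch of that
# loop follows; every name in it is illustrative only.
def _pagination_sketch(fetch_page, start, end, page_size=50):
    offset = 1
    while True:
        page = fetch_page(start, end, offset, page_size)
        for record in page:
            yield record
        if len(page) < page_size:
            # A short page means there is nothing left to fetch.
            break
        offset += page_size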
|
import os
import pytest
import sqlite3
from twdft.env import TWDFT_DATA_DIR
@pytest.fixture(
params=["Awful Site Name With Space", "Bad & Badder Site", "Test Site/1"]
)
def bad_site_names(request):
print("\n-------------------------------------")
print(f"fixturename : {request.fixturename}")
yield request.param
@pytest.fixture(
params=[
("Port of Harwich", "2010-10-10"),
("Port of Felixtowe", "2010-10-11"),
("Port of Leith", "2019-05-01"),
]
)
def date_location(request):
print("\n--------------------------------------")
print(f"fixturename : {request.fixturename}")
print(f"scope : {request.scope}")
print(f"function : {request.function.__name__}")
print(f"cls : {request.cls}")
print(f"module : {request.module.__name__}")
print(f"fspath : {request.fspath}")
yield request.param
os.unlink("/home/lemon/.task-test/pending.data")
@pytest.fixture(
params=[
("Port of Harwich", "2010-10-10T10:30"),
("Port of Felixtowe", "2010-10-11T10:30"),
("Port of Leith", "2019-05-01T10:30"),
]
)
def date_time_location(request):
print("\n--------------------------------------")
print(f"fixturename : {request.fixturename}")
print(f"scope : {request.scope}")
print(f"function : {request.function.__name__}")
print(f"cls : {request.cls}")
print(f"module : {request.module.__name__}")
print(f"fspath : {request.fspath}")
yield request.param
os.unlink("/home/lemon/.task-test/pending.data")
@pytest.fixture
def date_natural_location():
yield "20 August 2018"
# we don't unlink here because the error means no data is created
# os.unlink("/home/lemon/.task-test/pending.data")
@pytest.fixture
def date_time_natural_location():
yield {"date": "20 August 2018", "time": "10:30am"}
os.unlink("/home/lemon/.task-test/pending.data")
TEST_DB = os.path.join(TWDFT_DATA_DIR, "test-twdft.db")
INSPECTORS = ["John McClaren", "Kelvin Muclaleik", "Steven Chrosssol", "Aiden Snarlo"]
@pytest.fixture
def test_db():
with sqlite3.connect(TEST_DB) as conn:
c = conn.cursor()
c.execute("DROP TABLE IF EXISTS site")
c.execute(
"""
CREATE TABLE site(
id INTEGER PRIMARY KEY,
name TEXT,
site_type TEXT,
sub_category TEXT,
address_1 TEXT,
address_2 TEXT,
town TEXT,
county TEXT,
country TEXT,
postcode TEXT,
site_category TEXT,
freq_target TEXT,
created TEXT,
notes TEXT,
last_inspection TEXT,
next_inspection TEXT,
pfsp_approval TEXT,
pfsp_expiry TEXT,
unlocode TEXT,
pfso TEXT,
pso TEXT,
pfsa_approval TEXT,
pfsa_expiry TEXT,
team TEXT,
created_by TEXT,
last_updated TEXT,
updated_by TEXT,
afp_loc TEXT,
rdf TEXT,
classification TEXT,
article24 TEXT,
psa_approval TEXT,
inspection_due TEXT
)
"""
)
conn.commit()
c.execute(
"""
DROP TABLE IF EXISTS inspection
"""
)
conn.commit()
c.execute(
"""
CREATE TABLE inspection(
id INTEGER PRIMARY KEY,
site INTEGER,
date TEXT,
status TEXT,
time TEXT,
FOREIGN KEY(site) REFERENCES site(id)
)
"""
)
conn.commit()
c.execute("DROP TABLE IF EXISTS inspector")
c.execute(
"""
create table inspector(
id integer primary key,
first_name text,
last_name text
)
"""
)
conn.commit()
for i in INSPECTORS:
first = i.split(" ")[0]
last = i.split(" ")[1]
c.execute(
"INSERT INTO inspector(first_name, last_name) VALUES (?,?)",
(first, last),
)
c.execute("DROP TABLE IF EXISTS inspector_inspections")
c.execute(
"""
CREATE TABLE inspector_inspections(
inspector INTEGER,
inspection INTEGER,
FOREIGN KEY (inspector) REFERENCES inspector(id),
FOREIGN KEY (inspection) REFERENCES inspection(id)
)
"""
)
conn.commit()
c.execute(
f"""
INSERT INTO site(
name,
site_type,
sub_category,
address_1,
address_2,
town,
county,
country,
postcode,
site_category,
freq_target,
created,
notes,
last_inspection,
next_inspection,
pfsp_approval,
pfsp_expiry,
unlocode,
pfso,
pso,
pfsa_approval,
pfsa_expiry,
team,
created_by,
last_updated,
updated_by,
afp_loc,
rdf,
classification,
article24,
psa_approval,
inspection_due
) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)""",
(
"Macmillian Port",
"Port",
"COG 1",
"Main Precinct",
"Blubbery",
"Tinseltown",
"Northampton",
"UK",
"ZE1 1QP",
"A",
"12",
"05-06-2014 10:57",
"Moyra Hemphill used to be the PFSO here but moved on to Liverpool.",
"15-06-2017 00:00",
"",
"16-11-2017 00:00",
"",
"GBSUL-001",
"Harvey Lemon",
"",
"24-12-2012 0:00",
"01-04-2018 0:00",
"Maritime East",
"20",
"19-05-2016 0:00",
"103",
"",
"0",
"UK PF",
"1",
"19-09-2014 0:00",
"20-10-2019 0:00",
),
)
conn.commit()
# single inspector
c.execute(
"""
INSERT INTO inspection(site, date, status, time)
VALUES (1, "2018-10-10", "forwardlook", "2pm");
"""
)
insp_id = c.lastrowid
c.execute(f"""INSERT INTO inspector_inspections VALUES (?,?)""", (1, insp_id))
# double inspector
c.execute(
"""
INSERT INTO inspection(site, date, status, time)
VALUES (1, "2028-10-10", "forwardlook", "10:30");
"""
)
insp_id = c.lastrowid
c.execute(f"""INSERT INTO inspector_inspections VALUES (?,?)""", (1, insp_id))
c.execute(f"""INSERT INTO inspector_inspections VALUES (?,?)""", (2, insp_id))
# three man inspector
c.execute(
"""
INSERT INTO inspection(site, date, status, time)
VALUES (1, "2038-10-13", "planning", "12:30");
"""
)
insp_id = c.lastrowid
c.execute(f"""INSERT INTO inspector_inspections VALUES (?,?)""", (1, insp_id))
c.execute(f"""INSERT INTO inspector_inspections VALUES (?,?)""", (2, insp_id))
c.execute(f"""INSERT INTO inspector_inspections VALUES (?,?)""", (3, insp_id))
conn.commit()
yield TEST_DB
c.execute("DROP TABLE inspection")
c.execute("DROP TABLE inspector_inspections")
c.execute("DROP TABLE site")
c.execute("DROP TABLE inspector")
|
import os
import sys
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def calculateTileGridStatistics(tile_grid_shape, tile_size_x: int, tile_size_y: int):
"""Calculates all necessary grid statistics based on tile shape and size
Parameters
----------
tile_grid_shape : Tuple[int, int]
Tuple of int representing the number of tiles that fit in the x and y dimensions of the original image respectively.
    tile_size_x : int
        Number of pixels in the x dimension of each tile of the original image
    tile_size_y : int
        Number of pixels in the y dimension of each tile of the original image
Returns
-------
[int] total_n_tiles
Total number of tiles in the original image
[ndarray] tile_grid_array
An array representing the layout of the tiles
[int] original_x
length of the original image in the x dimension
[int] original_y
length of the original image in the y dimension
"""
total_n_tiles = tile_grid_shape[0]*tile_grid_shape[1]
# Create range list of the tiles
total_tiles_list = list(range(1,total_n_tiles+1))
# Reshape the range list into the tile grid of the original image.
# We swap the elements of the grid because the rest of the pipeline sees x and y as horizontal vs vertical, but numpy sees it as an array, where x = vertical movement
swapped_grid = (tile_grid_shape[1],tile_grid_shape[0])
tile_grid_array = np.reshape(total_tiles_list, swapped_grid)
# Creating an empty array the size of an original image
original_x = tile_grid_array.shape[1] * tile_size_x
original_y = tile_grid_array.shape[0] * tile_size_y
return total_n_tiles, tile_grid_array, original_x, original_y
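# Hedged worked example for calculateTileGridStatistics() (illustrative values):
# for a 2x2 tile grid of 700x500 pixel tiles,
#     calculateTileGridStatistics((2, 2), 700, 500)
# returns
#     total_n_tiles   = 4
#     tile_grid_array = [[1, 2],
#                        [3, 4]]      (tile numbers laid out row by row)
#     original_x      = 2 * 700 = 1400
#     original_y      = 2 * 500 = 1000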
# This requires that the input CSV has an X column and a Y column.
def transformTileCoordinateSystem(path_to_csv: str, tile_grid_shape, tile_size_x, tile_size_y):
decoded_df = pd.read_csv(path_to_csv)
original_x_column = []
original_y_column = []
total_n_tiles, tile_grid_array, _, _ = calculateTileGridStatistics(tile_grid_shape, tile_size_x, tile_size_y)
for row in decoded_df.itertuples():
# extract X and Y coordinates of the respective tile the spot belongs to
row_location, col_location = np.where(tile_grid_array==row.Tile) # this returns rows and columns, NOT X and Y, which is the opposite
# unpacking the array structure of the return tuple of np.where
y_tile_location, x_tile_location = row_location[0], col_location[0]
# Calculate how many pixels to add in order to plot the spot in the correct tile in the original image
x_adder = x_tile_location * tile_size_x
y_adder = y_tile_location * tile_size_y
# Calculate the position in the original image
x_coordinate = row.X + x_adder
y_coordinate = row.Y + y_adder
original_x_column.append(x_coordinate)
original_y_column.append(y_coordinate)
decoded_df['Original_X'] = original_x_column
decoded_df['Original_Y'] = original_y_column
return decoded_df
if __name__=="__main__":
decoded_genes = "/media/david/Puzzles/starfish_test_data/ExampleInSituSequencing/results2/decoded/concat_decoded_genes.csv"
    transformTileCoordinateSystem(decoded_genes, (2, 2), 700, 500)
|
"""
Handles incoming log records with a buffer
"""
import datetime
import json
import logging
import traceback
from typing import Optional, Dict
from logging.handlers import BufferingHandler
from . import config, seqsender
LOG = logging.getLogger(config.LOGGER_NAME)
class SeqPyLoggerHandler(BufferingHandler):
"""
    Handles incoming log records with a buffer
"""
def __init__(self, capacity=10, formatter_style="%"):
self.formatter_style = formatter_style
super().__init__(capacity=capacity)
def flush(self):
try:
self.acquire()
if len(self.buffer) > 0:
self.send_to_seq()
super().flush()
finally:
self.release()
def send_to_seq(self):
"""Prepares record for sending to seq"""
batch_objects = []
for record in self.buffer:
            # format_message monkey-patches LogRecord with .message
record_args = SeqPyLoggerHandler.format_message(
record, self.formatter_style
)
record_object = SeqPyLoggerHandler.format_record_for_seq(record)
record_object.update(record_args)
ex = SeqPyLoggerHandler.add_exception(record)
if ex is not None:
record_object.update({"@x": ex})
batch_objects.append(json.dumps(record_object, separators=(",", ":")))
seqsender.SeqSender.send(batch_objects)
@staticmethod
def format_record_for_seq(record: logging.LogRecord) -> dict:
"""Creates seq record object
Parameters
----------
record : logging.LogRecord
Record to handle
Returns
-------
dict
formatted according to seq specs
"""
record_object = {
"@t": datetime.datetime.fromtimestamp(record.created).strftime(
"%Y-%m-%dT%H:%M:%S.%f%Z"
),
"@l": record.levelname,
"@mt": record.msg,
"@m": record.message,
"@@Logger": record.name,
"@@Path": record.pathname,
"@@Line": record.lineno,
"@@Function": record.funcName,
"@@Thread": record.threadName,
"@@Pid": record.process,
"@@Environment": config.ENVIRONMENT,
}
return record_object
@staticmethod
def add_exception(record: logging.LogRecord) -> Optional[str]:
"""Addeds traceback data to log message
Parameters
----------
record : logging.LogRecord
LogMessage to handle
Returns
-------
Optional[str]
Returns message with added traceback or None
"""
if record.exc_info and any(record.exc_info):
return str.join("", traceback.format_exception(*record.exc_info))
return None
@staticmethod
def format_message(
record: logging.LogRecord, formatter_style: str
) -> Dict[str, str]:
"""Prepares message shape for sending to seq as this requires a different syntax
Parameters
----------
record : logging.LogRecord
LogMessage to handle
formatter_style : str
            Log message formatting style; only "%" is currently supported
Returns
-------
dict
dict of extra arguments in message
"""
record_args: Dict[str, str] = {}
if formatter_style != "%":
logging.warning("SeqPyLogger Unimplemented formatting style")
return record_args
# Prevent logging not str type
if not isinstance(record.msg, str):
record.msg = str(record.msg)
try:
record.message = record.msg % record.args
except TypeError:
LOG.warning("SeqPyLogger message formatting failed - (%s)", record.msg)
record.message = record.msg
for i, arg in enumerate(record.args):
record_args.update({"arg_%d" % i: str(arg)})
record.msg = record.msg.replace("%s", "{arg_%d}" % i, 1)
return record_args
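# Hedged usage sketch (illustrative, not part of the original module): one way
# this handler could be wired up. Records are buffered and, once `capacity` is
# reached, flushed to seq in a batch via seqsender.SeqSender.send(), so running
# this assumes a reachable seq endpoint per the package's config.
if __name__ == "__main__":  # pragma: no cover
    demo_logger = logging.getLogger(config.LOGGER_NAME)
    demo_logger.setLevel(logging.INFO)
    demo_logger.addHandler(SeqPyLoggerHandler(capacity=5))
    for n in range(7):
        demo_logger.info("demo message %s", n)  # the first five flush as one batch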
|
import logging as _logging
log = _logging.getLogger('stolos')
import os.path as _p
import pkg_resources as _pkg_resources
__version__ = _pkg_resources.get_distribution(
_p.basename(_p.dirname(_p.abspath(__file__)))).version
class Uninitialized(Exception):
msg = (
"Before you use Stolos, please initialize it."
" You probably just want to call stolos.api.initialize()'")
def __getattr__(self, *args, **kwargs):
raise Uninitialized(Uninitialized.msg)
def __repr__(self):
return "Stolos Not Initialized. %s" % Uninitialized.msg
def __str__(self):
return repr(self)
def get_NS():
"""Returns a namespace containing configuration variables. Stolos must be
initialized before NS is set. This ensures that relevant configuration is
properly defined.
Users of stolos can just call stolos.api.initialize()
    Developers of stolos need to ensure that either they are using the api or,
    if developing on internals, that stolos.initializer.initialize(...) is
    called for the module(s) they are working on. Keep in mind that only
    the api and Stolos's runner.py should normally initialize Stolos.
"""
try:
return NS
except NameError:
raise Uninitialized(Uninitialized.msg)
__all__ = ['api']
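# Hedged illustration (not from the original file): until something assigns the
# module-level `NS` (normally done by stolos.api.initialize()), get_NS() raises
# Uninitialized, roughly like:
#
#     >>> import stolos
#     >>> stolos.get_NS()
#     Traceback (most recent call last):
#         ...
#     Uninitialized: Before you use Stolos, please initialize it. ...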
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QRect
from PyQt5.QtWidgets import QMessageBox, QDesktopWidget, QCheckBox
from SQL_functions import *
import pymysql
from selenium import webdriver
import os
import time
class Ui_Details_song(QtWidgets.QWidget):
res_data = ''
AlbumName = ''
switch_window = QtCore.pyqtSignal(list)
switch_window2 = QtCore.pyqtSignal(str)
def setupUi(self, Details_song, data):
Details_song.setObjectName("Details_song")
Details_song.resize(800, 600)
Details_song.setMinimumSize(QtCore.QSize(800, 600))
Details_song.setMaximumSize(QtCore.QSize(800, 600))
self.verticalLayout = QtWidgets.QVBoxLayout(Details_song)
self.verticalLayout.setObjectName("verticalLayout")
self.label_6 = QtWidgets.QLabel(Details_song)
font = QtGui.QFont()
font.setPointSize(25)
self.label_6.setFont(font)
self.label_6.setAlignment(QtCore.Qt.AlignCenter)
self.label_6.setObjectName("label_6")
self.verticalLayout.addWidget(self.label_6)
self.Song_name = QtWidgets.QLabel(Details_song)
font = QtGui.QFont()
font.setPointSize(20)
self.Song_name.setFont(font)
self.Song_name.setObjectName("Song_name")
self.verticalLayout.addWidget(self.Song_name)
self.Album_name = QtWidgets.QLabel(Details_song)
font = QtGui.QFont()
font.setPointSize(20)
self.Album_name.setFont(font)
self.Album_name.setObjectName("Album_name")
self.verticalLayout.addWidget(self.Album_name)
self.Author_name = QtWidgets.QLabel(Details_song)
font = QtGui.QFont()
font.setPointSize(20)
self.Author_name.setFont(font)
self.Author_name.setObjectName("Author_name")
self.Author_name.setWordWrap(True)
self.verticalLayout.addWidget(self.Author_name)
self.Lyrics = QtWidgets.QPushButton(Details_song)
self.Lyrics.setObjectName("Lyrics")
self.verticalLayout.addWidget(self.Lyrics)
#self.Video = QtWidgets.QPushButton(Details_song)
#self.Video.setObjectName("Video")
#self.verticalLayout.addWidget(self.Video)
self.Return_to_results = QtWidgets.QPushButton(Details_song)
self.Return_to_results.setObjectName("Return_to_results")
self.verticalLayout.addWidget(self.Return_to_results)
self.res_data = data
#self.get_album()
self.retranslateUi(Details_song)
QtCore.QMetaObject.connectSlotsByName(Details_song)
self.location_on_the_screen()
self.Return_to_results.clicked.connect(self.return_button)
self.Lyrics.clicked.connect(self.open_lyrics)
def return_button(self):
self.switch_window.emit(list())
def location_on_the_screen(self):
ag = QDesktopWidget().availableGeometry()
widget = self.geometry()
x = ag.width()/2 - widget.width()/2
y = ag.height()/2 - widget.height()/2
        self.move(int(x), int(y))
def open_lyrics(self):
self.switch_window2.emit(self.res_data['Lyrics URL'])
"""
def get_album(self):
connection = pymysql.connect(...)
with connection:
self.AlbumName = select_where(connection, "Name", "Albums join Songs_in_albums as sia on Albums.ID = sia.ID_Album", "sia.ID_Song = %s", self.res_data['ID'])[0]['Name']
connection.commit()
"""
def retranslateUi(self, Details_song):
_translate = QtCore.QCoreApplication.translate
Details_song.setWindowTitle(_translate("Details_song", "Music Search System | Details"))
self.label_6.setText(_translate("Details_song", "Details"))
self.Song_name.setText(_translate("Details_song", "Song name: " + self.res_data['Name']))
self.Album_name.setText(_translate("Details_song", "Album name: " + self.res_data['Album']))
self.Author_name.setText(_translate("Details_song", "Author(s) name: " + self.res_data['Artists']))
self.Lyrics.setText(_translate("Details_song", "-> Click to open lyrics <-"))
self.Return_to_results.setText(_translate("Details_song", "Return to results"))
#self.Video.setText(_translate("Details_song", "Open video on YouTube"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Details_song = QtWidgets.QWidget()
ui = Ui_Details_song()
ui.setupUi(Details_song)
Details_song.show()
sys.exit(app.exec_())
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class Secret(pulumi.CustomResource):
"""
Provides a resource to manage AWS Secrets Manager secret metadata. To manage a secret value, see the [`aws_secretsmanager_secret_version` resource](/docs/providers/aws/r/secretsmanager_secret_version.html).
"""
def __init__(__self__, __name__, __opts__=None, description=None, kms_key_id=None, name=None, recovery_window_in_days=None, rotation_lambda_arn=None, rotation_rules=None, tags=None):
"""Create a Secret resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, basestring):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if description and not isinstance(description, basestring):
raise TypeError('Expected property description to be a basestring')
__self__.description = description
"""
A description of the secret.
"""
__props__['description'] = description
if kms_key_id and not isinstance(kms_key_id, basestring):
raise TypeError('Expected property kms_key_id to be a basestring')
__self__.kms_key_id = kms_key_id
"""
Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be used to encrypt the secret values in the versions stored in this secret. If you don't specify this value, then Secrets Manager defaults to using the AWS account's default CMK (the one named `aws/secretsmanager`). If the default KMS CMK with that name doesn't yet exist, then AWS Secrets Manager creates it for you automatically the first time.
"""
__props__['kmsKeyId'] = kms_key_id
if name and not isinstance(name, basestring):
raise TypeError('Expected property name to be a basestring')
__self__.name = name
"""
Specifies the friendly name of the new secret. The secret name can consist of uppercase letters, lowercase letters, digits, and any of the following characters: `/_+=.@-` Spaces are not permitted.
"""
__props__['name'] = name
if recovery_window_in_days and not isinstance(recovery_window_in_days, int):
raise TypeError('Expected property recovery_window_in_days to be a int')
__self__.recovery_window_in_days = recovery_window_in_days
"""
Specifies the number of days that AWS Secrets Manager waits before it can delete the secret. This value can range from 7 to 30 days. The default value is 30.
"""
__props__['recoveryWindowInDays'] = recovery_window_in_days
if rotation_lambda_arn and not isinstance(rotation_lambda_arn, basestring):
raise TypeError('Expected property rotation_lambda_arn to be a basestring')
__self__.rotation_lambda_arn = rotation_lambda_arn
"""
Specifies the ARN of the Lambda function that can rotate the secret.
"""
__props__['rotationLambdaArn'] = rotation_lambda_arn
if rotation_rules and not isinstance(rotation_rules, dict):
raise TypeError('Expected property rotation_rules to be a dict')
__self__.rotation_rules = rotation_rules
"""
A structure that defines the rotation configuration for this secret. Defined below.
"""
__props__['rotationRules'] = rotation_rules
if tags and not isinstance(tags, dict):
raise TypeError('Expected property tags to be a dict')
__self__.tags = tags
"""
Specifies a key-value map of user-defined tags that are attached to the secret.
"""
__props__['tags'] = tags
__self__.arn = pulumi.runtime.UNKNOWN
"""
Amazon Resource Name (ARN) of the secret.
"""
__self__.rotation_enabled = pulumi.runtime.UNKNOWN
"""
Specifies whether automatic rotation is enabled for this secret.
"""
super(Secret, __self__).__init__(
'aws:secretsmanager/secret:Secret',
__name__,
__props__,
__opts__)
def set_outputs(self, outs):
if 'arn' in outs:
self.arn = outs['arn']
if 'description' in outs:
self.description = outs['description']
if 'kmsKeyId' in outs:
self.kms_key_id = outs['kmsKeyId']
if 'name' in outs:
self.name = outs['name']
if 'recoveryWindowInDays' in outs:
self.recovery_window_in_days = outs['recoveryWindowInDays']
if 'rotationEnabled' in outs:
self.rotation_enabled = outs['rotationEnabled']
if 'rotationLambdaArn' in outs:
self.rotation_lambda_arn = outs['rotationLambdaArn']
if 'rotationRules' in outs:
self.rotation_rules = outs['rotationRules']
if 'tags' in outs:
self.tags = outs['tags']
|
import lab as B
import numpy as np
import pytest
from .test_architectures import generate_data
from .util import nps # noqa
@pytest.mark.parametrize("dim_lv", [0, 4])
def test_loglik_batching(nps, dim_lv):
model = nps.construct_gnp(dim_lv=dim_lv)
xc, yc, xt, yt = generate_data(nps)
# Test a high number of samples, a number which also isn't a multiple of the batch
# size.
logpdfs = B.mean(
nps.loglik(model, xc, yc, xt, yt, num_samples=4000, batch_size=128)
)
assert np.isfinite(B.to_numpy(logpdfs))
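# Hedged note on the numbers above (illustrative arithmetic only): 4000 samples
# with batch_size=128 is deliberately not a multiple of the batch size, so the
# estimator presumably splits the work into 31 full batches of 128 samples
# (3968) plus a final ragged batch of 32, exercising the leftover-batch path.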
|
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorforce.models import Model
class ConstantModel(Model):
"""
Utility class to return constant actions of a desired shape and with given bounds.
"""
def __init__(self, states_spec, actions_spec, config):
self.action_values = config.action_values
super(ConstantModel, self).__init__(
states_spec=states_spec,
actions_spec=actions_spec,
config=config
)
def tf_actions_and_internals(self, states, internals, update, deterministic):
actions = dict()
for name, action in self.actions_spec.items():
shape = (tf.shape(input=next(iter(states.values())))[0],) + action['shape']
actions[name] = tf.fill(dims=shape, value=self.action_values[name])
return actions, internals
def tf_loss_per_instance(self, states, internals, actions, terminal, reward, update):
# Nothing to be done here, loss is 0.
return tf.zeros_like(tensor=reward)
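# Hedged note (illustrative): for a batch of B states and an action spec with
# shape (2,), tf_actions_and_internals() above builds dims = (B, 2) and fills
# it with the configured constant, so every action in the batch is identical.
# A minimal TF-only sketch of that fill step (values are made up):
#
#     import tensorflow as tf
#     constant_actions = tf.fill(dims=(3, 2), value=0.5)
#     # -> [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]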
|
#!/usr/bin/env python
from roslib.message import get_message_class
import rospy
import rosgraph
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2 as cv
import numpy as np
VERBOSE=False
class NoisePublisher():
def __init__(self):
rospy.init_node('image_denoising', anonymous=True)
prefix = rospy.get_param('~prefix', "/denoised")
self.noise = rospy.get_param('~noise')
rate = 30
r = rospy.Rate(rate)
self.topics_selected = rospy.get_param('/topics', '')
self.topics_ = dict({})
self._master = rosgraph.Master(rospy.get_name())
self.all_topics_info = self._master.getTopicTypes()
if len(self.topics_selected) == 0:
rospy.loginfo("No topic selected, please add camera rgb or depth topics")
if VERBOSE :
print "subscribed to /camera/rgb/image_raw"
self.bridge = CvBridge()
for topic in self.topics_selected:
msg_name = [ty for tp, ty in self.all_topics_info if tp == topic][0]
self.pub_ = rospy.Publisher(prefix+topic,Image,queue_size=1)
sub_ = rospy.Subscriber(topic, get_message_class(msg_name), callback = self.callback, callback_args = topic)
#msg_ = Image
#self.topics_[topic] = [sub_, pub_, msg_]
rospy.loginfo("Topics with noise: std = " + str(self.noise))
#while not rospy.is_shutdown():
#for topic in self.topics_selected:
# pub_.publish(self.topics_[topic][2])
#r.sleep()
def callback(self, msg, topic):
#self.topics_[topic][2] = msg
try:
cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
except CvBridgeError as e:
print(e)
row,col,ch = cv_image.shape
mean = 0
var = self.noise
sigma = var**0.5
gauss = np.random.normal(mean,sigma,(row,col,ch))
gauss = gauss.reshape(row,col,ch)
        cv_image = cv_image.astype(np.float64)  # np.float is a removed alias; use float64
noisy = cv_image + gauss
#msg = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
noisy = noisy.astype(np.uint8)
self.pub_.publish(self.bridge.cv2_to_imgmsg(noisy, "bgr8"))
if __name__ == '__main__':
try:
d = NoisePublisher()
rospy.spin()
except rospy.ROSInterruptException: pass
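# Hedged sketch (illustrative, no ROS required): the heart of callback() above
# is plain additive Gaussian noise on an image array. A NumPy-only version,
# with the image shape and noise variance chosen purely for illustration:
#
#     import numpy as np
#     variance = 25.0
#     image = np.full((480, 640, 3), 128.0)                      # stand-in image
#     gauss = np.random.normal(0.0, variance ** 0.5, image.shape)
#     noisy = (image + gauss).astype(np.uint8)                   # cast back, as above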
|
import logging
from glob import glob
from pathlib import Path
from ..core.filesystem import rename_files, rename_folders
from ..model.filesystem import OptionParser, Options
from . import common
logger = logging.getLogger(__name__)
def rename(options: Options):
function = rename_folders if options.dir_mode else rename_files
result = function(
options.workdir,
options.pattern,
options.to,
ignore_case=not options.case_sensitive,
recursive=options.recursive
)
if not result:
logger.warning(
f"Değişiklik yapılmadı: {options.pattern=} {options.to=}"
)
def main():
args = OptionParser().parse_args()
log_level = logging.DEBUG if args.debug else logging.INFO
common.initialize_logging(level=log_level)
for path in args.paths:
paths = [Path(p) for p in glob(path)]
for path in paths:
if path.is_dir():
options = Options.from_system_args(path)
rename(options)
else:
logger.error(f"{path.name} is not valid path")
if __name__ == "__main__":
main()
|
#! python3
# -*- coding: shift-jis -*-
import wx
import mwx
from mwx.controls import Icon
class Frame(mwx.Frame):
def __init__(self, *args, **kwargs):
mwx.Frame.__init__(self, *args, **kwargs)
        ## Do not use menu IDs in [ID_LOWEST(4999):ID_HIGHEST(5999)]
self.menubar["File"][0:0] = [
(101, "&Open\tctrl+o", "Opent the document", Icon('open'),
lambda v: print("You chose File->Open")),
(102, "&Save\tctrl+s", "Save the document", Icon('save'),
lambda v: print("You chose File->Save")),
(),
]
self.menubar["View"] = [
(103, "&one", "1", wx.ITEM_RADIO, lambda v: print("You chose", 1)),
(104, "&two", "2", wx.ITEM_RADIO, lambda v: print("You chose", 2)),
(105, "&foo", "3", wx.ITEM_RADIO, lambda v: print("You chose", 3)),
(),
(106, "&Check", "check update test", wx.ITEM_CHECK, Icon('v'),
print, #<wx._core.CommandEvent>
print, #<wx._core.UpdateUIEvent>
print), #<wx._core.MenuEvent>
(),
("&Print", (
(111, "setup\tctrl+shift+s", Icon('+'), lambda v: print("setup")),
(112, "preview\tctrl+shift+p", Icon('-'), lambda v: print("preview")),
),
),
]
self.menubar["Test/&Submenu"] = [ # add nested submenu into new menu
("&Print", (
(121, "setup", Icon('+'), lambda v: print("setup")),
(122, "preview", Icon('-'), lambda v: print("preview")),
),
),
]
self.menubar["Test/&Submenu/&Print2"] = [ # add nested submenu into new menu
(121, "setup", Icon('+'), lambda v: print("setup")),
(122, "preview", Icon('-'), lambda v: print("preview")),
]
self.menubar.reset()
if __name__ == '__main__':
app = wx.App()
frm = Frame(None)
frm.Show()
app.MainLoop()
|
"""Provides a scripting component.
Inputs:
x: The x script variable
y: The y script variable
Output:
a: The a output variable"""
__author__ = "trk"
__version__ = "2020.11.24"
import rhinoscriptsyntax as rs
import Rhino
import scriptcontext as sc
def BrepFootPrint(breps):
edgecrvs = []
for brep in breps:
edgecrvs.extend([e.DuplicateCurve() for e in brep.Edges])
# for brep in breps:
# edges.extend(brep.Edges)
# edgecrvs = [e.DuplicateCurve() for e in edges]
# flats = [Rhino.Geometry.Curve.ProjectToPlane(e, rs.WorldXYPlane()) for e in edgecrvs]
crvregion = Rhino.Geometry.Curve.CreateBooleanRegions(edgecrvs, Rhino.Geometry.Plane.WorldXY, True, Rhino.RhinoDoc.ActiveDoc.ModelAbsoluteTolerance)
outcrvs = []
for i in range(crvregion.RegionCount):
outcrvs.extend(crvregion.RegionCurves(i))
return outcrvs
a = BrepFootPrint(breps)
|
import os
import sys
import shutil
import logging
from git import Repo
from . import increment_version
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
if len(sys.argv) > 2:
version_filename = os.path.join(sys.argv[2], "version.py")
else:
version_filename = "version.py"
_locs = {}
try:
_code_file = open(version_filename, "r")
_code = _code_file.read()
_code_file.close()
exec(_code, {}, _locs)
except IOError as e:
logging.info("No such file: %s" % version_filename)
version = _locs.get("version", "0.0.0")
# get current branch
branch = Repo("./").active_branch.name
if sys.argv[1] == "precommit":
version = increment_version(version)
with open(version_filename, "w") as _code_file:
_code = "branch = \"%s\"\nversion = \"%s\"\n" % (branch, version)
_code_file.write(_code)
# add changed version file
Repo("./").git.add(version_filename)
elif sys.argv[1] == "postcommit":
Repo("./").git.tag("%s_%s" % (branch, version))
elif sys.argv[1] == "init":
_dirname = os.path.dirname(__file__)
if len(sys.argv) > 2:
_version_root = sys.argv[2]
else:
_version_root = "./"
_pre_commit_fn = os.path.join(".git", "hooks", "pre-commit")
_post_commit_fn = os.path.join(".git", "hooks", "post-commit")
shutil.copy(os.path.join(_dirname, "pre-commit"), _pre_commit_fn)
shutil.copy(os.path.join(_dirname, "post-commit"), _post_commit_fn)
with open(_pre_commit_fn, "a") as _file:
_file.write(" %s" % _version_root)
with open(_post_commit_fn, "a") as _file:
_file.write(" %s" % _version_root)
|
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
from .dash_app import app
from dash.dependencies import Input, Output, State, ALL # ClientsideFunction
collapses = \
html.Div([
dbc.Collapse(html.Div(
[dbc.Spinner(
dcc.Upload(id='upload-file',
children=html.Div(
['Drag and Drop or ',
html.A('Select Files',
style=
{'font-weight': 'bold',
'text-decoration': 'underline'})]),
className='dragndrop'), fullscreen=True,
fullscreen_class_name='loading_spinner_bg',
spinner_class_name='loading_spinner')]),
# accept='.json',
id='collapse_load', is_open=False),
dbc.Collapse(html.Div(
[html.Div('Save setting as : ', className='label-param-s'),
dbc.Input(id='save_as_input', placeholder="settings.json",
className='label-param-l'),
html.Button('Go', id='save-as-button',
className='ufontm centered')], className='r_flex g-0'),
id='collapse_save', is_open=False)])
@app.callback(
Output("collapse_load", "is_open"),
Output("collapse_save", "is_open"),
Input("load-button", "n_clicks"),
Input("save-button", "n_clicks"),
State("collapse_load", "is_open"),
State("collapse_save", "is_open"),
)
def toggle_collapse(n, n1, is_open, is_open2):
ctx = dash.callback_context.triggered[0]['prop_id']
if n or n1:
if ctx == 'save-button.n_clicks':
if is_open is True:
return not is_open, not is_open2
return is_open, not is_open2
else:
if is_open2 is True:
return not is_open, not is_open2
return not is_open, is_open2
return is_open, is_open2
|
# Auditor for patch files
# Patches should be declared as text/plain (also .py files),
# independent of what the browser says, and
# the "patch" keyword should get set automatically.
import posixpath
patchtypes = ('.diff', '.patch')
sourcetypes = ('.diff', '.patch', '.py')
def ispatch(file, types):
return posixpath.splitext(file)[1] in types
def patches_text_plain(db, cl, nodeid, newvalues):
if ispatch(newvalues['name'], sourcetypes):
newvalues['type'] = 'text/plain'
def patches_keyword(db, cl, nodeid, newvalues):
# Check whether there are any new files
newfiles = set(newvalues.get('files',()))
if nodeid:
newfiles -= set(db.bug.get(nodeid, 'files'))
# Check whether any of these is a patch
newpatch = False
for fileid in newfiles:
if ispatch(db.file.get(fileid, 'name'), patchtypes):
newpatch = True
break
if newpatch:
        # Add the patch keyword if it's not already there
patchid = db.keyword.lookup("patch")
oldkeywords = []
if nodeid:
oldkeywords = db.bug.get(nodeid, 'keywords')
if patchid in oldkeywords:
# This is already marked as a patch
return
if 'keywords' not in newvalues:
newvalues['keywords'] = oldkeywords
newvalues['keywords'].append(patchid)
def init(db): pass
# db.file.audit('create', patches_text_plain)
# db.bug.audit('create', patches_keyword)
# db.bug.audit('set', patches_keyword)
|
# Note: Most of this code is copied from https://github.com/ZhiwenShao/PyTorch-JAANet/blob/master/network.py
# Please check with the repo and original authors if you want to redistribute.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class LocalConv2dReLU(nn.Module):
def __init__(
self,
local_h_num,
local_w_num,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
activation_type="ReLU",
):
super(LocalConv2dReLU, self).__init__()
self.local_h_num = local_h_num
self.local_w_num = local_w_num
self.bns = nn.ModuleList(
[nn.BatchNorm2d(in_channels) for i in range(local_h_num * local_w_num)]
)
if activation_type == "ReLU":
self.relus = nn.ModuleList(
[nn.ReLU(inplace=True) for i in range(local_h_num * local_w_num)]
)
elif activation_type == "PReLU":
self.relus = nn.ModuleList(
[nn.PReLU() for i in range(local_h_num * local_w_num)]
)
self.convs = nn.ModuleList(
[
nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
bias,
)
for i in range(local_h_num * local_w_num)
]
)
def forward(self, x):
h_splits = torch.split(x, int(x.size(2) / self.local_h_num), 2)
h_out = []
for i in range(len(h_splits)):
start = True
w_splits = torch.split(
h_splits[i], int(h_splits[i].size(3) / self.local_w_num), 3
)
for j in range(len(w_splits)):
bn_out = self.bns[i * len(w_splits) + j](w_splits[j].contiguous())
bn_out = self.relus[i * len(w_splits) + j](bn_out)
conv_out = self.convs[i * len(w_splits) + j](bn_out)
if start:
h_out.append(conv_out)
start = False
else:
h_out[i] = torch.cat((h_out[i], conv_out), 3)
if i == 0:
out = h_out[i]
else:
out = torch.cat((out, h_out[i]), 2)
return out
class HierarchicalMultiScaleRegionLayer(nn.Module):
def __init__(
self,
local_group,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
activation_type="ReLU",
):
super(HierarchicalMultiScaleRegionLayer, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
bias,
)
self.local_conv_branch1 = LocalConv2dReLU(
local_group[0][0],
local_group[0][1],
out_channels,
int(out_channels / 2),
kernel_size,
stride,
padding,
dilation,
groups,
bias,
activation_type,
)
self.local_conv_branch2 = LocalConv2dReLU(
local_group[1][0],
local_group[1][1],
int(out_channels / 2),
int(out_channels / 4),
kernel_size,
stride,
padding,
dilation,
groups,
bias,
activation_type,
)
self.local_conv_branch3 = LocalConv2dReLU(
local_group[2][0],
local_group[2][1],
int(out_channels / 4),
int(out_channels / 4),
kernel_size,
stride,
padding,
dilation,
groups,
bias,
activation_type,
)
self.bn = nn.BatchNorm2d(out_channels)
if activation_type == "ReLU":
self.relu = nn.ReLU(inplace=True)
elif activation_type == "PReLU":
self.relu = nn.PReLU()
def forward(self, x):
x = self.conv(x)
local_branch1 = self.local_conv_branch1(x)
local_branch2 = self.local_conv_branch2(local_branch1)
local_branch3 = self.local_conv_branch3(local_branch2)
local_out = torch.cat((local_branch1, local_branch2, local_branch3), 1)
out = x + local_out
out = self.bn(out)
out = self.relu(out)
return out
class HLFeatExtractor(nn.Module):
def __init__(self, input_dim, unit_dim=8):
super(HLFeatExtractor, self).__init__()
self.feat_extract = nn.Sequential(
nn.Conv2d(
input_dim, unit_dim * 12, kernel_size=3, stride=1, padding=1, bias=True
),
nn.BatchNorm2d(unit_dim * 12),
nn.ReLU(inplace=True),
nn.Conv2d(
unit_dim * 12,
unit_dim * 12,
kernel_size=3,
stride=1,
padding=1,
bias=True,
),
nn.BatchNorm2d(unit_dim * 12),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(
unit_dim * 12,
unit_dim * 16,
kernel_size=3,
stride=1,
padding=1,
bias=True,
),
nn.BatchNorm2d(unit_dim * 16),
nn.ReLU(inplace=True),
nn.Conv2d(
unit_dim * 16,
unit_dim * 16,
kernel_size=3,
stride=1,
padding=1,
bias=True,
),
nn.BatchNorm2d(unit_dim * 16),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(
unit_dim * 16,
unit_dim * 20,
kernel_size=3,
stride=1,
padding=1,
bias=True,
),
nn.BatchNorm2d(unit_dim * 20),
nn.ReLU(inplace=True),
nn.Conv2d(
unit_dim * 20,
unit_dim * 20,
kernel_size=3,
stride=1,
padding=1,
bias=True,
),
nn.BatchNorm2d(unit_dim * 20),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
def forward(self, x):
out = self.feat_extract(x)
return out
class HMRegionLearning(nn.Module):
def __init__(self, input_dim=3, unit_dim=8):
super(HMRegionLearning, self).__init__()
self.multiscale_feat = nn.Sequential(
HierarchicalMultiScaleRegionLayer(
[[8, 8], [4, 4], [2, 2]],
input_dim,
unit_dim * 4,
kernel_size=3,
stride=1,
padding=1,
activation_type="ReLU",
),
nn.MaxPool2d(kernel_size=2, stride=2),
HierarchicalMultiScaleRegionLayer(
[[8, 8], [4, 4], [2, 2]],
unit_dim * 4,
unit_dim * 8,
kernel_size=3,
stride=1,
padding=1,
activation_type="ReLU",
),
nn.MaxPool2d(kernel_size=2, stride=2),
)
def forward(self, x):
multiscale_feat = self.multiscale_feat(x)
return multiscale_feat
def generate_map(
map,
crop_size,
map_size,
spatial_ratio,
fill_coeff,
center1_x,
center1_y,
center2_x,
center2_y,
):
spatial_scale = float(map_size) / crop_size
half_AU_size = round((map_size - 1) / 2.0 * spatial_ratio)
centers = np.array([[center1_x, center1_y], [center2_x, center2_y]])
for center_ind in range(centers.shape[0]):
AU_center_x = round(centers[center_ind, 0] * spatial_scale)
AU_center_y = round(centers[center_ind, 1] * spatial_scale)
start_w = round(AU_center_x - half_AU_size)
start_h = round(AU_center_y - half_AU_size)
end_w = round(AU_center_x + half_AU_size)
end_h = round(AU_center_y + half_AU_size)
# treat landmark coordinates as starting from 0 rather than 1
start_h = max(start_h, 0)
start_h = min(start_h, map_size - 1)
start_w = max(start_w, 0)
start_w = min(start_w, map_size - 1)
end_h = max(end_h, 0)
end_h = min(end_h, map_size - 1)
end_w = max(end_w, 0)
end_w = min(end_w, map_size - 1)
for h in range(int(start_h), int(end_h) + 1):
for w in range(int(start_w), int(end_w) + 1):
map[h, w] = max(
1
- (abs(h - AU_center_y) + abs(w - AU_center_x))
* fill_coeff
/ (map_size * spatial_ratio),
map[h, w],
)
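# Hedged worked example for generate_map() above (all concrete values are
# illustrative): with crop_size=176, map_size=44, spatial_ratio=0.14 and
# fill_coeff=0.56 (the last two match the AlignNet defaults below),
# spatial_scale = 44 / 176 = 0.25 and half_AU_size = round(43 / 2 * 0.14) = 3.
# A landmark at (x=100, y=60) then maps to AU center (25, 15), the filled
# window spans w in [22, 28] and h in [12, 18], and the map value is 1.0 at
# the center, decaying by 0.56 / (44 * 0.14) ~= 0.091 per unit of L1 distance
# (about 0.45 at the window corners).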
class AlignNet(nn.Module):
def __init__(
self,
crop_size,
map_size,
au_num,
land_num,
input_dim,
unit_dim=8,
spatial_ratio=0.14,
fill_coeff=0.56,
):
super(AlignNet, self).__init__()
self.align_feat = HLFeatExtractor(input_dim=input_dim, unit_dim=unit_dim)
self.align_output = nn.Sequential(
nn.Linear(4000, unit_dim * 64), nn.Linear(unit_dim * 64, land_num * 2)
)
self.crop_size = crop_size
self.map_size = map_size
self.au_num = au_num
self.land_num = land_num
self.spatial_ratio = spatial_ratio
self.fill_coeff = fill_coeff
def forward(self, x):
align_feat_out = self.align_feat(x)
align_feat = align_feat_out.view(align_feat_out.size(0), -1)
align_output = self.align_output(align_feat)
aus_map = torch.zeros(
(align_output.size(0), self.au_num, self.map_size + 8, self.map_size + 8)
)
for i in range(align_output.size(0)):
land_array = align_output[i, :]
land_array = land_array.data.cpu().numpy()
str_dt = np.append(
land_array[0 : len(land_array) : 2], land_array[1 : len(land_array) : 2]
)
arr2d = np.array(str_dt).reshape((2, self.land_num))
ruler = abs(arr2d[0, 22] - arr2d[0, 25])
# au1
generate_map(
aus_map[i, 0],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 4],
arr2d[1, 4] - ruler / 2,
arr2d[0, 5],
arr2d[1, 5] - ruler / 2,
)
# au2
generate_map(
aus_map[i, 1],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 1],
arr2d[1, 1] - ruler / 3,
arr2d[0, 8],
arr2d[1, 8] - ruler / 3,
)
# au4
generate_map(
aus_map[i, 2],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 2],
arr2d[1, 2] + ruler / 3,
arr2d[0, 7],
arr2d[1, 7] + ruler / 3,
)
# au6
generate_map(
aus_map[i, 3],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 24],
arr2d[1, 24] + ruler,
arr2d[0, 29],
arr2d[1, 29] + ruler,
)
# for bp4d
if self.au_num == 12:
# au7
generate_map(
aus_map[i, 4],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 21],
arr2d[1, 21],
arr2d[0, 26],
arr2d[1, 26],
)
# au10
generate_map(
aus_map[i, 5],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 43],
arr2d[1, 43],
arr2d[0, 45],
arr2d[1, 45],
)
# au12 au14 au15
generate_map(
aus_map[i, 6],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 31],
arr2d[1, 31],
arr2d[0, 37],
arr2d[1, 37],
)
aus_map[i, 7] = aus_map[i, 6]
aus_map[i, 8] = aus_map[i, 6]
# au17
generate_map(
aus_map[i, 9],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 39],
arr2d[1, 39] + ruler / 2,
arr2d[0, 41],
arr2d[1, 41] + ruler / 2,
)
# au23 au24
generate_map(
aus_map[i, 10],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 34],
arr2d[1, 34],
arr2d[0, 40],
arr2d[1, 40],
)
aus_map[i, 11] = aus_map[i, 10]
# for disfa
elif self.au_num == 8:
# au9
generate_map(
aus_map[i, 4],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 15],
arr2d[1, 15] - ruler / 2,
arr2d[0, 17],
arr2d[1, 17] - ruler / 2,
)
# au12
generate_map(
aus_map[i, 5],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 31],
arr2d[1, 31],
arr2d[0, 37],
arr2d[1, 37],
)
# au25
generate_map(
aus_map[i, 6],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 34],
arr2d[1, 34],
arr2d[0, 40],
arr2d[1, 40],
)
# au26
generate_map(
aus_map[i, 7],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 39],
arr2d[1, 39] + ruler / 2,
arr2d[0, 41],
arr2d[1, 41] + ruler / 2,
)
# for gft
elif self.au_num == 10:
# au10
generate_map(
aus_map[i, 4],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 43],
arr2d[1, 43],
arr2d[0, 45],
arr2d[1, 45],
)
# au12 au14 au15
generate_map(
aus_map[i, 5],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 31],
arr2d[1, 31],
arr2d[0, 37],
arr2d[1, 37],
)
aus_map[i, 6] = aus_map[i, 5]
aus_map[i, 7] = aus_map[i, 5]
# au23 au24
generate_map(
aus_map[i, 8],
self.crop_size,
self.map_size + 8,
self.spatial_ratio,
self.fill_coeff,
arr2d[0, 34],
arr2d[1, 34],
arr2d[0, 40],
arr2d[1, 40],
)
aus_map[i, 9] = aus_map[i, 8]
return align_feat_out, align_output, aus_map
class LocalAttentionRefine(nn.Module):
def __init__(self, au_num, unit_dim=8):
super(LocalAttentionRefine, self).__init__()
self.local_aus_attention = nn.ModuleList(
[
nn.Sequential(
nn.Conv2d(1, unit_dim * 8, kernel_size=3, stride=1, bias=True),
nn.BatchNorm2d(unit_dim * 8),
nn.ReLU(inplace=True),
nn.Conv2d(
unit_dim * 8, unit_dim * 8, kernel_size=3, stride=1, bias=True
),
nn.BatchNorm2d(unit_dim * 8),
nn.ReLU(inplace=True),
nn.Conv2d(
unit_dim * 8, unit_dim * 8, kernel_size=3, stride=1, bias=True
),
nn.BatchNorm2d(unit_dim * 8),
nn.ReLU(inplace=True),
nn.Conv2d(unit_dim * 8, 1, kernel_size=3, stride=1, bias=True),
nn.Sigmoid(),
)
for i in range(au_num)
]
)
def forward(self, x):
for i in range(len(self.local_aus_attention)):
initial_au_map = x[:, i, :, :]
initial_au_map = initial_au_map.unsqueeze(1)
au_map = self.local_aus_attention[i](initial_au_map)
if i == 0:
aus_map = au_map
else:
aus_map = torch.cat((aus_map, au_map), 1)
return aus_map
class LocalAUNetv1(nn.Module):
def __init__(self, au_num, input_dim, unit_dim=8):
super(LocalAUNetv1, self).__init__()
self.local_aus_branch = nn.ModuleList(
[
HLFeatExtractor(input_dim=input_dim, unit_dim=unit_dim)
for i in range(au_num)
]
)
def forward(self, feat, aus_map):
for i in range(len(self.local_aus_branch)):
au_map = aus_map[:, i, :, :]
au_map = au_map.unsqueeze(1)
au_feat = feat * au_map
output_au_feat = self.local_aus_branch[i](au_feat)
if i == 0:
aus_feat = output_au_feat
else:
aus_feat = aus_feat + output_au_feat
# average over all AUs
aus_feat = aus_feat / float(len(self.local_aus_branch))
return aus_feat
class LocalAUNetv2(nn.Module):
def __init__(self, au_num, input_dim, unit_dim=8):
super(LocalAUNetv2, self).__init__()
self.local_aus_branch = nn.ModuleList(
[
HLFeatExtractor(input_dim=input_dim, unit_dim=unit_dim)
for i in range(au_num)
]
)
self.local_aus_output = nn.ModuleList(
[
nn.Sequential(nn.Linear(4000, unit_dim * 8), nn.Linear(unit_dim * 8, 2))
for i in range(au_num)
]
)
def forward(self, feat, aus_map):
for i in range(len(self.local_aus_branch)):
au_map = aus_map[:, i, :, :]
au_map = au_map.unsqueeze(1)
au_feat = feat * au_map
output_au_feat = self.local_aus_branch[i](au_feat)
reshape_output_au_feat = output_au_feat.view(output_au_feat.size(0), -1)
au_output = self.local_aus_output[i](reshape_output_au_feat)
au_output = au_output.view(au_output.size(0), 2, int(au_output.size(1) / 2))
au_output = F.log_softmax(au_output, dim=1)
if i == 0:
aus_feat = output_au_feat
aus_output = au_output
else:
aus_feat = aus_feat + output_au_feat
aus_output = torch.cat((aus_output, au_output), 2)
# average over all AUs
aus_feat = aus_feat / float(len(self.local_aus_branch))
return aus_feat, aus_output
class AUNet(nn.Module):
def __init__(self, au_num, input_dim=12000, unit_dim=8):
super(AUNet, self).__init__()
self.au_output = nn.Sequential(
nn.Linear(input_dim, unit_dim * 64), nn.Linear(unit_dim * 64, au_num * 2)
)
def forward(self, x):
x = x.view(x.size(0), -1)
au_output = self.au_output(x)
au_output = au_output.view(au_output.size(0), 2, int(au_output.size(1) / 2))
au_output = F.log_softmax(au_output, dim=1)
return au_output
network_dict = {
"HLFeatExtractor": HLFeatExtractor,
"HMRegionLearning": HMRegionLearning,
"AlignNet": AlignNet,
"LocalAttentionRefine": LocalAttentionRefine,
"LocalAUNetv1": LocalAUNetv1,
"LocalAUNetv2": LocalAUNetv2,
"AUNet": AUNet,
}
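# A construction sketch (not part of the original module): `network_dict` lets callers look up
# sub-networks by name. The hyper-parameters below (crop_size, map_size, au_num, land_num,
# input_dim) are illustrative assumptions, not values prescribed by this file.
if __name__ == "__main__":
    align_net = network_dict["AlignNet"](
        crop_size=176, map_size=44, au_num=12, land_num=49, input_dim=3
    )
    refine_net = network_dict["LocalAttentionRefine"](au_num=12, unit_dim=8)
    print("AlignNet parameters:", sum(p.numel() for p in align_net.parameters()))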
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import shutil
import time
# module
import variable
def clone_Bitbucket(path, repo):
URL = f'https://{variable.bitbucket_user}:{variable.bitbucket_pwd}@bitbucket.org/{variable.bitbucket_team}/{repo}.git'
os.system(f'git clone {URL} {path}') # clone into the given target path
return
def pull_Bitbucket(repo_path, branch):
os.chdir(repo_path)
os.system(f'git pull origin {branch}')
return
def push_Bitbucket(repo_path, msg, branch):
os.chdir(repo_path)
os.system('git add .')
os.system(f'git commit -m "{msg}"')
os.system(f'git push origin {branch}')
return
def update(current_path, db_path, license_path):
""" 將最新的 DB、License 更新到對應的倉庫 """
repo_dir = os.path.join(current_path, "update_repo")
branch = 'master'
commit_time = time.strftime("%Y%m%d_%H%M", time.localtime())
msg = 'License Request server update time : '+commit_time
if not os.path.exists(repo_dir):
os.mkdir(repo_dir)
# update database
repo_path = os.path.join(repo_dir, variable.bitbucket_db_repo)
if not os.path.exists(repo_path):
clone_Bitbucket(repo_path, variable.bitbucket_db_repo)
else:
pull_Bitbucket(repo_path, branch)
if not os.path.exists(os.path.join(repo_path, "CustomerInfoDBv2")):
os.mkdir(os.path.join(repo_path, "CustomerInfoDBv2"))
db_path_des = os.path.join(repo_path, "CustomerInfoDBv2", variable.license_DB)
shutil.copyfile(db_path, db_path_des)
push_Bitbucket(repo_path, msg, branch)
# update license
repo_path = os.path.join(repo_dir, variable.bitbucket_license_repo)
if not os.path.exists(repo_path):
clone_Bitbucket(repo_path, variable.bitbucket_license_repo)
else:
pull_Bitbucket(repo_path, branch)
license_path_des = os.path.join(repo_path, 'share','license','license')
shutil.copyfile(license_path, license_path_des)
push_Bitbucket(repo_path, msg, branch)
return
if __name__ == '__main__':
current_path = os.getcwd()
db_path = f'{current_path}/public/static/{variable.license_DB}'
license_path = f'{current_path}/public/static/license'
update(current_path, db_path, license_path)
|
from django.utils.translation import gettext as _
from rest_framework.test import APITestCase
from paste.constants import _setting
from tests.utils import create_snippet
class SnippetModelTestCase(APITestCase):
"""Tests for the snippet model."""
def test_str_titled(self):
"""A titled snippet's string representation must include its title and
its pk.
"""
snippet = create_snippet('foo', title='bar')
self.assertEqual(str(snippet), f'bar ({snippet.pk})')
def test_str_untitled(self):
"""An untitled snippet's string representation must include the
localized string "Untitled" and its pk.
"""
snippet = create_snippet('foo')
self.assertEqual(str(snippet), f'{_("Untitled")} ({snippet.pk})')
class SettingsTestCase(APITestCase):
"""Tests for the app settings."""
def test_no_dict(self):
"""Setting must have its requested default value if no PASTE dict is
defined in project settings.
"""
value = _setting('FORBID_ANONYMOUS', False)
self.assertFalse(value)
def test_no_key(self):
"""Setting must have its requested default value if no related key
exists in the PASTE dict of project settings.
"""
with self.settings(PASTE={'GUESS_LEXER': True}):
value = _setting('FORBID_ANONYMOUS', False)
self.assertFalse(value)
def test_override(self):
"""Setting must have the value of the item of the project settings'
PASTE dict with the same key.
"""
with self.settings(PASTE={'FORBID_ANONYMOUS': True}):
value = _setting('FORBID_ANONYMOUS', False)
self.assertTrue(value)
|
import re
__all__ = ["plugin_def_list"]
DEFINITION_LIST_PATTERN = re.compile(r"([^\n]+\n(:[ \t][^\n]+\n)+\n?)+")
def parse_def_list(block, m, state):
lines = m.group(0).split("\n")
definition_list_items = []
for line in lines:
if not line:
continue
if line.strip()[0] == ":":
definition_list_items.append(
{"type": "def_list_item", "text": line[1:].strip()}
)
else:
definition_list_items.append(
{"type": "def_list_header", "text": line.strip()}
)
return {"type": "def_list", "children": definition_list_items}
def render_html_def_list(text):
return "<dl>\n" + text + "</dl>\n"
def render_html_def_list_header(text):
return "<dt>" + text + "</dt>\n"
def render_html_def_list_item(text):
return "<dd>" + text + "</dd>\n"
def plugin_def_list(md):
md.block.register_rule("def_list", DEFINITION_LIST_PATTERN, parse_def_list)
md.block.rules.append("def_list")
if md.renderer.NAME == "html":
md.renderer.register("def_list", render_html_def_list)
md.renderer.register("def_list_header", render_html_def_list_header)
md.renderer.register("def_list_item", render_html_def_list_item)
|
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from common_util import mylog
class HostEventHandler(FileSystemEventHandler):
def __init__(self, host):
self._host = host
def dispatch(self, event):
# todo:29 use these events directly, instead of scanning the tree
self._host.acquire_lock()
self._host.signal()
super(HostEventHandler, self).dispatch(event)
self._host.release_lock()
def on_moved(self, event):
super(HostEventHandler, self).on_moved(event)
# print event.event_type
what = 'directory' if event.is_directory else 'file'
# logging.info("Moved %s: from %s to %s", what, event.src_path,
# event.dest_path)
mylog('$$$$$$$ MOVED $$$$$$$$$$ {} $$$$$$$$$$$$$$$$$$$$'.format(event.src_path), '7')
def on_created(self, event):
super(HostEventHandler, self).on_created(event)
# print event.event_type
what = 'directory' if event.is_directory else 'file'
# logging.info("Created %s: %s", what, event.src_path)
mylog('$$$$$$$ Created $$$$$$$$$$ {} $$$$$$$$$$$$$$$$$$$$'.format(event.src_path), '7')
def on_deleted(self, event):
super(HostEventHandler, self).on_deleted(event)
# print event.event_type
what = 'directory' if event.is_directory else 'file'
# logging.info("Deleted %s: %s", what, event.src_path)
mylog('$$$$$$$ Deleted $$$$$$$$$$ {} $$$$$$$$$$$$$$$$$$$$'.format(event.src_path), '7')
def on_modified(self, event):
super(HostEventHandler, self).on_modified(event)
# print event.event_type
what = 'directory' if event.is_directory else 'file'
# logging.info("Modified %s: %s", what, event.src_path)
mylog('$$$$$$$ Modified $$$$$$$$$$ {} $$$$$$$$$$$$$$$$$$$$'.format(event.src_path), '7')
class WatchdogWorker(object):
def __init__(self, host):
self.shutdown_requested = False
# maps cloud roots -> the observer for that root
self.observers = {}
self.observer = Observer()
self.observer.start()
self.event_handler = HostEventHandler(host)
def watch_path(self, cloud_root):
mylog('Watching path <{}>'.format(cloud_root))
self.observers[cloud_root] = \
self.observer.schedule(self.event_handler, cloud_root, recursive=True)
def watch_all_clouds(self, clouds):
for mirror in clouds:
if mirror.root_directory not in self.observers:
self.watch_path(mirror.root_directory)
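# Minimal usage sketch: HostEventHandler only requires a `host` object exposing
# acquire_lock()/signal()/release_lock(), so a stand-in host is enough to watch a directory.
# DummyHost and the five-second run below are illustrative, not part of the project API.
if __name__ == "__main__":
    import time

    class DummyHost(object):
        def acquire_lock(self):
            pass

        def release_lock(self):
            pass

        def signal(self):
            pass

    worker = WatchdogWorker(DummyHost())
    worker.watch_path('.')
    time.sleep(5)  # file events under '.' are logged through mylog() during this window
    worker.observer.stop()
    worker.observer.join()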
|
import requests
import urllib3
from bs4 import BeautifulSoup
import pandas as pd
from datetime import datetime
import re
from warnings import warn
_country = 'Canada'
_src_cat = 'Government Website'
_columns = ['start_date', 'country', 'region', 'subregion', 'source_url', 'source_category', 'source_title', 'source_full_text']
def _load_ontario(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of Ontario.
"""
# Start searching at `end_date` date
end_str = end_date.strftime('%Y/%m/%d')
start_str = start_date.strftime('%Y/%m/%d')
base_url = 'https://news.ontario.ca/en/search?content_type=all&utf8=%E2%9C%93&date_range_end=' + end_str + '&date_range_start=' + start_str + '&date_select=desc&page='
region = 'Ontario'
subregion = ''
# Specific structure for news.ontario.ca/archive
rows = []
page = 1
while True:
if verbose: print('Searching page ', page)
target = base_url + str(page)
response = requests.get(target)
soup = BeautifulSoup(response.text, "html.parser")
articles = soup.findAll('article')
if len(articles) == 0:
if verbose: print('No articles found.')
return pd.DataFrame(rows, columns=_columns)
for article in articles:
smallersoup = BeautifulSoup(str(article), "html.parser")
link = smallersoup.findAll('a')[0]['href']
title = smallersoup.findAll('a')[0].string
pub_date = datetime.strptime(smallersoup.time.string.replace('.', ''), "%B %d, %Y %I:%M %p")
if pub_date < start_date:
return pd.DataFrame(rows, columns=_columns)
if pub_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
response = requests.get(link)
linksoup = BeautifulSoup(response.text, "html.parser")
full_text = linksoup.article.text
row = [pub_date, _country, region, subregion, link, _src_cat, title, full_text]
rows.append(row)
page += 1
def _load_manitoba(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved.
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of Manitoba.
"""
month_start = datetime(start_date.year, start_date.month, 1) # The archive is browsed month by month, so start from the first day of start_date's month; otherwise that month would be skipped entirely.
dates_between = pd.date_range(start=month_start, end=end_date, freq="MS")
url_base = 'https://news.gov.mb.ca'
# reversed to account for the most recent to least recent convention adopted when loading articles
targets = reversed([url_base + '/news/index.html?month=' + str(date.month) + '&year=' + str(date.year) + '&day=01&bgnG=GO&d=' for date in dates_between])
region = 'Manitoba'
subregion = ''
rows = []
for target in targets:
if verbose:
print('Searching link', target)
if target.startswith(url_base):
response = requests.get(target)
soup = BeautifulSoup(response.text, "html.parser")
items = soup.findAll("div", {"class": "maincontent"})
smallersoup = BeautifulSoup(str(items), "html.parser")
for article in smallersoup.findAll('h2'):
a = article.a
relative_link = a['href']
link = url_base + relative_link.split('..')[-1]
title = a.string
response = requests.get(link)
linksoup = BeautifulSoup(response.text, "html.parser")
date_text = linksoup.findAll("span", {"class": "article_date"})[0].string
pub_date = datetime.strptime(date_text, '%B %d, %Y')
if pub_date < start_date:
return pd.DataFrame(rows, columns=_columns)
if pub_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
full_text = linksoup.findAll("div", {"class": ""})[0].text
row = [pub_date, _country, region, subregion, link, _src_cat, title, full_text]
rows.append(row)
return pd.DataFrame(rows, columns=_columns)
def _load_british_columbia(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of British Columbia.
"""
region = 'British Columbia'
subregion = ''
query_url = 'https://news.gov.bc.ca/Search?FromDate=' + start_date.strftime('%Y/%m/%d') + '&toDate=' + end_date.strftime('%Y/%m/%d') + '&Page='
rows = []
page = 1
while True:
if verbose: print("Page ", page)
target = query_url + str(page)
response = requests.get(target)
soup = BeautifulSoup(response.text, "html.parser")
items = soup.findAll("div", {"class": "article"})
if not items:
return pd.DataFrame(rows, columns=_columns)
for article in items:
smallersoup = BeautifulSoup(str(article), "html.parser")
title = smallersoup.a.string
date_text = smallersoup.findAll("div", {"class" : "item-date"})[0].string
pub_date = datetime.strptime(date_text, '%A, %B %d, %Y %I:%M %p')
if pub_date < start_date:
return pd.DataFrame(rows, columns=_columns)
if pub_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
link = smallersoup.a['href']
response = requests.get(link)
linksoup = BeautifulSoup(response.text, "html.parser")
get_article = linksoup.findAll("article")
if get_article:
full_text = get_article[0].text
else:
if verbose: print("Couldn't retrieve full text for link: ", link)
continue
row = [pub_date, _country, region, subregion, link, _src_cat, title, full_text]
rows.append(row)
page += 1
def _load_new_brunswick(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of New Brunswick.
"""
region = 'New Brunswick'
sub_region = ''
url_base = "https://www2.gnb.ca/"
url = url_base + "content/gnb/en/news/recent_news.html?mainContent_par_newslist_start="
start = 0
rows = []
while True:
if verbose: print("Page {}".format(str(start // 25 + 1)))
response = requests.get(url + str(start))
soup = BeautifulSoup(response.content, "html.parser")
article_div = soup.find('div', class_="none padded")
article_soup = BeautifulSoup(str(article_div), 'html.parser')
articles = article_soup.find_all('li')
if len(articles) == 1: # Only button that says "previous page"
return pd.DataFrame(rows, columns=_columns)
for article in articles:
small_soup = BeautifulSoup(str(article), 'html.parser')
ar_date_str = small_soup.find('span', class_="post_date")
if ar_date_str:
ar_date = datetime.strptime(ar_date_str.text, "%d %B %Y")
if ar_date < start_date:
if verbose: print("Stopping search at date {}".format(ar_date))
return pd.DataFrame(rows, columns=_columns)
if ar_date > end_date:
continue
a = article.a
title = a.text
relative_link = a['href']
link = url_base + relative_link
article_page = requests.get(link)
body_soup = BeautifulSoup(article_page.content, 'html.parser')
body = body_soup.find('div', class_="articleBody").text
row = [ar_date, _country, region, sub_region, link, _src_cat, title, body]
rows.append(row)
start += 25 # articles per page
def _load_nova_scotia(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of Nova Scotia.
"""
region = 'Nova Scotia'
sub_region = ''
url_base = "https://novascotia.ca/news"
page = 1
rows = []
while True:
url = url_base + "/search/?page=" + str(page)
if verbose: print("Searching page {}".format(page))
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
titles = soup.find_all('dt', class_="RelTitle")
summaries = soup.find_all('dd', class_="RelSummary")
if not (titles or summaries):
return pd.DataFrame(rows, columns=_columns)
for title, summary in zip(titles, summaries):
if title['lang'] == "fr": continue
ar_date = datetime.strptime(summary.time.text, "%B %d, %Y - %I:%M %p")
if ar_date < start_date:
if verbose: print("Stopping search at date {}".format(ar_date))
return pd.DataFrame(rows, columns=_columns)
if ar_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
relative_link = title.a['href'].split('..', 1)[1]
link = url_base + relative_link
ar_response = requests.get(link)
ar_soup = BeautifulSoup(ar_response.content, 'html.parser')
body = ar_soup.find('div', {'id' : 'releaseBody'}).text
row = [ar_date, _country, region, sub_region, link, _src_cat, title.text, body]
rows.append(row)
page += 1
def _load_northwest_territories(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of the Northwest Territories.
"""
region = 'Northwest Territories'
sub_region = ''
url_base = "https://www.gov.nt.ca/"
page = 0
rows = []
while True:
url = url_base + "en/newsroom?page=" + str(page)
if verbose: print("Searching page {}".format(page + 1))
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
ar_boxes = soup.find_all('div', class_ = re.compile('views-row')) # regex accounts for inconsistent `div` class names
if not ar_boxes:
return pd.DataFrame(rows, columns=_columns)
for box in ar_boxes:
boxed_soup = BeautifulSoup(str(box), 'html.parser') # parse each div
date_str = boxed_soup.find('span').text
ar_date = datetime.strptime(date_str, "%B %d, %Y")
if ar_date < start_date:
if verbose: print("Stopping search at date {}".format(ar_date))
return pd.DataFrame(rows, columns=_columns)
if ar_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
title_a = boxed_soup.find('a')
title = title_a.text
relative_link = title_a['href']
link = url_base + relative_link
ar_res = requests.get(link)
ar_soup = BeautifulSoup(ar_res.content, 'html.parser')
body = ar_soup.find('div', class_ = "field-item even").text
row = [ar_date, _country, region, sub_region, link, _src_cat, title, body]
rows.append(row)
page += 1
def _load_saskatchewan(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of Saskatchewan.
"""
region = 'Saskatchewan'
sub_region = ''
url_base = "https://www.saskatchewan.ca/government/news-and-media?page="
page = 1
rows = []
while True:
url = url_base + str(page)
if verbose: print("Searching page {}".format(page))
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
article_list = soup.find('ul', class_="results")
article_soup = BeautifulSoup(str(article_list), 'html.parser')
list_items = article_soup.find_all('li')
if not list_items:
return pd.DataFrame(rows, columns=_columns)
for item in list_items:
date_str = item.time['datetime']
ar_date = datetime.strptime(date_str, "%Y-%m-%d")
if ar_date < start_date:
if verbose: print("Stopping search at date {}".format(ar_date))
return pd.DataFrame(rows, columns=_columns)
if ar_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
title = item.a.text
link = item.a['href']
body_soup = BeautifulSoup(requests.get(link).content, 'html.parser')
body = body_soup.find('section', class_="general-content").text
row = [ar_date, _country, region, sub_region, link, _src_cat, title, body]
rows.append(row)
page += 1
def _load_nunavut(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of Nunavut.
"""
region = 'Nunavut'
sub_region = ''
url_base = "https://gov.nu.ca"
page = 0
rows = []
while True:
url = url_base + "/news?page=" + str(page)
if verbose: print("Searching page {}".format(page + 1))
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
main_section = soup.find('section', {"id" : "block-system-main"})
main_section_soup = BeautifulSoup(str(main_section), 'html.parser')
divs = main_section_soup.find_all('div', re.compile('views-row(.*)'))
if not divs:
return pd.DataFrame(rows, columns=_columns)
for div in divs:
div_soup = BeautifulSoup(str(div), 'html.parser')
date_str = div_soup.find('span', class_="date-display-single").text
ar_date = datetime.strptime(date_str, "%d %B %Y")
if ar_date < start_date:
if verbose: print("Stopping search at date {}".format(ar_date))
return pd.DataFrame(rows, columns=_columns)
if ar_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
a = div_soup.find('a')
title = a.text
link = url_base + a['href']
body_soup = BeautifulSoup(requests.get(link).content, 'html.parser')
body = body_soup.find('div', class_="region region-content").text
row = [ar_date, _country, region, sub_region, link, _src_cat, title, body]
rows.append(row)
page += 1
def _load_yukon(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of the Yukon.
"""
region = 'Yukon'
sub_region = ''
url_base = "https://yukon.ca"
page = 0
rows = []
while True:
url = url_base + "/news?page=" + str(page)
if verbose: print("Searching page {}".format(page + 1))
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
main_div = soup.find('div', class_ = "view-content")
if not main_div:
return pd.DataFrame(rows, columns=_columns)
main_div_soup = BeautifulSoup(str(main_div), 'html.parser')
divs = main_div_soup.find_all('div', re.compile('views-row(.*)'))
for div in divs:
div_soup = BeautifulSoup(str(div), 'html.parser')
date_str = div_soup.find('small').text
ar_date = datetime.strptime(date_str, "%B %d, %Y")
if ar_date < start_date:
if verbose: print("Stopping search at date {}".format(ar_date))
return pd.DataFrame(rows, columns=_columns)
if ar_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
a = div_soup.find('a')
title = a.text
link = url_base + a['href']
body_soup = BeautifulSoup(requests.get(link).content, 'html.parser')
body = body_soup.find('div', class_="region region-content").text
row = [ar_date, _country, region, sub_region, link, _src_cat, title, body]
rows.append(row)
page += 1
def _load_pei(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of Prince Edward Island.
"""
region = 'Prince Edward Island'
sub_region = ''
url_base = "https://www.princeedwardisland.ca"
page = 0
rows = []
while True:
url = url_base + "/news?page=" + str(page)
if verbose: print("Searching page {}".format(page + 1))
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
divs = soup.find_all('div', class_="right content views-fieldset")
if not divs:
return pd.DataFrame(rows, columns=_columns)
for div in divs:
div_soup = BeautifulSoup(str(div), 'html.parser')
date_str = div_soup.find('div', class_="date").text
ar_date = datetime.strptime(date_str, "%A, %B %d, %Y")
if ar_date < start_date:
if verbose: print("Stopping search at date {}".format(ar_date))
return pd.DataFrame(rows, columns=_columns)
if ar_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
a = div_soup.find('a')
title = a.text
link = url_base + a['href']
body_soup = BeautifulSoup(requests.get(link).content, 'html.parser')
body = body_soup.find('div', class_="maincontentmain").text
row = [ar_date, _country, region, sub_region, link, _src_cat, title, body]
rows.append(row)
page += 1
def _load_alberta(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of Alberta.
"""
region = 'Alberta'
sub_region = ''
days_back = (datetime.today() - start_date).days
url = "https://www.alberta.ca/NewsRoom/newsroom.cfm?numDaysBack=" + str(days_back + 1)
rows = []
response = requests.get(url)
soup = BeautifulSoup(response.content, 'xml')
links = [link.text for link in soup.find_all('link')[2:]] # First two links are not articles
titles = [title.text for title in soup.find_all('title')[2:]] # First two titles are not articles
dates = [date.text for date in soup.find_all('pubDate')]
for link, title, date in zip(links, titles, dates):
ar_date = datetime.strptime(date, "%a, %d %b %Y %H:%M:%S -0600")
if ar_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
if verbose: print('Searching date ' + ar_date.strftime('%B %d, %Y'))
ar_page_soup = BeautifulSoup(requests.get(link).content, 'html.parser')
ar_main = ar_page_soup.find('main')
body_soup = BeautifulSoup(str(ar_main), 'html.parser')
body = body_soup.find('div', class_="goa-grid-100-100-100").text
row = [ar_date, _country, region, sub_region, link, _src_cat, title, body]
rows.append(row)
return pd.DataFrame(rows, columns=_columns)
def _load_quebec(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of Quebec.
"""
region = 'Quebec'
sub_region = ''
url_base = "http://www.fil-information.gouv.qc.ca/Pages/Articles.aspx?lang=en&Page="
page = 1
rows = []
while True:
url = url_base + str(page)
if verbose: print("Searching page {}".format(page))
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
sections = soup.find_all('section', {"id" : "articles"})
for section in sections:
date_str = section.time['datetime']
ar_date = datetime.strptime(date_str, "%Y-%m-%d")
if ar_date < start_date:
if verbose: print("Stopping search at date {}".format(ar_date))
return pd.DataFrame(rows, columns=_columns)
if ar_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
for a in section.find_all('a'):
link = a['href']
title = a.text.replace('\r', '')
title = title.replace('\n', '')
body_soup = BeautifulSoup(requests.get(link).content, 'html.parser')
body = body_soup.find('div', class_="article").text
row = [ar_date, _country, region, sub_region, link, _src_cat, title, body]
rows.append(row)
if not soup.find('li', class_='last'): # No 'go to last page' indicates that this is the last page
if verbose: print("Stopping search at date {}".format(ar_date))
return pd.DataFrame(rows, columns=_columns)
page += 1
def _load_newfoundland(start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of Newfoundland.
"""
region = 'Newfoundland and Labrador'
sub_region = ''
current_year = datetime.today().year
rows = []
for year in range(current_year, start_date.year - 1, -1): # Searches range backwards
url = "https://www.gov.nl.ca/releases/r/?ny=" + str(year) + "&nm=&ntype=&ndept="
http = urllib3.PoolManager()
response = http.request('GET', url)
soup = BeautifulSoup(response.data, 'html.parser')
news_results = soup.find('div', class_ = "news-results")
dates = news_results.find_all('h2')
ar_lists = news_results.find_all('ul')
for date, ar_list in zip(dates, ar_lists):
ar_date = datetime.strptime(date.text + " " + str(year), "%B %d %Y")
if ar_date < start_date:
return pd.DataFrame(rows, columns=_columns)
if ar_date > end_date: # Articles that follow the `end_date` parameter are ignored
continue
if verbose: print("Searching date: " + ar_date.strftime("%B %d %Y"))
for article in ar_list.find_all('li'): # iterate only the <li> entries, skipping stray whitespace nodes
title = article.a.text
link = article.a['href']
body_response = http.request('GET', link)
body_soup = BeautifulSoup(body_response.data, 'html.parser')
body = body_soup.find('div', class_ = "entry-content").text
row = [ar_date, _country, region, sub_region, link, _src_cat, title, body]
rows.append(row)
return pd.DataFrame(rows, columns=_columns)
def _load_province(province, start_date=datetime(2020, 1, 1), end_date=datetime.today(), verbose=True):
"""
Parameters:
- `province`
string, represents the name of the province or territory whose releases are to be retrieved
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, only releases published on or after Jan 1, 2020 are retrieved
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of the specified province or territory.
"""
switcher = {'alberta' : _load_alberta,
'british columbia' : _load_british_columbia,
'manitoba' : _load_manitoba,
'new brunswick' : _load_new_brunswick,
'newfoundland' : _load_newfoundland,
'northwest territories' : _load_northwest_territories,
'nova scotia' : _load_nova_scotia,
'nunavut' : _load_nunavut,
'ontario' : _load_ontario,
'pei' : _load_pei,
'quebec' : _load_quebec,
'saskatchewan' : _load_saskatchewan,
'yukon' : _load_yukon,
}
if province.lower() not in switcher:
warn("Province \'{}\' not recognized".format(province))
return None
if verbose: print("\nLoading {} Releases between {} and {}\n".format(province.upper(), start_date.strftime('%B %d, %Y'), end_date.strftime('%B %d, %Y')))
if start_date > end_date:
if verbose: print("Cannot search between {} and {}".format(start_date, end_date))
return pd.DataFrame([], columns=_columns)
try:
df = switcher[province.lower()](start_date=start_date, end_date=end_date, verbose=verbose)
except Exception as err:
df = pd.DataFrame([], columns=_columns)
print("Could not load new articles for province", province, "-", err)
return df
def _csv_path(province):
"""
Returns the relative CSV path for a given province string
"""
return 'sources/' + province.replace(' ', '').lower() + '.csv'
def load_province(province, start_date=None, end_date=datetime.today(), update_csv=False, verbose=True):
"""
Parameters:
- `province`
string, the name of the province or territory to be loaded
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, this is set to None, which indicates that the search should resume from the most recent date already present in the CSV
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `update_csv`
boolean, whether or not the results from the search should be saved to a CSV. By default, this is set to False.
- `verbose`
boolean, whether or not the function should print updates
Returns: a DataFrame containing news releases from the government of the specified province or territory.
"""
try:
province_df = pd.read_csv(_csv_path(province))
errant_columns = [col for col in province_df.columns if col not in _columns]
province_df = province_df.drop(errant_columns, axis=1)
start_length = len(province_df.index)
province_df["start_date"] = pd.to_datetime(province_df["start_date"])
# Get dates later than in the CSV, unless the `start_date` parameter is not None and gives a later date on which to begin searching. If it's None, a default value of Jan 1 2020 is used.
largest_date = province_df["start_date"].max()
new_start = max(largest_date, start_date or datetime(2020, 1, 1))
late_additions = _load_province(province, start_date=new_start, end_date=end_date, verbose=verbose)
df = late_additions.append(province_df)
# Get dates earlier than in the CSV, unless the `end_date` parameter gives an earlier date on which to stop searching
# end_date defaults to datetime.today(), which lets the program avoid costly searches before the beginning date.
if start_date is not None:
if start_date < datetime(2020, 1, 1):
warn('WARNING: Going back further than government news websites extend may lead to unexpected behaviour.')
earliest_date = province_df["start_date"].min()
early_additions = _load_province(province, start_date=start_date, end_date=min(end_date, earliest_date), verbose=verbose)
df = df.append(early_additions)
except Exception as err:
start_length = 0
print("Could not read file with path", _csv_path(province), "-", err)
df = _load_province(province, start_date=(start_date or datetime(2020, 1, 1)), end_date=end_date, verbose=verbose)
object_columns = df.dtypes[df.dtypes == 'object'].index.values
df[object_columns] = df[object_columns].replace('\n',' ', regex=True)
df[object_columns] = df[object_columns].replace('\r',' ', regex=True)
df = df.drop_duplicates(['source_full_text']) # Potentially useful to look into dropping duplicates based on other attributes
end_length = len(df.index)
if update_csv:
df.to_csv(_csv_path(province))
if verbose:
print('Articles added: ' + str(end_length - start_length))
return df
def load_provinces(start_date=None, end_date=datetime.today(), update_csv=False, verbose=False):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, this is set to None, which indicates that the search should resume from the most recent date already present in the CSV
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date.
- `update_csv`
boolean, whether or not the results from the search should be saved to a CSV. By default, this is set to False.
- `verbose`
boolean, whether or not the function should print updates. By default, this is set to False.
Returns: a dictionary mapping the names of provinces and territories to DataFrames containing information about their new releases.
"""
provinces = ['alberta', 'british columbia', 'manitoba', 'new brunswick', 'newfoundland', 'northwest territories', 'nova scotia', 'nunavut', 'ontario', 'pei', 'quebec', 'saskatchewan', 'yukon']
province_dfs = [load_province(province, start_date=start_date, end_date=end_date, update_csv=update_csv, verbose=verbose) for province in provinces]
return dict(zip(provinces, province_dfs))
def load_all(start_date=None, end_date=datetime.today(), update_csv=False, verbose=False):
"""
Parameters:
- `start_date`
datetime object, the date of the earliest news release to be retrieved. By default, this is set to None, which indicates that the search should resume from the most recent date already present in the CSV
- `end_date`
datetime object, the date of the latest news release to be retrieved. By default, this is set to the current date
- `update_csv`
boolean, whether or not the results from the search should be saved to a CSV. By default, this is set to False.
- `verbose`
boolean, whether or not the function should print updates (False by default)
Returns: a DataFrame containing the information from all provinces and territories.
"""
full_df = pd.DataFrame([], columns=_columns)
province_dict = load_provinces(start_date=start_date, end_date=end_date, update_csv=update_csv, verbose=verbose)
full_df = pd.concat(province_dict.values(), ignore_index=True)
return full_df
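# Example run (illustrative, not part of the module): this hits the government websites over
# the network and may take a while; the one-week window below is arbitrary.
if __name__ == "__main__":
    sample = load_province('ontario', start_date=datetime(2021, 3, 1),
                           end_date=datetime(2021, 3, 8), verbose=True)
    print(sample[['start_date', 'source_title']].head())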
|
import nqs.core as core
from nqs.resources.extensions import py
def write(name: str):
T = core.compile(name)
py.write(T, name)
|
import platform
from rich import print
from .base_info import BaseInfo
class OsInfo(BaseInfo):
"""
Print the user's OS.
"""
def __init__(self):
super().__init__()
def query_os(self):
"""
Get information about OS.
"""
system = platform.platform()
return system
def print_os_info(self, system):
print(f'[bold blue]{system}')
|
from app import db
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
# assumed fix: __init__ below sets self.password, so a column is added to persist it
password = db.Column(db.String(128), nullable=False)
def __init__(self, username, password, name, email) -> None:
self.username = username
self.password = password
self.email = email
def __repr__(self):
return '<User %r>' % self.username
class TradeLog(db.Model):
__tablename__ = 'trocas'
id = db.Column(db.Integer, primary_key=True)
troca = db.Column(db.Text) # registro da troca
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
trade_owner = db.relationship('User', foreign_keys=user_id)
def __init__(self, troca, user_id) -> None:
self.troca = troca
self.user_id = user_id
def __repr__(self):
return '<Troca %r>' % self.id
|
import re
import sys
korean_pattern_str = '가-힣'
number_pattern_str = '0-9'
alphabet_pattern_str = 'a-zA-Z'
punctuation_pattern_str = '.,?!'
doublespace_pattern = re.compile(r'\s+')
repeatchars_pattern = re.compile(r'(\w)\1{3,}')
def normalize(doc, english=False, number=False, punctuation=False,
remove_repeat=0, remains=None, pattern=None):
"""
Arguments
---------
doc : str
Input string to be normalized
english : Boolean
If True, alphabet characters are kept
number : Boolean
If True, digits are kept
punctuation : Boolean
If True, the symbols '.,?!' are kept
remove_repeat : int
If it is a positive integer, runs of four or more identical characters are shortened to `remove_repeat` repetitions
remains : None or str
User-specified characters that should also be kept
pattern : None or re.Pattern
User-specified regular expression pattern to use for normalization.
For example, to keep only Korean characters and alphabets,
>>> pattern = re.compile('[^가-힣a-zA-Z]')
Returns
-------
doc : str
Normalized string
"""
if sys.version_info.major >= 3 and sys.version_info.minor <= 6:
if not isinstance(pattern, re._pattern_type):
pattern = initialize_pattern(english, number, punctuation, remains)
elif sys.version_info.major >= 3 and sys.version_info.minor >= 7:
if not isinstance(pattern, re.Pattern):
pattern = initialize_pattern(english, number, punctuation, remains)
else:
if not isinstance(pattern, re.Pattern):
pattern = initialize_pattern(english, number, punctuation, remains)
if remove_repeat > 0:
doc = repeatchars_pattern.sub('\\1' * remove_repeat, doc)
doc = pattern.sub(' ', doc)
return doublespace_pattern.sub(' ', doc).strip()
def initialize_pattern(english=False, number=False, punctuation=False, remains=None):
"""
Arguments
---------
english : Boolean
If True, alphabet characters are kept
number : Boolean
If True, digits are kept
punctuation : Boolean
If True, the symbols '.,?!' are kept
remains : None or str
User-specified characters that should also be kept
Returns
-------
pattern : re.Pattern
Regular expression pattern
Usage
-----
>>> initialize_pattern(english=True)
$ re.compile(r'[^가-힣a-zA-Z]', re.UNICODE)
"""
pattern = korean_pattern_str
if english:
pattern += alphabet_pattern_str
if number:
pattern += number_pattern_str
if punctuation:
pattern += punctuation_pattern_str
if isinstance(remains, str):
pattern += remains
return re.compile(r'[^%s]' % pattern)
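# Usage sketch: keep Korean syllables and the alphabet, drop other symbols, and shorten runs
# of four or more identical characters to two repetitions.
if __name__ == "__main__":
    print(normalize('와하하하하하 gooooood!!!!', english=True, remove_repeat=2))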
|
import aria2p
from JavHelper.core.ini_file import return_default_config_string
aria2 = aria2p.API(
aria2p.Client(
host=return_default_config_string('aria_address'),
port=int(return_default_config_string('aria_port') or 0),
secret=return_default_config_string('aria_token')
)
)
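# Usage sketch (assumes an aria2c daemon is running with RPC enabled at the configured
# host/port; the download URL below is only a placeholder):
if __name__ == "__main__":
    download = aria2.add_uris(["https://example.com/file.iso"])
    print(download.gid)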
|
##############################################################################################
##############################################################################################
## ##
## ## ## ##### ### # ##### ###### ###### ##### ##### ##### ### # ##
## ## # ## ####### # ## # # ## # ## ## ## # # # ## # ##
## ## ### ## ### ### # ## # # ## # ## ## ## # # # ## # ##
## ## ## ## ## ## ## # ## # # ## ###### ###### ##### ##### ##### # ## # ##
## ## ## ## ## ### ### # ## # # ## # ## ## ## # # # ## # ##
## #### #### ####### # ### # ## # ## ## ## # # # ### ##
## ## ## ##### # ## ##### ###### ## ## ##### ##### ##### # ## ##
## ##
##############################################################################################
##############################################################################################
from typing import List
from Initializers.Initializer import basic_initializer
from JIT import trans_to_jit
import numpy as np
class Dense2d(object):
def __init__(
self,
w:int, # input_shape
h:int, # output_shape
initializer:basic_initializer=None,
use_bias:bool=True,
):
self.w_dim = w
self.h_dim = h
self.use_bias = use_bias
self.initializer = initializer
self.init_params()
self.forward_out = None
def init_params(self):
if self.initializer is not None:
self.weight = np.array(self.initializer.initial([self.h_dim, self.w_dim]))
# self.bias = self.initializer.initial([self.h_dim])
else:
self.weight = np.zeros([self.h_dim, self.w_dim, ])
## initializing the bias with zeros is the recommended choice here
if self.use_bias:
self.bias = np.zeros([self.h_dim])
else:
self.bias = None
def print_params(self):
print('weight =')
for w in self.weight:
print(w)
if self.use_bias:
print('bias =', '\n', self.bias)
def __call__(self, x):
return self.forward(x)
@trans_to_jit
def forward(self, x):
if isinstance(x[0], float): # no batch size
x = [x] # batch_size = 1
x = np.array(x)
self.forward_input = x
# guard against use_bias=False, where self.bias is None
out = x.dot(self.weight.T)
return out if self.bias is None else out + self.bias
@trans_to_jit
def backward(self, d_loss:List[float], lr:float):
'''
@d_loss: shape = [w_grad]
'''
if isinstance(d_loss, float): d_loss = [d_loss]
assert isinstance(d_loss[0], float)
assert len(d_loss) == self.h_dim
d_loss = np.array(d_loss)
## todo: activation functions are not considered yet
# the simplest approach is to just average forward_input over the batch
self.forward_input = self.forward_input.mean(axis=0)
#
if self.use_bias:
b_grad = d_loss
self.bias -= lr * b_grad
##
w_grad = d_loss[:,None].dot(self.forward_input[None,])
self.weight -= lr * w_grad
#
new_d_loss = d_loss.dot(self.weight)
return new_d_loss
class MLP(object):
def __init__(self,
input_h,
output_h,
hidden_size,
layer_num:int=1,
initializer:basic_initializer=None
):
self.hidden_size = hidden_size
self.layer_num = layer_num
self.input_h = input_h
self.output_h = output_h
self.initializer = initializer
self.mlp = self.mlp_builder()
def mlp_builder(self):
## define the mlp builder
mlp = []
if self.layer_num > 1:
mlp.append(Dense2d(self.input_h, self.hidden_size, initializer=self.initializer))
mlp.extend([Dense2d(self.hidden_size, self.hidden_size, initializer=self.initializer) for _ in range(self.layer_num-2)])
mlp.append(Dense2d(self.hidden_size, self.output_h, initializer=self.initializer))
else:
mlp.append(Dense2d(self.input_h, self.output_h, initializer=self.initializer))
return mlp
def __call__(self, x):
return self.forward(x)
def forward(self, x):
for ds in self.mlp:
x = ds(x)
return x
def backward(self, grad, lr):
for ds in self.mlp[::-1]:
grad = ds.backward(grad, lr)
return grad
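# Minimal training-step sketch (illustrative only, assuming the project's @trans_to_jit
# decorator leaves these methods callable as plain Python): with no initializer the weights
# start at zero, and the "gradient" passed to backward() is a hand-made output-minus-target
# vector rather than the derivative of a real loss.
if __name__ == "__main__":
    mlp = MLP(input_h=4, output_h=2, hidden_size=8, layer_num=2)
    out = mlp([0.5, 0.1, -0.3, 0.2])  # a single sample, treated as a batch of one
    grad = [float(g) for g in (out[0] - np.array([1.0, 0.0]))]
    mlp.backward(grad, lr=0.01)
    mlp.mlp[-1].print_params()  # inspect the updated output layer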
|
from tests import utils
import bpy
import animation_retarget
class TestAddon(utils.BaseTestCase):
def test_blinfo(self):
self.assertIsNotNone(animation_retarget.bl_info)
def test_enabled(self):
self.assertIn('animation_retarget', bpy.context.preferences.addons)
|
from interfaces.expr import Number, BinOp, UnaryOp, Bool, Signal
class Visitor:
def dispatch(self, node):
""" Note that it does not necessarily return an object of the Expr class """
assert node
if isinstance(node, BinOp):
return self.visit_binary_op(node)
if isinstance(node, UnaryOp):
return self.visit_unary_op(node)
if isinstance(node, Bool):
return self.visit_bool(node)
if isinstance(node, Signal):
return self.visit_signal(node)
if isinstance(node, Number):
return self.visit_number(node)
if isinstance(node, tuple):
return self.visit_tuple(node)
assert 0, 'unknown node type ' + str(node.__class__) + ': ' + str(node)
def visit_binary_op(self, binary_op:BinOp):
return BinOp(binary_op.name,
self.dispatch(binary_op.arg1),
self.dispatch(binary_op.arg2))
def visit_unary_op(self, unary_op:UnaryOp):
return UnaryOp(unary_op.name, self.dispatch(unary_op.arg))
def visit_bool(self, bool_const:Bool):
return bool_const
def visit_signal(self, signal:Signal):
return signal
def visit_number(self, number:Number):
return number
def visit_tuple(self, node:tuple):
return node
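# Usage sketch: subclass Visitor to collect every Signal reachable from an expression tree.
# `expr` in the commented lines stands for any already-built interfaces.expr expression; its
# construction is outside the scope of this module.
class SignalCollector(Visitor):
    def __init__(self):
        self.signals = []

    def visit_signal(self, signal:Signal):
        self.signals.append(signal)
        return signal

# collector = SignalCollector()
# collector.dispatch(expr)
# print(collector.signals)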
|
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import os
import typing
from copy import deepcopy
from io import StringIO
from sys import stderr
import pandas as pd
from kubernetes import client
import mlrun
from mlrun.db import get_run_db
from mlrun.k8s_utils import get_k8s_helper
from mlrun.runtimes.constants import MPIJobCRDVersions
from ..artifacts import TableArtifact
from ..config import config
from ..utils import get_in, helpers, logger
from .generators import selector
class RunError(Exception):
pass
mlrun_key = "mlrun/"
class _ContextStore:
def __init__(self):
self._context = None
def get(self):
return self._context
def set(self, context):
self._context = context
global_context = _ContextStore()
cached_mpijob_crd_version = None
# resolve mpijob runtime according to the mpi-operator's supported crd-version
# if specified on mlrun config set it likewise,
# if not specified, try resolving it according to the mpi-operator, otherwise set to default
# since this is a heavy operation (sending requests to k8s/API), and it's unlikely that the crd version
# will change in any context - cache it
def resolve_mpijob_crd_version(api_context=False):
global cached_mpijob_crd_version
if not cached_mpijob_crd_version:
# config overrides everything
mpijob_crd_version = config.mpijob_crd_version
if not mpijob_crd_version:
in_k8s_cluster = get_k8s_helper(
silent=True
).is_running_inside_kubernetes_cluster()
if in_k8s_cluster:
k8s_helper = get_k8s_helper()
namespace = k8s_helper.resolve_namespace()
# try resolving according to mpi-operator that's running
res = k8s_helper.list_pods(
namespace=namespace, selector="component=mpi-operator"
)
if len(res) > 0:
mpi_operator_pod = res[0]
mpijob_crd_version = mpi_operator_pod.metadata.labels.get(
"crd-version"
)
elif not in_k8s_cluster and not api_context:
# connect will populate the config from the server config
# TODO: something nicer
get_run_db()
mpijob_crd_version = config.mpijob_crd_version
# If resolution failed simply use default
if not mpijob_crd_version:
mpijob_crd_version = MPIJobCRDVersions.default()
if mpijob_crd_version not in MPIJobCRDVersions.all():
raise ValueError(
f"unsupported mpijob crd version: {mpijob_crd_version}. "
f"supported versions: {MPIJobCRDVersions.all()}"
)
cached_mpijob_crd_version = mpijob_crd_version
return cached_mpijob_crd_version
def calc_hash(func, tag=""):
# remove tag, hash, date from calculation
tag = tag or func.metadata.tag
status = func.status
func.metadata.tag = ""
func.metadata.hash = ""
func.status = None
func.metadata.updated = None
data = json.dumps(func.to_dict(), sort_keys=True).encode()
h = hashlib.sha1()
h.update(data)
hashkey = h.hexdigest()
func.metadata.tag = tag
func.metadata.hash = hashkey
func.status = status
return hashkey
def log_std(db, runobj, out, err="", skip=False, show=True):
if out:
iteration = runobj.metadata.iteration
if iteration:
line = "> " + "-" * 15 + f" Iteration: ({iteration}) " + "-" * 15 + "\n"
out = line + out
if show:
print(out, flush=True)
if db and not skip:
uid = runobj.metadata.uid
project = runobj.metadata.project or ""
db.store_log(uid, project, out.encode(), append=True)
if err:
logger.error(f"exec error - {err}")
print(err, file=stderr)
raise RunError(err)
class AsyncLogWriter:
def __init__(self, db, runobj):
self.db = db
self.uid = runobj.metadata.uid
self.project = runobj.metadata.project or ""
self.iter = runobj.metadata.iteration
def write(self, data):
if self.db:
self.db.store_log(self.uid, self.project, data, append=True)
def flush(self):
# todo: verify writes are large enough, if not cache and use flush
pass
def add_code_metadata(path=""):
if path:
if "://" in path:
return None
if os.path.isfile(path):
path = os.path.dirname(path)
path = path or "./"
try:
from git import (
GitCommandNotFound,
InvalidGitRepositoryError,
NoSuchPathError,
Repo,
)
except ImportError:
return None
try:
repo = Repo(path, search_parent_directories=True)
remotes = [remote.url for remote in repo.remotes]
if len(remotes) > 0:
return f"{remotes[0]}#{repo.head.commit.hexsha}"
except (GitCommandNotFound, InvalidGitRepositoryError, NoSuchPathError, ValueError):
pass
return None
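# Example: when called inside a git checkout that has at least one remote,
# add_code_metadata("src/handler.py") returns a string of the form "<remote-url>#<head-commit-sha>";
# for remote paths (containing "://"), missing git metadata, or a missing GitPython package it
# returns None. The file path above is only a placeholder.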
def set_if_none(struct, key, value):
if not struct.get(key):
struct[key] = value
def results_to_iter(results, runspec, execution):
if not results:
logger.error("got an empty results list in to_iter")
return
iter = []
failed = 0
running = 0
for task in results:
if task:
state = get_in(task, ["status", "state"])
id = get_in(task, ["metadata", "iteration"])
struct = {
"param": get_in(task, ["spec", "parameters"], {}),
"output": get_in(task, ["status", "results"], {}),
"state": state,
"iter": id,
}
if state == "error":
failed += 1
err = get_in(task, ["status", "error"], "")
logger.error(f"error in task {execution.uid}:{id} - {err}")
elif state != "completed":
running += 1
iter.append(struct)
if not iter:
execution.set_state("completed", commit=True)
logger.warning("warning!, zero iteration results")
return
if hasattr(pd, "json_normalize"):
df = pd.json_normalize(iter).sort_values("iter")
else:
df = pd.io.json.json_normalize(iter).sort_values("iter")
header = df.columns.values.tolist()
summary = [header] + df.values.tolist()
if not runspec:
return summary
criteria = runspec.spec.hyper_param_options.selector
item, id = selector(results, criteria)
if runspec.spec.selector and not id:
logger.warning(
f"no best result selected, check selector ({criteria}) or results"
)
if id:
logger.info(f"best iteration={id}, used criteria {criteria}")
task = results[item] if id and results else None
execution.log_iteration_results(id, summary, task)
csv_buffer = StringIO()
df.to_csv(csv_buffer, index=False, line_terminator="\n", encoding="utf-8")
execution.log_artifact(
TableArtifact(
"iteration_results",
body=csv_buffer.getvalue(),
header=header,
viewer="table",
),
local_path="iteration_results.csv",
)
if failed:
execution.set_state(
error=f"{failed} of {len(results)} tasks failed, check logs in db for details",
commit=False,
)
elif running == 0:
execution.set_state("completed", commit=False)
execution.commit()
def generate_function_image_name(function):
project = function.metadata.project or config.default_project
tag = function.metadata.tag or "latest"
_, repository = helpers.get_parsed_docker_registry()
if not repository:
repository = "mlrun"
return f".{repository}/func-{project}-{function.metadata.name}:{tag}"
def set_named_item(obj, item):
if isinstance(item, dict):
obj[item["name"]] = item
else:
obj[item.name] = item
def get_item_name(item, attr="name"):
if isinstance(item, dict):
return item.get(attr)
else:
return getattr(item, attr, None)
def apply_kfp(modify, cop, runtime):
modify(cop)
# Have to do it here to avoid circular dependencies
from .pod import AutoMountType
# Check if modifier is one of the known mount modifiers. We need to use startswith since the modifier itself is
# a nested function returned from the modifier function (such as 'v3io_cred.<locals>._use_v3io_cred')
modifier_name = modify.__qualname__
if any(
modifier_name.startswith(mount_modifier)
for mount_modifier in AutoMountType.all_mount_modifiers()
):
runtime.spec.mount_applied = True
api = client.ApiClient()
for k, v in cop.pod_labels.items():
runtime.metadata.labels[k] = v
for k, v in cop.pod_annotations.items():
runtime.metadata.annotations[k] = v
if cop.container.env:
env_names = [
e.name if hasattr(e, "name") else e["name"] for e in runtime.spec.env
]
for e in api.sanitize_for_serialization(cop.container.env):
name = e["name"]
if name in env_names:
runtime.spec.env[env_names.index(name)] = e
else:
runtime.spec.env.append(e)
env_names.append(name)
cop.container.env.clear()
if cop.volumes and cop.container.volume_mounts:
vols = api.sanitize_for_serialization(cop.volumes)
mounts = api.sanitize_for_serialization(cop.container.volume_mounts)
runtime.spec.update_vols_and_mounts(vols, mounts)
cop.volumes.clear()
cop.container.volume_mounts.clear()
return runtime
def get_resource_labels(function, run=None, scrape_metrics=None):
scrape_metrics = (
scrape_metrics if scrape_metrics is not None else config.scrape_metrics
)
run_uid, run_name, run_project, run_owner = None, None, None, None
if run:
run_uid = run.metadata.uid
run_name = run.metadata.name
run_project = run.metadata.project
run_owner = run.metadata.labels.get("owner")
labels = deepcopy(function.metadata.labels)
labels[mlrun_key + "class"] = function.kind
labels[mlrun_key + "project"] = run_project or function.metadata.project
labels[mlrun_key + "function"] = str(function.metadata.name)
labels[mlrun_key + "tag"] = str(function.metadata.tag or "latest")
labels[mlrun_key + "scrape-metrics"] = str(scrape_metrics)
if run_uid:
labels[mlrun_key + "uid"] = run_uid
if run_name:
labels[mlrun_key + "name"] = run_name
if run_owner:
labels[mlrun_key + "owner"] = run_owner
return labels
def generate_resources(mem=None, cpu=None, gpus=None, gpu_type="nvidia.com/gpu"):
"""get pod cpu/memory/gpu resources dict"""
resources = {}
if gpus:
resources[gpu_type] = gpus
if mem:
resources["memory"] = mem
if cpu:
resources["cpu"] = cpu
return resources
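# Hedged usage sketch (illustrative values):
#   generate_resources(mem="2Gi", cpu="500m", gpus=1)
#   -> {"nvidia.com/gpu": 1, "memory": "2Gi", "cpu": "500m"}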
def get_func_selector(project, name=None, tag=None):
s = [f"{mlrun_key}project={project}"]
if name:
s.append(f"{mlrun_key}function={name}")
s.append(f"{mlrun_key}tag={tag or 'latest'}")
return s
def parse_function_selector(selector: typing.List[str]) -> typing.Tuple[str, str, str]:
project, name, tag = None, None, None
for criteria in selector:
if f"{mlrun_key}project=" in criteria:
project = criteria[f"{mlrun_key}project=":]
if f"{mlrun_key}function=" in criteria:
name = criteria[f"{mlrun_key}function=":]
if f"{mlrun_key}tag=" in criteria:
tag = criteria[f"{mlrun_key}tag=":]
return project, name, tag
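# Hedged round-trip sketch, assuming mlrun_key resolves to "mlrun/" (illustrative only):
#   get_func_selector("my-proj", "trainer", "v1")
#   -> ["mlrun/project=my-proj", "mlrun/function=trainer", "mlrun/tag=v1"]
#   parse_function_selector(["mlrun/project=my-proj", "mlrun/function=trainer", "mlrun/tag=v1"])
#   -> ("my-proj", "trainer", "v1")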
class k8s_resource:
kind = ""
per_run = False
per_function = False
k8client = None
def deploy_function(self, function):
pass
def release_function(self, function):
pass
def submit_run(self, function, runobj):
pass
def get_object(self, name, namespace=None):
return None
def get_status(self, name, namespace=None):
return None
def del_object(self, name, namespace=None):
pass
def list_objects(self, namespace=None, selector=[], states=None):
return []
def get_pods(self, name, namespace=None, master=False):
return {}
def clean_objects(self, namespace=None, selector=[], states=None):
if not selector and not states:
raise ValueError("labels selector or states list must be specified")
items = self.list_objects(namespace, selector, states)
for item in items:
self.del_object(item.metadata.name, item.metadata.namespace)
def enrich_function_from_dict(function, function_dict):
override_function = mlrun.new_function(runtime=function_dict, kind=function.kind)
for attribute in [
"volumes",
"volume_mounts",
"env",
"resources",
"image_pull_policy",
"replicas",
"node_name",
"node_selector",
"affinity",
"priority_class_name",
]:
override_value = getattr(override_function.spec, attribute, None)
if override_value:
if attribute == "env":
for env_dict in override_value:
function.set_env(env_dict["name"], env_dict["value"])
elif attribute == "volumes":
function.spec.update_vols_and_mounts(override_value, [])
elif attribute == "volume_mounts":
# volume mounts don't have a well defined identifier (like name for volume) so we can't merge,
# only override
function.spec.volume_mounts = override_value
elif attribute == "resources":
                # only override resources when limits or requests are actually set (non-empty)
if override_value.get("limits", {}) or override_value.get(
"requests", {}
):
setattr(function.spec, attribute, override_value)
else:
setattr(function.spec, attribute, override_value)
return function
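# Hedged usage sketch for enrich_function_from_dict (names and values are illustrative):
#   base_fn = mlrun.new_function("trainer", kind="job")
#   overrides = {
#       "spec": {
#           "env": [{"name": "LOG_LEVEL", "value": "debug"}],
#           "resources": {"limits": {"cpu": "1"}},
#       }
#   }
#   base_fn = enrich_function_from_dict(base_fn, overrides)
# Environment variables are merged by name, while resources are only overridden when
# limits or requests are actually set.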
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipaySecurityRiskRiskprofileQueryModel(object):
def __init__(self):
self._request_from = None
self._request_id = None
self._risk_object = None
self._risk_object_value = None
self._risk_profile = None
@property
def request_from(self):
return self._request_from
@request_from.setter
def request_from(self, value):
self._request_from = value
@property
def request_id(self):
return self._request_id
@request_id.setter
def request_id(self, value):
self._request_id = value
@property
def risk_object(self):
return self._risk_object
@risk_object.setter
def risk_object(self, value):
if isinstance(value, list):
self._risk_object = list()
for i in value:
self._risk_object.append(i)
@property
def risk_object_value(self):
return self._risk_object_value
@risk_object_value.setter
def risk_object_value(self, value):
if isinstance(value, list):
self._risk_object_value = list()
for i in value:
self._risk_object_value.append(i)
@property
def risk_profile(self):
return self._risk_profile
@risk_profile.setter
def risk_profile(self, value):
if isinstance(value, list):
self._risk_profile = list()
for i in value:
self._risk_profile.append(i)
def to_alipay_dict(self):
params = dict()
if self.request_from:
if hasattr(self.request_from, 'to_alipay_dict'):
params['request_from'] = self.request_from.to_alipay_dict()
else:
params['request_from'] = self.request_from
if self.request_id:
if hasattr(self.request_id, 'to_alipay_dict'):
params['request_id'] = self.request_id.to_alipay_dict()
else:
params['request_id'] = self.request_id
if self.risk_object:
if isinstance(self.risk_object, list):
for i in range(0, len(self.risk_object)):
element = self.risk_object[i]
if hasattr(element, 'to_alipay_dict'):
self.risk_object[i] = element.to_alipay_dict()
if hasattr(self.risk_object, 'to_alipay_dict'):
params['risk_object'] = self.risk_object.to_alipay_dict()
else:
params['risk_object'] = self.risk_object
if self.risk_object_value:
if isinstance(self.risk_object_value, list):
for i in range(0, len(self.risk_object_value)):
element = self.risk_object_value[i]
if hasattr(element, 'to_alipay_dict'):
self.risk_object_value[i] = element.to_alipay_dict()
if hasattr(self.risk_object_value, 'to_alipay_dict'):
params['risk_object_value'] = self.risk_object_value.to_alipay_dict()
else:
params['risk_object_value'] = self.risk_object_value
if self.risk_profile:
if isinstance(self.risk_profile, list):
for i in range(0, len(self.risk_profile)):
element = self.risk_profile[i]
if hasattr(element, 'to_alipay_dict'):
self.risk_profile[i] = element.to_alipay_dict()
if hasattr(self.risk_profile, 'to_alipay_dict'):
params['risk_profile'] = self.risk_profile.to_alipay_dict()
else:
params['risk_profile'] = self.risk_profile
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipaySecurityRiskRiskprofileQueryModel()
if 'request_from' in d:
o.request_from = d['request_from']
if 'request_id' in d:
o.request_id = d['request_id']
if 'risk_object' in d:
o.risk_object = d['risk_object']
if 'risk_object_value' in d:
o.risk_object_value = d['risk_object_value']
if 'risk_profile' in d:
o.risk_profile = d['risk_profile']
return o
|
# train.py
#!/usr/bin/env python3
""" stage 2 CPN key point detection eval """
import os
import sys
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from conf import settings
from utils import get_network, calc_mi, visualization_highq, \
gen_outputcsv_file, calc_rotation_diff
from CPN import CoordRegressionNetwork
from dsnt import dsntnn
import matplotlib.pyplot as plt
import pandas as pd
import PIL
from PIL import Image
import math
import albumentations as A
from albumentations.pytorch import ToTensorV2
import cv2
from sklearn.metrics import mean_absolute_error
def dataset_generate_hip(_IMAGE_DIR, label_file, mode, model_img, rot_diff, visual_mode, visual_path, Resize):
image_name = label_file[['filename']]
MP_right, MP_left = label_file[['mi_right']].fillna(np.nan), label_file[['mi_left']].fillna(np.nan)
keypoint_right = [label_file[['med_head_right']], label_file[['lat_head_right']], label_file[['lat_acetabulum_right']]]
keypoint_left = [label_file[['med_head_left']], label_file[['lat_head_left']], label_file[['lat_acetabulum_left']]]
dataset = []
model_mp_right, model_mp_left = [], []
if visual_mode:
v_path = os.path.join(visual_path,'base_img_{}'.format(mode))
if not os.path.exists(v_path):
os.makedirs(v_path)
for i in range(len(model_img)):
component = []
for idx in range(len(image_name)):
q = model_img[i]
k = image_name.iloc[idx].iat[0]
if q == k:
key_right_list = []
for key in keypoint_right:
key_right_list.append(float(key.iloc[idx].iat[0].split(', ')[0].split('(')[1]))
key_right_list.append(float(key.iloc[idx].iat[0].split(', ')[1].split(')')[0]))
key_left_list = []
for key in keypoint_left:
key_left_list.append(float(key.iloc[idx].iat[0].split(', ')[0].split('(')[1]))
key_left_list.append(float(key.iloc[idx].iat[0].split(', ')[1].split(')')[0]))
gt_mp_right = float(MP_right.iloc[idx].iat[0])
gt_mp_left = float(MP_left.iloc[idx].iat[0])
image_path = os.path.join(_IMAGE_DIR, model_img[i])
print('loading: {}'.format(image_path))
image = cv2.imread(image_path)
h, w, _ = image.shape
#Transform the GT upright image in stage2 to model upright image (stage1)
train_transform = A.Compose([
A.Affine(rotate=rot_diff[i],p=1.0)
], keypoint_params=A.KeypointParams(format='xy'))
keypoints = [
(key_right_list[0], key_right_list[1]),(key_right_list[2], key_right_list[3]),
(key_right_list[4], key_right_list[5]),(key_left_list[0], key_left_list[1]),
(key_left_list[2], key_left_list[3]),(key_left_list[4], key_left_list[5]),
]
transformed = train_transform(image=image, keypoints=keypoints)
img_trans = transformed['image']
keyps_trans = transformed['keypoints']
#Data augmentation for the model upright image (stage1)
scale = 1
rc_size = int(min(image.shape[0:2])*scale)
train_transform_2 = A.Compose([
A.CenterCrop(width=rc_size, height=rc_size),
A.Resize(Resize, Resize),
A.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.247, 0.243, 0.261)),
ToTensorV2(),
], keypoint_params=A.KeypointParams(format='xy'))
keypoints_2 = [
(keyps_trans[0][0], keyps_trans[0][1]),(keyps_trans[1][0], keyps_trans[1][1]),
(keyps_trans[2][0], keyps_trans[2][1]),(keyps_trans[3][0], keyps_trans[3][1]),
(keyps_trans[4][0], keyps_trans[4][1]),(keyps_trans[5][0], keyps_trans[5][1]),
]
transformed = train_transform_2(image=img_trans, keypoints=keypoints_2)
img_trans_2 = transformed['image']
keyps_trans_2 = transformed['keypoints']
keys = []
for k in range(6):
keys.append([(keyps_trans_2[k][0]*2+1)/Resize-1,(keyps_trans_2[k][1]*2+1)/Resize-1])
keys = torch.Tensor(keys)
mp_right = calc_mi(keyps_trans_2[0][0],keyps_trans_2[1][0],keyps_trans_2[2][0],'right')
mp_left = calc_mi(keyps_trans_2[3][0],keyps_trans_2[4][0],keyps_trans_2[5][0],'left')
model_mp_right.append(mp_right)
model_mp_left.append(mp_left)
print(mp_right, mp_left)
# #save the model upright image (stage1) for visualization
# scale = 1
# rc_size = int(min(image.shape[0:2])*scale)
# train_transform_3 = A.Compose([
# A.CenterCrop(width=rc_size, height=rc_size),
# A.Resize(Resize, Resize),
# ])
# transformed = train_transform_3(image=img_trans)
# img_trans_3 = transformed['image']
# cv2.imwrite(os.path.join(v_path, str(model_img[i])), img_trans_3)
# #save the model upright image (stage1) for visualization
if visual_mode:
cv2.imwrite(os.path.join(v_path, str(model_img[i])), img_trans)
component.append(img_trans_2)# image
component.append(keys)
component.append(gt_mp_right)
component.append(gt_mp_left)
component.append(model_img[i])
component = tuple(component)
dataset.append(component)
return dataset
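# Hedged sketch of the coordinate convention used above and inverted in the evaluation
# loop below: a pixel coordinate x maps to (2*x + 1)/Resize - 1 in [-1, 1], and is
# recovered with ((x_norm + 1)*Resize - 1)/2. These helpers are illustrative only and
# are not used by the script.
def _to_normalized(px, size):
    # forward mapping used when building `keys`
    return (px * 2 + 1) / size - 1

def _to_pixel(x_norm, size):
    # inverse mapping used when computing the MP values from keyps/coords
    return ((x_norm + 1) * size - 1) / 2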
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-net', type=str, default='CPN', help='net type')
parser.add_argument('-BASE_DIR', type=str, default='../data', help='access data/label')
parser.add_argument('-LABEL_DIR', type=str, default='stage_2_labels_processed', help='access label file')
parser.add_argument('-GT_STAGE1_LABEL', type=str, default='stage_1_labels_processed', help='access label file')
parser.add_argument('-MODEL_STAGE1_PRED', type=str, default='avgR_MP_update_data', help='access label file')
parser.add_argument('-MODE', type=str, default='test', choices=['val', 'test'], help='evaluate on validation or test set')
parser.add_argument('-VISUAL_MODE', action='store_true', help='ON / OFF visualization')
parser.add_argument('-VISUAL_PATH', type=str, default='visualization', help='create visualization results on images')
parser.add_argument('-CSVFILE_MODE', action='store_true', help='ON / OFF generate csv file')
parser.add_argument('-weights', type=str, required=True, help='the weights file you want to test')
parser.add_argument('-gpu', action='store_true', default=False, help='use gpu or not')
args = parser.parse_args()
    # Hip data loading
_IMAGE_DIR = os.path.join(args.BASE_DIR, 'stage_2')
label_file = pd.read_csv(os.path.join(args.BASE_DIR, 'label', args.LABEL_DIR+'_{}.csv'.format(args.MODE)))#the baseline
gt_stage1_label = os.path.join(args.BASE_DIR, 'label', args.GT_STAGE1_LABEL+'_{}.csv'.format(args.MODE))
model_stage1_pred = os.path.join(args.BASE_DIR, 'label', args.MODEL_STAGE1_PRED+'_{}.csv'.format(args.MODE))
model_image, rotation_diff = calc_rotation_diff(gt_stage1_label, model_stage1_pred)
testset_hip = dataset_generate_hip(_IMAGE_DIR, label_file, args.MODE, model_image, \
rotation_diff, args.VISUAL_MODE, args.VISUAL_PATH, Resize=224)
testloader_hip = torch.utils.data.DataLoader(testset_hip, batch_size=1, shuffle=False, drop_last=True, num_workers=16)
    ndata = len(testset_hip)
print('number of {} hip data for evaluation: {}'.format(args.MODE, ndata))
net = CoordRegressionNetwork().cuda()
net.load_state_dict(torch.load(args.weights))
print(net)
net.eval()
csv_data = []
csv_file = []
total_mp_right_mae = 0
total_mp_left_mae = 0
resize = 224
with torch.no_grad():
for n_iter, (images, keyps, gt_mp_right, gt_mp_left, img_name) in enumerate(testloader_hip):
print("iteration: {}\ttotal {} iterations".format(n_iter + 1, len(testloader_hip)))
coords, heatmaps = net(images.cuda())
mp_right_label = calc_mi(((keyps[0][0][0].item()+1)*resize-1)/2,\
((keyps[0][1][0].item()+1)*resize-1)/2,((keyps[0][2][0].item()+1)*resize-1)/2,'right')
mp_left_label = calc_mi(((keyps[0][3][0].item()+1)*resize-1)/2,\
((keyps[0][4][0].item()+1)*resize-1)/2,((keyps[0][5][0].item()+1)*resize-1)/2,'left')
mp_right_pred = calc_mi(((coords[0][0][0].item()+1)*resize-1)/2, \
((coords[0][1][0].item()+1)*resize-1)/2,((coords[0][2][0].item()+1)*resize-1)/2,'right')
mp_left_pred = calc_mi(((coords[0][3][0].item()+1)*resize-1)/2, \
((coords[0][4][0].item()+1)*resize-1)/2,((coords[0][5][0].item()+1)*resize-1)/2,'left')
mp_right_mae = mean_absolute_error([mp_right_pred], [mp_right_label])
mp_left_mae = mean_absolute_error([mp_left_pred], [mp_left_label])
print('Image:{} | MP RIGHT MAE:{} | MP LEFT MAE:{}'.format(img_name[0],mp_right_mae,mp_left_mae))
#Visualization of upright image with key points
#visualization(args.VISUAL_PATH, args.MODE, img_name, keyps, coords, resize=224, num_key=6)
if args.VISUAL_MODE:
mp_legend = [round(mp_right_label,2)*100,round(mp_left_label,2)*100, \
round(mp_right_pred,2)*100,round(mp_left_pred,2)*100]
visualization_highq(args.VISUAL_PATH, args.MODE, img_name, \
keyps, coords, mp_legend, resize=224, num_key=6)
total_mp_right_mae = total_mp_right_mae + mp_right_mae
total_mp_left_mae = total_mp_left_mae + mp_left_mae
csv_file.append([img_name[0],round(gt_mp_right[0].item(),2),round(gt_mp_left[0].item(),2), \
round(mp_right_pred,2),round(mp_left_pred,2)])
print('AVG MP RIGHT MAE: {}'.format(total_mp_right_mae/len(testloader_hip)))
print('AVG MP LEFT MAE: {}'.format(total_mp_left_mae/len(testloader_hip)))
if args.CSVFILE_MODE:
gen_outputcsv_file(args.VISUAL_PATH, args.MODE, csv_file)
|
import cv2
import numpy as np
from personal_color import predict_Personal_Color
def predict_personal_color(img):
rate = predict_Personal_Color(img)
result = max(rate)
return {'result': result}
if __name__ == "__main__":
pass
|
import numpy as np
import pytest
from hypothesis import assume, given, settings, strategies as st
import mephisto as mp
# change the default profile so as not to trigger on deadline changes
settings.register_profile('ci', database=None, deadline=None)
settings.load_profile('ci')
def test_histogram_a_ndarray():
a = np.sin(np.linspace(0, 10*np.pi, 1000))
hist_np, bin_edges_np = np.histogram(a)
hist_mp, bin_edges_mp = mp.histogram(a)
assert bin_edges_np.dtype == bin_edges_mp.dtype
assert bin_edges_np.ndim == bin_edges_mp.ndim
assert bin_edges_np.shape == bin_edges_mp.shape
assert np.allclose(bin_edges_np, bin_edges_mp)
assert hist_np.dtype == hist_mp.dtype
assert hist_np.ndim == hist_mp.ndim
assert hist_np.shape == hist_mp.shape
assert hist_np.sum() == hist_mp.sum()
assert np.array_equal(hist_np, hist_mp)
def test_histogram_a_list():
a = [1, 1, 2, 2, 3]
hist_np, bin_edges_np = np.histogram(a)
hist_mp, bin_edges_mp = mp.histogram(a)
assert bin_edges_np.dtype == bin_edges_mp.dtype
assert bin_edges_np.ndim == bin_edges_mp.ndim
assert bin_edges_np.shape == bin_edges_mp.shape
assert np.allclose(bin_edges_np, bin_edges_mp)
assert hist_np.dtype == hist_mp.dtype
assert hist_np.ndim == hist_mp.ndim
assert hist_np.shape == hist_mp.shape
assert hist_np.sum() == hist_mp.sum()
assert np.array_equal(hist_np, hist_mp)
@given(st.integers(min_value=1, max_value=16))
def test_histogram_bins_scalar(bins):
rng = np.random.RandomState(42)
a = rng.normal(size=1000)
hist_np, bin_edges_np = np.histogram(a, bins)
hist_mp, bin_edges_mp = mp.histogram(a, bins)
assert bin_edges_np.dtype == bin_edges_mp.dtype
assert bin_edges_np.ndim == bin_edges_mp.ndim
assert bin_edges_np.shape == bin_edges_mp.shape
assert np.allclose(bin_edges_np, bin_edges_mp)
assert hist_np.dtype == hist_mp.dtype
assert hist_np.ndim == hist_mp.ndim
assert hist_np.shape == hist_mp.shape
assert hist_np.sum() == hist_mp.sum()
assert np.array_equal(hist_np, hist_mp)
@pytest.mark.xfail(strict=True, raises=NotImplementedError)
def test_histogram_bins_array():
rng = np.random.RandomState(42)
a = rng.normal(size=1000)
bins = np.linspace(-5, 5, 10)
hist_mp, bin_edges_mp = mp.histogram(a, bins)
@pytest.mark.xfail(strict=True, raises=NotImplementedError)
def test_histogram_bins_string():
rng = np.random.RandomState(42)
a = rng.normal(size=1000)
bins = 'auto'
hist_mp, bin_edges_mp = mp.histogram(a, bins)
def test_histogram2d_x_ndarray_y_ndarray():
x = np.sin(np.linspace(0, 10*np.pi, 1000))
y = np.sin(np.linspace(0, 10*np.pi, 1000))
H_np, xedges_np, yedges_np = np.histogram2d(x, y)
H_mp, xedges_mp, yedges_mp = mp.histogram2d(x, y)
assert xedges_np.dtype == xedges_mp.dtype
assert xedges_np.ndim == xedges_mp.ndim
assert xedges_np.shape == xedges_mp.shape
assert np.allclose(xedges_np, xedges_mp)
assert yedges_np.dtype == yedges_mp.dtype
assert yedges_np.ndim == yedges_mp.ndim
assert yedges_np.shape == yedges_mp.shape
assert np.allclose(yedges_np, yedges_mp)
assert H_np.dtype == H_mp.dtype
assert H_np.ndim == H_mp.ndim
assert H_np.shape == H_mp.shape
assert H_np.sum() == H_mp.sum()
assert np.array_equal(H_np, H_mp)
def test_histogram2d_x_list_y_list():
x = [1, 1, 2, 2, 3]
y = [1, 1, 2, 2, 3]
H_np, xedges_np, yedges_np = np.histogram2d(x, y)
H_mp, xedges_mp, yedges_mp = mp.histogram2d(x, y)
assert xedges_np.dtype == xedges_mp.dtype
assert xedges_np.ndim == xedges_mp.ndim
assert xedges_np.shape == xedges_mp.shape
assert np.allclose(xedges_np, xedges_mp)
assert yedges_np.dtype == yedges_mp.dtype
assert yedges_np.ndim == yedges_mp.ndim
assert yedges_np.shape == yedges_mp.shape
assert np.allclose(yedges_np, yedges_mp)
assert H_np.dtype == H_mp.dtype
assert H_np.ndim == H_mp.ndim
assert H_np.shape == H_mp.shape
assert H_np.sum() == H_mp.sum()
assert np.array_equal(H_np, H_mp)
@given(st.integers(min_value=1, max_value=16))
def test_histogram2d_bins_scalar(bins):
rng = np.random.RandomState(42)
x = rng.normal(size=1000)
y = rng.normal(size=1000)
H_np, xedges_np, yedges_np = np.histogram2d(x, y, bins)
H_mp, xedges_mp, yedges_mp = mp.histogram2d(x, y, bins)
assert xedges_np.dtype == xedges_mp.dtype
assert xedges_np.ndim == xedges_mp.ndim
assert xedges_np.shape == xedges_mp.shape
assert np.allclose(xedges_np, xedges_mp)
assert yedges_np.dtype == yedges_mp.dtype
assert yedges_np.ndim == yedges_mp.ndim
assert yedges_np.shape == yedges_mp.shape
assert np.allclose(yedges_np, yedges_mp)
assert H_np.dtype == H_mp.dtype
assert H_np.ndim == H_mp.ndim
assert H_np.shape == H_mp.shape
assert H_np.sum() == H_mp.sum()
assert np.array_equal(H_np, H_mp)
@pytest.mark.xfail(strict=True, raises=NotImplementedError)
def test_histogram2d_bins_1Darray():
rng = np.random.RandomState(42)
x = rng.normal(size=1000)
y = rng.normal(size=1000)
bins = np.linspace(-5, 5, 10)
H_mp, xedges_mp, yedges_mp = mp.histogram2d(x, y, bins)
@given(st.lists(st.integers(min_value=1, max_value=1024), min_size=2, max_size=2))
def test_histogram2d_bins_list_of_2_ints(bins):
rng = np.random.RandomState(42)
x = rng.normal(size=1000)
y = rng.normal(size=1000)
H_np, xedges_np, yedges_np = np.histogram2d(x, y, bins)
H_mp, xedges_mp, yedges_mp = mp.histogram2d(x, y, bins)
assert xedges_np.dtype == xedges_mp.dtype
assert xedges_np.ndim == xedges_mp.ndim
assert xedges_np.shape == xedges_mp.shape
assert np.allclose(xedges_np, xedges_mp)
assert yedges_np.dtype == yedges_mp.dtype
assert yedges_np.ndim == yedges_mp.ndim
assert yedges_np.shape == yedges_mp.shape
assert np.allclose(yedges_np, yedges_mp)
assert H_np.dtype == H_mp.dtype
assert H_np.ndim == H_mp.ndim
assert H_np.shape == H_mp.shape
assert H_np.sum() == H_mp.sum()
assert np.array_equal(H_np, H_mp)
@pytest.mark.xfail(strict=True)#, raises=NotImplementedError)
def test_histogram2d_bins_list_of_1Darrays():
rng = np.random.RandomState(42)
x = rng.normal(size=1000)
y = rng.normal(size=1000)
bins = [np.linspace(-5, 5, 10), np.linspace(-1, 1, 100)]
H_mp, xedges_mp, yedges_mp = mp.histogram2d(x, y, bins)
@pytest.mark.xfail(strict=True)#, raises=RuntimeError)
def test_histogram2d_bins_list_mixed():
rng = np.random.RandomState(42)
x = rng.normal(size=1000)
y = rng.normal(size=1000)
bins = [np.linspace(-5, 5, 10), 100]
H_mp, xedges_mp, yedges_mp = mp.histogram2d(x, y, bins)
def test_histogramdd_sample_ndarray():
rng = np.random.RandomState(42)
sample = rng.randn(100, 3)
H_np, edges_np = np.histogramdd(sample)
H_mp, edges_mp = mp.histogramdd(sample)
assert type(edges_np) == type(edges_mp)
assert len(edges_np) == len(edges_mp)
assert all(n.dtype == m.dtype for n, m in zip(edges_np, edges_mp))
assert all(n.ndim == m.ndim for n, m in zip(edges_np, edges_mp))
assert all(n.shape == m.shape for n, m in zip(edges_np, edges_mp))
assert all(np.allclose(n, m) for n, m in zip(edges_np, edges_mp))
assert H_np.dtype == H_mp.dtype
assert H_np.ndim == H_mp.ndim
assert H_np.shape == H_mp.shape
assert H_np.sum() == H_mp.sum()
assert np.allclose(H_np, H_mp)
@pytest.mark.xfail(strict=True, raises=TypeError)
def test_histogramdd_sample_list():
sample = [[1, 1, 2, 2, 3], [1, 2, 2, 3, 3], [2, 2, 3, 3, 4]]
H_mp, edges_mp = mp.histogramdd(sample)
@given(st.integers(min_value=1, max_value=16))
def test_histogramdd_bins_scalar(bins):
rng = np.random.RandomState(42)
sample = rng.randn(100, 3)
H_np, edges_np = np.histogramdd(sample, bins)
H_mp, edges_mp = mp.histogramdd(sample, bins)
assert type(edges_np) == type(edges_mp)
assert len(edges_np) == len(edges_mp)
assert all(n.dtype == m.dtype for n, m in zip(edges_np, edges_mp))
assert all(n.ndim == m.ndim for n, m in zip(edges_np, edges_mp))
assert all(n.shape == m.shape for n, m in zip(edges_np, edges_mp))
assert all(np.allclose(n, m) for n, m in zip(edges_np, edges_mp))
assert H_np.dtype == H_mp.dtype
assert H_np.ndim == H_mp.ndim
assert H_np.shape == H_mp.shape
assert H_np.sum() == H_mp.sum()
assert np.allclose(H_np, H_mp)
@given(st.lists(st.integers(min_value=1, max_value=16), min_size=2, max_size=3))
def test_histogramdd_bins_array_of_scalars(bins):
rng = np.random.RandomState(42)
sample = rng.randn(1024*128//len(bins), len(bins))
H_np, edges_np = np.histogramdd(sample, bins)
H_mp, edges_mp = mp.histogramdd(sample, bins)
assert type(edges_np) == type(edges_mp)
assert len(edges_np) == len(edges_mp)
assert all(n.dtype == m.dtype for n, m in zip(edges_np, edges_mp))
assert all(n.ndim == m.ndim for n, m in zip(edges_np, edges_mp))
assert all(n.shape == m.shape for n, m in zip(edges_np, edges_mp))
assert all(np.allclose(n, m) for n, m in zip(edges_np, edges_mp))
assert H_np.dtype == H_mp.dtype
assert H_np.ndim == H_mp.ndim
assert H_np.shape == H_mp.shape
assert H_np.sum() == H_mp.sum()
assert np.allclose(H_np, H_mp)
@pytest.mark.xfail(strict=True)#, raises=NotImplementedError)
def test_histogramdd_bins_ndarray():
rng = np.random.RandomState(42)
sample = rng.randn(100, 3)
bins = (np.arange(3), np.arange(5), np.arange(7))
H_mp, edges_mp = mp.histogramdd(sample, bins)
def test_histogram_bin_edges_a_ndarray():
a = np.sin(np.linspace(0, 10*np.pi, 1000))
bin_edges_np = np.histogram_bin_edges(a)
bin_edges_mp = mp.histogram_bin_edges(a)
assert bin_edges_np.dtype == bin_edges_mp.dtype
assert bin_edges_np.ndim == bin_edges_mp.ndim
assert bin_edges_np.shape == bin_edges_mp.shape
assert np.allclose(bin_edges_np, bin_edges_mp)
def test_histogram_bin_edges_a_list():
a = [1, 1, 2, 2, 3]
bin_edges_np = np.histogram_bin_edges(a)
bin_edges_mp = mp.histogram_bin_edges(a)
assert bin_edges_np.dtype == bin_edges_mp.dtype
assert bin_edges_np.ndim == bin_edges_mp.ndim
assert bin_edges_np.shape == bin_edges_mp.shape
assert np.allclose(bin_edges_np, bin_edges_mp)
@given(st.integers(min_value=1, max_value=16))
def test_histogram_bin_edges_bins_scalar(bins):
rng = np.random.RandomState(42)
a = rng.normal(size=1000)
bin_edges_np = np.histogram_bin_edges(a, bins)
bin_edges_mp = mp.histogram_bin_edges(a, bins)
assert bin_edges_np.dtype == bin_edges_mp.dtype
assert bin_edges_np.ndim == bin_edges_mp.ndim
assert bin_edges_np.shape == bin_edges_mp.shape
assert np.allclose(bin_edges_np, bin_edges_mp)
@pytest.mark.xfail(strict=True, raises=NotImplementedError)
def test_histogram_bin_edges_bins_array():
rng = np.random.RandomState(42)
a = rng.normal(size=1000)
bins = np.linspace(-5, 5, 10)
hist_mp, bin_edges_mp = mp.histogram(a, bins)
@pytest.mark.xfail(strict=True, raises=NotImplementedError)
def test_histogram_bin_edges_bins_string():
rng = np.random.RandomState(42)
a = rng.normal(size=1000)
bins = 'auto'
hist_mp, bin_edges_mp = mp.histogram(a, bins)
def test_digitize_x_ndarray():
x = np.sin(np.linspace(0, 10*np.pi, 1000))
bins = np.linspace(-1, 1, 11)
indices_np = np.digitize(x, bins)
indices_mp = mp.digitize(x, bins)
assert indices_np.dtype == indices_mp.dtype
assert indices_np.ndim == indices_mp.ndim
assert indices_np.shape == indices_mp.shape
assert np.array_equal(indices_np, indices_mp)
def test_digitize_x_list():
x = [1, 1, 2, 2, 3, 4]
bins = np.linspace(1, 4, 4)
indices_np = np.digitize(x, bins)
indices_mp = mp.digitize(x, bins)
assert indices_np.dtype == indices_mp.dtype
assert indices_np.ndim == indices_mp.ndim
assert indices_np.shape == indices_mp.shape
assert np.array_equal(indices_np, indices_mp)
def test_digitize_bins_list():
x = np.sin(np.linspace(0, 10*np.pi, 1000))
bins = [-1, 0, 1]
indices_np = np.digitize(x, bins)
indices_mp = mp.digitize(x, bins)
assert indices_np.dtype == indices_mp.dtype
assert indices_np.ndim == indices_mp.ndim
assert indices_np.shape == indices_mp.shape
assert np.array_equal(indices_np, indices_mp)
def test_digitize_bins_decreasing():
x = np.sin(np.linspace(0, 10*np.pi, 1000))
bins = np.linspace(1, -1, 10)
indices_np = np.digitize(x, bins)
indices_mp = mp.digitize(x, bins)
assert indices_np.dtype == indices_mp.dtype
assert indices_np.ndim == indices_mp.ndim
assert indices_np.shape == indices_mp.shape
assert np.array_equal(indices_np, indices_mp)
@pytest.mark.parametrize('right', [False, True])
def test_digitize_bins_increasing_right(right):
x = np.array([1, 1, 2, 2, 3, 4])
bins = np.linspace(1, 4, 4)
indices_np = np.digitize(x, bins, right)
indices_mp = mp.digitize(x, bins, right)
assert indices_np.dtype == indices_mp.dtype
assert indices_np.ndim == indices_mp.ndim
assert indices_np.shape == indices_mp.shape
assert np.array_equal(indices_np, indices_mp)
@pytest.mark.parametrize('right', [False, True])
def test_digitize_bins_decreasing_right(right):
x = np.array([1, 1, 2, 2, 3, 4])
bins = np.linspace(4, 1, 4)
indices_np = np.digitize(x, bins, right)
indices_mp = mp.digitize(x, bins, right)
assert indices_np.dtype == indices_mp.dtype
assert indices_np.ndim == indices_mp.ndim
assert indices_np.shape == indices_mp.shape
assert np.array_equal(indices_np, indices_mp)
def test_get_cache_size_kb():
assert mp.get_cache_size_kb() == 128
@given(st.integers(min_value=0, max_value=2**27))
def test_set_cache_size_kb(size):
mp.set_cache_size_kb(size)
    assert mp.get_cache_size_kb() == size
@given(st.integers(min_value=-(2**32), max_value=-1))
def test_set_cache_size_kb_not_positive(size):
with pytest.raises(ValueError) as e:
mp.set_cache_size_kb(size)
assert e.match('must be non-negative')
@given(st.one_of(st.characters(),
st.complex_numbers(),
st.dates(),
st.datetimes(),
st.dictionaries(st.integers(), st.integers()),
st.floats(),
st.iterables(st.integers()),
st.lists(st.integers()),
st.none(),
st.sets(st.integers()),
st.text(),
st.timedeltas(),
st.times(),
st.tuples(st.integers())))
def test_set_cache_size_kb_not_integer(size):
with pytest.raises(TypeError) as e:
mp.set_cache_size_kb(size)
assert e.match('integer')
|
def internet_reachable():
import socket
import requests
try:
requests.get('http://google.com', timeout=1)
except (requests.exceptions.Timeout,
requests.exceptions.ConnectionError,
socket.timeout,
socket.gaierror):
return False
return True
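# Hedged usage sketch: gate network-dependent tests on connectivity, e.g. with pytest
# (the test name below is illustrative):
#
#   import pytest
#   requires_internet = pytest.mark.skipif(
#       not internet_reachable(), reason="no internet connection available")
#
#   @requires_internet
#   def test_fetch_remote_resource():
#       ...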
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.monitoring_v3.services.alert_policy_service.client import AlertPolicyServiceClient
from google.cloud.monitoring_v3.services.alert_policy_service.async_client import AlertPolicyServiceAsyncClient
from google.cloud.monitoring_v3.services.group_service.client import GroupServiceClient
from google.cloud.monitoring_v3.services.group_service.async_client import GroupServiceAsyncClient
from google.cloud.monitoring_v3.services.metric_service.client import MetricServiceClient
from google.cloud.monitoring_v3.services.metric_service.async_client import MetricServiceAsyncClient
from google.cloud.monitoring_v3.services.notification_channel_service.client import NotificationChannelServiceClient
from google.cloud.monitoring_v3.services.notification_channel_service.async_client import NotificationChannelServiceAsyncClient
from google.cloud.monitoring_v3.services.query_service.client import QueryServiceClient
from google.cloud.monitoring_v3.services.query_service.async_client import QueryServiceAsyncClient
from google.cloud.monitoring_v3.services.service_monitoring_service.client import ServiceMonitoringServiceClient
from google.cloud.monitoring_v3.services.service_monitoring_service.async_client import ServiceMonitoringServiceAsyncClient
from google.cloud.monitoring_v3.services.uptime_check_service.client import UptimeCheckServiceClient
from google.cloud.monitoring_v3.services.uptime_check_service.async_client import UptimeCheckServiceAsyncClient
from google.cloud.monitoring_v3.types.alert import AlertPolicy
from google.cloud.monitoring_v3.types.alert_service import CreateAlertPolicyRequest
from google.cloud.monitoring_v3.types.alert_service import DeleteAlertPolicyRequest
from google.cloud.monitoring_v3.types.alert_service import GetAlertPolicyRequest
from google.cloud.monitoring_v3.types.alert_service import ListAlertPoliciesRequest
from google.cloud.monitoring_v3.types.alert_service import ListAlertPoliciesResponse
from google.cloud.monitoring_v3.types.alert_service import UpdateAlertPolicyRequest
from google.cloud.monitoring_v3.types.common import Aggregation
from google.cloud.monitoring_v3.types.common import TimeInterval
from google.cloud.monitoring_v3.types.common import TypedValue
from google.cloud.monitoring_v3.types.common import ComparisonType
from google.cloud.monitoring_v3.types.common import ServiceTier
from google.cloud.monitoring_v3.types.dropped_labels import DroppedLabels
from google.cloud.monitoring_v3.types.group import Group
from google.cloud.monitoring_v3.types.group_service import CreateGroupRequest
from google.cloud.monitoring_v3.types.group_service import DeleteGroupRequest
from google.cloud.monitoring_v3.types.group_service import GetGroupRequest
from google.cloud.monitoring_v3.types.group_service import ListGroupMembersRequest
from google.cloud.monitoring_v3.types.group_service import ListGroupMembersResponse
from google.cloud.monitoring_v3.types.group_service import ListGroupsRequest
from google.cloud.monitoring_v3.types.group_service import ListGroupsResponse
from google.cloud.monitoring_v3.types.group_service import UpdateGroupRequest
from google.cloud.monitoring_v3.types.metric import LabelValue
from google.cloud.monitoring_v3.types.metric import Point
from google.cloud.monitoring_v3.types.metric import QueryError
from google.cloud.monitoring_v3.types.metric import TextLocator
from google.cloud.monitoring_v3.types.metric import TimeSeries
from google.cloud.monitoring_v3.types.metric import TimeSeriesData
from google.cloud.monitoring_v3.types.metric import TimeSeriesDescriptor
from google.cloud.monitoring_v3.types.metric_service import CreateMetricDescriptorRequest
from google.cloud.monitoring_v3.types.metric_service import CreateTimeSeriesError
from google.cloud.monitoring_v3.types.metric_service import CreateTimeSeriesRequest
from google.cloud.monitoring_v3.types.metric_service import CreateTimeSeriesSummary
from google.cloud.monitoring_v3.types.metric_service import DeleteMetricDescriptorRequest
from google.cloud.monitoring_v3.types.metric_service import GetMetricDescriptorRequest
from google.cloud.monitoring_v3.types.metric_service import GetMonitoredResourceDescriptorRequest
from google.cloud.monitoring_v3.types.metric_service import ListMetricDescriptorsRequest
from google.cloud.monitoring_v3.types.metric_service import ListMetricDescriptorsResponse
from google.cloud.monitoring_v3.types.metric_service import ListMonitoredResourceDescriptorsRequest
from google.cloud.monitoring_v3.types.metric_service import ListMonitoredResourceDescriptorsResponse
from google.cloud.monitoring_v3.types.metric_service import ListTimeSeriesRequest
from google.cloud.monitoring_v3.types.metric_service import ListTimeSeriesResponse
from google.cloud.monitoring_v3.types.metric_service import QueryErrorList
from google.cloud.monitoring_v3.types.metric_service import QueryTimeSeriesRequest
from google.cloud.monitoring_v3.types.metric_service import QueryTimeSeriesResponse
from google.cloud.monitoring_v3.types.mutation_record import MutationRecord
from google.cloud.monitoring_v3.types.notification import NotificationChannel
from google.cloud.monitoring_v3.types.notification import NotificationChannelDescriptor
from google.cloud.monitoring_v3.types.notification_service import CreateNotificationChannelRequest
from google.cloud.monitoring_v3.types.notification_service import DeleteNotificationChannelRequest
from google.cloud.monitoring_v3.types.notification_service import GetNotificationChannelDescriptorRequest
from google.cloud.monitoring_v3.types.notification_service import GetNotificationChannelRequest
from google.cloud.monitoring_v3.types.notification_service import GetNotificationChannelVerificationCodeRequest
from google.cloud.monitoring_v3.types.notification_service import GetNotificationChannelVerificationCodeResponse
from google.cloud.monitoring_v3.types.notification_service import ListNotificationChannelDescriptorsRequest
from google.cloud.monitoring_v3.types.notification_service import ListNotificationChannelDescriptorsResponse
from google.cloud.monitoring_v3.types.notification_service import ListNotificationChannelsRequest
from google.cloud.monitoring_v3.types.notification_service import ListNotificationChannelsResponse
from google.cloud.monitoring_v3.types.notification_service import SendNotificationChannelVerificationCodeRequest
from google.cloud.monitoring_v3.types.notification_service import UpdateNotificationChannelRequest
from google.cloud.monitoring_v3.types.notification_service import VerifyNotificationChannelRequest
from google.cloud.monitoring_v3.types.service import BasicSli
from google.cloud.monitoring_v3.types.service import DistributionCut
from google.cloud.monitoring_v3.types.service import Range
from google.cloud.monitoring_v3.types.service import RequestBasedSli
from google.cloud.monitoring_v3.types.service import Service
from google.cloud.monitoring_v3.types.service import ServiceLevelIndicator
from google.cloud.monitoring_v3.types.service import ServiceLevelObjective
from google.cloud.monitoring_v3.types.service import TimeSeriesRatio
from google.cloud.monitoring_v3.types.service import WindowsBasedSli
from google.cloud.monitoring_v3.types.service_service import CreateServiceLevelObjectiveRequest
from google.cloud.monitoring_v3.types.service_service import CreateServiceRequest
from google.cloud.monitoring_v3.types.service_service import DeleteServiceLevelObjectiveRequest
from google.cloud.monitoring_v3.types.service_service import DeleteServiceRequest
from google.cloud.monitoring_v3.types.service_service import GetServiceLevelObjectiveRequest
from google.cloud.monitoring_v3.types.service_service import GetServiceRequest
from google.cloud.monitoring_v3.types.service_service import ListServiceLevelObjectivesRequest
from google.cloud.monitoring_v3.types.service_service import ListServiceLevelObjectivesResponse
from google.cloud.monitoring_v3.types.service_service import ListServicesRequest
from google.cloud.monitoring_v3.types.service_service import ListServicesResponse
from google.cloud.monitoring_v3.types.service_service import UpdateServiceLevelObjectiveRequest
from google.cloud.monitoring_v3.types.service_service import UpdateServiceRequest
from google.cloud.monitoring_v3.types.span_context import SpanContext
from google.cloud.monitoring_v3.types.uptime import InternalChecker
from google.cloud.monitoring_v3.types.uptime import UptimeCheckConfig
from google.cloud.monitoring_v3.types.uptime import UptimeCheckIp
from google.cloud.monitoring_v3.types.uptime import GroupResourceType
from google.cloud.monitoring_v3.types.uptime import UptimeCheckRegion
from google.cloud.monitoring_v3.types.uptime_service import CreateUptimeCheckConfigRequest
from google.cloud.monitoring_v3.types.uptime_service import DeleteUptimeCheckConfigRequest
from google.cloud.monitoring_v3.types.uptime_service import GetUptimeCheckConfigRequest
from google.cloud.monitoring_v3.types.uptime_service import ListUptimeCheckConfigsRequest
from google.cloud.monitoring_v3.types.uptime_service import ListUptimeCheckConfigsResponse
from google.cloud.monitoring_v3.types.uptime_service import ListUptimeCheckIpsRequest
from google.cloud.monitoring_v3.types.uptime_service import ListUptimeCheckIpsResponse
from google.cloud.monitoring_v3.types.uptime_service import UpdateUptimeCheckConfigRequest
__all__ = ('AlertPolicyServiceClient',
'AlertPolicyServiceAsyncClient',
'GroupServiceClient',
'GroupServiceAsyncClient',
'MetricServiceClient',
'MetricServiceAsyncClient',
'NotificationChannelServiceClient',
'NotificationChannelServiceAsyncClient',
'QueryServiceClient',
'QueryServiceAsyncClient',
'ServiceMonitoringServiceClient',
'ServiceMonitoringServiceAsyncClient',
'UptimeCheckServiceClient',
'UptimeCheckServiceAsyncClient',
'AlertPolicy',
'CreateAlertPolicyRequest',
'DeleteAlertPolicyRequest',
'GetAlertPolicyRequest',
'ListAlertPoliciesRequest',
'ListAlertPoliciesResponse',
'UpdateAlertPolicyRequest',
'Aggregation',
'TimeInterval',
'TypedValue',
'ComparisonType',
'ServiceTier',
'DroppedLabels',
'Group',
'CreateGroupRequest',
'DeleteGroupRequest',
'GetGroupRequest',
'ListGroupMembersRequest',
'ListGroupMembersResponse',
'ListGroupsRequest',
'ListGroupsResponse',
'UpdateGroupRequest',
'LabelValue',
'Point',
'QueryError',
'TextLocator',
'TimeSeries',
'TimeSeriesData',
'TimeSeriesDescriptor',
'CreateMetricDescriptorRequest',
'CreateTimeSeriesError',
'CreateTimeSeriesRequest',
'CreateTimeSeriesSummary',
'DeleteMetricDescriptorRequest',
'GetMetricDescriptorRequest',
'GetMonitoredResourceDescriptorRequest',
'ListMetricDescriptorsRequest',
'ListMetricDescriptorsResponse',
'ListMonitoredResourceDescriptorsRequest',
'ListMonitoredResourceDescriptorsResponse',
'ListTimeSeriesRequest',
'ListTimeSeriesResponse',
'QueryErrorList',
'QueryTimeSeriesRequest',
'QueryTimeSeriesResponse',
'MutationRecord',
'NotificationChannel',
'NotificationChannelDescriptor',
'CreateNotificationChannelRequest',
'DeleteNotificationChannelRequest',
'GetNotificationChannelDescriptorRequest',
'GetNotificationChannelRequest',
'GetNotificationChannelVerificationCodeRequest',
'GetNotificationChannelVerificationCodeResponse',
'ListNotificationChannelDescriptorsRequest',
'ListNotificationChannelDescriptorsResponse',
'ListNotificationChannelsRequest',
'ListNotificationChannelsResponse',
'SendNotificationChannelVerificationCodeRequest',
'UpdateNotificationChannelRequest',
'VerifyNotificationChannelRequest',
'BasicSli',
'DistributionCut',
'Range',
'RequestBasedSli',
'Service',
'ServiceLevelIndicator',
'ServiceLevelObjective',
'TimeSeriesRatio',
'WindowsBasedSli',
'CreateServiceLevelObjectiveRequest',
'CreateServiceRequest',
'DeleteServiceLevelObjectiveRequest',
'DeleteServiceRequest',
'GetServiceLevelObjectiveRequest',
'GetServiceRequest',
'ListServiceLevelObjectivesRequest',
'ListServiceLevelObjectivesResponse',
'ListServicesRequest',
'ListServicesResponse',
'UpdateServiceLevelObjectiveRequest',
'UpdateServiceRequest',
'SpanContext',
'InternalChecker',
'UptimeCheckConfig',
'UptimeCheckIp',
'GroupResourceType',
'UptimeCheckRegion',
'CreateUptimeCheckConfigRequest',
'DeleteUptimeCheckConfigRequest',
'GetUptimeCheckConfigRequest',
'ListUptimeCheckConfigsRequest',
'ListUptimeCheckConfigsResponse',
'ListUptimeCheckIpsRequest',
'ListUptimeCheckIpsResponse',
'UpdateUptimeCheckConfigRequest',
)
|
#!/usr/bin/env python3
import cca
class Location:
VOID, PLAYER, R1, R2, R3, R4, R5 = range(7)
def rules():
def rules_r1(rb):
rb.default_check = {'player': Location.R1}
r = rb.add
r(("look",""),"You are in a small room with a bed, a creepy portrait, and an open door.", {'r1_door_open': True})
r(("look",""),"You are in a small room with a bed, a creepy portrait, and a closed door.")
r(("look","door"),"The door is open, revealing a more spacious room beyond.", {'r1_door_open': True})
r(("look","door"),"The door is very sturdy, but appears unlocked.", {'r1_door_open': False})
r(("open","door"),"The door is already open.", {'r1_door_open': True})
r(("open","door"),"The door creeks open ominously.", None, {'r1_door_open': True})
r(("look",("painting","portrait","picture")),"The door creeks open ominously.", {'r1_pic_seen': True})
r(("look",("painting","portrait","picture")),"""
The person portrayed hard to make out.
The painting is either badly aged or actually painted out of focus.
The subject could be a grotesque man or woman or angry lawn gnome.
The only element piercingly clear are black blood shot eyes that stare back at you with malice.
""", None, {'r1_pic_seen': True})
r(("close","door"),"The door appears stuck now, it won't budge.", {'r1_door_open': True})
r(("close","door"),"The door still closed.")
r((("leave",),("go",),("exit",),("use","door")),"""
You find yourself in another windowless room.
In addition to the door you just walked through, there are two more doors, both closed.
One is red, the other is blue. Looking behind you, you see the one you just opened is yellow.
Directly in front of you is another painting, the subject of which looks suspiciously like a slaughtered pig.
""",{'r1_door_open': True}, {'player': Location.R2}),
r((("leave",),("go",),("exit",),("use","door")),"The door still closed.")
def rules_r2(rb):
rb.default_check = {'player': Location.R2}
r = rb.add
r(("look",""),"You are in a room with three doors, yellow, red, and blue. On the remaining wall is a disturbing painting.")
r(("look",("painting","picture")),"""
What initially looked like butchered swine turns out to be a field of blood red poppies on
a hill of dead yellow grass. Still creepy. And vaguely porcine.
""")
r((("go","yellow"),("use","yellow"), ("yellow","")), "You exit the room through the yellow door.", None, {'player': Location.R1})
def rules_default(rb):
rb.default_check = None
rb.add(("die",),"You throw yourself at the ground. Hard. Ouch. The world swirls away into darkness.", None, {'done': True})
rb.add(None, "confused by {verb} {noun}")
rc = cca.RuleCollection()
rules_r1(rc)
rules_r2(rc)
rules_default(rc)
return rc
INIT_STATE = cca.State({
'msg': """You awake on a musty smelling bed in a spartan, windowless, room.
You see a painting on the wall that seems to be staring at you and a closed door.
You feel trapped. You don't know how you got here, but it can't be good.
""",
'debug': False,
'done': False,
'r1_door_open': False,
'r1_pic_seen': False,
'player': Location.R1
})
def run_game(game_io):
cca.play_game(INIT_STATE, rules(), game_io)
def play_game():
run_game(cca.ConsoleIO(78))
def test_game():
io = cca.TestInput(70)
a = io.add
a("look", "look dog", "look door", "open door", "use door", "fire gun", "look", "look painting")
run_game(io)
def main():
# play_game()
test_game()
if __name__== "__main__":
main()
|
import os
import shutil
import numpy as np
import pandas as pd
import yaml
from colorclass import Color
def read_multilabel_dataset(input_file, input_col=2, target_col=(-2, -1),
encoding='utf-8'):
df = pd.read_csv(input_file, sep='\t',
encoding=encoding, dtype='str')
if not (isinstance(target_col, list) or isinstance(target_col, tuple)):
target_col = [target_col]
target_col = [c if isinstance(c, str) else df.columns[c]
for c in target_col]
df = df.dropna(subset=target_col)
texts = np.asarray(df[df.columns[input_col]].values)
labels = np.asarray([np.asarray([lbls.split(" ") for lbls in df[c].values])
for c in target_col])
return texts, labels
def print_success(message):
print(Color("{green}%s{/green}" % message))
def print_bold(message):
print('\x1b[1;37m' + message.strip() + '\x1b[0m')
def normpath(filepath):
filepath = os.path.expandvars(os.path.expanduser(filepath))
filepath = os.path.normpath(filepath)
if not os.path.isabs(filepath):
filepath = os.path.abspath(filepath)
return filepath
def makedirs(dirpath, remove=False):
if remove and os.path.exists(dirpath):
shutil.rmtree(dirpath)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
def makedirs_file(filepath, remove=False):
folder_path = os.path.dirname(normpath(filepath))
makedirs(folder_path, remove=remove)
def load_yaml(filepath):
with open(normpath(filepath), 'r') as fp:
        return yaml.safe_load(fp)
def normlist(values, sep=","):
if isinstance(values, str):
return [v.strip() for v in values.split(sep)]
elif isinstance(values, int) or isinstance(values, float):
return [values]
return list(values)
def flatten(l):
return [i for sl in l for i in sl]
def flatten_list(y):
if isinstance(y, list) or isinstance(y, np.ndarray):
return flatten(y)
return y
def remove_list_indices(l, indices):
return [item for i, item in enumerate(l) if i not in indices]
def remove_list_values(l, vals):
return [item for item in l if item not in vals]
def filter_list(l, indices):
return [l[i] for i in indices]
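# Hedged usage notes for the small list helpers above (values are illustrative):
#   normlist("a, b, c")                        -> ["a", "b", "c"]
#   normlist(3)                                -> [3]
#   flatten([[1, 2], [3]])                     -> [1, 2, 3]
#   remove_list_indices(["a", "b", "c"], [1])  -> ["a", "c"]
#   filter_list(["a", "b", "c"], [0, 2])       -> ["a", "c"]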
|
################################################
## main.py
## - Websocket server
## Project: Arduino | Python | HTML | Websocket
## Created by Guilherme Rodrigues on 28/03/2020
##
################################################
import asyncio
import json
import websockets
import datetime
# Local modules
import com_serial
com_serial.init()
def status_led_event(status_led):
return json.dumps({
"status_led": status_led
})
async def sync_status_led(websocket, path):
    print('-> New connection - ', datetime.datetime.now().strftime("%X"))
while True:
status = com_serial.read_status_led()
try:
await websocket.send(status_led_event(status))
except websockets.exceptions.ConnectionClosedError:
            print('-> Connection closed - ', datetime.datetime.now().strftime("%X"))
break
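# Hedged example: a matching client sketch (assumes the server started below is reachable
# at ws://localhost:6789). It is only defined here, never called; run it from a separate
# process to watch status_led updates stream in.
async def watch_status(uri="ws://localhost:6789"):
    async with websockets.connect(uri) as ws:
        async for message in ws:
            print(json.loads(message)["status_led"])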
# Start the websocket server on port '6789' with an endless loop in the 'sync_status_led' function
start_server = websockets.serve(sync_status_led, "0.0.0.0", 6789)
print("Servidor Em Execução")
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
|
from django.contrib.auth import authenticate
from rest_framework import serializers
from register.models import Registration
from django.core.validators import ValidationError
class RegistrationSerializer(serializers.ModelSerializer):
"""This class handles serializing and
deserializing of Registration objects"""
firstName = serializers.CharField(
required=True,
error_messages={
"error": "Hello, Kindly provide us with your First name"
}
)
lastName = serializers.CharField(
required=True,
error_messages={
"error": "Hello, Kindly provide us with your Last name",
}
)
dateOfBirth = serializers.DateField(
required=True,
error_messages={
"error": "Hello, Kindly provide us with your date of birth",
}
)
phoneNumber = serializers.CharField(
required=True,
error_messages={
"error": "Hello, Kindly provide us with a working Phone Number that we can contact you on",
}
)
email = serializers.EmailField(
required=True,
error_messages={
"error": "Hello, Kindly provide us with a working Email that we can contact you on",
}
)
nextOfKin1Title = serializers.CharField(
required=True,
error_messages={
"error": "Hello, How are you related",
}
)
nextOfKin1Name = serializers.CharField(
required=True,
error_messages={
"error": "Hello, Kindly provide us with the Name of your Next of kin",
}
)
nextOfKin1Number = serializers.CharField(
required=True,
error_messages={
"error": "Hello, Kindly provide us with the Phone number of your next of kin",
}
)
tribe = serializers.CharField(
required=True,
error_messages={
"error": "Hello, Kindly let us know your Tribe",
}
)
homeChurch = serializers.CharField(
required=True,
error_messages={
"error": "Hello, Kindly let us know where you fellowship from",
}
)
class Meta:
model = Registration
fields = ('firstName', 'lastName', 'dateOfBirth', 'phoneNumber', 'email',
'nextOfKin1Title', 'nextOfKin1Name', 'nextOfKin1Number', 'tribe',
'homeChurch')
read_only_fields = ('createdBy', 'publish_status', 'createdAt',
'updatedAt', 'delete_status')
|
from setuptools import setup
import os
setup(name='pylagrit',
version='1.0.0',
description='Python interface for LaGriT',
author='Dylan R. Harp',
author_email='dharp@lanl.gov',
url='lagrit.lanl.gov',
license='LGPL',
install_requires=[
'pexpect==4.6.0',
'numpy',
],
packages=[
'pylagrit',]
)
|
class State:
# This class simulates a state in the game.
# Variables: pattern
# heuristic
# cost
# father
# Methods: getSuccessors
# swapBlocks
# goalTest
# getHeuristic
# hash
# F
def __init__(self, pattern, cost=0, father=None):
# Each state has a pattern of the current position of blocks,a
# cost for getting to it from the initial position, a heuristic
# that estimates the number of moves to get to the goal from it
# and a father to reference to the state that it came from. The
# father reference is used to get the path to this node from
        # the initial position. The pattern is modeled as a list of the
        # nine numbers 1 to 9, with 9 representing the blank block.
self.pattern = pattern
self.heuristic = self.getHeuristic()
self.cost = cost
self.father = father
def __str__(self):
# A state is printed like the game map, in 3 row and 3 columns.
pattern = list(self.pattern)
pattern[pattern.index(9)] = '-'
return "{0} {1} {2}\n{3} {4} {5}\n{6} {7} {8}".format(pattern[0], pattern[1], pattern[2],
pattern[3], pattern[4], pattern[5],
pattern[6], pattern[7], pattern[8])
def __getitem__(self, item):
# Accessing the state's pattern with [] for ease of access.
return self.pattern[item]
def __setitem__(self, key, value):
# Changing the state's pattern by []for ease of access. The
# heuristic value should be updated after changing the pattern.
self.pattern[key] = value
self.heuristic = self.getHeuristic()
def getSuccessors(self):
'''Returns the successors of this state'''
# The position of 9 is computed and successors are built according
# to it. The maximum of four successors will be computed and
# returned.
blankIndex = self.pattern.index(9)
blankRow = blankIndex // 3
blankCol = blankIndex % 3
successors = []
if blankRow > 0:
state = State(list(self.pattern), self.cost + 1, father=self)
state[blankIndex], state[blankIndex - 3] = state[blankIndex - 3], state[blankIndex]
successors.append(state)
if blankRow < 2:
state = State(list(self.pattern), self.cost + 1, father=self)
state[blankIndex], state[blankIndex + 3] = state[blankIndex + 3], state[blankIndex]
successors.append(state)
if blankCol > 0:
state = State(list(self.pattern), self.cost + 1, father=self)
state[blankIndex], state[blankIndex - 1] = state[blankIndex - 1], state[blankIndex]
successors.append(state)
if blankCol < 2:
state = State(list(self.pattern), self.cost + 1, father=self)
state[blankIndex], state[blankIndex + 1] = state[blankIndex + 1], state[blankIndex]
successors.append(state)
return successors
def swapBlocks(self, i, j):
'''Swaps 2 blocks of the currentState.'''
self[i], self[j] = self[j], self[i]
def goalTest(self):
'''Returns True if this is the goal state, otherwise returns
False'''
final = [1, 2, 3, 4, 5, 6, 7, 8, 9]
for i in range(9):
if self[i] != final[i]:
return False
return True
def getHeuristic(self):
'''Returns the heuristic of this state'''
        # This part computes the sum of the Manhattan distances between each
        # block's current position and its goal position.
h = 0
for i in range(9):
if self[i] != 9:
rightPlace = self[i] - 1
currentPlace = i
h += abs(rightPlace % 3 - currentPlace % 3) + abs(rightPlace // 3 - currentPlace // 3)
# This part enhances the heuristic by computing linear conflict.
# Linear Conflict Tiles Definition: Two tiles tj and tk are in a
# linear conflict if tj and tk are in the same line, the goal
# positions of tj and tk are both in that line, tj is to the
# right of tk and goal position of tj is to the left of the goal
# position of tk.
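        # Example: if the top row starts [2, 1, ...], tiles 1 and 2 are both
        # in their goal row but in reversed order, so 2 extra moves are added.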
for i in range(9):
for j in range(i, 9):
if self[i] != 9 and self[j] != 9:
r1 = self[i] - 1
r2 = self[j] - 1
c1 = i
c2 = j
if c1 // 3 == c2 // 3 == r1 // 3 == r2 // 3 and (
(c1 % 3 > c2 % 3 and r1 % 3 < r2 % 3) or (c1 % 3 < c2 % 3 and r1 % 3 > r2 % 3)):
h += 2
elif c1 % 3 == c2 % 3 == r1 % 3 == r2 % 3 and (
(c1 // 3 > c2 // 3 and r1 // 3 < r2 // 3) or (c1 // 3 < c2 // 3 and r1 // 3 > r2 // 3)):
h += 2
return h
def hash(self):
'''Returns the hash value of this state.'''
        # This function returns a hash value for the state, which can be
        # used for keeping track of the visited states. Another approach
        # is using a set with an average access time of O(1), but since the
        # number of 8-puzzle states fits comfortably in memory (9! = 362,880)
        # we use this ranking to guarantee O(1) lookups.
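        # For example, the goal pattern [1, 2, ..., 9] ranks to 0 and the
        # fully reversed pattern ranks to 9! - 1 = 362,879.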
pattern = self.pattern
fact = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880]
index = 0
for i in range(9):
index += sum(p < pattern[i:][0] for p in pattern[i:]) * fact[len(pattern[i:]) - 1]
return index
def F(self):
'''Returns the F of this state.'''
# The F parameter is used for A* and is the sum of cost and the
# heuristic.
return self.heuristic + self.cost
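

# --- Usage sketch (not part of the original class) ----------------------------
# A minimal A* driver built on the State class above: expand the state with the
# lowest F() first and track visited patterns via State.hash(). The unique
# counter is only a tie-breaker so heapq never has to compare State objects.
import heapq


def solve_8puzzle(start_pattern):
    start = State(start_pattern)
    frontier = [(start.F(), 0, start)]
    visited = set()
    counter = 1
    while frontier:
        _, _, current = heapq.heappop(frontier)
        if current.goalTest():
            return current  # follow .father links to recover the move sequence
        if current.hash() in visited:
            continue
        visited.add(current.hash())
        for successor in current.getSuccessors():
            if successor.hash() not in visited:
                heapq.heappush(frontier, (successor.F(), counter, successor))
                counter += 1
    return None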
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class ServerFarmsOperations(object):
"""ServerFarmsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_server_farms(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets collection of App Service Plans in a resource group for a given
subscription.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ServerFarmWithRichSkuPaged
<azure.mgmt.web.models.ServerFarmWithRichSkuPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ServerFarmWithRichSkuPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ServerFarmWithRichSkuPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
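    # Note (added sketch, not generated code): the *Paged objects returned by the
    # list operations in this class are lazy iterators; looping over one calls
    # internal_paging() again for every next_link the service hands back.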
def get_server_farm(
self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
"""Gets specified App Service Plan in a resource group.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ServerFarmWithRichSku
<azure.mgmt.web.models.ServerFarmWithRichSku>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServerFarmWithRichSku', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update_server_farm(
self, resource_group_name, name, server_farm_envelope, allow_pending_state=None, custom_headers=None, raw=False, **operation_config):
"""Creates or updates an App Service Plan.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
:param server_farm_envelope: Details of App Service Plan
:type server_farm_envelope: :class:`ServerFarmWithRichSku
<azure.mgmt.web.models.ServerFarmWithRichSku>`
:param allow_pending_state: OBSOLETE: If true, allow pending state
for App Service Plan
:type allow_pending_state: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`ServerFarmWithRichSku
<azure.mgmt.web.models.ServerFarmWithRichSku>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if allow_pending_state is not None:
query_parameters['allowPendingState'] = self._serialize.query("allow_pending_state", allow_pending_state, 'bool')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(server_farm_envelope, 'ServerFarmWithRichSku')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServerFarmWithRichSku', response)
if response.status_code == 202:
deserialized = self._deserialize('ServerFarmWithRichSku', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
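    # Note (added sketch, not generated code): when raw is False the method above
    # returns an AzureOperationPoller; call .result() to block until the App
    # Service Plan is provisioned, or .add_done_callback(fn) to be notified.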
def delete_server_farm(
self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
"""Deletes a App Service Plan.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: object
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('object', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_server_farm_metrics(
self, resource_group_name, name, details=None, filter=None, custom_headers=None, raw=False, **operation_config):
"""Queries for App Serice Plan metrics.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
:param details: If true, metrics are broken down per App Service Plan
instance
:type details: bool
:param filter: Return only usages/metrics specified in the filter.
Filter conforms to odata syntax. Example: $filter=(name.value eq
'Metric1' or name.value eq 'Metric2') and startTime eq
'2014-01-01T00:00:00Z' and endTime eq '2014-12-31T23:59:59Z' and
timeGrain eq duration'[Hour|Minute|Day]'.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ResourceMetricPaged
<azure.mgmt.web.models.ResourceMetricPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/metrics'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if details is not None:
query_parameters['details'] = self._serialize.query("details", details, 'bool')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str', skip_quote=True)
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ResourceMetricPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ResourceMetricPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get_server_farm_metric_defintions(
self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
"""List of metrics that can be queried for an App Service Plan.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`MetricDefinitionPaged
<azure.mgmt.web.models.MetricDefinitionPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/metricdefinitions'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.MetricDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.MetricDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get_vnets_for_server_farm(
self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
"""Gets list of vnets associated with App Service Plan.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: list of :class:`VnetInfo <azure.mgmt.web.models.VnetInfo>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[VnetInfo]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_vnet_from_server_farm(
self, resource_group_name, name, vnet_name, custom_headers=None, raw=False, **operation_config):
"""Gets a vnet associated with an App Service Plan.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
:param vnet_name: Name of virtual network
:type vnet_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VnetInfo <azure.mgmt.web.models.VnetInfo>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'vnetName': self._serialize.url("vnet_name", vnet_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 404]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VnetInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_routes_for_vnet(
self, resource_group_name, name, vnet_name, custom_headers=None, raw=False, **operation_config):
"""Gets a list of all routes associated with a vnet, in an app service
plan.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
:param vnet_name: Name of virtual network
:type vnet_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: list of :class:`VnetRoute <azure.mgmt.web.models.VnetRoute>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'vnetName': self._serialize.url("vnet_name", vnet_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[VnetRoute]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_route_for_vnet(
self, resource_group_name, name, vnet_name, route_name, custom_headers=None, raw=False, **operation_config):
"""Gets a specific route associated with a vnet, in an app service plan.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
:param vnet_name: Name of virtual network
:type vnet_name: str
:param route_name: Name of the virtual network route
:type route_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: list of :class:`VnetRoute <azure.mgmt.web.models.VnetRoute>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'vnetName': self._serialize.url("vnet_name", vnet_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 404]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[VnetRoute]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update_vnet_route(
self, resource_group_name, name, vnet_name, route_name, route, custom_headers=None, raw=False, **operation_config):
"""Creates a new route or updates an existing route for a vnet in an app
service plan.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
:param vnet_name: Name of virtual network
:type vnet_name: str
:param route_name: Name of the virtual network route
:type route_name: str
:param route: The route object
:type route: :class:`VnetRoute <azure.mgmt.web.models.VnetRoute>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VnetRoute <azure.mgmt.web.models.VnetRoute>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'vnetName': self._serialize.url("vnet_name", vnet_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(route, 'VnetRoute')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 400, 404]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VnetRoute', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete_vnet_route(
self, resource_group_name, name, vnet_name, route_name, custom_headers=None, raw=False, **operation_config):
"""Deletes an existing route for a vnet in an app service plan.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
:param vnet_name: Name of virtual network
:type vnet_name: str
:param route_name: Name of the virtual network route
:type route_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: object
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'vnetName': self._serialize.url("vnet_name", vnet_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 404]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('object', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_vnet_route(
self, resource_group_name, name, vnet_name, route_name, route, custom_headers=None, raw=False, **operation_config):
"""Creates a new route or updates an existing route for a vnet in an app
service plan.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
:param vnet_name: Name of virtual network
:type vnet_name: str
:param route_name: Name of the virtual network route
:type route_name: str
:param route: The route object
:type route: :class:`VnetRoute <azure.mgmt.web.models.VnetRoute>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VnetRoute <azure.mgmt.web.models.VnetRoute>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'vnetName': self._serialize.url("vnet_name", vnet_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(route, 'VnetRoute')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 400, 404]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VnetRoute', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_server_farm_vnet_gateway(
self, resource_group_name, name, vnet_name, gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets the vnet gateway.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of the App Service Plan
:type name: str
:param vnet_name: Name of the virtual network
:type vnet_name: str
:param gateway_name: Name of the gateway. Only the 'primary' gateway
is supported.
:type gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VnetGateway <azure.mgmt.web.models.VnetGateway>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'vnetName': self._serialize.url("vnet_name", vnet_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VnetGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_server_farm_vnet_gateway(
self, resource_group_name, name, vnet_name, gateway_name, connection_envelope, custom_headers=None, raw=False, **operation_config):
"""Updates the vnet gateway.
:param resource_group_name: The resource group
:type resource_group_name: str
:param name: The name of the App Service Plan
:type name: str
:param vnet_name: The name of the virtual network
:type vnet_name: str
:param gateway_name: The name of the gateway. Only 'primary' is
supported.
:type gateway_name: str
:param connection_envelope: The gateway entity.
:type connection_envelope: :class:`VnetGateway
<azure.mgmt.web.models.VnetGateway>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VnetGateway <azure.mgmt.web.models.VnetGateway>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'vnetName': self._serialize.url("vnet_name", vnet_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(connection_envelope, 'VnetGateway')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VnetGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_server_farm_sites(
self, resource_group_name, name, skip_token=None, filter=None, top=None, custom_headers=None, raw=False, **operation_config):
"""Gets list of Apps associated with an App Service Plan.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
        :param skip_token: Skip token for paging through web apps in a list.
         If specified, the resulting list contains web apps starting from
         (and including) the skipToken; otherwise it contains web apps from
         the start of the list
:type skip_token: str
:param filter: Supported filter: $filter=state eq running. Returns
only web apps that are currently running
:type filter: str
:param top: List page size. If specified, results are paged.
:type top: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`SitePaged <azure.mgmt.web.models.SitePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/sites'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'str')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.SitePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.SitePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def reboot_worker_for_server_farm(
self, resource_group_name, name, worker_name, custom_headers=None, raw=False, **operation_config):
"""Submit a reboot request for a worker machine in the specified server
farm.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of server farm
:type name: str
:param worker_name: Name of worker machine, typically starts with RD
:type worker_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: object
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/workers/{workerName}/reboot'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'workerName': self._serialize.url("worker_name", worker_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('object', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def restart_sites_for_server_farm(
self, resource_group_name, name, soft_restart=None, custom_headers=None, raw=False, **operation_config):
"""Restarts web apps in a specified App Service Plan.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of App Service Plan
:type name: str
:param soft_restart: Soft restart applies the configuration settings
and restarts the apps if necessary. Hard restart always restarts and
reprovisions the apps
:type soft_restart: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: object
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/restartSites'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if soft_restart is not None:
query_parameters['softRestart'] = self._serialize.query("soft_restart", soft_restart, 'bool')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('object', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_server_farm_operation(
self, resource_group_name, name, operation_id, custom_headers=None, raw=False, **operation_config):
"""Gets a server farm operation.
:param resource_group_name: Name of resource group
:type resource_group_name: str
:param name: Name of server farm
:type name: str
        :param operation_id: Id of server farm operation
:type operation_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ServerFarmWithRichSku
<azure.mgmt.web.models.ServerFarmWithRichSku>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/operationresults/{operationId}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServerFarmWithRichSku', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
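

# --- Usage sketch (not part of the generated file) -----------------------------
# ServerFarmsOperations is normally reached through an attribute of the enclosing
# service client; the exact attribute name is not shown in this file, so the
# helper below simply takes the operations instance directly.
def _example_list_server_farms(server_farms_operations, resource_group_name):
    """Print the name of every App Service Plan in the given resource group."""
    for plan in server_farms_operations.get_server_farms(resource_group_name):
        print(plan.name)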
|
import os

# 'Descargas' is the Spanish name of the downloads directory; the path is kept as-is.
DOWNLOADS = os.path.join(os.environ['HOME'], 'Descargas')
print(f'Directory: {DOWNLOADS}')
for file in os.listdir(DOWNLOADS):
    if os.path.isfile(os.path.join(DOWNLOADS, file)):
        print(file)
|
from sprite import Sprite
import numpy
def get_sprites(s):
    """Convert a dict of name -> ASCII-art block into a dict of Sprite objects."""
    d = {}
mapping = {'.': 0,
'x': 1}
for name, block in s.items():
name = name.strip()
block = block.strip()
if not block:
continue
lines = block.split('\n')
desc = []
        for line in lines:
            # Materialize each row as a list of 0/1 values; a bare map() would be a
            # lazy iterator on Python 3, and strip() drops any stray indentation.
            desc.append([mapping.get(c, 0) for c in line.strip()])
d[name] = Sprite(name, patch=desc)
return d
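

# Usage sketch (not part of the original file): build Sprite objects from the
# ASCII-art descriptions below, e.g.
#   sprites = get_sprites(spritedesc)
#   blip = sprites['BLIP']   # the 3x3 plus-shaped sprite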
spritedesc = {
"BLIP":
"""
.x.
xxx
.x.
""",
"CROSS":
"""
.x.
.x.
xxx
.x.
.x.
""",
"CATERPILLAR":
"""
.xx.
xxxx
.xx.
xxxx
.xx.
xxxx
.xx.
xxxx
.xx.
xxxx
.xx.
""",
"SPIDER":
"""
..x.....x..
..x.....x..
...x...x...
xx..x.x..xx
..xxxxxxx..
....xxx....
..xxxxxxx..
xx..x.x..xx
...x...x...
..x.....x..
..x.....x..
""",
"MINIBUG":
"""
x.x
.x.
xxx
.x.
x.x
""",
"BALL":
"""
.xx.
xxxx
xxxx
.xx.
""",
"EMPTYBALL":
"""
.xx.
x..x
x..x
.xx.
""",
"ROACH":
"""
.xx....xx.
x..x..x..x
....xx....
..xxxxxx..
.x.xxxx.x.
..xxxxxx..
.x.xxxx.x.
..xxxxxx..
.x..xx..x.
""",
"ANT":
"""
x...x
.x.x.
..x..
.xxx.
x.x.x
.xxx.
x.x.x
.xxx.
x.x.x
""",
"X":
"""
x.x
.x.
x.x
""",
"SUPERX":
"""
xx...xx
xx...xx
..x.x..
...x...
..x.x..
xx...xx
xx...xx
""",
"FLAKE":
"""
.x.x.x.
xx...xx
..x.x..
x..x..x
..x.x..
xx...xx
.x.x.x.
""",
"SQUARE":
"""
xxx
x.x
xxx
""",
"HELIX":
"""
..x....
..x....
..xxxxx
..x.x..
xxxxx..
....x..
....x..
""",
"SIERPINSKI":
"""
...x...
..x.x..
.x.x.x.
x.x.x.x
""",
"DIAMOND":
"""
.x.
x.x
x.x
.x.
""",
"LADYBUG":
"""
.x.x.
..x..
xx.xx
.x.x.
x.x.x
""",
"CENTIPEDE":
"""
.x...x.
x.x.x.x
...x...
.xxxxx.
x..x..x
.xxxxx.
x..x..x
.xxxxx.
x..x..x
.xxxxx.
x..x..x
.xxxxx.
x..x..x
.xxxxx.
x..x..x
.xx.xx.
x.....x
""",
"LINE":
"""
x
x
x
""",
"DIAGONAL":
"""
x..
.x.
..x
""",
"SMALLBALL":
"""
.x.
x.x
.x.
""",
"S":
"""
.xx.
x..x
.x..
..x.
x..x
.xx.
""",
"FATBUG":
"""
x...x
.xxx.
.x.x.
.xxx.
.x.x.
.xxx.
x...x
""",
"C":
"""
xx
x.
xx
""",
"SPIRAL":
"""
xxxxxx
x....x
x.xx.x
x.x..x
x.xxxx
""",
"DRAGONFLY":
"""
..x.
.x..
x.x.
...x
""",
"WALKER":
"""
..x.x.
...xxx
x.xxx.
.xxx.x
xxx...
.x.x..
""",
"CRAWLER":
"""
....x.x.
.....xxx
..x.xxx.
...xxx.x
x.xxx...
.xxx.x..
xxx.....
.x.x....
""",
"ANCHOR":
"""
..x..
.xxx.
..x..
x.x.x
.xxx.
..x..
""",
"SKULL":
"""
.xxx.
x.x.x
x.x.x
.xxx.
..x..
""",
"ASTERISK":
"""
..x..
x.x.x
.xxx.
x.x.x
..x..
""",
"CANDY":
"""
...x.
...xx
..x..
xx...
.x...
""",
"H":
"""
x.x
xxx
x.x
""",
"DUMBBELL":
"""
x..x
xxxx
x..x
""",
"FLYBOX":
"""
x.xxx.x
.xx.xx.
..xxx..
""",
"INVADER2":
"""
...xx...
..xxxx..
.xxxxxx.
xx.xx.xx
xxxxxxxx
.x.xx.x.
x......x
.x....x.
""",
"INVADER2":
"""
...xx...
..xxxx..
.xxxxxx.
xx.xx.xx
xxxxxxxx
..x..x..
.x.xx.x.
x.x..x.x
""",
"INVADER3":
"""
..x.....x..
...x...x...
..xxxxxxx..
.xx.xxx.xx.
xxxxxxxxxxx
x.xxxxxxx.x
x.x.....x.x
...xx.xx...
""",
"UFO":
"""
......xxxx......
...xxxxxxxxxx...
..xxxxxxxxxxxx..
.xx.xx.xx.xx.xx.
xxxxxxxxxxxxxxxx
..xxx..xx..xxx..
...x........x...
""",
"PALMTREE":
"""
.x.x.
x.x.x
.xxx.
x.x.x
..x..
""",
"HOUSE":
"""
..x..
.xxx.
xxxxx
xx.xx
xx.xx
""",
"CAR":
"""
.xxx.
xxxxx
.x.x.
""",
"PUPPYFACE":
"""
.xxx.
x.x.x
.x.x.
..x..
""",
"CHERRIES":
"""
..x......
.x.x.....
.x.x.....
.x..x....
.x...xxx.
.xx..xxxx
xxxx.xxxx
xxxx..xx.
.xx......
""",
"W":
"""
x.....x
x..x..x
.x.x.x.
..xxx..
""",
"FLOWER":
"""
.x.x.
xx.xx
..x..
xx.xx
.x.x.
""",
"HEART":
"""
.x...x.
xxx.xxx
xxxxxxx
.xxxxx.
..xxx..
...x...
""",
"CANADA":
"""
....x....
...xxx...
.x.xxx.x.
xxxxxxxxx
.xxxxxxx.
..xxxxx..
.xxxxxxx.
....x....
""",
"TETRISL":
"""
x.
x.
xx
""",
"TETRISJ":
"""
.x
.x
xx
""",
"TETRISS":
"""
.xx
xx.
""",
"TETRISZ":
"""
xx.
.xx
""",
"TETRIST":
"""
xxx
.x.
""",
"TETRISO":
"""
xx
xx
""",
"TETRISI":
"""
x
x
x
x
""",
"CORNER":
"""
x.
xx
""",
"INFINITE":
"""
.x.x.
x.x.x
x.x.x
.x.x.
""",
"COMB":
"""
.xxxxx.
x.x.x.x
""",
"KEY":
"""
.xxx.
x...x
x...x
.xxx.
..x..
..x..
.xx..
..x..
.xx..
""",
"BOTTLE":
"""
.x.
.x.
xxx
xxx
xxx
xxx
""",
"PI":
"""
xxxxx
.x.x.
.x.x.
""",
"STAIRS":
"""
x..
xx.
xxx
""",
"Y":
"""
x...x
.x.x.
..x..
..x..
""",
"ANGEL":
"""
....x..
.xx.xx.
.xxxxxx
..x.x..
xxxxx..
.xx....
..x....
""",
"GULL":
"""
.xx.xx.
x..x..x
""",
"JOYPAD":
"""
..xxx..
..x.x..
xxxxxxx
x.x.x.x
xxxxxxx
..x.x..
..xxx..
""",
"GLIDER":
"""
.x.
..x
xxx
""",
"CRAB":
"""
.xxxx.
x.xx.x
.xxxx.
x....x
.x..x.
""",
"PENTF":
"""
.xx
xx.
.x.
""",
"PENTF2":
"""
xx.
.xx
.x.
""",
"PENTI":
"""
x
x
x
x
x
""",
"PENTL":
"""
x.
x.
x.
xx
""",
"PENTJ":
"""
.x
.x
.x
xx
""",
"PENTN":
"""
.x
.x
xx
x.
""",
"PENTN2":
"""
x.
x.
xx
.x
""",
"PENTP":
"""
xx
xx
x.
""",
"PENTQ":
"""
xx
xx
.x
""",
"PENTT":
"""
xxx
.x.
.x.
""",
"PENTU":
"""
x.x
xxx
""",
"PENTV":
"""
x..
x..
xxx
""",
"PENTW":
"""
x..
xx.
.xx
""",
"PENTX":
"""
.x.
xxx
.x.
""",
"PENTY":
"""
.x
xx
.x
.x
""",
"PENTY2":
"""
x.
xx
x.
x.
""",
"PENTZ":
"""
xx.
.x.
.xx
""",
"PENTS":
"""
.xx
.x.
xx.
"""
}
sprites_db = get_sprites(spritedesc)
__doc__ ="""
``arcade_universe.sprites`` contains one field, ``sprites_db`` that maps a name to a
:class:`Sprite <spriteland.sprite.Sprite>` object. The supported sprites, along with
their names, are as follows (<space> = 0 and x = 1):
""" #+ "\n".join([" " + line for line in spritedesc.split("\n")])
for name, sprite in sorted(sprites_db.iteritems()):
name = name.lower()
__doc__ += ".. _sprite_%s:\n\n" % name
__doc__ += ".. parsed-literal::\n\n"
__doc__ += "\n".join(" " + line for line in str(sprite).split("\n"))
__doc__ += "\n\n"
# PYRAMID
# x.x.x
# x.x..
# x.xxx
# x....
# xxxxx
#import pprint as pp
#pp.pprint(sprites_db)
#sprites = get_sprites(spritedesc)
#b = sprites['PENTY2']
#print b
# print b.rotate(90)
# print b.rotate(180)
# print b.rotate(270)
# print b.scale(3)
# print b.scale(3, 1)
# print b.hflip()
# print b.vflip()
|
import sass
sass.compile(dirname=('assets/sass', 'assets/css'), output_style='expanded')
|
# -*- coding: utf-8 -*-
"""
@author: Bernd Porr, mail@berndporr.me.uk
Plots the frequency spectrum of the 1st selected channel in
Attys scope
"""
import socket
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import threading
# update rate in ms
updateRate = 1000
# Title for the Channel:
titleChannel = "Frequency spectrum"
# From data Column:
channel = 7
# minimal frequency detectable in Hz
minF = 1000.0 / updateRate
# socket connection to attys_scope
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
listen_addr = ("",65000)
s.bind(listen_addr)
f = s.makefile()
#That's our ringbuffer which accumulates the samples
#It's emptied every time when the plot window below
#does a repaint
ringbuffer = []
# for the thread below
doRun = True
# This reads the data from the socket in an endless loop
# and stores the data in a buffer
def readSocket():
global ringbuffer
while doRun:
# check if data is available
data = f.readline()
values = np.array(data.split(','),dtype=np.float32)
ringbuffer.append(values)
# start reading data from socket
t = threading.Thread(target=readSocket)
t.start()
# now let's plot the data
fig, ax = plt.subplots(1,1)
ax.set_xlim(minF,125)
ax.set_xlabel('')
ax.set_ylabel('Amplitude/V')
ax.set_xlabel('Frequency/Hz')
ax.set_title(titleChannel)
# empty axes
e = np.array([1,2])
line, = ax.semilogx(e,e)
# called periodically by FuncAnimation to redraw the spectrum
def update(data):
global ringbuffer
global channel
global line
global ax, fig
# axis
spectrum = np.fft.rfft(np.array(ringbuffer)[:,channel])
# absolute value
spectrum = np.abs(spectrum)/len(spectrum)
spectrum[0] = 0
line.set_data(np.linspace(0,125,len(spectrum)), spectrum)
# set new max
ax.set_ylim(0,spectrum.max()*1.2)
ringbuffer = []
# return the line
return line
# start the animation
ani = animation.FuncAnimation(fig, update, interval=updateRate)
# show it
plt.show()
# stop the thread which reads the data
doRun = False
# wait for it to finish
t.join()
# close the file and socket
f.close()
s.close()
print("finished")
|
from collections import OrderedDict
from egosplit.benchmarks.data_structures.algorithms import EgoSplitAlgorithm
from egosplit.benchmarks.execution.cleanup import CleanUpConfig
from egosplit.external import partitionInfomap, partitionLeiden
from networkit.community import PLPFactory, PLMFactory, LPPotts, LouvainMapEquationFactory
class EgoSplitClusteringAlgorithmsConfig:
@staticmethod
def get(ego_part_algos):
partition_algos = OrderedDict()
if ego_part_algos in ('local', 'global'):
partition_algos['PLP'] = [PLPFactory(1, 20)]
partition_algos['PLM'] = [PLMFactory(True, 1.0, 'none randomized')]
partition_algos['Potts'] = [lambda g: LPPotts(g, 0.1, 1, 20).run().getPartition()]
partition_algos['Infomap'] = [lambda g: partitionInfomap(g)]
partition_algos['Surprise'] = [lambda g: partitionLeiden(g, 'surprise')]
partition_algos['Leiden'] = [lambda g: partitionLeiden(g, 'modularity')]
partition_algos['LM-Map'] = [LouvainMapEquationFactory(True)]
new_p_algos = {}
for name, p_algos in partition_algos.items():
if ego_part_algos == 'local':
new_p_algos[name + ' + LM-Map'] = [p_algos[0],
LouvainMapEquationFactory(True)]
if ego_part_algos == 'global':
new_p_algos['PLM + ' + name] = [PLMFactory(True, 1.0, 'none randomized'),
p_algos[0]]
partition_algos = new_p_algos
if ego_part_algos == 'best':
partition_algos['standard'] = []
if ego_part_algos == 'PLM_LM-Map':
partition_algos['PLM + LM-Map'] = [PLMFactory(True, 1.0, 'none randomized'),
LouvainMapEquationFactory(True)]
if ego_part_algos == 'two_best':
partition_algos['Leiden + LM-Map'] = [lambda g: partitionLeiden(g, 'modularity'),
LouvainMapEquationFactory(True)]
partition_algos['LM-Map + LM-Map'] = [LouvainMapEquationFactory(True),
LouvainMapEquationFactory(True)]
if ego_part_algos == 'Leiden/Infomap + Infomap':
partition_algos['Leiden + Infomap'] = [lambda g: partitionLeiden(g, 'modularity'),
lambda g: partitionInfomap(g)]
partition_algos['Infomap + Infomap'] = [lambda g: partitionInfomap(g),
lambda g: partitionInfomap(g)]
if ego_part_algos == 'leiden local':
partition_algos['Leiden + Infomap'] = [lambda g: partitionLeiden(g, 'modularity'),
lambda g: partitionInfomap(g)]
if ego_part_algos == 'fast':
partition_algos['PLP + PLM'] = [PLPFactory(1, 20),
PLMFactory(True, 1.0, 'none')]
if ego_part_algos == 'test':
partition_algos['PLM + LM-Map'] = [PLMFactory(True, 1.0, 'none'), LouvainMapEquationFactory(True)]
return partition_algos
class EgoSplitParameterConfig:
@staticmethod
def get(ego_parameter_config):
ego_parameters = OrderedDict()
simple_config = {
'Extend EgoNet Strategy': 'None',
'connectPersonas': 'Yes',
'limitExtensionByConductance': 'No',
'Cleanup': 'No',
}
edge_scores_config = {
**simple_config,
'SimpleExtension': 'Yes',
'Extend EgoNet Strategy': 'Edges',
'Edges Score Strategy': 'Edges pow 2 div Degree',
}
significance_scores_config = {
**simple_config,
'SimpleExtension': 'No',
'Extend EgoNet Strategy': 'Significance',
'Significance Base Extend': 'None',
'maxSignificance': 0.1,
'sortGroups': 'Significance',
'useSigMemo': 'No',
'minEdgesToGroupSig': 1,
'maxGroupsConsider': 99,
'secondarySigExtRounds': 99,
'signMerge': 'Yes',
'Extend and Partition Iterations': 3,
'onlyCheckSignOfMaxCandidates': 'Yes',
'Check Candidates Factor': 10,
'onlyUpdatedCandidates': 'Yes',
}
if 'best' in ego_parameter_config:
ego_parameters[''] = {} # Default parameters should be the best
if 'CleanupMinOverlap' in ego_parameter_config:
for merge in ['Yes', 'No']:
for overlap in [0.1, 0.25, 0.5]:
name = 'Merge={} Overlap={}'.format(merge, overlap)
ego_parameters[name] = {
'CleanupMerge': merge,
'CleanupMinOverlap': overlap
}
if 'test' in ego_parameter_config:
ego_parameters['NoMerge'] = {
**edge_scores_config,
'Cleanup': 'Yes',
'CleanupMerge': 'No',
}
ego_parameters['Merge'] = {
**edge_scores_config,
'Cleanup': 'Yes',
'CleanupMerge': 'Yes',
}
if 'no-extend' in ego_parameter_config:
ego_parameters['No Extension'] = simple_config
if 'edges' in ego_parameter_config:
ego_parameters['EdgesScore'] = {
**edge_scores_config,
}
if 'steps' in ego_parameter_config:
ego_parameters['Base'] = {
**simple_config,
'connectPersonas': 'No',
}
ego_parameters['EdgesScore'] = {
**edge_scores_config,
'connectPersonas': 'No',
}
ego_parameters['E + Spanning'] = {
**edge_scores_config,
}
ego_parameters['E + S + Cleanup'] = {
**edge_scores_config,
'Cleanup': 'Yes',
}
if 'edges-score' in ego_parameter_config:
for score in ['Edges', 'Edges div Degree', 'Edges pow 2 div Degree', 'Random', 'Significance']:
name = 'Extend: {}'.format(score)
ego_parameters[name] = {
**edge_scores_config,
'Edges Score Strategy': score,
}
if 'edges-significance' in ego_parameter_config:
for score in ['Edges pow 2 div Degree', 'Random', 'Significance']:
name = 'Extend: {}'.format(score)
ego_parameters[name] = {
**edge_scores_config,
'Edges Score Strategy': score,
}
if 'edges-factor' in ego_parameter_config:
for factor in [1, 2, 3, 5, 10, 20]:
name = r'$\alpha = {}$'.format(factor)
ego_parameters[name] = {
**edge_scores_config,
'Maximum Extend Factor': factor,
}
if 'sig-merge' in ego_parameter_config:
for merge in [False, True]:
name = 'Single + Merged Clusters' if merge else 'Single Clusters'
ego_parameters[name] = {
**significance_scores_config,
'signMerge': 'Yes' if merge else 'No',
'onlyCheckSignOfMaxCandidates': 'No',
'secondarySigExtRounds': 0,
'Extend and Partition Iterations': 1,
}
if 'sig-max-candidates' in ego_parameter_config:
for max_factor in [1, 2, 3, 5, 10, 20, 10000]:
name = r'$\gamma = {}$'.format(max_factor)
if max_factor == 10000:
name = 'All candidates'
ego_parameters[name] = {
**significance_scores_config,
'onlyCheckSignOfMaxCandidates': 'Yes',
'Check Candidates Factor': max_factor,
'secondarySigExtRounds': 0,
'Extend and Partition Iterations': 1,
}
if 'sig-ext-iter' in ego_parameter_config:
for iterations in [0, 1, 2, 3, 5, 10, 100]:
name = '{} Iteration{}'.format(iterations, '' if iterations == 1 else 's')
ego_parameters[name] = {
**significance_scores_config,
# 'onlyCheckSignOfMaxCandidates': 'No',
'secondarySigExtRounds': iterations,
'onlyUpdatedCandidates': 'No',
'Extend and Partition Iterations': 1,
}
if 'sig-check-updated' in ego_parameter_config:
for updated in [False, True]:
name = 'Only Improved' if updated else 'All'
ego_parameters[name] = {
**significance_scores_config,
'onlyUpdatedCandidates': 'Yes' if updated else 'No',
# 'onlyCheckSignOfMaxCandidates': 'No',
'Extend and Partition Iterations': 1,
}
if 'sig-cluster-iter' in ego_parameter_config:
for iterations in [1, 2, 3, 5, 8]:
name = '$I_c$ = {}'.format(iterations)
ego_parameters[name] = {
**significance_scores_config,
'Extend and Partition Iterations': iterations,
}
if 'sig-mem' in ego_parameter_config:
for memoize in [True, False]:
name = 'Memoize' if memoize else 'Calculate'
ego_parameters[name] = {
**significance_scores_config,
'useSigMemo': 'Yes' if memoize else 'No',
}
if 'extend' in ego_parameter_config:
ego_parameters['EdgesScore'] = {
**edge_scores_config,
}
ego_parameters['Significance'] = {
**significance_scores_config,
}
if 'connect-persona' in ego_parameter_config:
ego_parameters['EdgesScore | No Connection'] = {
**edge_scores_config,
'connectPersonas': 'No',
}
ego_parameters['EdgesScore | Max Spanning Unweighted'] = {
**edge_scores_config,
'connectPersonas': 'Yes',
'connectPersonasStrat': 'spanning',
'normalizePersonaCut': 'No',
'normalizePersonaWeights': 'unweighted',
}
ego_parameters['EdgesScore | All Density Max Weight 1'] = {
**edge_scores_config,
'connectPersonas': 'Yes',
'connectPersonasStrat': 'all',
'normalizePersonaCut': 'density',
'normalizePersonaWeights': 'max1',
}
ego_parameters['EdgesScore | All Unweighted'] = {
**edge_scores_config,
'connectPersonas': 'Yes',
'connectPersonasStrat': 'all',
'normalizePersonaCut': 'No',
'normalizePersonaWeights': 'unweighted',
}
return ego_parameters
def get_ego_algos(ego_part_algos, ego_parameter_config, clean_up_set, store_ego_nets):
if not ego_part_algos or not ego_parameter_config:
return []
part_algos = EgoSplitClusteringAlgorithmsConfig.get(ego_part_algos)
ego_parameters = EgoSplitParameterConfig.get(ego_parameter_config)
for parameter_set in ego_parameters.values():
parameter_set['storeEgoNet'] = 'Yes' if store_ego_nets else 'No'
clean_ups = CleanUpConfig.get_clean_up_set(clean_up_set)
algos = create_egosplit_algorithms(part_algos, ego_parameters, clean_ups)
return algos
def create_egosplit_algorithms(partition_algos, ego_parameters, clean_ups):
algos = []
i = 0
for part_name in partition_algos:
for para_name, parameters in ego_parameters.items():
name = '{} {}{}'.format('Ego({:03.0f})'.format(i), part_name,
(' | ' + para_name) if para_name else '')
algo = EgoSplitAlgorithm(
name,
parameters,
*partition_algos[part_name]
)
algos.append((algo, clean_ups))
i += 1
return algos
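# Usage sketch (added; the argument values are illustrative, not taken from
# this module): get_ego_algos() expands every (clustering algorithm, parameter
# set) combination into an EgoSplitAlgorithm paired with the configured
# clean-up procedures, e.g.
#     algos = get_ego_algos('fast', ['no-extend'], clean_up_set=..., store_ego_nets=False)
#     for algo, clean_ups in algos:
#         ...  # run each configuration in the benchmark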
|
from torch import nn
from torch.nn import functional as F
def icnet_resnet18(in_channels, out_channels):
from neural.models.classification.resnet import ResNet, resnet18
backbone = resnet18(in_channels, 1)
ResNet.replace_stride_with_dilation(backbone, output_stride=8)
features = backbone.features
features.add_module('spp', PyramidPoolingModule())
return ICNet(
ICNet_Encoder(in_channels, nn.ModuleList([features[:3], features[3:]])),
ICNet_Head((64, 128, 512), 128),
nn.Conv2d(128, out_channels, 1),
)
def icnet_resnet34(in_channels, out_channels):
from neural.models.classification.resnet import ResNet, resnet34
backbone = resnet34(in_channels, 1)
ResNet.replace_stride_with_dilation(backbone, output_stride=8)
features = backbone.features
features.add_module('spp', PyramidPoolingModule())
return ICNet(
ICNet_Encoder(in_channels, nn.ModuleList([features[:3], features[3:]])),
ICNet_Head((64, 128, 512), 128),
nn.Conv2d(128, out_channels, 1),
)
def icnet_resnet50(in_channels, out_channels):
from neural.models.classification.resnet import ResNet, resnet50
backbone = resnet50(config='imagenet')
ResNet.replace_stride_with_dilation(backbone, output_stride=8)
features = backbone.features
features.add_module('spp', PyramidPoolingModule())
return ICNet(
ICNet_Encoder(in_channels, nn.ModuleList([features[:3], features[3:]])),
ICNet_Head((64, 512, 2048), 128),
nn.Conv2d(128, out_channels, 1),
)
class ICNet(nn.Module):
def __init__(self, encoder, head, classifier):
super().__init__()
self.encoder = encoder
self.head = head
self.classifier = classifier
def forward(self, input):
x = self.encoder(input)
x = self.head(x)
x = self.classifier(x)
return F.interpolate(x, size=input.shape[2:], mode='bilinear', align_corners=True)
class ICNet_Encoder(nn.Module):
def __init__(self, in_channels, backbone):
super().__init__()
self.spatial = nn.Sequential(
ConvBNReLU(in_channels, 32, 3, padding=1, stride=2),
ConvBNReLU(32, 32, 3, padding=1, stride=2),
ConvBNReLU(32, 64, 3, padding=1, stride=2),
)
self.context = backbone
def forward(self, input):
x2 = F.avg_pool2d(input, 2)
x4 = F.avg_pool2d(x2, 2)
return (
self.spatial(input),
self.context[0](x2),
self.context[1](self.context[0](x4)),
)
class ICNet_Head(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
channels1, channels2, channels4 = in_channels
self.cff12 = CascadeFeatureFusion((channels1, out_channels), out_channels)
self.cff24 = CascadeFeatureFusion((channels2, channels4), out_channels)
def forward(self, input):
x1, x2, x4 = input
x = self.cff24(x2, x4)
x = self.cff12(x1, x)
return x
class CascadeFeatureFusion(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.highres = ConvBNReLU(in_channels[0], out_channels, 3, padding=2, dilation=2, include_relu=False)
self.lowres = ConvBNReLU(in_channels[1], out_channels, 3, 1, include_relu=False)
def forward(self, highres, lowres):
lowres = F.interpolate(lowres, size=highres.shape[2:], mode='bilinear', align_corners=True)
lowres = self.lowres(lowres)
highres = self.highres(highres)
return F.relu(lowres + highres)
class PyramidPoolingModule(nn.Module):
def __init__(self, pyramids=[1, 2, 3, 6]):
super().__init__()
self.pyramids = pyramids
def forward(self, input):
features = input
for pyramid in self.pyramids:
x = F.adaptive_avg_pool2d(input, output_size=pyramid)
x = F.interpolate(x, size=input.shape[2:], mode='bilinear', align_corners=True)
features = features + x
return features
def ConvBNReLU(in_channels, out_channels, kernel_size, padding=0, stride=1, dilation=1, include_relu=True):
layers = [
nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding,
stride=stride, dilation=dilation, bias=False),
nn.BatchNorm2d(out_channels),
]
if include_relu:
layers += [nn.ReLU(inplace=True)]
return nn.Sequential(*layers)
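# Usage sketch (added; shapes and channel counts are illustrative):
#     import torch
#     model = icnet_resnet18(in_channels=3, out_channels=19)
#     logits = model(torch.randn(1, 3, 512, 1024))
#     # forward() upsamples the prediction back to the input resolution,
#     # so logits has shape (1, 19, 512, 1024).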
|
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
"""Generic component build functions."""
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
import dogpile.cache
from requests.exceptions import ConnectionError
import six
from module_build_service.common import conf, log, models
from module_build_service.common.models import BUILD_STATES
from module_build_service.common.retry import retry
from module_build_service.resolver import GenericResolver
"""
Example workflows - helps to see the difference in implementations
Koji workflow
1) create tag, and build-tag
2) create target out of ^tag and ^build-tag
3) run regen-repo to have initial repodata (happens automatically)
4) build module-build-macros which provides "dist" macro
5) tag module-build-macro into buildroot
6) wait for module-build-macro to be available in buildroot
7) build all components from scmurl
8) (optional) wait for selected builds to be available in buildroot
"""
def create_dogpile_key_generator_func(skip_first_n_args=0):
"""
Creates dogpile key_generator function with additional features:
- when models.ModuleBuild is an argument of method cached by dogpile-cache,
the ModuleBuild.id is used as a key. Therefore it is possible to cache
data per particular module build, while normally, it would be per
ModuleBuild.__str__() output, which contains also batch and other data
which changes during the build of a module.
- it is able to skip first N arguments of a cached method. This is useful
when the db.session is part of cached method call, and the caching should
work no matter what session instance is passed to cached method argument.
"""
def key_generator(namespace, fn):
fname = fn.__name__
def generate_key(*arg, **kwarg):
key_template = fname + "_"
for s in arg[skip_first_n_args:]:
if type(s) == models.ModuleBuild:
key_template += str(s.id)
else:
key_template += str(s) + "_"
return key_template
return generate_key
return key_generator
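# Illustrative example (added): with skip_first_n_args=2, a cached call such as
#     default_buildroot_groups(cls, db_session, module_build)
# skips "cls" and "db_session" and, because the remaining argument is a
# models.ModuleBuild, produces the cache key
#     "default_buildroot_groups_" + str(module_build.id)
# which is exactly the key that clear_cache() deletes below.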
class GenericBuilder(six.with_metaclass(ABCMeta)):
"""
External Api for builders
Example usage:
config = module_build_service.common.config.Config()
builder = Builder(module="testmodule-1.2-3", backend="koji", config)
builder.buildroot_connect()
builder.build(artifact_name="bash",
source="https://src.stg.fedoraproject.org/rpms/bash"
"?#70fa7516b83768595a4f3280ae890a7ac957e0c7")
...
# E.g. on some other worker ... just resume buildroot that was initially created
builder = Builder(module="testmodule-1.2-3", backend="koji", config=config)
builder.buildroot_connect()
builder.build(artifact_name="not-bash",
source="https://src.stg.fedoraproject.org/rpms/not-bash"
"?#70fa7516b83768595a4f3280ae890a7ac957e0c7")
# wait until this particular bash is available in the buildroot
builder.buildroot_ready(artifacts=["bash-1.23-el6"])
builder.build(artifact_name="not-not-bash",
source="https://src.stg.fedoraproject.org/rpms/not-not-bash"
"?#70fa7516b83768595a4f3280ae890a7ac957e0c7")
"""
backend = "generic"
backends = {}
# Create region to cache the default_buildroot_groups results.
# We are skipping the caching based on the first two arguments of
# default_buildroot_groups, because they are "self" and db.session
# instance which are different each call we call that method.
default_buildroot_groups_cache = (
dogpile.cache.make_region(function_key_generator=create_dogpile_key_generator_func(2))
.configure("dogpile.cache.memory")
)
@classmethod
def register_backend_class(cls, backend_class):
GenericBuilder.backends[backend_class.backend] = backend_class
@classmethod
def create(cls, db_session, owner, module, backend, config, **extra):
"""
:param db_session: SQLAlchemy session object.
:param owner: a string representing who kicked off the builds
:param module: module_build_service.common.models.ModuleBuild instance.
:param backend: a string representing backend e.g. 'koji'
:param config: instance of module_build_service.common.config.Config
Any additional arguments are optional extras which can be passed along
and are implementation-dependent.
"""
# check if the backend is within allowed backends for the used resolver
resolver = GenericResolver.create(db_session, conf)
if not resolver.is_builder_compatible(backend):
raise ValueError(
"Builder backend '{}' is not compatible with resolver backend '{}'. Check your "
"configuration.".format(backend, resolver.backend)
)
if backend in GenericBuilder.backends:
return GenericBuilder.backends[backend](
db_session=db_session, owner=owner, module=module, config=config, **extra)
else:
raise ValueError("Builder backend='%s' not recognized" % backend)
@classmethod
def create_from_module(cls, db_session, module, config, buildroot_connect=True):
"""
Creates new GenericBuilder instance based on the data from module
and config and connects it to buildroot.
:param db_session: SQLAlchemy database session.
:param module: module_build_service.common.models.ModuleBuild instance.
:param config: module_build_service.common.config.Config instance.
:kwarg buildroot_connect: a boolean that determines if the builder should run
buildroot_connect on instantiation.
"""
components = [c.package for c in module.component_builds]
builder = GenericBuilder.create(
db_session,
module.owner,
module,
config.system,
config,
tag_name=module.koji_tag,
components=components,
)
if buildroot_connect is True:
groups = GenericBuilder.default_buildroot_groups(db_session, module)
builder.buildroot_connect(groups)
return builder
@classmethod
def tag_to_repo(cls, backend, config, tag_name, arch):
"""
:param backend: a string representing the backend e.g. 'koji'.
:param config: instance of module_build_service.common.config.Config
:param tag_name: Tag for which the repository is returned
:param arch: Architecture for which the repository is returned
Returns URL of repository containing the built artifacts for
the tag with particular name and architecture.
"""
if backend in GenericBuilder.backends:
return GenericBuilder.backends[backend].repo_from_tag(config, tag_name, arch)
else:
raise ValueError("Builder backend='%s' not recognized" % backend)
@abstractmethod
def buildroot_connect(self, groups):
"""
This is an idempotent call to create or resume and validate the build
environment. .build() should immediately fail if .buildroot_connect()
wasn't called.
Koji Example: create tag, targets, set build tag inheritance...
"""
raise NotImplementedError()
@abstractmethod
def buildroot_ready(self, artifacts=None):
"""
:param artifacts=None : a list of artifacts supposed to be in the buildroot
(['bash-123-0.el6'])
returns when the buildroot is ready (or contains the specified artifact)
This function is here to ensure that the buildroot (repo) is ready and
contains the listed artifacts if specified.
"""
raise NotImplementedError()
@abstractmethod
def buildroot_add_repos(self, dependencies):
"""
:param dependencies: a list of modules represented as a list of dicts,
like:
[{'name': ..., 'version': ..., 'release': ...}, ...]
Make an additional repository available in the buildroot. This does not
necessarily have to directly install artifacts (e.g. koji), just make
them available.
E.g. the koji implementation of the call uses MBS to get koji_tag
associated with each module dep and adds the tag to $module-build tag
inheritance.
"""
raise NotImplementedError()
@abstractmethod
def buildroot_add_artifacts(self, artifacts, install=False):
"""
:param artifacts: list of artifacts to be available or installed
(install=False) in the buildroot (e.g list of $NEVRAS)
:param install=False: pre-install artifact in the buildroot (otherwise
"just make it available for install")
Example:
koji tag-build $module-build-tag bash-1.234-1.el6
if install:
koji add-group-pkg $module-build-tag build bash
# This forces install of bash into buildroot and srpm-buildroot
koji add-group-pkg $module-build-tag srpm-build bash
"""
raise NotImplementedError()
@abstractmethod
def tag_artifacts(self, artifacts):
"""
:param artifacts: list of artifacts (NVRs) to be tagged
Adds the artifacts to tag associated with this module build.
"""
raise NotImplementedError()
@abstractmethod
def build(self, artifact_name, source):
"""
:param artifact_name : A package name. We can't guess it since macros
in the buildroot could affect it, (e.g. software
collections).
:param source : an SCM URL, clearly identifying the build artifact in a
repository
:return 4-tuple of the form (build task id, state, reason, nvr)
The artifact_name parameter is used in koji add-pkg (and it's actually
the only reason why we need to pass it). We don't really limit source
types. The actual source is usually delivered as an SCM URL from
fedmsg.
Warning: This function must be thread-safe.
Example
.build("bash", "git://someurl/bash#damn") #build from SCM URL
.build("bash", "/path/to/srpm.src.rpm") #build from source RPM
"""
raise NotImplementedError()
@abstractmethod
def cancel_build(self, task_id):
"""
:param task_id: Task ID returned by the build method.
Cancels the build.
"""
raise NotImplementedError()
@abstractmethod
def finalize(self, succeeded=True):
"""
:param succeeded: True if all module builds were successful
:return: None
This method is supposed to be called after all module builds are
finished.
It could be utilized for various purposes such as cleaning or
running additional build-system based operations on top of
finished builds
"""
pass
@classmethod
@abstractmethod
def repo_from_tag(cls, config, tag_name, arch):
"""
:param config: instance of module_build_service.common.config.Config
:param tag_name: Tag for which the repository is returned
:param arch: Architecture for which the repository is returned
Returns URL of repository containing the built artifacts for
the tag with particular name and architecture.
"""
raise NotImplementedError()
@classmethod
def clear_cache(cls, module_build):
"""
Clears the per module build default_buildroot_groups cache.
"""
cls.default_buildroot_groups_cache.delete(
"default_buildroot_groups_" + str(module_build.id))
@classmethod
@retry(wait_on=(ConnectionError))
@default_buildroot_groups_cache.cache_on_arguments()
def default_buildroot_groups(cls, db_session, module):
try:
mmd = module.mmd()
resolver = GenericResolver.create(db_session, conf)
# Resolve default buildroot groups using the MBS, but only for
# non-local modules.
groups = resolver.resolve_profiles(mmd, ("buildroot", "srpm-buildroot"))
groups = {"build": groups["buildroot"], "srpm-build": groups["srpm-buildroot"]}
except ValueError:
reason = "Failed to gather buildroot groups from SCM."
log.exception(reason)
module.transition(
db_session, conf,
state=BUILD_STATES["failed"],
state_reason=reason, failure_type="user")
db_session.commit()
raise
return groups
@abstractmethod
def list_tasks_for_components(self, component_builds=None, state="active"):
"""
:param component_builds: list of component builds which we want to check
:param state: limit the check only for tasks in the given state
:return: list of tasks
This method is supposed to list tasks ('active' by default)
for component builds.
"""
raise NotImplementedError()
@classmethod
def get_built_rpms_in_module_build(cls, mmd):
"""
:param Modulemd mmd: Modulemd to get the built RPMs from.
:return: list of NVRs
"""
raise NotImplementedError()
@classmethod
def get_module_build_arches(cls, module):
"""
:param ModuleBuild module: Get the list of architectures associated with
the module build in the build system.
:return: list of architectures
"""
return GenericBuilder.backends[conf.system].get_module_build_arches(module)
@classmethod
def recover_orphaned_artifact(cls, component_build):
"""
Searches for a complete build of an artifact belonging to the module and sets the
component_build in the MBS database to the found build. This usually returns nothing since
these builds should *not* exist.
:param component_build: a ComponentBuild object
:return: a list of msgs that MBS needs to process
"""
return []
@classmethod
def get_average_build_time(cls, component):
"""
Placeholder function for the builders to report the average time it takes to build the
specified component. If this function is not overridden, then 0.0 is returned.
:param component: a ComponentBuild object
:return: a float of 0.0
"""
return 0.0
@classmethod
def get_build_weights(cls, components):
"""
Returns a dict with component name as a key and float number
representing the overall Koji weight of a component build.
:param list components: List of component names.
:rtype: dict
:return: {component_name: weight_as_float, ...}
"""
return cls.compute_weights_from_build_time(components)
@classmethod
def compute_weights_from_build_time(cls, components, arches=None):
"""
Computes the weights of ComponentBuilds based on average time to build
and list of arches for which the component is going to be built.
This method should be used as a fallback only when KojiModuleBuilder
cannot be used, because the weight this method produces is not 100% accurate.
:param components: List of component names to compute the weight for.
:param arches: List of arches to build for or None. If the value is None,
conf.arches will be used instead.
:rtype: dict
:return: {component_name: weight_as_float, ...}
"""
if not arches:
arches = conf.arches
weights = {}
for component in components:
average_time_to_build = cls.get_average_build_time(component)
# The way `weight` is computed is based on hardcoded weight values
# in kojid.py.
# The weight computed here is not 100% accurate, because there are
# multiple smaller tasks in koji like waitrepo or createrepo and we
# cannot say if they will be executed as part of this component build.
# The weight computed here is used only to limit the number of builds
# and we generally do not care about waitrepo/createrepo weights in MBS.
# 1.5 is what Koji hardcodes as a default weight for BuildArchTask.
weight = 1.5
if not average_time_to_build:
weights[component] = weight
continue
if average_time_to_build < 0:
log.warning(
"Negative average build duration for component %s: %s",
component, str(average_time_to_build),
)
weights[component] = weight
continue
# Increase the task weight by 0.75 for every hour of build duration.
adj = average_time_to_build / ((60 * 60) / 0.75)
# cap the adjustment at +4.5
weight += min(4.5, adj)
# We build for all arches, so multiply the weight by number of arches.
weight = weight * len(arches)
# 1.5 here is hardcoded Koji weight of single BuildSRPMFromSCMTask
weight += 1.5
weights[component] = weight
return weights
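# Worked example of the formula above (added; numbers are illustrative):
# a component with a 2 hour average build time, built for 3 arches, gets
#     weight = (1.5 + min(4.5, 2 * 0.75)) * 3 + 1.5 = 10.5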
|
from blockfrost import BlockFrostApi, ApiError
from blockfrost.api.cardano.scripts import \
ScriptsResponse, \
ScriptResponse, \
ScriptJsonResponse, \
ScriptCBORResponse, \
ScriptRedeemersResponse, \
ScriptDatumResponse
script_hash = "13a3efd825703a352a8f71f4e2758d08c28c564e8dfcce9f77776ad1"
datum_hash = "db583ad85881a96c73fbb26ab9e24d1120bb38f45385664bb9c797a2ea8d9a2d"
def test_scripts(requests_mock):
api = BlockFrostApi()
mock_data = [
{
"script_hash": script_hash
},
{
"script_hash": "e1457a0c47dfb7a2f6b8fbb059bdceab163c05d34f195b87b9f2b30e"
},
{
"script_hash": "a6e63c0ff05c96943d1cc30bf53112ffff0f34b45986021ca058ec54"
}
]
requests_mock.get(f"{api.url}/scripts", json=mock_data)
mock_object = [ScriptsResponse(**data) for data in mock_data]
assert api.scripts() == mock_object
def test_script(requests_mock):
api = BlockFrostApi()
mock_data = {
"script_hash": "13a3efd825703a352a8f71f4e2758d08c28c564e8dfcce9f77776ad1",
"type": "plutus",
"serialised_size": 3119
}
requests_mock.get(f"{api.url}/scripts/{script_hash}", json=mock_data)
mock_object = ScriptResponse(**mock_data)
assert api.script(script_hash=script_hash) == mock_object
def test_script_json(requests_mock):
api = BlockFrostApi()
mock_data = {
"json": {
"type": "atLeast",
"scripts": [
{
"type": "sig",
"keyHash": "654891a4db2ea44b5263f4079a33efa0358ba90769e3d8f86a4a0f81"
},
{
"type": "sig",
"keyHash": "8685ad48f9bebb8fdb6447abbe140645e0bf743ff98da62e63e2147f"
},
{
"type": "sig",
"keyHash": "cb0f3b3f91693374ff7ce1d473cf6e721c7bab52b0737f04164e5a2d"
}
],
"required": 2
}
}
requests_mock.get(f"{api.url}/scripts/{script_hash}/json", json=mock_data)
mock_object = ScriptJsonResponse(**mock_data)
assert api.script_json(script_hash=script_hash) == mock_object
def test_script_cbor(requests_mock):
api = BlockFrostApi()
mock_data = {
"cbor": "4e4d01000033222220051200120011"
}
requests_mock.get(f"{api.url}/scripts/{script_hash}/cbor", json=mock_data)
mock_object = ScriptCBORResponse(**mock_data)
assert api.script_cbor(script_hash=script_hash) == mock_object
def test_script_redeemers(requests_mock):
api = BlockFrostApi()
mock_data = [
{
"tx_hash": "1a0570af966fb355a7160e4f82d5a80b8681b7955f5d44bec0dce628516157f0",
"tx_index": 0,
"purpose": "spend",
"unit_mem": "1700",
"unit_steps": "476468",
"fee": "172033"
}
]
requests_mock.get(f"{api.url}/scripts/{script_hash}/redeemers", json=mock_data)
mock_object = [ScriptRedeemersResponse(**data) for data in mock_data]
assert api.script_redeemers(script_hash=script_hash) == mock_object
def test_script_datum(requests_mock):
api = BlockFrostApi()
mock_data = {
"json_value": {
"int": 42
}
}
requests_mock.get(f"{api.url}/scripts/datum/{datum_hash}", json=mock_data)
mock_object = ScriptDatumResponse(**mock_data)
assert api.script_datum(datum_hash=datum_hash) == mock_object
|
#!/usr/bin/env python
# -*- coding:utf8 -*-
# Power by viekie. 2017-05-19 08:43:35
class Connections(object):
def __init__(self):
self.connections = []
def add_connection(self, connection):
self.connections.append(connection)
def __str__(self):
conn_info = 'connections: %s' % self.connections
return conn_info
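# Usage sketch (added; the connection value is illustrative):
#     conns = Connections()
#     conns.add_connection('db-primary')
#     print(conns)  # -> connections: ['db-primary']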
|
from pyModbusTCP.client import ModbusClient
from pyModbusTCP import utils
import time
SERVER_HOST = "192.168.208.106"
SERVER_PORT = 1502
UNIT_ID = 71
# Open ModBus connection
try:
c = ModbusClient(host=SERVER_HOST, port=SERVER_PORT, unit_id=UNIT_ID, auto_open=True, auto_close=True)
except ValueError:
print("Error with host: {}, port: {} or unit-ID: {} params".format(SERVER_HOST, SERVER_PORT, UNIT_ID))
# Helper to read the state of charge (two registers decoded as one float)
def read_soc(reg):
# Read the current state of charge of the battery
regs = c.read_holding_registers(reg, 2)
# Load content of two registers into a single float value
zregs = utils.word_list_to_long(regs, big_endian=False)
return utils.decode_ieee(*zregs)
soc = read_soc(210)
while (soc > 5):
soc = read_soc(210)
c.write_single_register(1024, 5000)
time.sleep(1)
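# Note (added, hedged): read_soc() combines the two 16-bit holding registers
# starting at `reg` into one 32-bit word (word order controlled by
# big_endian=False) and decodes that word as an IEEE-754 float, so `soc` is a
# plain percentage figure such as 87.5 (illustrative value).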
|
from data_importers.ems_importers import BaseDemocracyCountsCsvImporter
IVC_STATIONCODES = (
"IG36_1",
"IG17_3",
"IG05_1",
"IG26_2",
"IG21_1",
"IG19_3",
"BETHESDA_1",
"STCOLUMBAS_4",
"IG23_1",
"IG23_2",
"IG27_3",
"IG32_1",
"IG30_2",
"IG01_1",
"IG09_1",
"IG30_1",
"IG10_2",
"IG13_1",
"IG12_2",
"IG36_2",
"IG06_1",
"IG29_1",
"IG17_2",
"IG13_2",
"IG03_1",
"IG08_2",
"IG31_2",
"IG02_1",
"IG02_2",
"IG26_1",
"IG11_1",
"IG21_2",
"IG35_1",
"IG33_1",
"IG07_1",
"IG06_3",
"IG27_2",
"IG15_1",
"IG23_3",
"IG33_2",
"IG14_2",
"IG08_1",
"IG19_1",
"IG17_1",
"IG34_1",
"IG24_1",
"IG22_1",
"IG09_2",
"IG28_1",
"IG20_2",
"IG11_2",
"IG16_2",
"IG04_1",
"STCOLUMBAS_3",
"IG03_2",
"IG25_1",
"IG28_2",
"IG12_1",
"IG04_2",
"IG06_2",
"IG30_3",
"IG19_2",
"IG35_2",
"IG20_1",
"IG31_1",
"IG27_1",
"IG14_1",
"IG18_2",
"IG07_2",
"IG10_1",
"IG16_1",
"IG01_2",
"STCOLUMBAS_2",
"STCOLUMBAS_1",
"IG10_3",
"IG22_2",
"IG33_3",
"IG18_1",
)
class Command(BaseDemocracyCountsCsvImporter):
council_id = "IVC"
addresses_name = (
"2021-03-25T10:48:11.214321/Refrew DemocracyClub_PollingDistricts.csv"
)
stations_name = (
"2021-03-25T10:48:11.214321/Refrew DEmocracyClub_PollingStations.csv"
)
elections = ["2021-05-06"]
def address_record_to_dict(self, record):
if record.stationcode not in IVC_STATIONCODES:
return None
return super().address_record_to_dict(record)
def station_record_to_dict(self, record):
if record.stationcode not in IVC_STATIONCODES:
return None
if record.stationcode in [
"IG07_1", # ST JOHN'S CHURCH HALL ST JOHN'S CHURCH HALL
"IG07_2", # ST JOHN'S CHURCH HALL ST JOHN'S CHURCH HALL
]:
record = record._replace(xordinate="")
record = record._replace(yordinate="")
return super().station_record_to_dict(record)
|
from django.contrib import admin
from django.urls import path
from home import views
from django.conf.urls import url
urlpatterns = [
path('', views.home , name="home"),
path('pay', views.payFees , name="pay"),
path('profile', views.profile , name="profile"),
path('result', views.result , name="result"),
path('upload_pic', views.profile_pic_upload , name="Upload"),
path('notifications', views.notifications , name="notifications"),
]
|
"""Tests for ``coffin.template``.
``coffin.template.library``, ``coffin.template.defaultfilters`` and
``coffin.template.defaulttags`` have their own test modules.
"""
def test_template_class():
from coffin.template import Template
from coffin.common import env
# initializing a template directly uses Coffin's Jinja
# environment - we know it does if our tags are available.
t = Template('{% spaceless %}{{ ""|truncatewords }}{% endspaceless %}')
assert t.environment == env
# render can accept a Django context object
from django.template import Context
c = Context()
c.update({'x': '1'}) # update does a push
c.update({'y': '2'})
assert Template('{{x}};{{y}}').render(c) == '1;2'
# [bug] render can handle nested Context objects
c1 = Context(); c2 = Context(); c3 = Context()
c3['foo'] = 'bar'
c2.update(c3)
c1.update(c2)
assert Template('{{foo}}').render(c1) == 'bar'
# There is a "origin" attribute for Django compatibility
assert Template('{{foo}}').origin.name == '<template>'
def test_render_to_string():
# [bug] Test that the values given directly overwrite those that
# already exist in the given context_instance. Due to a bug this
# was previously not the case.
from django.template import Context
from coffin.template.loader import render_to_string
assert render_to_string('render-x.html', {'x': 'new'},
context_instance=Context({'x': 'old'})) == 'new'
# [bug] Test that the values from context_instance actually make it
# into the template.
assert render_to_string('render-x.html',
context_instance=Context({'x': 'foo'})) == 'foo'
# [bug] Call without the optional ``context_instance`` argument works
assert render_to_string('render-x.html', {'x': 'foo'}) == 'foo'
# ``dictionary`` argument may be a Context instance
assert render_to_string('render-x.html', Context({'x': 'foo'})) == 'foo'
# [bug] Both ``dictionary`` and ``context_instance`` may be
# Context objects
assert render_to_string('render-x.html', Context({'x': 'foo'}), context_instance=Context()) == 'foo'
|
#! /usr/bin/env python3
import argparse
import pandas as pd
import logging
import sys
import pbio.utils.fastx_utils as fastx_utils
import pbio.misc.logging_utils as logging_utils
import pbio.misc.parallel as parallel
import pbio.misc.pandas_utils as pandas_utils
logger = logging.getLogger(__name__)
default_num_cpus = 1
default_num_groups = 100
default_num_peptides = 0
default_peptide_separator = '\t'
default_peptide_filter_field = 'PEP'
default_peptide_filter_value = 0.1
def get_match_series(o, peptide):
""" This function extracts relevant information from the orf and peptide
string. It returns the results as a pd.Series.
"""
ret = {'peptide': peptide,
'orf_id': o['orf_id']}
return pd.Series(ret)
def find_matching_orfs(peptide, orfs):
""" This function finds all of the ORFs which include the peptide as an
EXACT SUBSTRING.
Args:
peptide (pd.Series): the peptide to search for
orfs (pd.DataFrame): All of the predicted ORFs in which to search
Returns:
pd.DataFrame: containing the peptide, orf_id and orf_sequence
of all matches.
"""
peptide_seq = peptide['Sequence']
mask_matching = orfs['orf_sequence'].str.contains(peptide_seq)
# short-circuit, when possible
if sum(mask_matching) == 0:
return None
matching_orfs = orfs[mask_matching]
ret = matching_orfs.apply(get_match_series, args=(peptide_seq,), axis=1)
return ret
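# Illustrative example (added): a peptide row whose 'Sequence' is "MKT" matches
# every ORF whose 'orf_sequence' contains "MKT" as an exact substring; each hit
# becomes one row with the columns ['peptide', 'orf_id'] built by
# get_match_series() above.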
def find_matching_orfs_group(peptides, orfs):
""" A helper function to call find_matching_orfs on a pd.GroupBy of peptides.
"""
ret = parallel.apply_df_simple(peptides, find_matching_orfs, orfs)
#progress_bar=True)
ret = [r for r in ret if r is not None]
if len(ret) == 0:
return None
return pd.concat(ret)
def count_matches(peptide_matches):
""" This function counts the number of matches in the given group. It returns
a series containing the orf_id and number of matches.
"""
num_matches = len(peptide_matches)
peptide_matches_str = ";".join(peptide_matches['peptide'])
orf_id = peptide_matches.iloc[0]['orf_id']
ret = {
'num_matches': num_matches,
'peptide_matches': peptide_matches_str,
'orf_id': orf_id
}
return pd.Series(ret)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="This script uses the peptides.txt file from MaxQuant to determine "
"which predicted ORFs have some proteomics evidence.\n\nIt contains "
"some hard-coded field names.")
parser.add_argument('predicted_proteins', help="The (fasta, protein) file of "
"predicted ORFs")
parser.add_argument('peptides', help="The peptides.txt file produced by MaxQuant")
parser.add_argument('out', help="The output (csv.gz) file containing the predicted "
"ORFs and their coverage")
parser.add_argument('--num-cpus', help="The number of CPUs to use for searching",
type=int, default=default_num_cpus)
parser.add_argument('--peptide-filter-field', help="The field to use for filtering "
"the peptides from MaxQuant", default=default_peptide_filter_field)
parser.add_argument('--peptide-filter-value', help="All peptides with a value greater "
"than the filter value will be removed", type=float, default=default_peptide_filter_value)
parser.add_argument('--peptide-separator', help="The separator in the --peptide file",
default=default_peptide_separator)
parser.add_argument('-g', '--num-groups', help="The number of groups into which to split "
"the ORFs. More groups means the progress bar is updated more frequently but incurs "
"more overhead because of the parallel calls.", type=int, default=default_num_groups)
parser.add_argument('--num-peptides', help="If n>0, then only the first n peptide "
"sequences will be used to calculate coverage. This is for testing.", type=int,
default=default_num_peptides)
logging_utils.add_logging_options(parser)
args = parser.parse_args()
logging_utils.update_logging(args)
msg = "[get-orf-peptide-matches]: {}".format(' '.join(sys.argv))
logger.info(msg)
msg = "Reading and filtering peptides"
logger.info(msg)
peptides = pd.read_csv(args.peptides, sep=args.peptide_separator)
mask_filter = peptides[args.peptide_filter_field] < args.peptide_filter_value
peptides = peptides[mask_filter]
peptide_sequences = pd.DataFrame(peptides['Sequence'])
if args.num_peptides > 0:
peptide_sequences = peptide_sequences.head(args.num_peptides)
msg = "Number of filtered peptides: {}".format(len(peptide_sequences))
logger.info(msg)
msg = "Reading predicted ORFs into a data frame"
logger.info(msg)
# TODO: use read iterator
predicted_orfs = fastx_utils.get_read_iterator(args.predicted_proteins)
orf_ids = []
orf_sequences = []
for orf_id, seq in predicted_orfs:
orf_ids.append(orf_id)
orf_sequences.append(seq)
predicted_orfs_df = pd.DataFrame()
predicted_orfs_df['orf_id'] = orf_ids
predicted_orfs_df['orf_sequence'] = orf_sequences
msg = "Searching for matching peptides"
logger.info(msg)
peptide_matches = parallel.apply_parallel_split(peptide_sequences, args.num_cpus,
find_matching_orfs_group, predicted_orfs_df, progress_bar=True, num_groups=args.num_groups)
# filter out the Nones to avoid DataFrame conversion problems
msg = "Joining results back into large data frame"
logger.info(msg)
peptide_matches = [pm for pm in peptide_matches if pm is not None]
peptide_matches = pd.concat(peptide_matches)
# now, we have a data frame of matches (fields: peptide, orf_id)
msg = "Getting peptide coverage of ORFs"
logger.info(msg)
# first, count the matches for each ORF
peptide_matches_groups = peptide_matches.groupby('orf_id')
orf_matches = parallel.apply_parallel_groups(peptide_matches_groups, args.num_cpus,
count_matches, progress_bar=True)
orf_matches = pd.DataFrame(orf_matches)
# then join back on the original list of ORFs to have entries for ORFs
# with no peptide matches
predicted_orf_coverage = pd.merge(predicted_orfs_df, orf_matches, on='orf_id', how="left")
# and patch the holes in the data frame
predicted_orf_coverage = predicted_orf_coverage.fillna(0)
msg = "Writing coverage information to disk"
pandas_utils.write_df(predicted_orf_coverage, args.out, index=False)
if __name__ == '__main__':
main()
|
# [START imports]
import os
import urllib
from datetime import datetime
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
from baseClasses import *
from authModel import *
from authBaseCode import *
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
# [END imports]
# [START LoginPage]
class NewUser(BaseHandler):
def get(self):
params = { 'greeting': 'Enter new user details'}
template = JINJA_ENVIRONMENT.get_template('www/register.html')
self.response.write(template.render(params))
def post(self):
user_name = self.request.get('username')
email = self.request.get('email')
name = self.request.get('name')
password = self.request.get('password')
last_name = self.request.get('lastname')
company = self.request.get('company')
unique_properties = ['email_address']
user_data = self.user_model.create_user(user_name,
unique_properties,
email_address=email, name=name, password_raw=password,
last_name=last_name, company=company, verified=False)
greeting = ""
if not user_data[0]: #user_data is a tuple
greeting = ('Unable to create account: User %s already exists!' % (user_name))
else:
user = user_data[1]
user.put()
greeting = ('Success! Please continue to Login')
params = { 'greeting': greeting }
template = JINJA_ENVIRONMENT.get_template('www/register.html')
self.response.write(template.render(params))
|
__author__ = 'Randall'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pandas as pd
from talibpy import *
def main():
basepath = os.path.dirname(__file__)
filename = os.path.join(basepath, "..", "data", "AAPL_GOOGL_IBM_20140101_20141201.xls")
d = pd.read_excel(filename, sheetname=None)
panel = pd.Panel.from_dict(d)
#print(panel.loc['Open','2014-02-03','AAPL'])
panel = panel.iloc[:,1:,:]
panel.major_axis.name = "Date"
#print(panel)
df_AAPL = panel.loc[:,:,'AAPL']
print(df_AAPL)
SETTINGS.join = False
print(MA(df_AAPL, 3))
if __name__ == '__main__':
main()
|
import unittest
import jsparagus.gen
from jsparagus import parse_pgen, parse_pgen_generated
class ParsePgenTestCase(unittest.TestCase):
def test_self(self):
import os
filename = os.path.join(os.path.dirname(parse_pgen.__file__), "..",
"pgen.pgen")
grammar = parse_pgen.load_grammar(filename)
self.maxDiff = None
pgen_grammar = parse_pgen.pgen_grammar
self.assertEqual(pgen_grammar.nonterminals, grammar.nonterminals)
self.assertEqual(pgen_grammar.variable_terminals,
grammar.variable_terminals)
self.assertEqual(pgen_grammar.goals(), grammar.goals())
with open(parse_pgen_generated.__file__) as f:
pre_generated = f.read()
import io
out = io.StringIO()
jsparagus.gen.generate_parser(out, grammar)
generated_from_file = out.getvalue()
self.maxDiff = None
self.assertEqual(pre_generated, generated_from_file)
if __name__ == '__main__':
unittest.main()
|
from math import ceil, lcm
def earliest_depart(now : int, bus : int) -> int:
"""Get the earliest departure time (after now) for a bus
departing on a fixed schedule."""
return int(ceil(now / bus)) * bus
def part1(now : int, busses : list[str]) -> int:
"""Get the minimum wait for a bus, multiplied by its id."""
best = None
for bus in busses:
if bus == "x": continue
wait = earliest_depart(now, int(bus)) - now
if best is None or wait < best[0]:
best = (wait, bus, wait * int(bus))
print("New best:", best)
return best[2]
def get_sequences(busses : list[str]) -> list[tuple[int, int]]:
"""Get the start and periods of the sequences of valid answers for each bus."""
return [
(int(x), int(x) - i)
for i, x in enumerate(busses)
if x != "x"
]
def get_coincidence(period1:int, start1:int, period2:int, start2:int)->int:
"""Get the first coincidence of two sequences with given period and starts.
period1 is assumed to be larger if of extremely different sizes."""
val = start1
while (val - start2) % period2:
val += period1
return val
def part2(busses : list[str]) -> int:
"""Get the first timestamp for departure of the first bus, such that each
subsequent bus departs one minute later (x progresses the minute by one,
but is not a bus)."""
seqs = get_sequences(busses)
period1, start1 = seqs.pop()
periods = [period1]
while seqs:
period2, start2 = seqs.pop()
print(f"Looking for n*{period1}+{start1} = m*{period2}+{start2}")
start1 = get_coincidence(period1, start1, period2, start2)
periods.append(period2)
period1 = lcm(*periods)
return start1
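# Example (added; verified by hand): part2(["7", "13"]) == 77, the first
# timestamp t at which bus 7 departs at t and bus 13 departs at t + 1
# (77 % 7 == 0 and 78 % 13 == 0).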
if __name__ == "__main__":
with open("input13.txt") as f:
now = int(f.readline())
busses = f.readline().strip().split(',')
print(now)
print(busses)
print(f"Earliest departure score: {part1(now, busses)}")
print(f"Earliest time: {part2(busses)}")
|
"""
@author: Bryan Silverthorn <bcs@cargo-cult.org>
"""
import qy
class Variable(object):
"""
Mutable value.
"""
def __init__(self, type_):
"""
Initialize.
"""
self._location = qy.stack_allocate(type_)
def __lt__(self, other):
"""
XXX.
"""
return self._location.load() < other
def __le__(self, other):
"""
XXX.
"""
return self._location.load() <= other
def __gt__(self, other):
"""
XXX.
"""
return self._location.load() > other
def __ge__(self, other):
"""
XXX.
"""
return self._location.load() >= other
def __eq__(self, other):
"""
XXX.
"""
return self._location.load() == other
def __ne__(self, other):
"""
XXX.
"""
return self._location.load() != other
def __add__(self, other):
"""
XXX.
"""
return self._location.load() + other
def __sub__(self, other):
"""
XXX.
"""
return self._location.load() - other
def __mul__(self, other):
"""
XXX.
"""
return self._location.load() * other
def __div__(self, other):
"""
XXX.
"""
return self._location.load() / other
def __floordiv__(self, other):
"""
XXX.
"""
return self._location.load() // other
def __mod__(self, other):
"""
XXX.
"""
return self._location.load() % other
def __divmod__(self, other):
"""
XXX.
"""
return divmod(self._location.load(), other)
def __pow__(self, other):
"""
XXX.
"""
return self._location.load() ** other
def __and__(self, other):
"""
XXX.
"""
return self._location.load() & other
def __xor__(self, other):
"""
XXX.
"""
return self._location.load() ^ other
def __or__(self, other):
"""
XXX.
"""
return self._location.load() | other
def __lshift__(self, other):
"""
XXX.
"""
return self._location.load() << other
def __rshift__(self, other):
"""
XXX.
"""
return self._location.load() >> other
def __neg__(self):
"""
XXX.
"""
return -self._location.load()
def __pos__(self):
"""
XXX.
"""
return +self._location.load()
def __abs__(self):
"""
XXX.
"""
return abs(self._location.load())
def __invert__(self):
"""
XXX.
"""
return ~self._location.load()
def __radd__(self, other):
"""
Return other + self.
"""
return other + self._location.load()
def __rsub__(self, other):
"""
XXX.
"""
return other - self._location.load()
def __rmul__(self, other):
"""
"""
return other * self._location.load()
def __rdiv__(self, other):
"""
XXX.
"""
return other / self._location.load()
def __rmod__(self, other):
"""
XXX.
"""
return other % self._location.load()
def __rdivmod__(self, other):
"""
XXX.
"""
return divmod(other, self._location.load())
def __rpow__(self, other):
"""
XXX.
"""
return other ** self._location.load()
def __rlshift__(self, other):
"""
XXX.
"""
return other << self._location.load()
def __rrshift__(self, other):
"""
XXX.
"""
return other >> self._location.load()
def __rand__(self, other):
"""
XXX.
"""
return other & self._location.load()
def __rxor__(self, other):
"""
XXX.
"""
return other ^ self._location.load()
def __ror__(self, other):
"""
XXX.
"""
return other | self._location.load()
def __iadd__(self, other):
"""
XXX.
"""
return self.set(self + other)
def __isub__(self, other):
"""
XXX.
"""
return self.set(self - other)
def __imul__(self, other):
"""
XXX.
"""
return self.set(self * other)
def __idiv__(self, other):
"""
XXX.
"""
return self.set(self / other)
def __ifloordiv__(self, other):
"""
XXX.
"""
return self.set(self // other)
def __imod__(self, other):
"""
XXX.
"""
return self.set(self % other)
def __ipow__(self, other):
"""
XXX.
"""
return self.set(self ** other)
def __iand__(self, other):
"""
XXX.
"""
self.set(self & other)
def __ixor__(self, other):
"""
XXX.
"""
return self.set(self ^ other)
def __ior__(self, other):
"""
XXX.
"""
return self.set(self | other)
def __ilshift__(self, other):
"""
XXX.
"""
self.set(self << other)
def __irshift__(self, other):
"""
XXX.
"""
self.set(self >> other)
def set(self, value):
"""
Change the value of the variable.
"""
qy.value_from_any(value).store(self._location)
return self
@property
def value(self):
"""
The current value.
"""
return self._location.load()
@staticmethod
def set_to(value):
"""
Return a new variable, initialized.
"""
value = qy.value_from_any(value)
return Variable(value.type_).set(value)
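# Usage sketch (added; assumes code is being emitted inside a qy function body):
#     i = Variable.set_to(0)   # stack-allocate and initialise
#     i.set(i + 1)             # load, add, store back into the slot
#     current = i.value        # load the current value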
|
"""WebUtils Tests"""
|
import json
from django import template
from django.apps import apps
register = template.Library()
quilljs_app = apps.get_app_config('quilljs')
@register.filter()
def quilljs_conf(name):
"""Get a value from the configuration app."""
return getattr(quilljs_app, name)
quilljs_conf.is_safe = True
@register.filter()
def quilljs_conf_json(name):
"""Get a value from the configuration app as JSON."""
return json.dumps(getattr(quilljs_app, name))
quilljs_conf_json.is_safe = True
@register.simple_tag(takes_context=True)
def render_toolbar(context, config):
"""Render the toolbar for the given config."""
quilljs_config = getattr(quilljs_app, config)
t = template.loader.get_template(quilljs_config['toolbar_template'])
return t.render(context.flatten())
@register.simple_tag(takes_context=True)
def render_editor(context, config):
"""Render the editor for the given config."""
quilljs_config = getattr(quilljs_app, config)
t = template.loader.get_template(quilljs_config['editor_template'])
return t.render(context.flatten())
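# Illustrative template usage (assumptions: this module lives under a
# templatetags/ package, and the 'quilljs' AppConfig exposes attributes such as
# 'theme' and 'default'; those attribute names are placeholders, not part of
# the code above):
#
#     {% load quilljs_tags %}          {# actual name = this file's module name #}
#     {{ "theme"|quilljs_conf }}
#     {% render_editor "default" %}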
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Colour Quality Plotting
=======================
Defines the colour quality plotting objects:
- :func:`colour_rendering_index_bars_plot`
"""
from __future__ import division
import matplotlib.pyplot
import numpy as np
import pylab
from colour.algebra import normalise
from colour.models import XYZ_to_sRGB
from colour.quality import colour_rendering_index
from colour.plotting import (
aspect,
bounding_box,
display,
figure_size)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['colour_rendering_index_bars_plot']
@figure_size((8, 8))
def colour_rendering_index_bars_plot(illuminant, **kwargs):
"""
Plots the *colour rendering index* of given illuminant.
Parameters
----------
illuminant : SpectralPowerDistribution
Illuminant to plot the *colour rendering index*.
\*\*kwargs : \*\*
        Keyword arguments.
Returns
-------
bool
Definition success.
Examples
--------
>>> from colour import ILLUMINANTS_RELATIVE_SPDS
>>> illuminant = ILLUMINANTS_RELATIVE_SPDS.get('F2')
>>> colour_rendering_index_bars_plot(illuminant) # doctest: +SKIP
True
"""
figure, axis = matplotlib.pyplot.subplots()
cri, colour_rendering_indexes, additional_data = \
colour_rendering_index(illuminant, additional_data=True)
colours = ([[1] * 3] + [normalise(XYZ_to_sRGB(x.XYZ / 100))
for x in additional_data[0]])
x, y = tuple(zip(*sorted(colour_rendering_indexes.items(),
key=lambda x: x[0])))
x, y = np.array([0] + list(x)), np.array(
[cri] + list(y))
    positive = np.sign(min(y)) in (0, 1)
width = 0.5
bars = pylab.bar(x, y, color=colours, width=width)
y_ticks_steps = 10
pylab.yticks(range(0 if positive else -100,
100 + y_ticks_steps,
y_ticks_steps))
pylab.xticks(x + width / 2,
['Ra'] + ['R{0}'.format(index) for index in x[1:]])
def label_bars(bars):
"""
Add labels above given bars.
"""
for bar in bars:
y = bar.get_y()
height = bar.get_height()
value = height if np.sign(y) in (0, 1) else -height
axis.text(bar.get_x() + bar.get_width() / 2,
0.025 * height + height + y,
'{0:.1f}'.format(value),
ha='center', va='bottom')
label_bars(bars)
settings = {
'title': 'Colour Rendering Index - {0}'.format(illuminant.name),
'grid': True,
'x_tighten': True,
'y_tighten': True,
'limits': [-width, 14 + width * 2, -10 if positive else -110,
110]}
settings.update(kwargs)
bounding_box(**settings)
aspect(**settings)
return display(**settings)
|
from .load import Load
Default = Load
|
__all__ = ['EvaluationMetric']
class EvaluationMetric(object):
r"""
Base class for all Evaluation Metrics
"""
def __init__(self):
self.arg_map = {}
def set_arg_map(self, value):
r"""Updates the ``arg_map`` for passing a different value to the ``metric_ops``.
Args:
value (dict): A mapping of the ``argument name`` in the method signature and the
variable name in the ``Trainer`` it corresponds to.
.. note::
If the ``metric_ops`` signature is
``metric_ops(self, gen, disc)``
then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``.
In this case we make the following function call
``metric.set_arg_map({"gen": "generator", "disc": "discriminator"})``.
"""
self.arg_map.update(value)
def preprocess(self, x):
r"""
Subclasses must override this function and provide their own preprocessing
pipeline.
:raises NotImplementedError: If the subclass doesn't override this function.
"""
raise NotImplementedError
def calculate_score(self, x):
r"""
Subclasses must override this function and provide their own score calculation.
:raises NotImplementedError: If the subclass doesn't override this function.
"""
raise NotImplementedError
def metric_ops(self, generator, discriminator, **kwargs):
r"""
Subclasses must override this function and provide their own metric evaluation ops.
:raises NotImplementedError: If the subclass doesn't override this function.
"""
raise NotImplementedError
def __call__(self, x):
return self.calculate_score(self.preprocess(x))
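# A minimal illustrative subclass (not part of the original API): it only shows
# the three hooks a concrete metric is expected to override. The scoring logic
# and the way metric_ops ignores generator/discriminator are assumptions made
# for this sketch.
class _IdentityScore(EvaluationMetric):
    r"""Toy metric that passes data through and scores it by its mean."""
    def preprocess(self, x):
        # No-op preprocessing for the sketch.
        return x
    def calculate_score(self, x):
        # Assume x is a non-empty iterable of numbers.
        return sum(x) / float(len(x))
    def metric_ops(self, generator, discriminator, **kwargs):
        # A real metric would sample from the generator here; this sketch
        # simply scores a fixed batch.
        return self([0.0, 1.0, 2.0])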
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.db import connection
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone
from ..document.models import Document, Annotation, View
from ..task.models import Requirement, Level, UserQuestRelationship
from ..userprofile.models import Team
from ..common.models import Group
from ..analysis.models import Report
from ..task.models import Task
from ..task.ner.models import EntityRecognitionAnnotation
from ..task.re.models import RelationAnnotation
from ..score.models import Point
from . import training_data
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from itertools import chain
import networkx as nx
import datetime
_attrs = dict(id='id', source='source', target='target', key='key')
def node_link_data(G, attrs=_attrs):
multigraph = G.is_multigraph()
id_ = attrs['id']
source = attrs['source']
target = attrs['target']
# Allow 'key' to be omitted from attrs if the graph is not a multigraph.
key = None if not multigraph else attrs['key']
if len(set([source, target, key])) < 3:
raise nx.NetworkXError('Attribute names are not unique.')
data = {}
data['directed'] = G.is_directed()
data['multigraph'] = multigraph
data['graph'] = list(G.graph.items())
data['nodes'] = [dict(chain(G.node[n].items(), [(id_, n)])) for n in G]
data['edges'] = [dict(chain(d.items(), [(source, u), (target, v), ('id', k)])) for u, v, k, d in G.edges_iter(keys=True, data=True)] # N1, N2, IDX, ATTRS
return data
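# For reference, node_link_data returns a JSON-serializable dict shaped roughly
# like the following (illustrative, for a multigraph with two nodes, one edge
# with key 0, and no graph/node/edge attributes):
#   {'directed': False, 'multigraph': True, 'graph': [],
#    'nodes': [{'id': 'a'}, {'id': 'b'}],
#    'edges': [{'source': 'a', 'target': 'b', 'id': 0}]}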
@api_view(['GET'])
def group_network(request, group_pk):
get_object_or_404(Group, pk=group_pk)
from ..analysis.tasks import generate_network
G = generate_network(group_pk, spring_force=8)
d = node_link_data(G)
return Response(d)
@login_required
@api_view(['GET'])
def analysis_group_user(request, group_pk, user_pk=None):
group = get_object_or_404(Group, pk=group_pk)
response = []
reports = group.report_set.filter(report_type=Report.AVERAGE).order_by('-created').all()
user_id = int(user_pk) if user_pk else int(request.user.pk)
for report in reports:
df = report.dataframe
df = df[df['user_id'] == user_id]
if df.shape[0] > 0:
row = df.iloc[0]
response.append({
'created': report.created,
'f-score': row['f-score'],
'pairings': row['pairings']})
return Response(response)
@login_required
@api_view(['GET'])
def analysis_group(request, group_pk):
group = get_object_or_404(Group, pk=group_pk)
weighted = True
response = []
reports = group.report_set.filter(report_type=1).order_by('-created').all()
for report in reports:
df = report.dataframe
if weighted:
df['wf'] = df['pairings'] * df['f-score']
response.append({
'created': report.created,
'f-score': df['wf'].sum() / df['pairings'].sum(),
'pairings': df['pairings'].sum()})
else:
response.append({
'created': report.created,
'f-score': df['f-score'].mean(),
'pairings': df['pairings'].sum()})
return Response(response)
@api_view(['GET'])
def mark2cure_stats(request):
return Response({
'ner_annotations': EntityRecognitionAnnotation.objects.count(),
're_annotations': RelationAnnotation.objects.count(),
})
@login_required
@api_view(['GET'])
def ner_stats(request):
return Response({
'total_score': request.user.profile.score(task='entity_recognition'),
'quests_completed': UserQuestRelationship.objects.filter(user=request.user, completed=True).count(),
'papers_reviewed': View.objects.filter(user=request.user, completed=True, task_type='ner').count(),
'annotations': Annotation.objects.filter(kind='re', view__user=request.user).count()
})
@api_view(['GET'])
def ner_document(request, document_pk):
"""
Return a JSON response with the generic document structure
No annotations of any kind are included
"""
get_object_or_404(Document, pk=document_pk)
response = Document.objects.as_json(document_pks=[document_pk])
return Response(response[0])
@login_required
@api_view(['GET'])
def ner_quest_read(request, quest_pk):
cmd_str = ""
with open('mark2cure/task/ner/commands/get-quest-progression.sql', 'r') as f:
cmd_str = f.read()
cmd_str = cmd_str.format(task_id=quest_pk, user_id=request.user.pk)
c = connection.cursor()
try:
c.execute(cmd_str)
queryset = [dict(zip(['pk', 'quest_completed', 'view_count',
'completed', 'had_opponent',
'disease_pub', 'gene_pub', 'drug_pub'], x)) for x in c.fetchall()]
finally:
c.close()
documents = Document.objects.as_json(document_pks=[x['pk'] for x in queryset],
pubtators=[[x['disease_pub'], x['gene_pub'], x['drug_pub']] for x in queryset])
# Each query does ASC document_pk so we know they're in the same order!
documents = [{**queryset[i], **documents[i]} for i in range(len(queryset))]
doc_quest_completed_bools = [d['quest_completed'] for d in documents]
if not all(x == doc_quest_completed_bools[0] for x in doc_quest_completed_bools):
raise ValueError
for doc in documents:
del doc['disease_pub']
del doc['gene_pub']
del doc['drug_pub']
del doc['quest_completed']
        doc['completed'] = bool(doc['completed'])
    res = {
        'completed': bool(doc_quest_completed_bools[0]),
'documents': documents,
}
return Response(res)
@login_required
@api_view(['GET'])
def re_stats(request):
return Response({
'total_score': request.user.profile.score(task='relation'),
'quests_completed': View.objects.filter(user=request.user, completed=True, task_type='re').count(),
'annotations': Annotation.objects.filter(kind='re', view__user=request.user).count()
})
@login_required
@api_view(['GET'])
def talk_comments(request):
cmd_str = ""
with open('mark2cure/talk/commands/get-recent-comments.sql', 'r') as f:
cmd_str = f.read()
c = connection.cursor()
try:
c.execute(cmd_str)
queryset = [dict(zip(['user_id', 'user_name', 'comment',
'submit_date', 'document_pk'], x)) for x in c.fetchall()]
finally:
c.close()
return Response(queryset)
@login_required
@api_view(['GET'])
def talk_document_ner_contributors(request, document_pk):
cmd_str = ""
with open('mark2cure/task/commands/get-contributors.sql', 'r') as f:
cmd_str = f.read()
cmd_str = cmd_str.format(document_pk=document_pk, task_type='ner')
c = connection.cursor()
try:
c.execute(cmd_str)
queryset = [dict(zip(['user_id'], x)) for x in c.fetchall()]
finally:
c.close()
return Response(queryset)
@login_required
@api_view(['GET'])
def talk_document_annotations(request, document_pk, ann_idx):
cmd_str = ""
with open('mark2cure/talk/commands/get-ner-ann-occurances.sql', 'r') as f:
cmd_str = f.read()
cmd_str = cmd_str.format(document_pk=document_pk, type_idx=ann_idx)
c = connection.cursor()
try:
c.execute(cmd_str)
queryset = [dict(zip(['text', 'occurances'], x)) for x in c.fetchall()]
finally:
c.close()
return Response(queryset)
@login_required
@api_view(['GET'])
def talk_documents(request):
cmd_str = ""
with open('mark2cure/talk/commands/get-discussed-documents.sql', 'r') as f:
cmd_str = f.read()
c = connection.cursor()
try:
c.execute(cmd_str)
queryset = [dict(zip(['id', 'title', 'comments'], x)) for x in c.fetchall()]
finally:
c.close()
return Response(queryset)
@api_view(['GET'])
def ner_list_item_contributors(request, group_pk):
group = get_object_or_404(Group, pk=group_pk)
return Response([{'username': i[0], 'count': i[1]} for i in group.contributors()])
@api_view(['GET'])
def ner_list_item_quests(request, group_pk):
from .serializers import QuestSerializer
group = get_object_or_404(Group, pk=group_pk)
# we now allow users to see a group 'home page' for detailed information whether or
# not they are logged in
if request.user.is_authenticated():
queryset = Task.objects.filter(group=group).extra(select={
"current_submissions_count": """
SELECT COUNT(*) AS current_submissions_count
FROM task_userquestrelationship
WHERE (task_userquestrelationship.completed = 1
AND task_userquestrelationship.task_id = task_task.id)""",
"user_completed": """
SELECT COUNT(*) AS user_completed
FROM task_userquestrelationship
WHERE (task_userquestrelationship.completed = 1
AND task_userquestrelationship.user_id = %d
AND task_userquestrelationship.task_id = task_task.id)""" % (request.user.pk,)
}).prefetch_related('documents')
else:
queryset = Task.objects.filter(group=group).extra(select={
"current_submissions_count": """
SELECT COUNT(*) AS current_submissions_count
FROM task_userquestrelationship
WHERE (task_userquestrelationship.completed = 1
AND task_userquestrelationship.task_id = task_task.id)"""
}).prefetch_related('documents')
serializer = QuestSerializer(queryset, many=True, context={'user': request.user})
return Response(serializer.data)
@api_view(['GET'])
def ner_list(request):
from .serializers import NERGroupSerializer
queryset = Group.objects.exclude(stub='training').order_by('-order')
serializer = NERGroupSerializer(queryset, many=True)
return Response(serializer.data)
@api_view(['GET'])
def ner_list_item(request, group_pk):
group = get_object_or_404(Group, pk=group_pk)
cmd_str = ""
with open('mark2cure/common/commands/get-group-info.sql', 'r') as f:
cmd_str = f.read()
cmd_str = cmd_str.format(group_pk=group.id)
c = connection.cursor()
try:
c.execute(cmd_str)
queryset = [dict(zip(['id', 'name', 'stub',
'description', 'quest_count', 'document_count',
'total_contributors', 'start_date', 'end_date',
'complete_percent'], x)) for x in c.fetchall()]
finally:
c.close()
return Response(queryset[0])
@login_required
@api_view(['GET'])
def re_list(request):
""" Returns the available relation tasks for a specific user
Accessed through a JSON API endpoint
"""
from .serializers import DocumentRelationSerializer
cmd_str = ""
with open('mark2cure/api/commands/get-relations.sql', 'r') as f:
cmd_str = f.read()
# Start the DB Connection
c = connection.cursor()
c.execute('SET @user_work_max = {rel_work_size};'.format(rel_work_size=20))
c.execute('SET @k_max = {completions};'.format(completions=settings.ENTITY_RECOGNITION_K))
c.execute('SET @user_id = {user_id};'.format(user_id=request.user.pk))
c.execute('SET @rel_ann_content_type_id = 56;')
c.execute(cmd_str)
queryset = [{'id': x[0],
'document_id': x[1],
'title': x[2],
'total_document_relationships': x[3],
'user_document_relationships': x[4],
'community_answered': x[5],
'community_completed': x[6],
'community_progress': x[7],
'user_completed': x[8],
'user_progress': x[9],
'user_answered': x[10],
'user_view_completed': x[11]} for x in c.fetchall()]
# Close the connection
c.close()
# Prevent documents from being shown that have since been completed
    # by the community before the request.user could complete everything
for idx, item in enumerate(queryset):
if int(item['user_document_relationships']) <= 0:
document = get_object_or_404(Document, pk=item['id'])
first_section = document.section_set.first()
view = View.objects.filter(task_type='re', section=first_section, user=request.user).last()
# print(' - X:', document, first_section, view)
# (TODO) Why is there no View on these sections?
if view:
Point.objects.create(user=request.user,
amount=settings.RELATION_DOC_POINTS,
content_type=ContentType.objects.get_for_model(view),
object_id=view.id)
view.completed = True
view.save()
del queryset[idx]
serializer = DocumentRelationSerializer(queryset, many=True)
return Response(serializer.data)
def users_with_score(days=30):
today = datetime.datetime.now()
since = today - datetime.timedelta(days=days)
res = Point.objects.raw("""
SELECT ANY_VALUE(`score_point`.`id`) as `id`,
SUM(score_point.amount) as score,
`auth_user`.`username`,
`auth_user`.`id`
FROM `score_point`
LEFT OUTER JOIN `auth_user`
ON `auth_user`.`id` = `score_point`.`user_id`
WHERE ( `score_point`.`created` > '{since}'
AND `score_point`.`created` <= '{today}'
AND `auth_user`.`id` NOT IN ({excluded_users}) )
GROUP BY `auth_user`.`id` ORDER BY score DESC;""".format(
since=since,
today=today,
excluded_users=', '.join('\'' + str(item) + '\'' for item in [5, 160]))
)
return [row for row in res if row.score is not None]
def get_annotated_teams(days=30):
# (TODO) This could be smaller by only being UserProfiles that
# we know are part of a Team
users_queryset = users_with_score(days=days)
teams = Team.objects.all()
for team in teams:
team_user_profile_pks = team.userprofile_set.values_list('pk', flat=True)
team.score = sum(filter(None, [row.score for row in filter(lambda x: x.id in team_user_profile_pks, users_queryset)]))
teams = list(teams)
teams.sort(key=lambda x: x.score, reverse=True)
return teams
@api_view(['GET'])
def leaderboard_users(request, day_window):
from .serializers import LeaderboardSerializer
queryset = users_with_score(days=int(day_window))[:25]
serializer = LeaderboardSerializer(queryset, many=True)
return Response(serializer.data)
@api_view(['GET'])
def leaderboard_teams(request, day_window):
from .serializers import TeamLeaderboardSerializer
queryset = list(get_annotated_teams(days=int(day_window)))[:25]
    queryset = [team for team in queryset if team.score != 0]
serializer = TeamLeaderboardSerializer(queryset, many=True)
return Response(serializer.data)
def get_training_dict(user_pk):
"""Returns Array of Module dictionaries that contain:
Module: {levels: arr, task: str} """
res = []
for task_type in ["ner", "re"]:
cmd_str = ""
with open('mark2cure/training/commands/get-user-requirement-training.sql', 'r') as f:
cmd_str = f.read()
cmd_str = cmd_str.format(user_id=user_pk, task_type=task_type)
c = connection.cursor()
try:
c.execute(cmd_str)
queryset = [dict(zip(['hash', 'name', 'last_created', 'completions'], x)) for x in c.fetchall()]
finally:
c.close()
res.append({
'task': task_type,
'levels': queryset
})
return res
@api_view(['GET'])
def training(request):
"""Returns back an array of Task Types the platform allows.
- For each, provides the steps involved in training and
parameters for monitoring a user's progression through it
"""
if request.user.is_anonymous():
return Response([{"task": "re"}])
res = get_training_dict(request.user.pk)
return Response(res)
@api_view(['GET', 'POST'])
def training_details(request, task_type):
if task_type not in ["re", "ner"]:
return Response({}, status=status.HTTP_404_NOT_FOUND)
if request.method == 'POST':
if task_type == "re":
r = get_object_or_404(Requirement, hash=request.POST.get('requirement', None), active=True, task_type=task_type)
Level.objects.create(user=request.user, requirement=r, created=timezone.now())
return Response({'requirement': 'completed'})
else:
if task_type == "re":
res = training_data["data"]["re"]
elif task_type == "ner":
res = []
return Response(res)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import threading
from postman import rpc
def _target(queue, function, wait_till_full):
try:
while True:
with queue.get(wait_till_full=wait_till_full) as batch:
batch.set_outputs(function(*batch.get_inputs()))
except StopIteration:
return
class Server(rpc.Server):
def bind(self, name, function, batch_size, num_threads=1, wait_till_full=False):
self.threads = getattr(self, "threads", [])
self.queues = getattr(self, "queues", [])
queue = rpc.ComputationQueue(batch_size)
self.bind_queue(name, queue)
self.queues.append(queue)
for i in range(num_threads):
self.threads.append(
threading.Thread(
target=_target,
name="thread-%s-%i" % (name, i),
args=(queue, function, wait_till_full),
)
)
def stop(self):
for queue in getattr(self, "queues", []):
queue.close()
super(Server, self).stop()
def run(self):
super(Server, self).run()
for thread in getattr(self, "threads", []):
thread.start()
def wait(self):
super(Server, self).wait()
for thread in getattr(self, "threads", []):
thread.join()
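# Illustrative usage sketch (the rpc.Server / rpc.ComputationQueue semantics and
# the constructor arguments are assumed from context, not documented here):
#
#     server = Server()
#     server.bind("inference", my_batched_fn, batch_size=32, num_threads=2)
#     server.run()    # start the RPC server, then the worker threads
#     server.wait()   # block until the server stops; stop() closes the queues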
|
#
# TODO: for the moment, it has to be py3.6 compatible. Please do not use e.g. Final
# time
MINUTE: int = 60 # secs
# string templates
HEADER_STR: str = "{:-^50}\n"
|
# https://leetcode.com/problems/restore-ip-addresses/
#
# algorithms
# Medium (31.43%)
# Total Accepted: 138,937
# Total Submissions: 442,122
# beats 85.52% of python submissions
class Solution(object):
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
length = len(s)
if length < 4 or length > 12:
return []
res = [[]]
def recursive(idx, path, pre):
path_len = len(path)
if path_len > 4:
return
if path_len == 4:
if idx == length + 1:
res[0] += '.'.join(path),
return
for i in xrange(idx, length + 1):
if self.is_valid(s[pre:i]):
recursive(i + 1, path + [s[pre:i]], i)
recursive(0, [], 0)
return res[0]
def is_valid(self, s):
length = len(s)
if length == 0 or length > 3:
return False
if length > 1 and s[0] == '0':
return False
if length == 3 and int(s) > 255:
return False
return True
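# Example (illustrative): Solution().restoreIpAddresses("25525511135") is
# expected to return ["255.255.11.135", "255.255.111.35"]. Note the code above
# uses xrange, i.e. it targets Python 2.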
|
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
__version__='3.3.0'
__doc__=''
#REPORTLAB_TEST_SCRIPT
import sys, copy, os
from reportlab.platypus import *
_NEW_PARA=os.environ.get('NEW_PARA','0')[0] in ('y','Y','1')
_REDCAP=int(os.environ.get('REDCAP','0'))
_CALLBACK=os.environ.get('CALLBACK','0')[0] in ('y','Y','1')
if _NEW_PARA:
def Paragraph(s,style):
from rlextra.radxml.para import Paragraph as PPPP
return PPPP(s,style)
from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
import reportlab.rl_config
reportlab.rl_config.invariant = 1
styles = getSampleStyleSheet()
Title = "The Odyssey"
Author = "Homer"
def myTitlePage(canvas, doc):
canvas.saveState()
canvas.restoreState()
def myLaterPages(canvas, doc):
canvas.saveState()
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "Page %d" % doc.page)
canvas.restoreState()
def go():
def myCanvasMaker(fn,**kw):
from reportlab.pdfgen.canvas import Canvas
canv = Canvas(fn,**kw)
# attach our callback to the canvas
canv.myOnDrawCB = myOnDrawCB
return canv
doc = BaseDocTemplate('dodyssey.pdf',showBoundary=0)
#normal frame as for SimpleFlowDocument
frameT = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height, id='normal')
#Two Columns
frame1 = Frame(doc.leftMargin, doc.bottomMargin, doc.width/2-6, doc.height, id='col1')
frame2 = Frame(doc.leftMargin+doc.width/2+6, doc.bottomMargin, doc.width/2-6,
doc.height, id='col2')
doc.addPageTemplates([PageTemplate(id='First',frames=frameT, onPage=myTitlePage),
PageTemplate(id='OneCol',frames=frameT, onPage=myLaterPages),
PageTemplate(id='TwoCol',frames=[frame1,frame2], onPage=myLaterPages),
])
doc.build(Elements,canvasmaker=myCanvasMaker)
Elements = []
ChapterStyle = copy.deepcopy(styles["Heading1"])
ChapterStyle.alignment = TA_CENTER
ChapterStyle.fontsize = 14
InitialStyle = copy.deepcopy(ChapterStyle)
InitialStyle.fontsize = 16
InitialStyle.leading = 20
PreStyle = styles["Code"]
def newPage():
Elements.append(PageBreak())
chNum = 0
def myOnDrawCB(canv,kind,label):
print('myOnDrawCB(%s)'%kind, 'Page number=', canv.getPageNumber(), 'label value=', label)
def chapter(txt, style=ChapterStyle):
global chNum
Elements.append(NextPageTemplate('OneCol'))
newPage()
chNum += 1
if _NEW_PARA or not _CALLBACK:
Elements.append(Paragraph(txt, style))
else:
Elements.append(Paragraph(('foo<onDraw name="myOnDrawCB" label="chap %d"/> '%chNum)+txt, style))
Elements.append(Spacer(0.2*inch, 0.3*inch))
if useTwoCol:
Elements.append(NextPageTemplate('TwoCol'))
def fTitle(txt,style=InitialStyle):
Elements.append(Paragraph(txt, style))
ParaStyle = copy.deepcopy(styles["Normal"])
ParaStyle.spaceBefore = 0.1*inch
if 'right' in sys.argv:
ParaStyle.alignment = TA_RIGHT
elif 'left' in sys.argv:
ParaStyle.alignment = TA_LEFT
elif 'justify' in sys.argv:
ParaStyle.alignment = TA_JUSTIFY
elif 'center' in sys.argv or 'centre' in sys.argv:
ParaStyle.alignment = TA_CENTER
else:
ParaStyle.alignment = TA_JUSTIFY
useTwoCol = 'notwocol' not in sys.argv
def spacer(inches):
Elements.append(Spacer(0.1*inch, inches*inch))
def p(txt, style=ParaStyle):
if _REDCAP:
fs, fe = '<font color="red" size="+2">', '</font>'
n = len(txt)
for i in range(n):
if 'a'<=txt[i]<='z' or 'A'<=txt[i]<='Z':
txt = (txt[:i]+(fs+txt[i]+fe))+txt[i+1:]
break
if _REDCAP>=2 and n>20:
j = i+len(fs)+len(fe)+1+int((n-1)/2)
while not ('a'<=txt[j]<='z' or 'A'<=txt[j]<='Z'): j += 1
txt = (txt[:j]+('<b><i><font size="+2" color="blue">'+txt[j]+'</font></i></b>'))+txt[j+1:]
if _REDCAP==3 and n>20:
n = len(txt)
fs = '<font color="green" size="+1">'
for i in range(n-1,-1,-1):
if 'a'<=txt[i]<='z' or 'A'<=txt[i]<='Z':
txt = txt[:i]+((fs+txt[i]+fe)+txt[i+1:])
break
Elements.append(Paragraph(txt, style))
firstPre = 1
def pre(txt, style=PreStyle):
global firstPre
if firstPre:
Elements.append(NextPageTemplate('OneCol'))
newPage()
firstPre = 0
spacer(0.1)
p = Preformatted(txt, style)
Elements.append(p)
def parseOdyssey(fn):
from time import time
E = []
t0=time()
text = open(fn,'r').read()
i0 = text.index('Book I')
endMarker = 'covenant of peace between the two contending parties.'
i1 = text.index(endMarker)+len(endMarker)
PREAMBLE=list(map(str.strip,text[0:i0].split('\n')))
L=list(map(str.strip,text[i0:i1].split('\n')))
POSTAMBLE=list(map(str.strip,text[i1:].split('\n')))
def ambleText(L):
while L and not L[0]: L.pop(0)
while L:
T=[]
while L and L[0]:
T.append(L.pop(0))
yield T
while L and not L[0]: L.pop(0)
def mainText(L):
while L:
B = L.pop(0)
while not L[0]: L.pop(0)
T=[]
while L and L[0]:
T.append(L.pop(0))
while not L[0]: L.pop(0)
P = []
while L and not (L[0].startswith('Book ') and len(L[0].split())==2):
E=[]
while L and L[0]:
E.append(L.pop(0))
P.append(E)
if L:
while not L[0]: L.pop(0)
yield B,T,P
t1 = time()
print("open(%s,'r').read() took %.4f seconds" %(fn,t1-t0))
E.append([spacer,2])
E.append([fTitle,'<font color="red">%s</font>' % Title, InitialStyle])
E.append([fTitle,'<font size="-4">by</font> <font color="green">%s</font>' % Author, InitialStyle])
for T in ambleText(PREAMBLE):
E.append([p,'\n'.join(T)])
for (B,T,P) in mainText(L):
E.append([chapter,B])
E.append([p,'<font size="+1" color="Blue"><b>%s</b></font>' % '\n'.join(T),ParaStyle])
for x in P:
E.append([p,' '.join(x)])
firstPre = 1
for T in ambleText(POSTAMBLE):
E.append([p,'\n'.join(T)])
t3 = time()
print("Parsing into memory took %.4f seconds" %(t3-t1))
del L
t4 = time()
print("Deleting list of lines took %.4f seconds" %(t4-t3))
for i in range(len(E)):
E[i][0](*E[i][1:])
t5 = time()
print("Moving into platypus took %.4f seconds" %(t5-t4))
del E
t6 = time()
print("Deleting list of actions took %.4f seconds" %(t6-t5))
go()
t7 = time()
print("saving to PDF took %.4f seconds" %(t7-t6))
print("Total run took %.4f seconds"%(t7-t0))
import hashlib
print('file digest: %s' % hashlib.md5(open('dodyssey.pdf','rb').read()).hexdigest())
def run():
for fn in ('odyssey.full.txt','odyssey.txt'):
if os.path.isfile(fn):
parseOdyssey(fn)
break
def doProf(profname,func,*args,**kwd):
import hotshot, hotshot.stats
prof = hotshot.Profile(profname)
prof.runcall(func)
prof.close()
stats = hotshot.stats.load(profname)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
if __name__=='__main__':
if '--prof' in sys.argv:
doProf('dodyssey.prof',run)
else:
run()
|
from flask import render_template, request
from flask import url_for, flash, redirect, abort
from flask_login import current_user, login_required
from flaskblog import db
from flaskblog.model import Post
from flaskblog.posts.forms import PostForm
from flask import Blueprint
posts = Blueprint('posts', __name__)
@posts.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data,
content=form.content.data,
author=current_user)
db.session.add(post)
db.session.commit()
flash('Your post has been created', category='success')
return redirect(url_for('main.home'))
return render_template('create_post.html',
title='New Post', form=form,
legend='New Post')
@posts.route('/post/<int:post_id>')
def post(post_id):
post = Post.query.get_or_404(post_id)
return render_template('post.html',
title=post.title,
post=post)
@posts.route('/post/<int:post_id>/update', methods=['GET', 'POST'])
@login_required
def post_update(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash('Your post is updated', category='success')
return redirect(url_for('posts.post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template('create_post.html',
title='Update Post', form=form,
legend='Update Post')
@posts.route('/post/<int:post_id>/delete', methods=['POST'])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash('Your post is deleted', category='success')
return redirect(url_for('main.home'))
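# Illustrative wiring (assumes an application factory defined elsewhere in this
# project): the blueprint above is typically registered with
#     app.register_blueprint(posts)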
|
import time
# Version: 1.0.5
import pandas as pd
def timer(fn):
def wrapped(*args, **kwargs):
name = fn.__name__
print(f'Starting {name}...')
start = time.time()
out = fn(*args, **kwargs)
stop = time.time()
print(f'Finished {name} in {stop - start}s')
return out
return wrapped
@timer
def write_matrix():
# transactions = pd.read_parquet('/data/performance-benchmark-data')
transactions = pd.read_parquet('/data/performance-benchmark-data/part-00000-0cf99dad-6d07-4025-a5e9-f425bb9532b9-c000.snappy.parquet')
transactions['sales'] = transactions['price'] * transactions['quantity']
matrix = (
transactions
.groupby('member-id')
.agg({
'sales': ['sum', 'mean'],
'price': ['mean'],
'trx-id': ['count'],
'date': ['nunique'],
'brand-id': ['nunique'],
'style-id': ['nunique'],
})
.reset_index())
matrix.columns = ['-'.join(col).strip() for col in matrix.columns.values]
matrix.to_parquet('pandas-matrix.parquet')
if __name__ == '__main__':
write_matrix()
# 1 Part
# Starting write_matrix...
# Finished write_matrix in 587.2644765377045s
# Finished write_matrix in 3.1726512908935547s
# 12 Parts
# Starting write_matrix...
# Finished write_matrix in 1131.992933511734s
# Finished write_matrix in 41.91314244270325s
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import platform
import shutil
import tempfile
def get_test_data_file_path(filename):
curr_dir = os.path.dirname(os.path.realpath(__file__))
return os.path.join(curr_dir, "data", filename)
|
# mybarcode.py
from base64 import b64encode
from reportlab.lib import units
from reportlab.graphics import renderPM
from reportlab.graphics.barcode import createBarcodeDrawing
from reportlab.graphics.shapes import Drawing
def get_barcode(value, width, barWidth = 0.05 * units.inch, fontSize = 20, humanReadable = False):
barcode = createBarcodeDrawing('Code128', value = value, barWidth = barWidth, fontSize = fontSize, humanReadable = humanReadable)
drawing_width = width
barcode_scale = drawing_width / barcode.width
drawing_height = 40
drawing = Drawing(drawing_width, drawing_height)
drawing.scale(barcode_scale, barcode_scale)
drawing.add(barcode, name='barcode')
return drawing
def get_image(cod):
barcode = get_barcode(value = cod, width = 600)
data = b64encode(renderPM.drawToString(barcode, fmt = 'PNG'))
return format(data)
from .generarI25 import GenerarImagen
from base64 import encodestring
import StringIO
def get_image2(cod):
barcode = GenerarImagen(codigo=cod)
output = StringIO.StringIO()
barcode.save(output,format="PNG")
data = encodestring(output.getvalue())
return format(data)
|
# Validate event sourcing pattern: create new orders to the order microservice end points
# validate the order is added and a order created event was added
# It assumes the event broker (kafka) and all the solution services are running locally (by default)
# If these tests have to run against remote deployed solution the following environment variables are used:
# KAFKA_BROKERS, ORDER_CMD_MS (URL endpoint for the command microservice), ORDER_QUERY_MS (URL endpoint for the query microservice)
import unittest
import os
import json
import requests
import time
try:
KAFKA_BROKERS = os.environ['KAFKA_BROKERS']
except KeyError:
print("The KAFKA_BROKERS environment variable needs to be set.")
    exit(1)
try:
ORDER_CMD_MS = os.environ['ORDER_CMD_MS']
except KeyError:
ORDER_CMD_MS = "ordercmd:9080"
try:
ORDER_QUERY_MS = os.environ['ORDER_QUERY_MS']
except:
ORDER_QUERY_MS = "orderquery:9080"
# listen to orders topic, verify orderCreated event was published
from confluent_kafka import Consumer, KafkaError, Producer
# See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
orderConsumer = Consumer({
'bootstrap.servers': KAFKA_BROKERS,
'group.id': 'python-orders-consumer',
'auto.offset.reset': 'earliest',
'enable.auto.commit': True
})
orderConsumer.subscribe(['orders'])
def pollNextOrder(orderID):
gotIt = False
order = {}
while not gotIt:
msg = orderConsumer.poll(timeout=10.0)
if msg is None:
print("no message")
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
continue
print('@@@ pollNextOrder {} partition: [{}] at offset {} with key {}:\n'
.format(msg.topic(), msg.partition(), msg.offset(), str(msg.key())))
orderStr = msg.value().decode('utf-8')
print('@@@ pollNextOrder Received message: {}'.format(orderStr))
orderEvent = json.loads(orderStr)
if (orderEvent['payload']['orderID'] == orderID):
print('@@@@ got the matching order ')
gotIt = True
return orderEvent
def getAllOrderedOrderEvents(orderID):
print("Get all event mathing the given orderID")
orderReloader = Consumer({
'bootstrap.servers': KAFKA_BROKERS,
'group.id': 'python-orders-reload',
'auto.offset.reset': 'earliest',
'enable.auto.commit': False
})
orderReloader.subscribe(['orders'])
orderEvents = []
gotAll = False
while not gotAll:
msg = orderReloader.poll(timeout=30)
if msg is None:
print('Timed out... assume we have all')
gotAll = True
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
gotAll = True
continue
eventAsString = msg.value().decode('utf-8')
orderEvent = json.loads(eventAsString)
if (orderEvent['payload']['orderID'] == orderID):
orderEvents.append(orderEvent)
orderReloader.close()
return orderEvents
def delivery_report(err, msg):
""" Called once for each message produced to indicate delivery result.
Triggered by poll() or flush(). """
if err is not None:
print('Message delivery failed: {}'.format(err))
else:
print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
def postContainerAllocated(orderID):
orderProducer = Producer({'bootstrap.servers': KAFKA_BROKERS})
data = {"timestamp": int(time.time()),"type":"ContainerAllocated","version":"1","payload": {"containerID": "c10","orderID":orderID}}
dataStr = json.dumps(data)
orderProducer.produce('orders',dataStr.encode('utf-8'), callback=delivery_report)
orderProducer.flush()
def getOrderQuery(orderID):
res = requests.get("http://" + ORDER_QUERY_MS + "/orders/" + orderID)
print(res.text)
return json.loads(res.text)
'''
Test the happy path for the state diagram as in
https://ibm-cloud-architecture.github.io/refarch-kc/design/readme/#shipment-order-lifecycle-and-state-change-events
'''
class TestEventSourcingHappyPath(unittest.TestCase):
def test_createOrder(self):
# 1- load the order request from json
f = open('../data/FreshProductOrder.json','r')
order = json.load(f)
f.close()
# 2- create order by doing a POST on /api/orders of the orders command service
res = requests.post("http://" + ORDER_CMD_MS + "/orders",json=order)
orderID=json.loads(res.text)['orderID']
self.assertIsNotNone(orderID)
print('@@@@ Post new order -> resp with ID:' + orderID)
# 3- get OrderCreated Event
print('@@@@ wait for OrderCreated event with ID:' + orderID)
orderEvent = pollNextOrder(orderID)
self.assertEqual(orderEvent['type'], "OrderCreated")
# 4- get next order event, should be assigned to a voyage
print('@@@@ wait for OrderAssigned event from the voyage service for ' + orderID)
orderEvent = pollNextOrder(orderID)
self.assertEqual(orderEvent['type'], "OrderAssigned")
voyage=orderEvent['payload']
self.assertIsNotNone(voyage)
self.assertIsNotNone(voyage['voyageID'])
# 4.2- Verify voyageId is in the query model
time.sleep(10)
orderQuery = getOrderQuery(orderID)
voyageID=orderQuery['voyageID']
self.assertIsNotNone(voyageID)
# 5- Simulate assignment of the container
print('@@@@ post container allocation to mockup missing container ms for ' + orderID)
postContainerAllocated(orderID)
time.sleep(10)
orderQuery = getOrderQuery(orderID)
containerID=orderQuery['containerID']
self.assertIsNotNone(containerID)
# 6- list all events
orderEvents = getAllOrderedOrderEvents(orderID)
for oe in orderEvents:
print(oe)
if __name__ == '__main__':
unittest.main()
orderConsumer.close()
|
from setuptools import setup
import os
VERSION = "0.1"
def get_long_description():
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"),
encoding="utf8",
) as fp:
return fp.read()
setup(
name="discogsdata",
description="CLI for exploring/exploiting a DB populated from Discogs Data",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Brian M. Dennis",
url="https://github.com/crossjam/discogsdata",
project_urls={
"Issues": "https://github.com/crossjam/discogsdata/issues",
"CI": "https://github.com/crossjam/discogsdata/actions",
"Changelog": "https://github.com/crossjam/discogsdata/releases",
},
license="Apache License, Version 2.0",
version=VERSION,
packages=["discogsdata"],
entry_points="""
[console_scripts]
discogsdata=discogsdata.cli:cli
""",
install_requires=["click", "psycopg2"],
extras_require={"test": ["pytest", "psycopg2"]},
tests_require=["discogsdata[test]", "psycopg2"],
python_requires=">=3.6",
)
|
from django.contrib import admin
from django.urls import include, path, re_path
from rest_framework import routers
from api import views
from . import view
from django.conf import settings
router = routers.DefaultRouter()
router.register(r'movies', views.MovieViewSet)
#router.register(r'groups', views.GroupViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
re_path('^admin/',admin.site.urls),
path('', include(router.urls)),
#path('',view.index,name="index"),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
admin.site.site_header = settings.SITE_NAME + " Admin"
admin.site.site_title = settings.SITE_NAME + " Admin"
admin.site.index_title = "Welcome to " + settings.SITE_NAME
|
import os
import json
import time
import shutil
import logging
import zipfile
import platform
import tempfile
import subprocess
from pathlib import Path
from threading import Thread
import requests
from ...exceptions import BentoMLException
logger = logging.getLogger(__name__)
def get_command() -> str:
"""
ngrok command based on OS
"""
system = platform.system()
if system == "Darwin":
command = "ngrok"
elif system == "Windows":
command = "ngrok.exe"
elif system == "Linux":
command = "ngrok"
else:
raise BentoMLException(f"{system} is not supported")
return command
def log_url() -> None:
localhost_url = "http://localhost:4040/api/tunnels" # Url with tunnel details
while True:
time.sleep(1)
response = requests.get(localhost_url)
if response.status_code == 200:
data = json.loads(response.text)
if data["tunnels"]:
tunnel = data["tunnels"][0]
logger.info(
" Ngrok running at: %s",
tunnel["public_url"].replace("https://", "http://"),
)
logger.info(" Traffic stats available on http://127.0.0.1:4040")
return
else:
logger.info("Waiting for ngrok to start...")
def start_ngrok(port: int):
"""
Start ngrok server synchronously
"""
command = get_command()
ngrok_path = str(Path(tempfile.gettempdir(), "ngrok"))
download_ngrok(ngrok_path)
executable = str(Path(ngrok_path, command))
os.chmod(executable, 0o777)
Thread(target=log_url).start()
with subprocess.Popen([executable, "http", str(port)]) as ngrok_process:
ngrok_process.wait()
def download_ngrok(ngrok_path: str) -> None:
"""
Check OS and decide on ngrok download URL
"""
if Path(ngrok_path).exists():
return
system = platform.system()
if system == "Darwin":
url = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-darwin-amd64.zip"
elif system == "Windows":
url = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-windows-amd64.zip"
elif system == "Linux":
url = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip"
else:
raise Exception(f"{system} is not supported")
download_path = download_file(url)
with zipfile.ZipFile(download_path, "r") as zip_ref:
zip_ref.extractall(ngrok_path)
def download_file(url: str) -> str:
"""
Download ngrok binary file to local
Args:
url (:code:`str`):
URL to download
Returns:
:code:`download_path`: str
"""
local_filename = url.split("/")[-1]
r = requests.get(url, stream=True)
download_path = str(Path(tempfile.gettempdir(), local_filename))
with open(download_path, "wb") as f:
shutil.copyfileobj(r.raw, f)
return download_path
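# Illustrative usage sketch: expose a locally running server on port 3000
# through an ngrok tunnel (the binary is downloaded on first use):
#
#     start_ngrok(3000)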
|
from gen_drivfunc import *
import basic_func as bf
import random
from mpmath import *
import itertools
from scipy.optimize import basinhopping
import time
import signal
class TimeoutError (RuntimeError):
pass
def handler (signum, frame):
raise TimeoutError()
signal.signal(signal.SIGALRM, handler)
def extract_noempty_bound(bound):
mp.dps = 30
# bf.fpartition(bound)
ret_lst = []
if bound[0]==0:
ret_lst.append(bf.f64min)
else:
ret_lst.append(bound[0] + bf.getulp(bound[0]))
if bound[1] == 0:
ret_lst.append(bf.f64min)
else:
ret_lst.append(bound[1] - bf.getulp(bound[1]))
fpart_bound = bf.fdistribution_partition(bound[0],bound[1])
a = int(max(len(fpart_bound)/10.0,1))
if a == 0:
a = 1
for i in range(0,len(fpart_bound),a):
ret_lst.append(random.uniform(fpart_bound[i][0],fpart_bound[i][1]))
# step = [random.uniform(0,1) for i in range(0,10)]
# mpf1 = mpf(bound[1])
# mpf0 = mpf(bound[0])
# distance = mpf1-mpf0
# for i in step:
# ret_lst.append(float(mpf0+distance*i))
return ret_lst
def get_testing_point(bound):
points_lst = []
bl = len(bound)
for i in bound:
if i == []:
points_lst.append(bf.get_double_random())
else:
points_lst.append(extract_noempty_bound(i))
ret_lst = []
# for i in points_lst:
# print len(i)
for element in itertools.product(*points_lst):
ret_lst.append(list(element))
return ret_lst
def bdary_fun(pf,x):
try:
x = list(x)
except TypeError:
x = [x]
pf_res = pf(*x)
return pf_res
def reduce_lst(ovfps_lst):
temp_lst = []
cont = 0
for i in ovfps_lst:
cont = cont + 1
if i not in ovfps_lst[cont:]:
temp_lst.append(i)
return temp_lst
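# Descriptive summary inferred from the code below: FPexp_detect searches the
# given input bounds for concrete inputs that appear to trigger the
# floating-point exception instrumented in fun_exe. It seeds candidate points
# from the bounds, then repeatedly runs scipy's basinhopping (with Nelder-Mead
# local steps) on a fitness function that reaches 0 when the targeted
# exception fires, collecting and de-duplicating the triggering inputs.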
def FPexp_detect(fun_exe,widx_lst,bound,max_iter,num_exp):
# print bound
points_lst = get_testing_point(bound)
ovfps_lst = []
temp_bound = []
for i in bound:
if i == []:
temp_bound.append([-bf.f64max,bf.f64max])
# temp_bound.append([])
else:
temp_bound.append(i)
glob_fitness_fun = lambda x: bdary_fun(fun_exe, bf.reduce_x(temp_bound, x))
# glob_fitness_fun = lambda x: bdary_fun(fun_exe, x)
# minimizer_kwargs = {"method": "Powell"}
# minimizer_kwargs = {"method": "TNC"}
minimizer_kwargs = {"method": "Nelder-Mead"}
# max_iter = widx_lst[4].value
# print max_iter
# print num_exp
# bar = progressbar.ProgressBar(maxval=max_iter, \
# widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
# bar.start()
st = time.time()
temp_points = []
temp_ovfps_lst = []
for i in points_lst:
res = glob_fitness_fun(i)
widx_lst[3].value = 0
if res != 1e100:
temp_points.append(i)
st_flag = 0
for j in range(0, max_iter):
# print j
# print ovfps_lst
for i in temp_points:
# print i
try:
signal.alarm(5)
temp_i = bf.generate_x(temp_bound, i)
# temp_i = i
res = basinhopping(glob_fitness_fun, temp_i, minimizer_kwargs=minimizer_kwargs, niter_success=1)
if res.fun == 0:
try:
inp = list(res.x)
except TypeError:
inp = [res.x]
# print "inp"
# print res.x
# ovfps_lst.append(bf.reduce_x(temp_bound,res.x))
ovfps_lst.append(bf.reduce_x(temp_bound, inp))
break
except TimeoutError:
break
signal.alarm(60)
widx_lst[2][j] = widx_lst[3].value
widx_lst[1].value = j + 1
widx_lst[3].value = 0
et = time.time() - st
ovfps_lst = reduce_lst(ovfps_lst)
if len(temp_ovfps_lst) == len(ovfps_lst):
st_flag = st_flag + 1
else:
st_flag = 0
if st_flag > 30:
break
if len(ovfps_lst)>num_exp:
break
if et > 600:
break
temp_ovfps_lst = ovfps_lst + []
# bar.finish()
temp_lst = []
cont = 0
for i in ovfps_lst:
cont = cont + 1
if i not in ovfps_lst[cont:]:
temp_lst.append(i)
return temp_lst
def get_random_point(bound):
points_lst = []
bounds_lst = []
for i in bound:
bound_lst = bf.bound_fpDiv(i)
temp_pst = []
for j in bound_lst:
if 0.0 in j:
temp_pst.append(j[0])
temp_pst.append(j[1])
else:
if j[0]>0:
temp_pst.append(j[1])
else:
temp_pst.append(j[0])
points_lst.append(temp_pst)
bounds_lst.append(bound_lst)
ret_lst = []
ret_lst2 = []
for element in itertools.product(*points_lst):
ret_lst.append(list(element))
for element in itertools.product(*bounds_lst):
ret_lst2.append(list(element))
return ret_lst,ret_lst2
# ret_lst,ret_lst2 = get_random_point([[0,10],[0,10]])
# print ret_lst[0]
# print ret_lst2[0]
def issubnormal(res):
if (res!=0)&(fabs(res)<bf.f64min):
return 1
else:
return 0
def pure_fun_test(pf,x):
sf_res = pf(*x)
pf_res = sf_res.val
return pf_res
def random_sample_test(fun_exe,stat_fun,bound,num_exp):
# print bound
points_lst,bounds_lst = get_random_point(bound)
ovfps_lst = []
temp_bound = []
for i in bound:
if i == []:
temp_bound.append([-bf.f64max, bf.f64max])
# temp_bound.append([])
else:
temp_bound.append(i)
# glob_fitness_fun = lambda x: bdary_fun(fun_exe, bf.reduce_x(temp_bound, x))
glob_fitness_fun = lambda x: pure_fun_test(fun_exe, x)
nan_res = []
of_res = []
uf_res = []
temp_dect_lst = []
print len(points_lst)
print len(bounds_lst)
for i,j in zip(points_lst,bounds_lst):
res = glob_fitness_fun(i)
stat = stat_fun()
flag = 0
if isnan(res):
nan_res.append(i)
flag = 1
if isinf(res):
of_res.append(i)
flag = 1
if issubnormal(res):
uf_res.append(i)
flag = 1
if (flag == 1)&(stat == 0):
flag = 1
else:
flag = 0
if flag == 1:
temp_dect_lst.append([j,nan_res, of_res, uf_res])
return temp_dect_lst
def FPoverflow_detector(fun_exe,widx_lst,bound):
print bound
points_lst = get_testing_point(bound)
minimizer_kwargs = {"method": "Powell"}
ovfps_lst = []
temp_bound = []
for i in bound:
if i == []:
temp_bound.append([-bf.f64max,bf.f64max])
else:
temp_bound.append(i)
# glob_fitness_fun = lambda x: bdary_fun(fun_exe, bf.reduce_x(temp_bound, x))
glob_fitness_fun = lambda x: bdary_fun(fun_exe, x)
print len(points_lst)
for i in points_lst:
# print i
# print glob_fitness_fun(i)
# temp_i = bf.generate_x(temp_bound,i)
temp_i = i
res = basinhopping(glob_fitness_fun, temp_i, minimizer_kwargs=minimizer_kwargs, niter_success=1, niter=200)
if res.fun == 0:
widx_lst[3].value = 0
# ovfps_lst.append(bf.reduce_x(temp_bound,res.x))
ovfps_lst.append(res.x)
return ovfps_lst
# x = x >> 53
# print x
# print np.nan < bf.f64max
# print fabs(np.nan)
def Oflow_fun(pf,x):
fpmax = bf.f64max
sf_res = pf(*x)
pf_res = fabs(sf_res.val)
if(pf_res<fpmax):
w = fpmax - pf_res
else:
w = 0.0
return w
def FPexcption_detector_whole(fun_exe,stat_fun,bound):
points_lst = get_testing_point(bound)
minimizer_kwargs = {"method": "Powell"}
ovfps_lst = []
temp_bound = []
for i in bound:
if i == []:
temp_bound.append([-bf.f64max,bf.f64max])
else:
temp_bound.append(i)
glob_fitness_fun = lambda x: Oflow_fun(fun_exe, bf.reduce_x(temp_bound, x))
# print len(points_lst)
for i in points_lst:
# print i
try:
temp_i = bf.generate_x(temp_bound, i)
res = basinhopping(glob_fitness_fun, temp_i, minimizer_kwargs=minimizer_kwargs, niter_success=1, niter=200)
# print res.fun
if res.fun == 0:
inp = bf.reduce_x(temp_bound, res.x)
glob_fitness_fun(inp)
stat = stat_fun()
if stat == 0:
ovfps_lst.append(inp)
except (TypeError,TimeoutError):
continue
temp_lst = []
cont = 0
for i in ovfps_lst:
cont = cont + 1
if i not in ovfps_lst[cont:]:
temp_lst.append(i)
return temp_lst
def get_bound_type(ret_vals,num_excp):
if 0 not in ret_vals:
bound_type = 2
else:
if len(ret_vals) == 1:
if num_excp == 0:
bound_type = 1
else:
bound_type = 3
else:
if num_excp == 0:
bound_type = 4
else:
bound_type = 3
return bound_type
def append_noempty_lst(aplst,a):
if a!=[]:
aplst.append(a)
return aplst
def detect_exception(fid,test_fun,eva_bound_lst):
fun_dz, widx_lstdz = load_fpdz_fun(test_fun[0])
fun_nan, widx_lstnan = load_fpnan_fun(test_fun[0])
fun_of, widx_lstof = load_fpof_fun(test_fun[0])
fun_uf, widx_lstuf = load_fpuf_fun(test_fun[0])
# fun_pu, stat_fun = load_pure_fun(test_fun[0])
detect_lst = []
detect_lst.append(fid)
detect_lst.append(test_fun[0])
# print fun_dz(-1.8427611519777438)
temp_res = []
limit_time = 120
st = time.time()
for i in eva_bound_lst:
# temp_res.append(i)
ret_vals = i[0][1]
num_excp = i[0][0]
bt = get_bound_type(ret_vals, num_excp)
# print bt
temp_dect_lst = []
temp_res.append(bt)
bound = i[1]
# print i
dz_res= []
nan_res= []
uf_res= []
of_res= []
try:
signal.alarm(limit_time)
if bt == 3:
if fid != 97:
# print "detect res"
dz_res = FPexp_detect(fun_dz, widx_lstdz, bound, widx_lstdz[4].value, num_excp)
nan_res = FPexp_detect(fun_nan, widx_lstnan, bound, widx_lstdz[4].value, num_excp)
of_res = FPexp_detect(fun_of, widx_lstof, bound, widx_lstdz[4].value, num_excp)
# uf_res = FPexp_detect(fun_uf, widx_lstuf, bound, widx_lstuf[4].value, widx_lstuf[4].value)
temp_dect_lst.append([dz_res,nan_res,of_res])
temp_dect_lst.append(bound)
else:
b00 = bound[0][0]
if b00 < -400:
bound[0][0] = -400
dz_res = FPexp_detect(fun_dz, widx_lstdz, bound, widx_lstdz[4].value, num_excp)
nan_res = FPexp_detect(fun_nan, widx_lstnan, bound, widx_lstdz[4].value, num_excp)
of_res = FPexp_detect(fun_of, widx_lstof, bound, widx_lstdz[4].value, num_excp)
uf_res = FPexp_detect(fun_uf, widx_lstuf, bound, widx_lstuf[4].value, widx_lstuf[4].value)
                    temp_dect_lst.append([dz_res, nan_res, of_res])
temp_dect_lst.append(bound)
except TimeoutError:
temp_dect_lst.append([dz_res, nan_res, of_res])
signal.alarm(0)
temp_res.append(temp_dect_lst)
et = time.time() - st
detect_lst.append(temp_res)
# detect_lst.append(et)
return detect_lst
def detect_underflow(fid,test_fun,eva_bound_lst):
fun_dz, widx_lstdz = load_fpdz_fun(test_fun[0])
fun_nan, widx_lstnan = load_fpnan_fun(test_fun[0])
fun_of, widx_lstof = load_fpof_fun(test_fun[0])
fun_uf, widx_lstuf = load_fpuf_fun(test_fun[0])
# fun_pu, stat_fun = load_pure_fun(test_fun[0])
detect_lst = []
detect_lst.append(fid)
detect_lst.append(test_fun[0])
# print fun_dz(-1.8427611519777438)
temp_res = []
limit_time = 60
print widx_lstuf
st = time.time()
for i in eva_bound_lst:
# temp_res.append(i)
ret_vals = i[0][1]
num_excp = i[0][0]
bt = get_bound_type(ret_vals, num_excp)
# print bt
temp_dect_lst = []
temp_res.append(bt)
bound = i[1]
# print i
dz_res= []
nan_res= []
uf_res= []
of_res= []
try:
signal.alarm(limit_time)
if bt == 3:
if fid != 97:
# print "detect res"
# dz_res = FPexp_detect(fun_dz, widx_lstdz, bound, widx_lstdz[4].value, num_excp)
# nan_res = FPexp_detect(fun_nan, widx_lstnan, bound, widx_lstdz[4].value, num_excp)
# of_res = FPexp_detect(fun_of, widx_lstof, bound, widx_lstdz[4].value, num_excp)
uf_res = FPexp_detect(fun_uf, widx_lstuf, bound, widx_lstuf[4].value, widx_lstuf[4].value)
temp_dect_lst.append(uf_res)
else:
b00 = bound[0][0]
if b00 < -400:
bound[0][0] = -400
# dz_res = FPexp_detect(fun_dz, widx_lstdz, bound, widx_lstdz[4].value, num_excp)
# nan_res = FPexp_detect(fun_nan, widx_lstnan, bound, widx_lstdz[4].value, num_excp)
# of_res = FPexp_detect(fun_of, widx_lstof, bound, widx_lstdz[4].value, num_excp)
uf_res = FPexp_detect(fun_uf, widx_lstuf, bound, widx_lstuf[4].value, widx_lstuf[4].value)
temp_dect_lst.append(uf_res)
else:
if bt in [1,4]:
uf_res = FPexp_detect(fun_uf, widx_lstuf, bound, widx_lstuf[4].value, widx_lstuf[4].value)
temp_dect_lst.append(uf_res)
except TimeoutError:
temp_dect_lst.append(uf_res)
signal.alarm(0)
temp_res.append(temp_dect_lst)
et = time.time() - st
detect_lst.append(temp_res)
# detect_lst.append(et)
return detect_lst
|
from .numbers import number_to_scientific_latex
from .string import StrPrinter
from ..units import _latex_from_dimensionality
class LatexPrinter(StrPrinter):
_default_settings = dict(
StrPrinter._default_settings,
repr_name='latex',
Equilibrium_arrow=r'\rightleftharpoons',
Reaction_arrow=r'\rightarrow',
magnitude_fmt=number_to_scientific_latex,
unit_fmt=_latex_from_dimensionality
)
def _print_Substance(self, substance, **kwargs):
return substance.latex_name or substance.name
def latex(obj, **settings):
return LatexPrinter(settings).doprint(obj)
|
import geopandas as gpd
import pandas as pd
from .grids import *
from CoordinatesConverter import getdistance
def clean_same(data,col = ['VehicleNum','Time','Lng','Lat']):
'''
    Delete records whose information is identical to both the previous and the next record, to reduce the data volume. For example, if several consecutive records of an individual differ only in time, only the first and the last of them are kept
Parameters
-------
data : DataFrame
Data
col : List
        Column names, in the order of ['VehicleNum', 'Time', 'Lng', 'Lat'] plus any extra columns to compare. The data are sorted by time, and all columns other than the time are compared
Returns
-------
data1 : DataFrame
Cleaned data
'''
[VehicleNum,Time,Lng,Lat] = col[:4]
extra = col[4:]
data1 = data.copy()
data1 = data1.drop_duplicates(subset = [VehicleNum,Time])
data1 = data1.sort_values(by = [VehicleNum,Time])
data1['issame'] = 0
for i in [VehicleNum,Lng,Lat]+extra:
data1['issame'] += (data1[i].shift()==data1[i])&(data1[i].shift(-1)==data1[i])
data1 = data1[-(data1['issame'] == len([VehicleNum,Lng,Lat]+extra))]
data1 = data1.drop('issame',axis = 1)
return data1
def clean_drift(data,col = ['VehicleNum','Time','Lng','Lat'],speedlimit = 80,dislimit = 1000):
'''
    Delete drift points from trajectory data. A point is treated as drift if the speed (or distance) between it and both its previous and next points exceeds the limit, while the speed (or distance) between the previous and next points themselves stays below the limit. The Time column is processed more efficiently if it is already in datetime format.
Parameters
-------
data : DataFrame
Data
col : List
Column names, in the order of [‘VehicleNum’, ‘Time’, ‘Lng’, ‘Lat’]
    speedlimit : number
        Speed limit; points exceeding it (as described above) are treated as drift. Set to None to skip the speed-based filter
    dislimit : number
        Distance limit; points exceeding it (as described above) are treated as drift. Set to None to skip the distance-based filter
Returns
-------
data1 : DataFrame
Cleaned data
'''
[VehicleNum,Time,Lng,Lat] = col
data1 = data.copy()
data1 = data1.drop_duplicates(subset = [VehicleNum,Time])
data1[Time+'_dt'] = pd.to_datetime(data1[Time])
data1 = data1.sort_values(by = [VehicleNum,Time])
for i in [VehicleNum,Lng,Lat,Time+'_dt']:
data1[i+'_pre'] = data1[i].shift()
data1[i+'_next'] = data1[i].shift(-1)
data1['dis_pre'] = getdistance(data1[Lng],data1[Lat],data1[Lng+'_pre'],data1[Lat+'_pre'])
data1['dis_next'] = getdistance(data1[Lng],data1[Lat],data1[Lng+'_next'],data1[Lat+'_next'])
data1['dis_prenext'] = getdistance(data1[Lng+'_pre'],data1[Lat+'_pre'],data1[Lng+'_next'],data1[Lat+'_next'])
data1['timegap_pre'] = data1[Time+'_dt'] - data1[Time+'_dt_pre']
data1['timegap_next'] = data1[Time+'_dt_next'] - data1[Time+'_dt']
data1['timegap_prenext'] = data1[Time+'_dt_next'] - data1[Time+'_dt_pre']
data1['speed_pre'] = data1['dis_pre']/data1['timegap_pre'].dt.total_seconds()*3.6
data1['speed_next'] = data1['dis_next']/data1['timegap_next'].dt.total_seconds()*3.6
data1['speed_prenext'] = data1['dis_prenext']/data1['timegap_prenext'].dt.total_seconds()*3.6
if speedlimit:
data1 = data1[-((data1[VehicleNum+'_pre'] == data1[VehicleNum])&(data1[VehicleNum+'_next'] == data1[VehicleNum])&\
(data1['speed_pre']>speedlimit)&(data1['speed_next']>speedlimit)&(data1['speed_prenext']<speedlimit))]
if dislimit:
        data1 = data1[~((data1[VehicleNum+'_pre'] == data1[VehicleNum])&(data1[VehicleNum+'_next'] == data1[VehicleNum])&\
                        (data1['dis_pre']>dislimit)&(data1['dis_next']>dislimit)&(data1['dis_prenext']<dislimit))]
data1 = data1[data.columns]
return data1
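# Minimal usage sketch for clean_drift (illustrative values only): a single point that jumps far
# away from an otherwise smooth track is dropped once both the speed and distance tests fire.
def _example_clean_drift():
    toy = pd.DataFrame({'VehicleNum': [1, 1, 1, 1],
                        'Time': ['2022-01-01 08:00:00', '2022-01-01 08:00:10',
                                 '2022-01-01 08:00:20', '2022-01-01 08:00:30'],
                        'Lng': [113.900, 113.901, 114.500, 113.902],  # 114.500 is the drift point
                        'Lat': [22.500, 22.501, 22.900, 22.502]})
    return clean_drift(toy, col=['VehicleNum', 'Time', 'Lng', 'Lat'],
                       speedlimit=80, dislimit=1000)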
def clean_outofbounds(data,bounds,col = ['Lng','Lat']):
'''
    Given the longitude and latitude of the lower-left and upper-right corners of the study area, exclude the data that fall outside it
Parameters
-------
data : DataFrame
Data
bounds : List
Latitude and longitude of the lower left and upper right of the study area, in the order of [lon1, lat1, lon2, lat2]
col : List
        Column names of longitude and latitude
Returns
-------
data1 : DataFrame
Data within the scope of the study
'''
lon1,lat1,lon2,lat2 = bounds
if (lon1>lon2)|(lat1>lat2)|(abs(lat1)>90)|(abs(lon1)>180)|(abs(lat2)>90)|(abs(lon2)>180):
raise Exception('Bounds error. The input bounds should be in the order of [lon1,lat1,lon2,lat2]. (lon1,lat1) is the lower left corner and (lon2,lat2) is the upper right corner.')
Lng,Lat = col
data1 = data.copy()
data1 = data1[(data1[Lng]>bounds[0])&(data1[Lng]<bounds[2])&(data1[Lat]>bounds[1])&(data1[Lat]<bounds[3])]
return data1
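# Minimal usage sketch for clean_outofbounds (the bounds below are illustrative only):
def _example_clean_outofbounds():
    toy = pd.DataFrame({'Lng': [113.9, 150.0], 'Lat': [22.5, 22.5]})
    bounds = [113.6, 22.4, 114.8, 22.9]  # [lon1, lat1, lon2, lat2]
    return clean_outofbounds(toy, bounds, col=['Lng', 'Lat'])  # keeps only the first row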
def clean_outofshape(data,shape,col = ['Lng','Lat'],accuracy=500):
'''
Input the GeoDataFrame of the study area and exclude the data beyond the study area
Parameters
-------
data : DataFrame
Data
shape : GeoDataFrame
The GeoDataFrame of the study area
col : List
        Column names of longitude and latitude
    accuracy : number
        Grid size. The data is gridded first and then tested against the study area; the smaller the grid, the higher the accuracy (and the higher the cost)
Returns
-------
data1 : DataFrame
Data within the scope of the study
'''
Lng,Lat = col
shape_unary = shape.unary_union
bounds = shape_unary.bounds
params = grid_params(bounds,accuracy)
data1 = data.copy()
data1['LONCOL'],data1['LATCOL'] = GPS_to_grids(data1[Lng],data1[Lat],params)
data1_gdf = data1[['LONCOL','LATCOL']].drop_duplicates()
data1_gdf['geometry'] = gridid_to_polygon(data1_gdf['LONCOL'],data1_gdf['LATCOL'],params)
data1_gdf = gpd.GeoDataFrame(data1_gdf)
data1_gdf = data1_gdf[data1_gdf.intersects(shape_unary)]
data1 = pd.merge(data1,data1_gdf[['LONCOL','LATCOL']]).drop(['LONCOL','LATCOL'],axis = 1)
return data1
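# Minimal usage sketch for clean_outofshape; 'study_area.shp' is a hypothetical file path,
# not something shipped with this package:
def _example_clean_outofshape():
    shape = gpd.read_file('study_area.shp')  # any polygon GeoDataFrame in WGS84
    toy = pd.DataFrame({'Lng': [113.9, 150.0], 'Lat': [22.5, 22.5]})
    # A finer grid (smaller accuracy) follows the polygon boundary more closely but costs more time.
    return clean_outofshape(toy, shape, col=['Lng', 'Lat'], accuracy=500)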
def clean_traj(data,col = ['uid','str_time','lon','lat'],tripgap = 1800,disgap = 50000,speedlimit = 80):
'''
    A combined routine for trajectory data cleaning: it removes duplicated and drifting points, then splits each individual's records into trips using a time-gap threshold and a distance-gap threshold
Parameters
-------
data : DataFrame
Trajectory data
col : List
        Column names, in the order of [individual ID, time, longitude, latitude]
    tripgap : number
        Time gap above which the following record starts a new trip
    disgap : number
        Distance gap above which the following record starts a new trip
    speedlimit : number
        Speed limit passed on to the drift-cleaning step
Returns
-------
data1 : DataFrame
Cleaned data
'''
uid,timecol,lon,lat = col
    data = data.copy()  # work on a copy so the caller's DataFrame is not modified
    data[timecol] = pd.to_datetime(data[timecol])
    data = data.sort_values(by = [uid,timecol])
cols = []
for i in data.columns:
if i not in [uid,timecol,lon,lat]:
cols.append(i)
data = clean_same(data,col = [uid,timecol,lon,lat]+cols)
data = clean_drift(data,col = [uid, timecol, lon, lat],
speedlimit=speedlimit)
data = id_reindex(data,uid,timecol = timecol,timegap = tripgap)
data = data.rename(columns = {uid+'_new':'tripid'})
data = id_reindex_disgap(data,col = ['tripid',lon,lat],disgap=disgap,suffix='')
data1 = data.copy()
data1['lon1'] = data1[lon].shift(-1)
data1['lat1'] = data1[lat].shift(-1)
data1['tripid1'] = data1['tripid'].shift(-1)
data1 = data1[data1['tripid']==data1['tripid1']]
data1['dis'] = getdistance(data1[lon],data1[lat],data1['lon1'],data1['lat1'])
a = data1.groupby(['tripid'])['dis'].sum()
    a = a[~(a<50)].reset_index()['tripid']  # keep only trips with a total length of at least 50 (same unit as getdistance)
data = pd.merge(data,a)
data = data.drop('tripid',axis = 1)
data = id_reindex(data,uid,timecol = timecol,timegap = tripgap)
data = data.rename(columns = {uid+'_new':'tripid'})
data = id_reindex_disgap(data,col = ['tripid',lon,lat],disgap=disgap,suffix='')
return data
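# Minimal usage sketch for clean_traj; `raw` is assumed to be a GPS log with one row per fix and
# the default column names used below:
def _example_clean_traj(raw):
    # Removes duplicated and drifting points, then splits each user's log into trips separated
    # by 30-minute gaps or 50 km jumps.
    return clean_traj(raw, col=['uid', 'str_time', 'lon', 'lat'],
                      tripgap=1800, disgap=50000, speedlimit=80)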
def dataagg(data,shape,col = ['Lng','Lat','count'],accuracy=500):
'''
Aggregate data to traffic zone
Parameters
-------
data : DataFrame
The origin DataFrame
shape : GeoDataFrame
The shape of the traffic zone
col : List
        Either two columns, ['Lng','Lat'], or three columns, ['Lng','Lat','count'], where 'count' is the number of points represented by each record
    accuracy : number
        Grid size. The data is gridded first and then aggregated to the zones; the smaller the grid, the higher the accuracy (and the higher the cost)
Returns
-------
aggresult : GeoDataFrame
Traffic zone. The count column is the output result
data1 : DataFrame
The zone-matched data
'''
if len(col) == 2:
Lng,Lat = col
aggcol = None
else:
Lng,Lat,aggcol = col
shape['index'] = range(len(shape))
shape_unary = shape.unary_union
bounds = shape_unary.bounds
params = grid_params(bounds,accuracy)
data1 = data.copy()
data1['LONCOL'],data1['LATCOL'] = GPS_to_grids(data1[Lng],data1[Lat],params)
data1_gdf = data1[['LONCOL','LATCOL']].drop_duplicates()
data1_gdf['geometry'] = gpd.points_from_xy(*grids_centre(data1_gdf['LONCOL'],data1_gdf['LATCOL'],params))
data1_gdf = gpd.GeoDataFrame(data1_gdf)
data1_gdf = gpd.sjoin(data1_gdf,shape,how = 'left')
data1 = pd.merge(data1,data1_gdf).drop(['LONCOL','LATCOL'],axis = 1)
if aggcol:
aggresult = pd.merge(shape,data1.groupby('index')[aggcol].sum().reset_index()).drop('index',axis = 1)
else:
data1['_'] = 1
aggresult = pd.merge(shape,data1.groupby('index')['_'].sum().rename('count').reset_index()).drop('index',axis = 1)
data1 = data1.drop('_',axis = 1)
data1 = data1.drop('index',axis = 1)
return aggresult,data1
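# Minimal usage sketch for dataagg; 'zones.shp' is a hypothetical traffic-zone polygon file:
def _example_dataagg():
    zones = gpd.read_file('zones.shp')
    toy = pd.DataFrame({'Lng': [113.90, 113.91, 113.92], 'Lat': [22.50, 22.51, 22.52]})
    aggresult, matched = dataagg(toy, zones, col=['Lng', 'Lat'], accuracy=500)
    return aggresult, matched  # aggresult carries a 'count' column per zone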
def id_reindex_disgap(data,col = ['uid','lon','lat'],disgap=1000,suffix = '_new'):
'''
    Renumber the ID column of the data: whenever two adjacent records of the same ID are farther apart than disgap, a new ID is started
Parameters
-------
data : DataFrame
Data
    col : List
        Column names, in the order of [ID, longitude, latitude]
    disgap : number
        Distance above which two adjacent records are assigned to different IDs
suffix : str
The suffix of the new column. When set to False, the former column will be replaced
Returns
-------
data1 : DataFrame
Renumbered data
'''
uid,lon,lat = col
data1 = data.copy()
data1[uid+suffix] = ((data1[uid].shift()!=data1[uid])|
(getdistance(data1[lon],data1[lat],data1[lon].shift(),data1[lat].shift())>disgap)).astype(int).cumsum()-1
a = data1.groupby([uid+suffix])[lon].count()
data1 = pd.merge(data1,a[a>1].reset_index()[[uid+suffix]])
return data1
def id_reindex(data,col,new = False,timegap = None,timecol = None,suffix = '_new',sample = None):
'''
Renumber the ID columns of the data
Parameters
-------
data : DataFrame
Data
col : str
Name of the ID column to be re-indexed
new : bool
        False: all records of the same original ID get the same new index; True: a new index is assigned each time the ID changes along the table order, so a reappearing ID gets a different index
    timegap : number
        If an individual does not appear for longer than timegap (in seconds), it is renumbered as a new individual. Only takes effect together with timecol
    timecol : str
        The column name of time; only takes effect together with timegap
    suffix : str
        The suffix of the new column. When set to False, the former column will be replaced
    sample : int (optional)
        If set, randomly keep only this many individuals (downsampling)
Returns
-------
data1 : DataFrame
Renumbered data
'''
if suffix == False:
suffix = ''
data1 = data.copy()
if new:
data1[col+suffix]=data1[col]!=data1[col].shift()
data1[col+suffix]=data1[col+suffix].cumsum()-1
else:
tmp=data1[[col]].drop_duplicates()
tmp[col+'_']=range(len(tmp))
data1=pd.merge(data1,tmp,on=col)
data1[col+suffix] = data1[col+'_']
if suffix != '_':
data1 = data1.drop(col+'_',axis = 1)
if (timegap is not None)&(timecol is not None):
data1[timecol] = pd.to_datetime(data1[timecol])
data1 = data1.sort_values(by = [col+suffix,timecol])
data1[col+suffix] = ((data1[col+suffix].shift()!=data1[col+suffix])|
((data1[timecol]-data1[timecol].shift()).dt.total_seconds()>timegap)).astype(int).cumsum()-1
if sample:
tmp = data1[col+suffix].drop_duplicates().sample(sample)
data1 = pd.merge(data1,tmp)
return data1
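# Minimal usage sketch for id_reindex: renumber user IDs to consecutive integers, starting a new
# ID whenever the same user disappears for more than 30 minutes (values are illustrative):
def _example_id_reindex(raw):
    out = id_reindex(raw, 'uid', timecol='str_time', timegap=1800)
    return out  # the new numbering is written to the 'uid_new' column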
|
from scapy.all import sr
from craft_a_packet import generate_icmp_packet
def send_and_receive_packets(packets_to_send):
'''
    Send packets and collect the answered results, discarding unanswered packets.
    packets_to_send: a list of packets to be sent
'''
responses = []
    for packet in packets_to_send:  # send each packet in turn and keep only the answered results
        answered, _unanswered = sr(packet, timeout=10)
        responses.append(answered)
return responses
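# The real packet builder lives in craft_a_packet; for reference, a plausible equivalent
# (an assumption, not the actual implementation) could look like this:
def _example_icmp_packet(destination_ip, ttl):
    from scapy.all import IP, ICMP
    # An ICMP echo request whose IP header carries the requested TTL.
    return IP(dst=destination_ip, ttl=ttl) / ICMP()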
def get_all_hops(destination_ip):
'''
    Take a destination IP and send packets with TTL decreasing from 10 to 1.
    Print the TTL and source IP of each answered packet; unanswered TTLs produce no output.
    When this was run on campus, only ttl = 10, 9, 8 and 1 were answered.
    Several dividing lines were printed for testing; that code was removed and the script was not re-run afterwards.
'''
ttl = 10
packets = {}
while ttl >= 1: # send packets and store answered packet list in a dictionary with ttl as key
packet = generate_icmp_packet(destination_ip, ttl)
response = send_and_receive_packets(packet)
packets[ttl] = response
ttl = ttl - 1
for i in range(10, 0, -1): # loop through to show source IP for each ttl, unanswered packets for that ttl was ignored
try:
print(f'TTL value: {i} | Source IP in response: {packets[i][0][0][1].src}')
# the indexing here is to first get ttl corresponding response list from dictionary,
# second indexing is to get the first item from the response list, then get the ip src from the packet
        except (KeyError, IndexError):  # no answer was recorded for this TTL
continue
return packets
get_all_hops('8.8.8.8')
|
from django.apps import AppConfig
class DataSourcesConfig(AppConfig):
name = 'data_sources'
|
from .evaluation import (
visu_preds_and_targets,
visu_preds,
visualize,
voronoi_IPF_plot,
in_and_out_of_plane,
evaluate,
EvaluationManager
)
from .losses import custom_loss
from .metrics import moa, misorientation
from .models import CustomModel
|
import numpy as np
from scipy import interpolate
def interpolate_path(dmp, path):
    """Resample a demonstrated path onto the DMP's time grid (dmp.cs.N samples spaced dmp.dt apart)."""
    time = np.linspace(0, dmp.cs.T, path.shape[0])
    inter = interpolate.interp1d(time, path)
    y = np.array([inter(i * dmp.dt) for i in range(dmp.cs.N)])
    return y
def calc_derivatives(y, dt):
# velocity
yd = np.diff(y) / dt
yd = np.concatenate(([0], yd))
# acceleration
ydd = np.diff(yd) / dt
ydd = np.concatenate(([0], ydd))
return yd, ydd
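# Minimal usage sketch (toy 1-D trajectory; dt is illustrative): the derivatives are simple
# forward differences with a zero prepended so the arrays keep their original length.
def _example_derivatives():
    dt = 0.01
    y = np.sin(np.linspace(0, np.pi, 100))
    yd, ydd = calc_derivatives(y, dt)
    return yd, ydd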
|
# -*- coding: utf-8 -*-
from . import utils
import requests
def get_product(barcode, locale='world'):
"""
Return information of a given product.
"""
url = utils.build_url(geography=locale,
service='api',
resource_type='product',
parameters=barcode)
return utils.fetch(url)
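# Minimal usage sketch; the barcode is a placeholder, and the return value is whatever
# utils.fetch produces for the product endpoint (assumed to be the parsed JSON document):
def _example_get_product():
    return get_product('0000000000000', locale='world')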
def get_by_facets(query, page=1, locale='world'):
"""
Return products for a set of facets.
"""
path = []
keys = query.keys()
if len(keys) == 0:
return []
else:
keys = sorted(keys)
for key in keys:
path.append(key)
path.append(query[key])
url = utils.build_url(geography=locale,
resource_type=path,
parameters=str(page))
return utils.fetch(url)['products']
def add_new_product(post_data, locale='world'):
"""
Add a new product to OFF database.
"""
    if not post_data.get('code') or not post_data.get('product_name'):
        raise ValueError('code or product_name not found!')
    url = utils.build_url(geography=locale,
                          service='cgi',
                          resource_type='product_jqm2.pl')
return requests.post(url, data=post_data)
def upload_image(code, imagefield, img_path):
"""
Add new image for a product
"""
if imagefield not in ["front", "ingredients", "nutrition"]:
raise ValueError("Imagefield not valid!")
image_payload = {"imgupload_%s" % imagefield: open(img_path, 'rb')}
url = utils.build_url(service='cgi',
resource_type='product_image_upload.pl')
other_payload = {'code': code, 'imagefield': imagefield}
    # Do not set the Content-Type header manually: requests must generate the
    # multipart boundary itself when a `files` payload is passed.
    return requests.post(url=url,
                         data=other_payload,
                         files=image_payload)
def search(query, page=1, page_size=20,
sort_by='unique_scans', locale='world'):
"""
Perform a search using Open Food Facts search engine.
"""
parameters = {'search_terms': query,
'page': page,
'page_size': page_size,
'sort_by': sort_by,
'json': '1'}
url = utils.build_url(geography=locale,
service='cgi',
resource_type='search.pl',
parameters=parameters)
return utils.fetch(url, json_file=False)
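# Minimal usage sketch for the search helper (the query text is arbitrary; the exact return type
# depends on utils.fetch, which is not shown here):
def _example_search():
    return search('chocolate', page=1, page_size=5, sort_by='unique_scans')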
def advanced_search(post_query):
"""
Perform advanced search using OFF search engine
"""
post_query['json'] = '1'
url = utils.build_url(service='cgi',
resource_type='search.pl',
parameters=post_query)
return utils.fetch(url, json_file=False)
|
from django.db import models
from accounts.models import IncomeAccount
# Create your models here.
class Item(models.Model):
name = models.CharField(max_length=100)
total_units = models.PositiveIntegerField()
remaining_units = models.PositiveIntegerField()
unit_price = models.DecimalField(max_digits=12, decimal_places=2)
description = models.TextField()
    account = models.ForeignKey(IncomeAccount, on_delete=models.CASCADE)  # on_delete is required on Django 2+; CASCADE assumed here
def __str__(self):
return self.name
|
"""Support for the OpenWeatherMap (OWM) service."""
from __future__ import annotations
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
ATTR_API_FORECAST,
ATTRIBUTION,
DEFAULT_NAME,
DOMAIN,
ENTRY_NAME,
ENTRY_WEATHER_COORDINATOR,
FORECAST_SENSOR_TYPES,
MANUFACTURER,
WEATHER_SENSOR_TYPES,
)
from .weather_update_coordinator import WeatherUpdateCoordinator
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up OpenWeatherMap sensor entities based on a config entry."""
domain_data = hass.data[DOMAIN][config_entry.entry_id]
name = domain_data[ENTRY_NAME]
weather_coordinator = domain_data[ENTRY_WEATHER_COORDINATOR]
entities: list[AbstractOpenWeatherMapSensor] = [
OpenWeatherMapSensor(
name,
f"{config_entry.unique_id}-{description.key}",
description,
weather_coordinator,
)
for description in WEATHER_SENSOR_TYPES
]
entities.extend(
[
OpenWeatherMapForecastSensor(
f"{name} Forecast",
f"{config_entry.unique_id}-forecast-{description.key}",
description,
weather_coordinator,
)
for description in FORECAST_SENSOR_TYPES
]
)
async_add_entities(entities)
class AbstractOpenWeatherMapSensor(SensorEntity):
"""Abstract class for an OpenWeatherMap sensor."""
_attr_should_poll = False
_attr_extra_state_attributes = {ATTR_ATTRIBUTION: ATTRIBUTION}
def __init__(
self,
name: str,
unique_id: str,
description: SensorEntityDescription,
coordinator: DataUpdateCoordinator,
) -> None:
"""Initialize the sensor."""
self.entity_description = description
self._coordinator = coordinator
self._attr_name = f"{name} {description.name}"
self._attr_unique_id = unique_id
split_unique_id = unique_id.split("-")
self._attr_device_info = DeviceInfo(
entry_type=DeviceEntryType.SERVICE,
identifiers={(DOMAIN, f"{split_unique_id[0]}-{split_unique_id[1]}")},
manufacturer=MANUFACTURER,
name=DEFAULT_NAME,
)
@property
def attribution(self) -> str:
"""Return the attribution."""
return ATTRIBUTION
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._coordinator.last_update_success
async def async_added_to_hass(self) -> None:
"""Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self._coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self) -> None:
"""Get the latest data from OWM and updates the states."""
await self._coordinator.async_request_refresh()
class OpenWeatherMapSensor(AbstractOpenWeatherMapSensor):
"""Implementation of an OpenWeatherMap sensor."""
def __init__(
self,
name: str,
unique_id: str,
description: SensorEntityDescription,
weather_coordinator: WeatherUpdateCoordinator,
) -> None:
"""Initialize the sensor."""
super().__init__(name, unique_id, description, weather_coordinator)
self._weather_coordinator = weather_coordinator
@property
def native_value(self) -> StateType:
"""Return the state of the device."""
return self._weather_coordinator.data.get(self.entity_description.key, None)
class OpenWeatherMapForecastSensor(AbstractOpenWeatherMapSensor):
"""Implementation of an OpenWeatherMap this day forecast sensor."""
def __init__(
self,
name: str,
unique_id: str,
description: SensorEntityDescription,
weather_coordinator: WeatherUpdateCoordinator,
) -> None:
"""Initialize the sensor."""
super().__init__(name, unique_id, description, weather_coordinator)
self._weather_coordinator = weather_coordinator
@property
def native_value(self) -> StateType:
"""Return the state of the device."""
forecasts = self._weather_coordinator.data.get(ATTR_API_FORECAST)
if forecasts is not None and len(forecasts) > 0:
return forecasts[0].get(self.entity_description.key, None)
return None
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 27 11:04:50 2019
@author: Ham
HackerRank Challenge: Words Score
In this challenge, the task is to debug the existing code
to successfully execute all provided test files.
Consider that vowels in the alphabet are a, e, i, o, u and y.
Function score_words takes a list of lowercase words as an argument
and returns a score as follows:
The score of a single word is 2 if the word contains an even number of vowels.
Otherwise, the score of this word is 1.
The score for the whole list of words is the sum of scores of all words in the list.
Debug the given function score_words such that it returns a correct score.
Your function will be tested on several cases by the locked template code.
Input Format
The input is read by the provided locked code template.
In the first line, there is a single integer N denoting the number of words.
In the second line, there are N space-separated lowercase words.
Constraints
Each word has at most letters and all letters are English lowercase letters
Output Format
The output is produced by the provided and locked code template.
It calls function score_words with the list of words read from the input
as the argument and prints the returned score to the output.
Sample Input 0
2
hacker book
Sample Output 0
4
Explanation 0
There are two words in the input: hacker and book.
The score of the word hacker is 2 because it contains an even number of vowels,
i.e. 2 vowels, and the score of book is 2 for the same reason.
Thus the total score is 4.
Sample Input 1
3
programming is awesome
Sample Output 1
4
Explanation 1
There are 3 words in the input: programming, is and awesome.
The score of programming is 1 since it contains 3 vowels, an odd number of vowels.
The score of is is also 1 because it has an odd number of vowels.
The score of awesome is 2 since it contains 4 vowels, an even number of vowels.
Thus, the total score is 1+1+2=4.
"""
def is_vowel(letter):
return letter in ['a', 'e', 'i', 'o', 'u', 'y']
def score_words(words):
#score = 0
#for w in words:
# v = sum([1 for l in w if is_vowel(l)])
# score += (2 if (v % 2 == 0) else 1)
return sum([2 if (sum([1 for l in w if is_vowel(l)]) % 2 == 0) else 1 for w in words])
input() # discarded
#words = input().split()
print(score_words(input().split()))
|
#-*- coding: utf-8 -*-
import numpy as np
from scipy.integrate import odeint
from scipy.sparse.linalg import eigs
import pandas
from scipy.integrate import solve_ivp
from scipy.linalg.blas import dgemm, daxpy
from numba import jit
def convert_data(df, ssdf):
'''Converts pandas dataframe into numpy array and rearranges data for the following steps.
Arguments:
    df -- dataframe containing mean labeled-cell fractions and their errors over time
    ssdf -- dataframe containing steady-state fractions in S and G2, mean values and errors
'''
array = df.to_numpy()
vector_data = np.transpose(array).reshape(1, np.size(array))[0]
data, error = np.split(vector_data, [len(vector_data) // 2])
data = np.append(data, ssdf['mean'])
error = np.append(error, ssdf['error'])
return data, error
def _make_transitions(theta):
'''Helper function to construct a transition matrix from parameters'''
lambda_, mu, nu = theta[:3] # transition rates in G1, S, G2
l = abs(int(theta[4])) # number of substeps in G1
m = 15 # number of substeps in S; fixed
n = 15 # number of substeps in G2; fixed
a = theta[5] # probability to enter G0 upon mitosis
g1, s, g2, g0, size = 0, l, l+m, l+m+n, l+m+n+1 # convenience: starting indices
trans = np.zeros((size, size))
for i in range(g1, s):
trans[i+1, i] = lambda_
trans[i, i] = -lambda_
for i in range(s, g2):
trans[i+1, i] = mu
trans[i, i] = -mu
for i in range(g2, g0):
trans[i+1, i] = nu
trans[i, i] = -nu
trans[g1, g0-1] = (1. - a) * nu * 2.
trans[g0, g0-1] = a * nu * 2.
return trans
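# Minimal sketch showing how the transition matrix can be inspected for a toy parameter vector
# (the values are arbitrary and only meant to illustrate the expected layout l + 15 + 15 + 1):
def _example_transitions():
    theta = np.array([1.0, 1.0, 1.0, 5.0, 3.0, 0.1, 0.2, 0.2, 0.5])
    trans = _make_transitions(theta)
    return trans.shape  # (l + m + n + 1, l + m + n + 1) = (34, 34) for l = 3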
class log_flat_prior:
    '''Make a log (flat) prior corresponding to a given allowed parameter range.
    The returned log prior function is not normalized.
Arguments:
min -- lower bounds for parameters
max -- upper bounds
'''
def __init__(self, min, max):
self.min, self.max = min, max
def __call__(self, theta):
if np.logical_and(self.min < theta, theta < self.max).all():
return 0.0
else:
return -np.inf
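# Minimal usage sketch for log_flat_prior (the bounds are illustrative, one entry per parameter):
def _example_prior():
    lower = np.zeros(9)
    upper = np.full(9, 10.0)
    prior = log_flat_prior(lower, upper)
    return prior(np.full(9, 1.0)), prior(np.full(9, 20.0))  # 0.0 and -inf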
# the jit decorator speeds up sampling by using numba. If this is not desired,
# simply comment out the decorator.
@jit('f8[:](f8,f8[::1],f8[:,::1],f8[::1],f8[::1],f8,f8[:,::1])', nopython=True)
def model_jit(t, y, transitions, theta, ss_fractions, kappa, labeling):
'''ODE model for labeled cells
Arguments:
t -- time point
y -- vector of initial conditions
transitions -- the transition matrix
theta -- vector of parameters
ss_fractions -- vector of steady state fractions in each sub-phase
kappa -- growth rate kappa
labeling -- labeling matrix
'''
lbl_fractions = y
# l is the integer number of G1 substeps, but emcee only knows how to
# handle real params. We work around this by letting emcee see l as a real
# parameter but using it as an integer internally. As a result, emcee will
# generate stepwise constant posteriors for the fake real l, which is
# slightly inefficient but correct.
l = abs(int(theta[4]))
m = 15 # S substeps; fixed
n = 15 # G2 substeps; fixed
    eps_0 = theta[8]  # initial EdU labeling rate
    tau = theta[3]    # EdU labeling time constant
    mu = theta[1]     # transition rate in S (not used directly below)
eps = eps_0 * np.exp(-t / tau)
# update of the labeling matrix
# labeling is passed as a function argument and updated with low-level
# numpy functions for speed
labeling_sub_2 = labeling[l:l+n, l:l+n]
np.fill_diagonal(labeling_sub_2, eps)
dldt2 = (transitions.dot(lbl_fractions) - kappa * lbl_fractions
- labeling.dot(lbl_fractions - ss_fractions))
return dldt2
class log_likelihood:
'''Make a log likelihood function for a given set of data.
Initialization arguments:
tdata -- vector of time points
data -- vector, mean fractions to which the model is fitted,
generated with function convert_data()
dataerr -- vector, error of the means, generated with function convert_data()
The returned callable evaluates the likelihood as a function of theta.
Argument:
theta -- vector of parameters
'''
def __init__(self, tdata, data, dataerr):
self.tdata, self.data, self.dataerr = tdata, data, dataerr
def __call__(self, theta):
        # definition of the parameters
        l = abs(int(theta[4])) # number of substeps in G1
        m = 15 # number of substeps in S; fixed (matches _make_transitions)
        n = 15 # number of substeps in G2M; fixed (matches _make_transitions)
a = theta[5] # probability to enter G0 upon mitosis
earlyS = int(theta[6] * n)
lateS = int(theta[7] * n)
y0 = np.zeros(l+n+m+1)
# construct the transition matrix
transitions = _make_transitions(theta)
# calculate the steady-growth state
eig = np.linalg.eig(transitions)
index = np.argmax(eig[0])
k = eig[0][index]
if not np.isclose(k, k.real, rtol=1e-8):
return -np.inf
else:
k = k.real
ss_fractions = np.ascontiguousarray(eig[1][:, index].real)
ss_fractions /= np.sum(ss_fractions)
ss_G1, ss_S, ss_G2, ss_G0 = np.split(ss_fractions, [l, l+m, l+m+n])
ss_earlyS, ss_midS, ss_lateS = np.split(ss_S, [earlyS, -lateS])
ss_gate_S = np.sum(ss_midS)
ss_gate_G2 = np.sum(ss_lateS) + np.sum(ss_G2)
# now solve the ODE system
labeling = np.zeros((l+n+m+1, l+m+n+1)) # allocate labeling matrix for speed
sol = solve_ivp(model_jit, [0, self.tdata[-1]], y0, t_eval=self.tdata,
args=(transitions, theta, ss_fractions, k, labeling)).y
fit_G1l = np.sum(sol[0:l+earlyS, :], axis=0)
fit_G0l = sol[l+m+n, :]
fit_G0G1l = fit_G1l + fit_G0l
fit_Sl = np.sum(sol[l+earlyS:l+n-lateS, :], axis=0)
fit_G2l = np.sum(sol[l+n-lateS:l+n+m, :], axis=0)
fit = np.concatenate([fit_G0G1l, fit_Sl, fit_G2l, [ss_gate_S, ss_gate_G2]])
chi_squared = np.sum(((self.data - fit) ** 2 / (self.dataerr) ** 2))
return -0.5 * chi_squared
class log_posterior:
'''Make a log-posterior function from the given likelihood and prior.
Initialization arguments:
likelihood --- callable that yields the log likelihood
prior --- callable that yields the log prior
The returned callable gives the posterior as a function of theta.
Argument:
theta --- parameter vector
'''
def __init__(self, log_likelihood, log_prior):
self.likelihood, self.prior = log_likelihood, log_prior
def __call__(self, theta):
lp = self.prior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + self.likelihood(theta)
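# Sketch of how these pieces would typically be wired into an emcee sampler. emcee is not
# imported by this module, and the walker count, bounds and starting point below are assumptions:
def _example_sampling(tdata, data, dataerr, lower, upper, nwalkers=32, nsteps=1000):
    import emcee
    ndim = len(lower)
    posterior = log_posterior(log_likelihood(tdata, data, dataerr),
                              log_flat_prior(lower, upper))
    start = 0.5 * (lower + upper)                         # midpoint of the allowed range
    p0 = start + 1e-4 * np.random.randn(nwalkers, ndim)   # small ball around the start point
    sampler = emcee.EnsembleSampler(nwalkers, ndim, posterior)
    sampler.run_mcmc(p0, nsteps)
    return sampler.get_chain(flat=True)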
def get_cycle(sampled):
'''Return cell phase length, fractions, and growth rate from a full
posterior sample.
Argument:
sampled -- one sample of parameters
'''
lambda_, mu, nu, tau = sampled[:4]
l = abs(int(sampled[4])) # number of substeps in G1
    m = 15 # number of substeps in S; fixed (matches _make_transitions)
    n = 15 # number of substeps in G2M; fixed (matches _make_transitions)
a = sampled[5]
earlyS, lateS = [int(sampled[i] * n) for i in [6,7]]
result = np.zeros(8)
result[0] = l / lambda_ # length of G1
result[1] = m / mu # length of S
result[2] = n / nu # length of G2
transitions = _make_transitions(sampled)
eig = np.linalg.eig(transitions)
index = np.argmax(eig[0])
result[3] = eig[0][index].real # growth rate
ss_fractions = eig[1][:, index].real
ss_fractions /= np.sum(ss_fractions)
result[4] = np.sum(ss_fractions[0:l]) #G1 fraction
result[5] = np.sum(ss_fractions[l:l+m]) #S fraction
result[6] = np.sum(ss_fractions[l+m:l+m+n]) #G2 fraction
result[7] = ss_fractions.real[l+m+n] # G0 fraction
return result
|
# produce mouse genome exon by exon
import gzip
from Bio import SeqIO
import pickle
import os
import pandas as pd
chromosomes = {}
filename_base = os.path.join(os.path.dirname(__file__), "static/data/GRCm38/chr{}.fa.gz")
print("starting refGene load")
refGeneFilename = os.path.join(os.path.dirname(__file__), "static/data/gtex/refGene_GRCm38.txt")
refGene = pd.read_csv(refGeneFilename, sep="\t")
refGene.columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames']
refGene["exonStarts"] = refGene.apply(lambda x: x['exonStarts'].split(',')[:-1], axis=1)
refGene["exonEnds"] = refGene.apply(lambda x: x['exonEnds'].split(',')[:-1], axis=1)
refGene["exonFrames"] = refGene.apply(lambda x: x['exonFrames'].split(',')[:-1], axis=1)
print("completed refGene load")
chroms = ["1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","M","X","Y"]
print("starting read in all chrom")
for c in chroms:
filename = filename_base.format(c)
    handle = gzip.open(filename, "rt")  # SeqIO needs a text-mode handle
    chromosomes[c] = SeqIO.read(handle, "fasta")
print("finished read in all chrom")
print("processing each gene")
for index, row in refGene.iterrows():
gene_name, exon_count = row["name2"], row["exonCount"]
exonStarts = row["exonStarts"]
exonEnds = row["exonEnds"]
chrom = row["chrom"].split('chr')[1]
if not chrom in chroms:
continue
full_sequence = chromosomes[chrom]
for exon in range(int(exon_count)):
start = int(exonStarts[exon])
end = int(exonEnds[exon])
sequence = full_sequence.seq[start:end]
filename = "{0}_{1}".format(gene_name, exon)
path = os.path.join('static/data/GRCm38_exons/', filename)
        print(filename)
with open(path, 'w') as outfile:
outfile.write(str(sequence))
print("finished generating GRCm38 exons")
|
#!/usr/bin/python
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import input
import pytumblr
import yaml
import os
import code
from requests_oauthlib import OAuth1Session
def new_oauth(yaml_path):
'''
Return the consumer and oauth tokens with three-legged OAuth process and
save in a yaml file in the user's home directory.
'''
print('Retrieve consumer key and consumer secret from http://www.tumblr.com/oauth/apps')
    consumer_key = input('Paste the consumer key here: ')
    consumer_secret = input('Paste the consumer secret here: ')
request_token_url = 'http://www.tumblr.com/oauth/request_token'
authorize_url = 'http://www.tumblr.com/oauth/authorize'
access_token_url = 'http://www.tumblr.com/oauth/access_token'
# STEP 1: Obtain request token
oauth_session = OAuth1Session(consumer_key, client_secret=consumer_secret)
fetch_response = oauth_session.fetch_request_token(request_token_url)
resource_owner_key = fetch_response.get('oauth_token')
resource_owner_secret = fetch_response.get('oauth_token_secret')
    # STEP 2: Authorize URL + Response
full_authorize_url = oauth_session.authorization_url(authorize_url)
# Redirect to authentication page
print('\nPlease go here and authorize:\n{}'.format(full_authorize_url))
redirect_response = input('Allow then paste the full redirect URL here:\n')
# Retrieve oauth verifier
oauth_response = oauth_session.parse_authorization_response(redirect_response)
verifier = oauth_response.get('oauth_verifier')
# STEP 3: Request final access token
oauth_session = OAuth1Session(
consumer_key,
client_secret=consumer_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
verifier=verifier
)
oauth_tokens = oauth_session.fetch_access_token(access_token_url)
tokens = {
'consumer_key': consumer_key,
'consumer_secret': consumer_secret,
'oauth_token': oauth_tokens.get('oauth_token'),
'oauth_token_secret': oauth_tokens.get('oauth_token_secret')
}
yaml_file = open(yaml_path, 'w+')
yaml.dump(tokens, yaml_file, indent=2)
yaml_file.close()
return tokens
if __name__ == '__main__':
yaml_path = os.path.expanduser('~') + '/.tumblr'
if not os.path.exists(yaml_path):
tokens = new_oauth(yaml_path)
else:
yaml_file = open(yaml_path, "r")
tokens = yaml.safe_load(yaml_file)
yaml_file.close()
client = pytumblr.TumblrRestClient(
tokens['consumer_key'],
tokens['consumer_secret'],
tokens['oauth_token'],
tokens['oauth_token_secret']
)
print('pytumblr client created. You may run pytumblr commands prefixed with "client".\n')
code.interact(local=dict(globals(), **{'client': client}))
|