import os
import time
from boto3 import Session
from botocore.exceptions import ValidationError, ClientError, \
ParamValidationError, WaiterError
from .exceptions import EmptyStackName, DeployFailed, TemplateNotSpecified, \
TemplateValidationError, EmptyChangeSet, UpdateStackError, \
StackDoesntExist, StackAlreadyExist
CFN_KWARGS = (
'StackName', 'TemplateBody', 'TemplateURL', 'Parameters',
'NotificationARNs', 'Capabilities', 'ResourceTypes', 'RoleARN',
'StackPolicyBody', 'StackPolicyURL', 'Tags', 'ClientRequestToken'
)
class Stack:
KWARGS = CFN_KWARGS
CREATE_KWARGS = KWARGS + (
'DisableRollback', 'TimeoutInMinutes', 'OnFailure'
)
UPDATE_KWARGS = KWARGS + (
'UsePreviousTemplate', 'StackPolicyDuringUpdateBody',
'StackPolicyDuringUpdateURL',
)
DELETE_KWARGS = (
'StackName', 'RetainResources', 'RoleARN', 'ClientRequestToken'
)
DESCRIBE_STACKS = ('StackName', 'NextToken')
CREATE_CHANGE_SET_KWARGS = KWARGS + (
'UsePreviousTemplate', 'ChangeSetName', 'ClientToken',
'Description', 'ChangeSetType')
EXECUTE_CHANGE_SET = ('ChangeSetName', 'StackName', 'ClientRequestToken')
CHANGE_SET_PREFIX = 'stack-change-set-'
WAITER_DELAY = 5
WAIT_KWARGS = DESCRIBE_STACKS
WAIT_CHANGE_SET_KWARGS = DESCRIBE_STACKS + ('ChangeSetName',)
def __init__(self, **kwargs):
"""
        Creates the initial Stack state from the provided keyword arguments.
        :param kwargs: CloudFormation keyword arguments; must include 'StackName'.
"""
if 'StackName' not in kwargs:
raise EmptyStackName
self.kwargs = kwargs
self._client = Session(profile_name=kwargs.get('Profile', None)).client(
'cloudformation')
def _prepare_kwargs(self, kwargs_list):
"""
Prepares kwargs based on allowed ones from kwargs_list.
:param kwargs_list: list of allowed kwargs.
:return: Filtered kwargs.
"""
return {key: self.kwargs[key] for key in self.kwargs if key in
getattr(self, kwargs_list)}
def _validate_template(self):
"""
        Validates the CloudFormation template, which can be a local file path, a URL or an inline template body.
:return:
"""
try:
if 'TemplateBody' in self.kwargs:
if os.path.isfile(self.kwargs['TemplateBody']):
with open(self.kwargs['TemplateBody'], 'r') as body:
template_body = body.read()
self.kwargs['TemplateBody'] = template_body
self._client.validate_template(
TemplateBody=self.kwargs['TemplateBody'])
elif 'TemplateURL' in self.kwargs:
self._client.validate_template(
TemplateURL=self.kwargs['TemplateURL'])
else:
raise KeyError
except KeyError as e:
raise TemplateNotSpecified(error=e)
except (ValidationError, ClientError, ParamValidationError) as e:
raise TemplateValidationError(error=e)
def _describe_stack(self):
"""
Describes CloudFormation Stack.
:return: Returns Stack information if Stack exists. False otherwise.
"""
try:
stacks = self._client.describe_stacks(
**self._prepare_kwargs('DESCRIBE_STACKS'))
return stacks['Stacks'][0]
except ClientError as ex:
if 'Stack with id {0} does not exist'.format(
self.kwargs['StackName']) in str(ex):
return False
else:
raise ex
def _stack_exists(self):
"""
Checks if a CloudFormation stack with given name exists.
:return: True if Stack exists. False otherwise.
"""
stack = self._describe_stack()
if not stack:
return False
return stack['StackStatus'] != 'REVIEW_IN_PROGRESS'
def _create_change_set(self):
"""
Creates CloudFormation Change Set.
:return:
"""
if 'ChangeSetName' not in self.kwargs:
self.kwargs['ChangeSetName'] = self.CHANGE_SET_PREFIX + \
str(int(time.time()))
self.kwargs['ChangeSetType'] = 'UPDATE'
if not self._stack_exists():
self.kwargs['ChangeSetType'] = 'CREATE'
try:
self._client.create_change_set(**self._prepare_kwargs(
'CREATE_CHANGE_SET_KWARGS'))
except Exception as e:
raise e
def _wait_for_stack(self, waiter):
"""
Waits for CloudFormation action to be completed.
:param waiter: create, update, delete.
:return:
"""
waiter = self._client.get_waiter('stack_{0}_complete'.format(waiter))
waiter.config.delay = self.WAITER_DELAY
try:
waiter.wait(**self._prepare_kwargs('WAIT_KWARGS'))
except WaiterError as ex:
raise ex
def _wait_for_change_set(self):
"""
Waits for CloudFormation Change Set to be created.
:return:
"""
waiter = self._client.get_waiter('change_set_create_complete')
waiter.config.delay = self.WAITER_DELAY
try:
waiter.wait(**self._prepare_kwargs('WAIT_CHANGE_SET_KWARGS'))
except WaiterError as e:
resp = e.last_response
status = resp['Status']
reason = resp['StatusReason']
msg = ('No updates are to be performed',
'The submitted information didn\'t contain changes.')
            if status == 'FAILED' and (msg[0] in reason or msg[1] in reason):
raise EmptyChangeSet(stack_name=self.kwargs['StackName'])
raise e
def _execute_change_set(self):
"""
Executes CloudFormation Change Set.
:return:
"""
return self._client.execute_change_set(**self._prepare_kwargs(
'EXECUTE_CHANGE_SET'))
def _wait_for_execute(self):
"""
        Waits for CloudFormation Change Set to be executed.
:return:
"""
change_set_type = self.kwargs['ChangeSetType']
if change_set_type == 'CREATE':
waiter = self._client.get_waiter('stack_create_complete')
elif change_set_type == 'UPDATE':
waiter = self._client.get_waiter('stack_update_complete')
try:
waiter.wait(**self._prepare_kwargs('DESCRIBE_STACKS'))
except WaiterError as ex:
raise DeployFailed(stack_name=self.kwargs['StackName'], ex=ex)
def _update_stack(self):
"""
Updates CloudFormation Stack.
:return:
"""
if not self._stack_exists():
raise StackDoesntExist(stack_name=self.kwargs['StackName'])
try:
self._client.update_stack(**self._prepare_kwargs('UPDATE_KWARGS'))
except ClientError as ex:
if 'No updates are to be performed.' in str(ex):
raise UpdateStackError(stack_name=self.kwargs['StackName'])
            raise
def _create_stack(self):
"""
Creates CloudFormation Stack.
:return:
"""
if self._stack_exists():
raise StackAlreadyExist(stack_name=self.kwargs['StackName'])
self._client.create_stack(**self._prepare_kwargs('CREATE_KWARGS'))
def _delete_stack(self):
"""
Deletes CloudFormation Stack.
:return:
"""
if not self._stack_exists():
raise StackDoesntExist(stack_name=self.kwargs['StackName'])
self._client.delete_stack(**self._prepare_kwargs('DELETE_KWARGS'))
def deploy(self, execute_change_set=True):
"""
        Creates a CloudFormation Change Set and optionally executes it.
        The template is validated first.
:param execute_change_set: If True Change Set will be executed.
:return:
"""
self._validate_template()
self._create_change_set()
self._wait_for_change_set()
print('\nDeploying CF Stack...')
if execute_change_set:
self._execute_change_set()
self._wait_for_execute()
return 0
def create(self):
"""
        Method creates CloudFormation Stack.
:return:
"""
self._validate_template()
print('Creating CF Stack...')
self._create_stack()
self._wait_for_stack('create')
return 0
def update(self):
"""
        Method updates CloudFormation Stack.
:return:
"""
self._validate_template()
print('Updating CF Stack...')
self._update_stack()
self._wait_for_stack('update')
return 0
def delete(self):
"""
        Method deletes CloudFormation Stack.
:return:
"""
print('Deleting CF Stack...')
self._delete_stack()
self._wait_for_stack('delete')
return 0
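# A minimal usage sketch (an assumption, not part of this module): deploying a
# stack from a local template file via a change set. It requires valid AWS
# credentials and a real template, so it is illustrative rather than meant to
# run as-is.
#
#     stack = Stack(StackName='demo-stack',
#                   TemplateBody='template.yaml',
#                   Capabilities=['CAPABILITY_IAM'])
#     stack.deploy(execute_change_set=True)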
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import time
from azure.eventhub import EventHubProducerClient, EventHubConsumerClient, TransportType, RetryMode
def test_custom_endpoint():
producer = EventHubProducerClient(
"fake.host.com",
"fake_eh",
None,
)
assert not producer._config.custom_endpoint_hostname
assert producer._config.transport_type == TransportType.Amqp
assert producer._config.connection_port == 5671
producer = EventHubProducerClient(
"fake.host.com",
"fake_eh",
None,
custom_endpoint_address="https://12.34.56.78"
)
assert producer._config.custom_endpoint_hostname == '12.34.56.78'
assert producer._config.transport_type == TransportType.AmqpOverWebsocket
assert producer._config.connection_port == 443
producer = EventHubProducerClient(
"fake.host.com",
"fake_eh",
None,
custom_endpoint_address="sb://fake.endpoint.com:443"
)
assert producer._config.custom_endpoint_hostname == 'fake.endpoint.com'
assert producer._config.transport_type == TransportType.AmqpOverWebsocket
assert producer._config.connection_port == 443
producer = EventHubProducerClient(
"fake.host.com",
"fake_eh",
None,
custom_endpoint_address="https://fake.endpoint.com:200"
)
assert producer._config.custom_endpoint_hostname == 'fake.endpoint.com'
assert producer._config.transport_type == TransportType.AmqpOverWebsocket
assert producer._config.connection_port == 200
producer = EventHubProducerClient(
"fake.host.com",
"fake_eh",
None,
custom_endpoint_address="fake.endpoint.com:200"
)
assert producer._config.custom_endpoint_hostname == 'fake.endpoint.com'
assert producer._config.transport_type == TransportType.AmqpOverWebsocket
assert producer._config.connection_port == 200
consumer = EventHubConsumerClient(
"fake.host.com",
"fake_eh",
"fake_group",
None,
)
assert not consumer._config.custom_endpoint_hostname
assert consumer._config.transport_type == TransportType.Amqp
assert consumer._config.connection_port == 5671
consumer = EventHubConsumerClient(
"fake.host.com",
"fake_eh",
"fake_group",
None,
custom_endpoint_address="https://12.34.56.78/"
)
assert consumer._config.custom_endpoint_hostname == '12.34.56.78'
assert consumer._config.transport_type == TransportType.AmqpOverWebsocket
assert consumer._config.connection_port == 443
consumer = EventHubConsumerClient(
"fake.host.com",
"fake_eh",
"fake_group",
None,
custom_endpoint_address="sb://fake.endpoint.com:443"
)
assert consumer._config.custom_endpoint_hostname == 'fake.endpoint.com'
assert consumer._config.transport_type == TransportType.AmqpOverWebsocket
assert consumer._config.connection_port == 443
consumer = EventHubConsumerClient(
"fake.host.com",
"fake_eh",
"fake_group",
None,
custom_endpoint_address="https://fake.endpoint.com:200"
)
assert consumer._config.custom_endpoint_hostname == 'fake.endpoint.com'
assert consumer._config.transport_type == TransportType.AmqpOverWebsocket
assert consumer._config.connection_port == 200
consumer = EventHubConsumerClient(
"fake.host.com",
"fake_eh",
"fake_group",
None,
custom_endpoint_address="fake.endpoint.com:200"
)
assert consumer._config.custom_endpoint_hostname == 'fake.endpoint.com'
assert consumer._config.transport_type == TransportType.AmqpOverWebsocket
assert consumer._config.connection_port == 200
def test_custom_certificate():
producer = EventHubProducerClient(
"fake.host.com",
"fake_eh",
None,
connection_verify='/usr/bin/local/cert'
)
assert producer._config.connection_verify == '/usr/bin/local/cert'
consumer = EventHubConsumerClient(
"fake.host.com",
"fake_eh",
"fake_group",
None,
connection_verify='D:/local/certfile'
)
assert consumer._config.connection_verify == 'D:/local/certfile'
def test_backoff_fixed_retry():
client = EventHubProducerClient(
'fake.host.com',
'fake_eh',
None,
retry_mode=RetryMode.FIXED
)
backoff = client._config.backoff_factor
start_time = time.time()
client._backoff(retried_times=1, last_exception=Exception('fake'), timeout_time=None)
sleep_time = time.time() - start_time
# exp = 0.8 * (2 ** 1) = 1.6
# time.sleep() in _backoff will take AT LEAST time 'exp' for RetryMode.EXPONENTIAL
# check that fixed is less than 'exp'
assert sleep_time < backoff * (2 ** 1)
|
from ._42 import _42
|
import pytest
import numpydoc.validate
import numpydoc.tests
validate_one = numpydoc.validate.validate
class GoodDocStrings:
"""
Collection of good doc strings.
This class contains a lot of docstrings that should pass the validation
script without any errors.
See Also
--------
AnotherClass : With its description.
Examples
--------
>>> result = 1 + 1
"""
def one_liner(self):
"""Allow one liner docstrings (including quotes)."""
# This should fail, but not because of the position of the quotes
pass
def plot(self, kind, color="blue", **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Parameters
----------
kind : str
Kind of matplotlib plot, e.g.::
'foo'
color : str, default 'blue'
Color name or rgb code.
**kwargs
These parameters will be passed to the matplotlib plotting
function.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def swap(self, arr, i, j, *args, **kwargs):
"""
        Swap two indices on an array.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
Parameters
----------
arr : list
The list having indexes swapped.
i, j : int
The indexes being swapped.
*args, **kwargs
Extraneous parameters are being permitted.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def sample(self):
"""
Generate and return a random number.
The value is sampled from a continuous uniform distribution between
0 and 1.
Returns
-------
float
Random number generated.
- Make sure you set a seed for reproducibility
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def random_letters(self):
"""
Generate and return a sequence of random letters.
The length of the returned string is also random, and is also
returned.
Returns
-------
length : int
Length of the returned string.
letters : str
String of random letters.
.. versionadded:: 0.1
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def sample_values(self):
"""
Generate an infinite sequence of random numbers.
The values are sampled from a continuous uniform distribution between
0 and 1.
Yields
------
float
Random number generated.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def head(self):
"""
Return the first 5 elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Returns
-------
int
Subset of the original series with the 5 first values.
See Also
--------
Series.tail : Return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n.
Examples
--------
>>> 1 + 1
2
"""
return 1
def head1(self, n=5):
"""
Return the first elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Parameters
----------
n : int
Number of values to return.
Returns
-------
int
Subset of the original series with the n first values.
See Also
--------
tail : Return the last n elements of the Series.
Examples
--------
>>> s = 10
>>> s
10
With the `n` parameter, we can change the number of returned rows:
>>> s + 1
11
"""
return 1
def summary_starts_with_number(self, n=5):
"""
2nd rule of summaries should allow this.
3 Starting the summary with a number instead of a capital letter.
Also in parameters, returns, see also...
Parameters
----------
n : int
4 Number of values to return.
Returns
-------
int
5 Subset of the original series with the n first values.
See Also
--------
tail : 6 Return the last n elements of the Series.
Examples
--------
>>> s = 10
>>> s
10
7 With the `n` parameter, we can change the number of returned rows:
>>> s + 1
11
"""
return 1
def contains(self, pat, case=True, na=float('NaN')):
"""
Return whether each value contains `pat`.
In this case, we are illustrating how to use sections, even
if the example is simple enough and does not require them.
Parameters
----------
pat : str
Pattern to check for within each element.
case : bool, default True
Whether check should be done with case sensitivity.
na : object, default np.nan
Fill value for missing data.
See Also
--------
related : Something related.
Examples
--------
>>> s = 25
>>> s
25
**Case sensitivity**
With `case_sensitive` set to `False` we can match `a` with both
`a` and `A`:
>>> s + 1
26
**Missing values**
We can fill missing values in the output using the `na` parameter:
>>> s * 2
50
"""
pass
def mode(self, axis, numeric_only):
"""
Ensure reST directives don't affect checks for leading periods.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
Parameters
----------
axis : str
Sentence ending in period, followed by single directive.
.. versionchanged:: 0.1.2
numeric_only : bool
Sentence ending in period, followed by multiple directives.
.. versionadded:: 0.1.2
.. deprecated:: 0.00.0
A multiline description,
which spans another line.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def good_imports(self):
"""
Ensure import other than numpy and pandas are fine.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
See Also
--------
related : Something related.
Examples
--------
This example does not import pandas or import numpy.
>>> import datetime
>>> datetime.MAXYEAR
9999
"""
pass
def no_returns(self):
"""
Say hello and have no returns.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def empty_returns(self):
"""
Say hello and always return None.
Since this function never returns a value, this
docstring doesn't need a return section.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
def say_hello():
return "Hello World!"
say_hello()
if True:
return
else:
return None
def multiple_variables_on_one_line(self, matrix, a, b, i, j):
"""
Swap two values in a matrix.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
Parameters
----------
matrix : list of list
A double list that represents a matrix.
a, b : int
            The indices of the first value.
        i, j : int
            The indices of the second value.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
class BadGenericDocStrings:
"""Everything here has a bad docstring
"""
def func(self):
"""Some function.
With several mistakes in the docstring.
        It has a blank line after the signature `def func():`.
The text 'Some function' should go in the line after the
opening quotes of the docstring, not in the same line.
There is a blank line between the docstring and the first line
of code `foo = 1`.
The closing quotes should be in the next line, not in this one."""
foo = 1
bar = 2
return foo + bar
def astype(self, dtype):
"""
Casts Series type.
Verb in third-person of the present simple, should be infinitive.
"""
pass
def astype1(self, dtype):
"""
Method to cast Series type.
Does not start with verb.
"""
pass
def astype2(self, dtype):
"""
Cast Series type
Missing dot at the end.
"""
pass
def astype3(self, dtype):
"""
Cast Series type from its current type to the new type defined in
the parameter dtype.
Summary is too verbose and doesn't fit in a single line.
"""
pass
def two_linebreaks_between_sections(self, foo):
"""
Test linebreaks message GL03.
Note 2 blank lines before parameters section.
Parameters
----------
foo : str
Description of foo parameter.
"""
pass
def linebreak_at_end_of_docstring(self, foo):
"""
Test linebreaks message GL03.
Note extra blank line at end of docstring.
Parameters
----------
foo : str
Description of foo parameter.
"""
pass
def plot(self, kind, **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Note the blank line between the parameters title and the first
parameter. Also, note that after the name of the parameter `kind`
and before the colon, a space is missing.
Also, note that the parameter descriptions do not start with a
capital letter, and do not finish with a dot.
Finally, the `**kwargs` parameter is missing.
Parameters
----------
kind: str
kind of matplotlib plot
"""
pass
def unknown_section(self):
"""
This section has an unknown section title.
Unknown Section
---------------
This should raise an error in the validation.
"""
def sections_in_wrong_order(self):
"""
This docstring has the sections in the wrong order.
Parameters
----------
name : str
This section is in the right position.
Examples
--------
>>> print('So far Examples is good, as it goes before Parameters')
So far Examples is good, as it goes before Parameters
See Also
--------
function : This should generate an error, as See Also needs to go
before Examples.
"""
def deprecation_in_wrong_order(self):
"""
This docstring has the deprecation warning in the wrong order.
This is the extended summary. The correct order should be
summary, deprecation warning, extended summary.
.. deprecated:: 1.0
This should generate an error as it needs to go before
extended summary.
"""
def method_wo_docstrings(self):
pass
def directives_without_two_colons(self, first, second):
"""
Ensure reST directives have trailing colons.
Parameters
----------
first : str
Sentence ending in period, followed by single directive w/o colons.
.. versionchanged 0.1.2
second : bool
Sentence ending in period, followed by multiple directives w/o
colons.
.. versionadded 0.1.2
.. deprecated 0.00.0
"""
pass
class BadSummaries:
def no_summary(self):
"""
Returns
-------
int
Always one.
"""
def heading_whitespaces(self):
"""
Summary with heading whitespaces.
Returns
-------
int
Always one.
"""
def wrong_line(self):
"""Quotes are on the wrong line.
Both opening and closing."""
pass
def no_punctuation(self):
"""
Has the right line but forgets punctuation
"""
pass
def no_capitalization(self):
"""
provides a lowercase summary.
"""
pass
def no_infinitive(self):
"""
Started with a verb that is not infinitive.
"""
def multi_line(self):
"""
Extends beyond one line
which is not correct.
"""
def two_paragraph_multi_line(self):
"""
Extends beyond one line
which is not correct.
Extends beyond one line, which in itself is correct but the
previous short summary should still be an issue.
"""
class BadParameters:
"""
Everything here has a problem with its Parameters section.
"""
def no_type(self, value):
"""
Lacks the type.
Parameters
----------
value
A parameter without type.
"""
def type_with_period(self, value):
"""
Has period after type.
Parameters
----------
value : str.
A parameter type should not finish with period.
"""
def no_description(self, value):
"""
Lacks the description.
Parameters
----------
value : str
"""
def missing_params(self, kind, **kwargs):
"""
Lacks kwargs in Parameters.
Parameters
----------
kind : str
Foo bar baz.
"""
def bad_colon_spacing(self, kind):
"""
Has bad spacing in the type line.
Parameters
----------
kind: str
Needs a space after kind.
"""
def no_description_period(self, kind):
"""
Forgets to add a period to the description.
Parameters
----------
kind : str
Doesn't end with a dot
"""
def no_description_period_with_directive(self, kind):
"""
Forgets to add a period, and also includes a directive.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionadded:: 0.00.0
"""
def no_description_period_with_directives(self, kind):
"""
Forgets to add a period, and also includes multiple directives.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionchanged:: 0.00.0
.. deprecated:: 0.00.0
"""
def parameter_capitalization(self, kind):
"""
Forgets to capitalize the description.
Parameters
----------
kind : str
this is not capitalized.
"""
def blank_lines(self, kind):
"""
Adds a blank line after the section header.
Parameters
----------
kind : str
Foo bar baz.
"""
pass
def integer_parameter(self, kind):
"""
Uses integer instead of int.
Parameters
----------
kind : integer
Foo bar baz.
"""
pass
def string_parameter(self, kind):
"""
Uses string instead of str.
Parameters
----------
kind : string
Foo bar baz.
"""
pass
def boolean_parameter(self, kind):
"""
Uses boolean instead of bool.
Parameters
----------
kind : boolean
Foo bar baz.
"""
pass
def list_incorrect_parameter_type(self, kind):
"""
Uses list of boolean instead of list of bool.
Parameters
----------
kind : list of boolean, integer, float or string
Foo bar baz.
"""
pass
def bad_parameter_spacing(self, a, b):
"""
The parameters on the same line have an extra space between them.
Parameters
----------
a, b : int
Foo bar baz.
"""
pass
class BadReturns:
def return_not_documented(self):
"""
Lacks section for Returns
"""
return "Hello world!"
def yield_not_documented(self):
"""
Lacks section for Yields
"""
yield "Hello world!"
def no_type(self):
"""
Returns documented but without type.
Returns
-------
Some value.
"""
return "Hello world!"
def no_description(self):
"""
        Provides type but no description.
Returns
-------
str
"""
return "Hello world!"
def no_punctuation(self):
"""
Provides type and description but no period.
Returns
-------
str
A nice greeting
"""
return "Hello world!"
def named_single_return(self):
"""
Provides name but returns only one value.
Returns
-------
s : str
A nice greeting.
"""
return "Hello world!"
def no_capitalization(self):
"""
Forgets capitalization in return values description.
Returns
-------
foo : str
The first returned string.
bar : str
the second returned string.
"""
return "Hello", "World!"
def no_period_multi(self):
"""
Forgets period in return values description.
Returns
-------
foo : str
The first returned string
bar : str
The second returned string.
"""
return "Hello", "World!"
class BadSeeAlso:
def no_desc(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail
"""
pass
def desc_no_period(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail : Return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n
"""
pass
def desc_first_letter_lowercase(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail : return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n.
"""
pass
def prefix_pandas(self):
"""
Have `pandas` prefix in See Also section.
See Also
--------
pandas.Series.rename : Alter Series index labels or name.
DataFrame.head : The first `n` rows of the caller object.
"""
pass
class BadExamples:
def missing_whitespace_around_arithmetic_operator(self):
"""
Examples
--------
>>> 2+5
7
"""
pass
def indentation_is_not_a_multiple_of_four(self):
"""
Examples
--------
>>> if 2 + 5:
... pass
"""
pass
def missing_whitespace_after_comma(self):
"""
Examples
--------
>>> import datetime
>>> value = datetime.date(2019,1,1)
"""
pass
class TestValidator:
def _import_path(self, klass=None, func=None):
"""
Build the required import path for tests in this module.
Parameters
----------
klass : str
Class name of object in module.
func : str
Function name of object in module.
Returns
-------
str
Import path of specified object in this module
"""
base_path = "numpydoc.tests.test_validate"
if klass:
base_path = ".".join([base_path, klass])
if func:
base_path = ".".join([base_path, func])
return base_path
def test_one_liner(self, capsys):
result = validate_one(self._import_path(klass="GoodDocStrings", func='one_liner'))
errors = " ".join(err[1] for err in result["errors"])
assert 'should start in the line immediately after the opening quotes' not in errors
assert 'should be placed in the line after the last text' not in errors
def test_good_class(self, capsys):
errors = validate_one(self._import_path(klass="GoodDocStrings"))["errors"]
assert isinstance(errors, list)
assert not errors
@pytest.mark.parametrize(
"func",
[
"plot",
"swap",
"sample",
"random_letters",
"sample_values",
"head",
"head1",
"summary_starts_with_number",
"contains",
"mode",
"good_imports",
"no_returns",
"empty_returns",
"multiple_variables_on_one_line",
],
)
def test_good_functions(self, capsys, func):
errors = validate_one(self._import_path(klass="GoodDocStrings", func=func))[
"errors"
]
assert isinstance(errors, list)
assert not errors
def test_bad_class(self, capsys):
errors = validate_one(self._import_path(klass="BadGenericDocStrings"))["errors"]
assert isinstance(errors, list)
assert errors
@pytest.mark.parametrize(
"func",
[
"func",
"astype",
"astype1",
"astype2",
"astype3",
"plot",
"directives_without_two_colons",
],
)
def test_bad_generic_functions(self, capsys, func):
errors = validate_one(
self._import_path(klass="BadGenericDocStrings", func=func) # noqa:F821
)["errors"]
assert isinstance(errors, list)
assert errors
@pytest.mark.parametrize(
"klass,func,msgs",
[
# See Also tests
(
"BadGenericDocStrings",
"unknown_section",
('Found unknown section "Unknown Section".',),
),
(
"BadGenericDocStrings",
"sections_in_wrong_order",
(
"Sections are in the wrong order. Correct order is: Parameters, "
"See Also, Examples",
),
),
(
"BadGenericDocStrings",
"deprecation_in_wrong_order",
("Deprecation warning should precede extended summary",),
),
(
"BadGenericDocStrings",
"directives_without_two_colons",
(
"reST directives ['versionchanged', 'versionadded', "
"'deprecated'] must be followed by two colons",
),
),
(
"BadSeeAlso",
"no_desc",
('Missing description for See Also "Series.tail" reference',),
),
(
"BadSeeAlso",
"desc_no_period",
('Missing period at end of description for See Also "Series.iloc"',),
),
(
"BadSeeAlso",
"desc_first_letter_lowercase",
('should be capitalized for See Also "Series.tail"',),
),
# Summary tests
(
"BadSummaries",
"no_summary",
("No summary found",),
),
(
"BadSummaries",
"heading_whitespaces",
("Summary contains heading whitespaces",),
),
(
"BadSummaries",
"wrong_line",
("should start in the line immediately after the opening quotes",
"should be placed in the line after the last text"),
),
("BadSummaries", "no_punctuation", ("Summary does not end with a period",)),
(
"BadSummaries",
"no_capitalization",
("Summary does not start with a capital letter",),
),
(
"BadSummaries",
"no_capitalization",
("Summary must start with infinitive verb",),
),
("BadSummaries", "multi_line", ("Summary should fit in a single line",)),
(
"BadSummaries",
"two_paragraph_multi_line",
("Summary should fit in a single line",),
),
# Parameters tests
(
"BadParameters",
"no_type",
('Parameter "value" has no type',),
),
(
"BadParameters",
"type_with_period",
('Parameter "value" type should not finish with "."',),
),
(
"BadParameters",
"no_description",
('Parameter "value" has no description',),
),
(
"BadParameters",
"missing_params",
("Parameters {'**kwargs'} not documented",),
),
(
"BadParameters",
"bad_colon_spacing",
(
'Parameter "kind" requires a space before the colon '
"separating the parameter name and type",
),
),
(
"BadParameters",
"no_description_period",
('Parameter "kind" description should finish with "."',),
),
(
"BadParameters",
"no_description_period_with_directive",
('Parameter "kind" description should finish with "."',),
),
(
"BadParameters",
"parameter_capitalization",
('Parameter "kind" description should start with a capital letter',),
),
(
"BadParameters",
"integer_parameter",
('Parameter "kind" type should use "int" instead of "integer"',),
),
(
"BadParameters",
"string_parameter",
('Parameter "kind" type should use "str" instead of "string"',),
),
(
"BadParameters",
"boolean_parameter",
('Parameter "kind" type should use "bool" instead of "boolean"',),
),
(
"BadParameters",
"list_incorrect_parameter_type",
('Parameter "kind" type should use "bool" instead of "boolean"',),
),
(
"BadParameters",
"list_incorrect_parameter_type",
('Parameter "kind" type should use "int" instead of "integer"',),
),
(
"BadParameters",
"list_incorrect_parameter_type",
('Parameter "kind" type should use "str" instead of "string"',),
),
(
"BadParameters",
"bad_parameter_spacing",
("Parameters {'b'} not documented", "Unknown parameters {' b'}"),
),
pytest.param(
"BadParameters",
"blank_lines",
("No error yet?",),
marks=pytest.mark.xfail,
),
# Returns tests
("BadReturns", "return_not_documented", ("No Returns section found",)),
("BadReturns", "yield_not_documented", ("No Yields section found",)),
pytest.param("BadReturns", "no_type", ("foo",), marks=pytest.mark.xfail),
("BadReturns", "no_description", ("Return value has no description",)),
(
"BadReturns",
"no_punctuation",
('Return value description should finish with "."',),
),
(
"BadReturns",
"named_single_return",
(
"The first line of the Returns section should contain only the "
"type, unless multiple values are being returned",
),
),
(
"BadReturns",
"no_capitalization",
("Return value description should start with a capital letter",),
),
(
"BadReturns",
"no_period_multi",
('Return value description should finish with "."',),
),
(
"BadGenericDocStrings",
"method_wo_docstrings",
("The object does not have a docstring",),
),
(
"BadGenericDocStrings",
"two_linebreaks_between_sections",
(
"Double line break found; please use only one blank line to "
"separate sections or paragraphs, and do not leave blank lines "
"at the end of docstrings",
),
),
(
"BadGenericDocStrings",
"linebreak_at_end_of_docstring",
(
"Double line break found; please use only one blank line to "
"separate sections or paragraphs, and do not leave blank lines "
"at the end of docstrings",
),
),
],
)
def test_bad_docstrings(self, capsys, klass, func, msgs):
with pytest.warns(None) as w:
result = validate_one(self._import_path(klass=klass, func=func))
if len(w):
assert all('Unknown section' in str(ww.message) for ww in w)
for msg in msgs:
assert msg in " ".join(err[1] for err in result["errors"])
class TestDocstringClass:
@pytest.mark.parametrize("invalid_name", ["unknown_mod", "unknown_mod.MyClass"])
def test_raises_for_invalid_module_name(self, invalid_name):
msg = 'No module can be imported from "{}"'.format(invalid_name)
with pytest.raises(ImportError, match=msg):
numpydoc.validate.Docstring(invalid_name)
@pytest.mark.parametrize(
"invalid_name", ["datetime.BadClassName", "datetime.bad_method_name"]
)
def test_raises_for_invalid_attribute_name(self, invalid_name):
name_components = invalid_name.split(".")
obj_name, invalid_attr_name = name_components[-2], name_components[-1]
msg = "'{}' has no attribute '{}'".format(obj_name, invalid_attr_name)
with pytest.raises(AttributeError, match=msg):
numpydoc.validate.Docstring(invalid_name)
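# Illustrative use of the validator outside pytest (a sketch based on how the
# tests above consume the result; the exact error-tuple layout is an
# assumption):
#
#     result = validate_one("numpydoc.tests.test_validate.GoodDocStrings.plot")
#     for error_code, error_message in result["errors"]:
#         print(error_code, error_message)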
|
from unittest import TestCase
from main import find_missed_num
class MissingNumberTestCase(TestCase):
def test_without_first(self):
numbers = [2, 3]
self.assertEqual(find_missed_num(numbers), 1)
def test_without_last(self):
numbers = [1, 2]
self.assertEqual(find_missed_num(numbers), 3)
def test_without_middle(self):
numbers = [1, 3]
self.assertEqual(find_missed_num(numbers), 2)
def test_without_middle_random_order(self):
numbers = [3, 1, 5, 4]
self.assertEqual(find_missed_num(numbers), 2)
def test_len_2_without_first(self):
numbers = [2]
self.assertEqual(find_missed_num(numbers), 1)
def test_len_2_without_last(self):
numbers = [1]
self.assertEqual(find_missed_num(numbers), 2)
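# A reference sketch consistent with the tests above (an assumption about how
# main.find_missed_num works; main.py is not shown here): the input holds every
# number from 1 to len(numbers) + 1 except one, so the missing value is the gap
# between the expected sum of that range and the actual sum.
def _reference_find_missed_num(numbers):
    n = len(numbers) + 1  # size of the complete range 1..n
    return n * (n + 1) // 2 - sum(numbers)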
|
#!/usr/bin/python
# Python has class str to represent and deal with string
first_name = "Sanjeev"
last_name = 'Jaiswal'
nick_name = '''Jassi'''
address = """ Mailing Address right?
if so, it's Hyderabad, Madhapur.
Pin: 500081"""
mobile_num = 9618961800
print("First Name:", first_name)
print("First Name: " + first_name) # String Concatenation
print("Multi line address string: " + address)
greetings = 'Howdy'
print("Length of the string is " + str(len(greetings))) # len() for the length of the string
print(greetings + nick_name) ## HowdyJassi. String concatenation adds no space
pi = 3.14 # text = 'The value of pi is ' + pi ## NO, does not work
text = 'The value of pi is ' + str(pi) ## need specifically to convert number to str type to print
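# An f-string (Python 3.6+) embeds the value directly, avoiding the explicit str() conversion above.
print(f'The value of pi is {pi}')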
|
import enolib
input = '''
fieldset:
entry = entry value
other = other value
'''.strip()
entry = enolib.parse(input).fieldset().entry('entry')
def test_required_string_value_returns_the_value():
assert entry.required_string_value() == 'entry value'
def test_required_string_value_touches_the_fieldset_itself():
virgin_entry = enolib.parse(input).fieldset().entry('entry')
virgin_entry.required_string_value()
assert hasattr(virgin_entry, '_touched')
def test_required_value_returns_the_processed_value():
assert entry.required_value(lambda value: value.upper()) == 'ENTRY VALUE'
def test_required_value_touches_the_fieldset_itself():
virgin_entry = enolib.parse(input).fieldset().entry('entry')
virgin_entry.required_value(lambda value: value.upper())
assert hasattr(virgin_entry, '_touched')
|
"""
CLI for gpip utility.
"""
# AVAILABLE COMMANDS
META_COMMANDS = [
"get",
"install",
"version"
]
from .cli import main
|
'''
@Author danielvallejo237
'''
import json
import numpy as np
import re
import stanza
from spacy_stanza import StanzaLanguage
import unidecode
import random
NLPProc=None
JSONOBJ=None
with open("recipes_full_v2.json") as jsonFile:
jsonObject = json.load(jsonFile)
JSONOBJ=jsonObject
jsonFile.close()
snlp = stanza.Pipeline(lang="es")
NLPProc = StanzaLanguage(snlp)
def lematize(words,lematizer=NLPProc):
lematizadas=[unidecode.unidecode(lematizer(p)[0].lemma_).lower() for p in words]
return lematizadas
def query_score(palabras,text,title):
st=""
for p in palabras:
st=st+p+"|"
st=st[:-1]
rx=re.compile(st)
return 10*len(re.findall(rx,''.join(title.split())))+len(re.findall(rx, ''.join(text.split())))
def query(palabras,Json=JSONOBJ,lematizer=NLPProc):
scores={}
palabras=lematize(palabras,lematizer=lematizer)
for i,j in enumerate(Json):
js=json.loads(Json[j])
scores[j]=query_score(palabras,js['kwd'],js['name'])
sort_orders = sorted(scores.items(), key=lambda x: x[1], reverse=True)
    # Keep at most the three best-scoring recipes, skipping zero-score matches.
    lst = [key for key, score in sort_orders[:3] if score > 0]
return lst
def processTweet(tweet,Json=JSONOBJ,lematizer=NLPProc):
    KWD=re.findall(r'#[\w]+',tweet)
KWD=[k[1:] for k in KWD]
return query(KWD,Json,lematizer)
def Interact(l1,l2,l3,Json=JSONOBJ):
L=l1+l2+l3
L=list(set(L))
random.shuffle(L)
L=L[:min(3,len(L))]
result=""
if len(L)>0:
for l in L:
js=json.loads(Json[l])
result=result+' Nombre: '+js['name']+' No.Ingredientes: '+str(len(js['ing']))+" Link: "+js['source']+' \n'
return result
if __name__=="__main__":
pass
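# Illustrative usage (an assumption -- the results depend on the contents of
# recipes_full_v2.json and require the stanza Spanish model to be downloaded):
#
#     ids = processTweet('Busco una receta con #pollo y #arroz')
#     print(Interact(ids, [], []))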
|
from telethon import TelegramClient
import time
from telethon import sync, events
from json2table import convert
import requests
import json
import array
from datetime import datetime
import re
import io
api_id = 54245
api_hash = "452452452"
session = "NameOfSession"
client = TelegramClient(session, api_id, api_hash)
client.start()
def main():
# while True:
# print("Scan...")
# now = datetime.now()
# print("start at: ", now)
check_ip()
time.sleep(3)
# print("timer dosent not start")
# 'x-apikey': 'dfghfghdfg',
def check_ip():
headers = {
'x-apikey': 'hfghdgfh',
}
lastmsg = client.get_messages('shodanresponse_bot', 300)
ip_addr_for_vtotal = []
for i in lastmsg:
        ip_addr_list = re.findall(r"\d+\.\d+\.\d+\.\d+", i.text or "")
        time.sleep(0.1)
        if ip_addr_list:  # skip messages that contain no IP address
            ip_addr_for_vtotal.append(ip_addr_list[0])
for now in ip_addr_for_vtotal:
with open("scannedips200.txt") as f:
# now = ip_addr_for_vtotal
if now in f.read():
print(f"IP arleady in file.({now})")
# return None
pass
else:
with open("scannedips200.txt", "a") as ad:
ad.write(now + '\n')
ip = now
# delay
time.sleep(2)
                r = requests.get('https://www.virustotal.com/api/v3/search?query=' + ip, headers=headers)  # GET request to the VirusTotal API
                json_data = r.json()  # parse the response body as JSON
                result_json = json.dumps(json_data, indent=5)  # pretty-printed JSON, purely for readability
tmp_arr = []
root = json_data['data'][0]['attributes']
                # last_analysis_stats keys are categories such as harmless, malicious, suspicious, ...
if root['last_analysis_stats']['malicious'] != 0:
print("malicious:" + str(root['last_analysis_stats']['malicious']))
# delay
time.sleep(2)
prntres(json_data, ip)
if root['last_analysis_stats']['suspicious'] != 0:
# delay
time.sleep(2)
print("suspicious:" + str(root['last_analysis_stats']['suspicious']))
prntres(json_data, ip)
else:
pass
def prntres(json_d, ip):
tmp_arr = []
repoort = []
root = json_d['data'][0]['attributes']
#PREPARING REPORT (HARMLESS ETC.)
for i in root['last_analysis_stats']:
if root['last_analysis_stats'][i] != 0:
tmp = i + ":" + str(root['last_analysis_stats'][i])
tmp_arr.append(tmp)
    # Parse the VirusTotal response, pulling out the engines that did not report "clean"
for i in root['last_analysis_results']:
if str(root['last_analysis_results'][i]['result']) == "clean":
print("Undetected!")
else:
print("Detected!!")
Engine_name = "**" + root['last_analysis_results'][i]["engine_name"] + "**" + "\n"
repoort.append(Engine_name)
Result = "Result: " + root['last_analysis_results'][i]['result'] + "\n \n"
repoort.append(Result)
    #FORMATTING AV DETAILS TO STR FOR SENDING TO TELEGRAM
str1 = ""
arr = tmp_arr
for i in repoort:
str1 += i
    #PREPARING: join up to the first three non-zero stats into the summary (handles any length)
    report1 = '\n'.join(arr[:3])
# DATA
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
    # SEND FULL REPORT TO TELEGRAM
send_IP = "**" + ip + "**" #BOLD TEXT IP
client.send_message("https://t.me/virtot", "IP" + ip + "\n \n " + report1 + "\n " + "AV Details: \n" + "\n" + str1)
# client.send_message("https://t.me/virtot", dt_string + "\n \n" +"IP address: \n"+ send_IP + ": \n\n" +"Result: \n" + report1 + "\n")
print("Scan done! Check Public")
"""#########################################################################################################"""
"""#############################################START#######################################################"""
"""#########################################################################################################"""
main()
|
"""Support for Somfy Covers."""
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
DEVICE_CLASS_BLIND,
DEVICE_CLASS_SHUTTER,
CoverEntity,
)
from homeassistant.const import STATE_CLOSED, STATE_OPEN
from homeassistant.helpers import config_validation as cv, entity_platform, service
from homeassistant.helpers.restore_state import RestoreEntity
from pymfy.api.devices.blind import Blind
from pymfy.api.devices.category import Category
import voluptuous as vol
from voluptuous.validators import All, Range
from . import SomfyEntity
from .const import (
API,
CONF_OPTIMISTIC,
COORDINATOR,
DOMAIN,
SERVICE_CLOSE_COVER_SLOWLY,
SERVICE_OPEN_COVER_SLOWLY,
SERVICE_SET_COVER_POSITION_SLOWLY,
)
BLIND_DEVICE_CATEGORIES = {Category.INTERIOR_BLIND.value, Category.EXTERIOR_BLIND.value}
SHUTTER_DEVICE_CATEGORIES = {Category.ROLLER_SHUTTER.value}
SUPPORTED_CATEGORIES = {
Category.ROLLER_SHUTTER.value,
Category.INTERIOR_BLIND.value,
Category.EXTERIOR_BLIND.value,
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Somfy cover platform."""
domain_data = hass.data[DOMAIN]
coordinator = domain_data[COORDINATOR]
api = domain_data[API]
somfy_covers = [
SomfyCover(coordinator, device_id, api, domain_data[CONF_OPTIMISTIC])
for device_id, device in coordinator.data.items()
if SUPPORTED_CATEGORIES & set(device.categories)
]
async_add_entities(somfy_covers)
if any(
somfy_cover.has_capability("position_low_speed") for somfy_cover in somfy_covers
):
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_CLOSE_COVER_SLOWLY,
None,
"close_cover_slowly",
)
platform.async_register_entity_service(
SERVICE_OPEN_COVER_SLOWLY,
None,
"open_cover_slowly",
)
platform.async_register_entity_service(
SERVICE_SET_COVER_POSITION_SLOWLY,
{vol.Required("position"): All(int, Range(min=1, max=100))},
"set_cover_position_slowly",
)
class SomfyCover(SomfyEntity, RestoreEntity, CoverEntity):
"""Representation of a Somfy cover device."""
def __init__(self, coordinator, device_id, api, optimistic):
"""Initialize the Somfy device."""
super().__init__(coordinator, device_id, api)
self.categories = set(self.device.categories)
self.optimistic = optimistic
self._closed = None
self._is_opening = None
self._is_closing = None
self.cover = None
self._create_device()
def _create_device(self):
"""Update the device with the latest data."""
self.cover = Blind(self.device, self.api)
async def async_close_cover(self, **kwargs):
"""Close the cover."""
self._is_closing = True
self.async_write_ha_state()
try:
# Blocks until the close command is sent
await self.hass.async_add_executor_job(self.cover.close)
self._closed = True
finally:
self._is_closing = None
self.async_write_ha_state()
async def async_open_cover(self, **kwargs):
"""Open the cover."""
self._is_opening = True
self.async_write_ha_state()
try:
# Blocks until the open command is sent
await self.hass.async_add_executor_job(self.cover.open)
self._closed = False
finally:
self._is_opening = None
self.async_write_ha_state()
def open_cover_slowly(self):
"""Open slowly the cover."""
self.cover.set_position(0, low_speed=True)
def close_cover_slowly(self):
"""Close slowy the cover."""
self.cover.set_position(100, low_speed=True)
def stop_cover(self, **kwargs):
"""Stop the cover."""
self.cover.stop()
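    # Note: the Somfy API reports 100 as fully closed while Home Assistant uses
    # 100 for fully open, hence the "100 - position" conversions below.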
def set_cover_position(self, **kwargs):
"""Move the cover shutter to a specific position."""
self.cover.set_position(100 - kwargs[ATTR_POSITION])
def set_cover_position_slowly(self, position: int):
"""Move the cover shutter to a specific position slowly."""
self.cover.set_position(100 - position, low_speed=True)
@property
def device_class(self):
"""Return the device class."""
if self.categories & BLIND_DEVICE_CATEGORIES:
return DEVICE_CLASS_BLIND
if self.categories & SHUTTER_DEVICE_CATEGORIES:
return DEVICE_CLASS_SHUTTER
return None
@property
def current_cover_position(self):
"""Return the current position of cover shutter."""
position = None
if self.has_capability("position"):
position = 100 - self.cover.get_position()
return position
@property
def is_opening(self):
"""Return if the cover is opening."""
if not self.optimistic:
return None
return self._is_opening
@property
def is_closing(self):
"""Return if the cover is closing."""
if not self.optimistic:
return None
return self._is_closing
@property
def is_closed(self):
"""Return if the cover is closed."""
is_closed = None
if self.has_capability("position"):
is_closed = self.cover.is_closed()
elif self.optimistic:
is_closed = self._closed
return is_closed
@property
def current_cover_tilt_position(self):
"""Return current position of cover tilt.
None is unknown, 0 is closed, 100 is fully open.
"""
orientation = None
if self.has_capability("rotation"):
orientation = 100 - self.cover.orientation
return orientation
def set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
self.cover.orientation = 100 - kwargs[ATTR_TILT_POSITION]
def open_cover_tilt(self, **kwargs):
"""Open the cover tilt."""
self.cover.orientation = 0
def close_cover_tilt(self, **kwargs):
"""Close the cover tilt."""
self.cover.orientation = 100
def stop_cover_tilt(self, **kwargs):
"""Stop the cover."""
self.cover.stop()
async def async_added_to_hass(self):
"""Complete the initialization."""
await super().async_added_to_hass()
if not self.optimistic:
return
# Restore the last state if we use optimistic
last_state = await self.async_get_last_state()
if last_state is not None and last_state.state in (
STATE_OPEN,
STATE_CLOSED,
):
self._closed = last_state.state == STATE_CLOSED
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This class manages the state and some services related to the TAC for an agent."""
import datetime
import random
from typing import Any, List, Optional, Set, Tuple, Dict, Union, Sequence, cast
from aea.channels.oef.connection import MailStats
from aea.mail.base import Address
from aea.protocols.oef.models import Description, Query
from tac.agents.participant.v1.base.dialogues import Dialogues, Dialogue
from tac.agents.participant.v1.base.helpers import (
build_dict,
build_query,
get_goods_quantities_description,
)
from tac.agents.participant.v1.base.states import AgentState, WorldState
from tac.agents.participant.v1.base.stats_manager import StatsManager
from tac.agents.participant.v1.base.strategy import Strategy
from tac.agents.participant.v1.base.transaction_manager import TransactionManager
from tac.gui.dashboards.agent import AgentDashboard
from tac.platform.game.base import GamePhase, GameConfiguration
from tac.platform.game.base import GameData, Transaction
from tac.platform.protocols.tac.message import TACMessage
class Search:
"""This class deals with the search state."""
def __init__(self):
"""Instantiate the search class."""
self._id = 0
self.ids_for_tac = set() # type: Set[int]
self.ids_for_sellers = set() # type: Set[int]
self.ids_for_buyers = set() # type: Set[int]
@property
def id(self) -> int:
"""Get the search id."""
return self._id
def get_next_id(self) -> int:
"""
Generate the next search id and stores it.
:return: a search id
"""
self._id += 1
return self._id
class GameInstance:
"""The GameInstance maintains state of the game from the agent's perspective."""
def __init__(
self,
agent_name: str,
strategy: Strategy,
mail_stats: MailStats,
expected_version_id: str,
services_interval: int = 10,
pending_transaction_timeout: int = 10,
dashboard: Optional[AgentDashboard] = None,
) -> None:
"""
Instantiate a game instance.
:param agent_name: the name of the agent.
:param strategy: the strategy of the agent.
:param mail_stats: the mail stats of the mailbox.
:param expected_version_id: the expected version of the TAC.
:param services_interval: the interval at which services are updated.
:param pending_transaction_timeout: the timeout after which transactions are removed from the lock manager.
:param dashboard: the agent dashboard.
:return: None
"""
self.agent_name = agent_name
self.controller_pbk = None # type: Optional[str]
self._strategy = strategy
self._search = Search()
self._dialogues = Dialogues()
self._game_phase = GamePhase.PRE_GAME
self._expected_version_id = expected_version_id
self._game_configuration = None # type: Optional[GameConfiguration]
self._initial_agent_state = None # type: Optional[AgentState]
self._agent_state = None # type: Optional[AgentState]
self._world_state = None # type: Optional[WorldState]
self._services_interval = datetime.timedelta(0, services_interval)
self._last_update_time = datetime.datetime.now() - self._services_interval
self._last_search_time = datetime.datetime.now() - datetime.timedelta(
0, round(services_interval / 2.0)
)
self.goods_supplied_description = None
self.goods_demanded_description = None
self.transaction_manager = TransactionManager(
agent_name, pending_transaction_timeout=pending_transaction_timeout
)
self.stats_manager = StatsManager(mail_stats, dashboard)
self.dashboard = dashboard
if self.dashboard is not None:
self.dashboard.start()
self.stats_manager.start()
def init(self, game_data: GameData, agent_pbk: Address) -> None:
"""
Populate data structures with the game data.
:param game_data: the game instance data
:param agent_pbk: the public key of the agent
:return: None
"""
# TODO: extend TAC messages to include reference to version id; then replace below with assert
game_data.version_id = self.expected_version_id
self._game_configuration = GameConfiguration(
game_data.version_id,
game_data.nb_agents,
game_data.nb_goods,
game_data.tx_fee,
game_data.agent_pbk_to_name,
game_data.good_pbk_to_name,
)
self._initial_agent_state = AgentState(
game_data.money, game_data.endowment, game_data.utility_params
)
self._agent_state = AgentState(
game_data.money, game_data.endowment, game_data.utility_params
)
if self.strategy.is_world_modeling:
opponent_pbks = self.game_configuration.agent_pbks
opponent_pbks.remove(agent_pbk)
self._world_state = WorldState(
opponent_pbks,
self.game_configuration.good_pbks,
self.initial_agent_state,
)
def on_state_update(self, message: TACMessage, agent_pbk: Address) -> None:
"""
Update the game instance with a State Update from the controller.
        :param message: the state update message
:param agent_pbk: the public key of the agent
:return: None
"""
self.init(message.get("initial_state"), agent_pbk)
self._game_phase = GamePhase.GAME
for tx in message.get("transactions"):
self.agent_state.update(tx, message.get("initial_state").get("tx_fee"))
@property
def expected_version_id(self) -> str:
"""Get the expected version id of the TAC."""
return self._expected_version_id
@property
def strategy(self) -> Strategy:
"""Get the strategy."""
return self._strategy
@property
def search(self) -> Search:
"""Get the search."""
return self._search
@property
def dialogues(self) -> Dialogues:
"""Get the dialogues."""
return self._dialogues
@property
def game_phase(self) -> GamePhase:
"""Get the game phase."""
return self._game_phase
@property
def game_configuration(self) -> GameConfiguration:
"""Get the game configuration."""
assert self._game_configuration is not None, "Game configuration not assigned!"
return self._game_configuration
@property
def initial_agent_state(self) -> AgentState:
"""Get the initial agent state."""
assert (
self._initial_agent_state is not None
), "Initial agent state not assigned!"
return self._initial_agent_state
@property
def agent_state(self) -> AgentState:
"""Get the agent state."""
assert self._agent_state is not None, "Agent state not assigned!"
return self._agent_state
@property
def world_state(self) -> WorldState:
"""Get the world state."""
assert self._world_state is not None, "World state not assigned!"
return self._world_state
@property
def services_interval(self) -> datetime.timedelta:
"""Get the services interval."""
return self._services_interval
@property
def last_update_time(self) -> datetime.datetime:
"""Get the last services update time."""
return self._last_update_time
@property
def last_search_time(self) -> datetime.datetime:
"""Get the last services search time."""
return self._last_search_time
def is_time_to_update_services(self) -> bool:
"""
Check if the agent should update the service directory.
:return: bool indicating the action
"""
now = datetime.datetime.now()
result = now - self.last_update_time > self.services_interval
if result:
self._last_update_time = now
return result
def is_time_to_search_services(self) -> bool:
"""
Check if the agent should search the service directory.
:return: bool indicating the action
"""
now = datetime.datetime.now()
result = now - self.last_search_time > self.services_interval
if result:
self._last_search_time = now
return result
def is_profitable_transaction(
self, transaction: Transaction, dialogue: Dialogue
) -> Tuple[bool, str]:
"""
Check if a transaction is profitable.
Is it a profitable transaction?
- apply all the locks for role.
- check if the transaction is consistent with the locks (enough money/holdings)
- check that we gain score.
:param transaction: the transaction
:param dialogue: the dialogue
:return: True if the transaction is good (as stated above), False otherwise.
"""
state_after_locks = self.state_after_locks(dialogue.is_seller)
if not state_after_locks.check_transaction_is_consistent(
transaction, self.game_configuration.tx_fee
):
message = "[{}]: the proposed transaction is not consistent with the state after locks.".format(
self.agent_name
)
return False, message
proposal_delta_score = state_after_locks.get_score_diff_from_transaction(
transaction, self.game_configuration.tx_fee
)
result = self.strategy.is_acceptable_proposal(proposal_delta_score)
message = "[{}]: is good proposal for {}? {}: tx_id={}, delta_score={}, amount={}".format(
self.agent_name,
dialogue.role,
result,
transaction.transaction_id,
proposal_delta_score,
transaction.amount,
)
return result, message
def get_service_description(self, is_supply: bool) -> Description:
"""
Get the description of the supplied goods (as a seller), or the demanded goods (as a buyer).
:param is_supply: Boolean indicating whether it is supply or demand.
:return: the description (to advertise on the Service Directory).
"""
desc = get_goods_quantities_description(
self.game_configuration.good_pbks,
self.get_goods_quantities(is_supply),
is_supply=is_supply,
)
return desc
def build_services_query(self, is_searching_for_sellers: bool) -> Optional[Query]:
"""
Build a query to search for services.
In particular, build the query to look for agents
- which supply the agent's demanded goods (i.e. sellers), or
- which demand the agent's supplied goods (i.e. buyers).
:param is_searching_for_sellers: Boolean indicating whether the search is for sellers or buyers.
:return: the Query, or None.
"""
good_pbks = self.get_goods_pbks(is_supply=not is_searching_for_sellers)
res = (
None
if len(good_pbks) == 0
else build_query(good_pbks, is_searching_for_sellers)
)
return res
def build_services_dict(
self, is_supply: bool
) -> Optional[Dict[str, Sequence[str]]]:
"""
Build a dictionary containing the services demanded/supplied.
:param is_supply: Boolean indicating whether the services are demanded or supplied.
:return: a Dict.
"""
good_pbks = self.get_goods_pbks(is_supply=is_supply)
res = None if len(good_pbks) == 0 else build_dict(good_pbks, is_supply)
return res
def is_matching(
self,
cfp_services: Dict[str, Union[bool, List[Any]]],
goods_description: Description,
) -> bool:
"""
Check for a match between the CFP services and the goods description.
:param cfp_services: the services associated with the cfp.
:param goods_description: a description of the goods.
:return: Bool
"""
services = cfp_services["services"]
services = cast(List[Any], services)
        if cfp_services["description"] == goods_description.data_model.name:
# The call for proposal description and the goods model name cannot be the same for trading agent pairs.
return False
for good_pbk in goods_description.data_model.attributes_by_name.keys():
if good_pbk not in services:
continue
return True
return False
def get_goods_pbks(self, is_supply: bool) -> Set[str]:
"""
Wrap the function which determines supplied and demanded good public keys.
:param is_supply: Boolean indicating whether it is referencing the supplied or demanded public keys.
        :return: the set of good public keys
"""
state_after_locks = self.state_after_locks(is_seller=is_supply)
good_pbks = (
self.strategy.supplied_good_pbks(
self.game_configuration.good_pbks, state_after_locks.current_holdings
)
if is_supply
else self.strategy.demanded_good_pbks(
self.game_configuration.good_pbks, state_after_locks.current_holdings
)
)
return good_pbks
def get_goods_quantities(self, is_supply: bool) -> List[int]:
"""
Wrap the function which determines supplied and demanded good quantities.
:param is_supply: Boolean indicating whether it is referencing the supplied or demanded quantities.
:return: the vector of good quantities offered/requested.
"""
state_after_locks = self.state_after_locks(is_seller=is_supply)
quantities = (
self.strategy.supplied_good_quantities(state_after_locks.current_holdings)
if is_supply
else self.strategy.demanded_good_quantities(
state_after_locks.current_holdings
)
)
return quantities
def state_after_locks(self, is_seller: bool) -> AgentState:
"""
Apply all the locks to the current state of the agent.
        This assumes that all the locked transactions will be successful.
:param is_seller: Boolean indicating the role of the agent.
:return: the agent state with the locks applied to current state
"""
assert self._agent_state is not None, "Agent state not assigned!"
transactions = (
list(self.transaction_manager.locked_txs_as_seller.values())
if is_seller
else list(self.transaction_manager.locked_txs_as_buyer.values())
)
state_after_locks = self._agent_state.apply(
transactions, self.game_configuration.tx_fee
)
return state_after_locks
def generate_proposal(
self, cfp_services: Dict[str, Union[bool, List[Any]]], is_seller: bool
) -> Optional[Description]:
"""
Wrap the function which generates proposals from a seller or buyer.
If there are locks as seller, it applies them.
:param cfp_services: the query associated with the cfp.
:param is_seller: Boolean indicating the role of the agent.
        :return: a single matching proposal description (chosen at random), or None if there is no match
"""
state_after_locks = self.state_after_locks(is_seller=is_seller)
candidate_proposals = self.strategy.get_proposals(
self.game_configuration.good_pbks,
state_after_locks.current_holdings,
state_after_locks.utility_params,
self.game_configuration.tx_fee,
is_seller,
self._world_state,
)
proposals = []
for proposal in candidate_proposals:
if not self.is_matching(cfp_services, proposal):
continue
            if proposal.values["price"] <= 0:
continue
proposals.append(proposal)
if not proposals:
return None
else:
return random.choice(proposals)
def stop(self):
"""Stop the services attached to the game instance."""
self.stats_manager.stop()
|
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.const import *
CONF_UPDATE_INSTANT = "update_instant"
CONF_MAPPING = 'mapping'
CONF_CONTROL_PARAMS = 'params'
CONF_CLOUD = 'update_from_cloud'
CONF_MODEL = 'model'
CONF_SENSOR_PROPERTY = "sensor_property"
CONF_SENSOR_UNIT = "sensor_unit"
CONF_DEFAULT_PROPERTIES = "default_properties"
ATTR_STATE_VALUE = "state_value"
ATTR_MODEL = "model"
ATTR_FIRMWARE_VERSION = "firmware_version"
ATTR_HARDWARE_VERSION = "hardware_version"
DOMAIN = 'xiaomi_miot_raw'
SUPPORTED_DOMAINS = [
"sensor",
"switch",
"light",
"fan",
"cover",
"humidifier",
"media_player",
"climate",
"lock",
"water_heater",
]
DEFAULT_NAME = "Xiaomi MIoT Device"
DUMMY_IP = "255.255.255.255"
DUMMY_TOKEN = "00000000000000000000000000000000"
SCHEMA = {
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UPDATE_INSTANT, default=True): cv.boolean,
vol.Optional(CONF_CLOUD): vol.All(),
    vol.Optional('cloud_write'): vol.All(),
    vol.Required(CONF_MAPPING): vol.All(),
    vol.Required(CONF_CONTROL_PARAMS): vol.All(),
vol.Optional(CONF_SENSOR_PROPERTY): cv.string,
vol.Optional(CONF_SENSOR_UNIT): cv.string,
}
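# Illustrative configuration.yaml snippet matching SCHEMA above. All values are
# hypothetical placeholders; the structure of 'mapping' and 'params' is device
# specific and only sketched here, since the schema accepts any value for them.
#
#   switch:
#     - platform: xiaomi_miot_raw
#       host: 192.168.1.100
#       token: "00000000000000000000000000000000"   # 32-character device token
#       name: Example MIoT Switch
#       update_instant: true
#       mapping:
#         switch:
#           switch_status: {siid: 2, piid: 1}
#       params:
#         switch:
#           power_on: true
#           power_off: false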
MAP = {
"sensor": {
"air_monitor",
"water_purifier",
"cooker",
"pressure_cooker",
"induction_cooker",
"power_consumption",
"electricity",
"environment",
"filter",
"filter_2",
"filter_3",
"filter_4",
"temperature_humidity_sensor",
"magnet_sensor",
"motion_sensor",
"submersion_sensor",
"tds_sensor",
"air_fryer",
"remain_clean_time",
},
"switch": {
"switch",
"outlet",
"switch_2",
"switch_3",
"switch_4",
"coffee_machine",
},
"light": {
"light",
"light_2",
"light_3",
"light_4",
"indicator_light",
},
"fan": {
"a_l",
"fan",
"ceiling_fan",
"air_fresh",
"air_purifier",
"washer",
"hood",
"fan_control",
"dryer",
"toilet",
"settings",
"settings_2",
"air_fresh_heater",
"bed",
"pet_drinking_fountain",
},
"cover": {
"curtain",
"airer",
},
"humidifier": {
"humidifier",
"dehumidifier",
},
"media_player": {
"media_player",
"speaker",
"play_control",
},
"climate": {
"air_conditioner",
"heater",
},
"lock": {
"physical_controls_locked",
},
"water_heater": {
"water_heater",
"kettle",
"dishwasher",
},
}
UNIT_MAPPING = {
    "percentage" : PERCENTAGE , # percent
    "celsius" : TEMP_CELSIUS , # degrees Celsius
    "seconds" : "秒" , # seconds
    "minutes" : "分钟" , # minutes
    "hours" : "小时" , # hours
    "days" : "天" , # days
    "kelvin" : TEMP_KELVIN , # Kelvin
    "pascal" : "Pa" , # pascal (pressure unit)
    "arcdegrees" : "rad" , # radian (angle unit)
    "rgb" : "RGB" , # RGB (color)
    "watt" : POWER_WATT , # watt (power)
    "litre" : VOLUME_LITERS , # litre
    "ppm" : CONCENTRATION_PARTS_PER_MILLION , # concentration in ppm
    "lux" : LIGHT_LUX , # lux (illuminance)
    "mg/m3" : CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER , # milligrams per cubic metre
}
|
"""
This module provides an API for tracking
*data lineage* -- the history of how a given result was created, including the
versions of original source data and the various steps run in the *data pipeline*
to produce the final result.
The basic idea is that your workflow is a sequence of *pipeline steps*::
---------- ---------- ---------- ----------
| | | | | | | |
| Step 1 |---->| Step 2 |---->| Step 3 |---->| Step 4 |
| | | | | | | |
---------- ---------- ---------- ----------
A step could be a command line script, a Jupyter notebook or perhaps
a step in an automated workflow tool (e.g. Apache Airflow).
Each step takes a number of *inputs* and *parameters* and generates *outputs*.
The inputs are resources in your workspace (or subpaths within a resource) from
which the step will read to perform its task. The parameters are configuration
values passed to the step (e.g. the command line arguments of a script). The outputs
are the resources (or subpaths within a resource), which are written to by
the step. The outputs may represent results or intermediate data to be consumed
by downstream steps.
The lineage API captures this data for each step. Here is a view of the data captured::
Parameters
|| || ||
\\/ \\/ \\/
------------
=>| |=>
Input resources =>| Step i |=> Output resources
=>| |=>
------------
/\\
||
Code
Dependencies
To do this, we need to use the following classes:
* :class:`~ResourceRef` - A reference to a resource for use as a step input or output.
A ResourceRef contains a resource name and an optional path within that resource.
This lets you manage lineage down to the directory or even file level. The APIs also
support specifying a path on the local filesystem instead of a ResourceRef. This path
is automatically resolved to a ResourceRef (it must map to a location under the
local path of a resource). By storing :class:`~ResourceRef`s instead of hard-coded
filesystem paths, we can include non-local resources (like an S3 bucket) and ensure
that the workspace is easily deployed on a new machine.
* :class:`~Lineage` - The main lineage object, instantiated at the start of your step.
At the beginning of your step, you specify the inputs, parameters, and outputs. At the
end of the step, the data is saved, along with any results you might have from that step.
Lineage instances are
`context managers <https://docs.python.org/3/reference/datamodel.html#context-managers>`_,
which means you can use a ``with`` statement to manage their lifecycle.
* :class:`~LineageBuilder` - This is a helper class to guide the creation of your lineage
object.
**Example**
Here is an example usage of the lineage API in a command line script::
    import argparse
    import sys
    from dataworkspaces.lineage import LineageBuilder
def main():
...
parser = argparse.ArgumentParser()
parser.add_argument('--gamma', type=float, default=0.01,
help="Regularization parameter")
parser.add_argument('input_data_dir', metavar='INPUT_DATA_DIR', type=str,
help='Path to input data')
parser.add_argument('results_dir', metavar='RESULTS_DIR', type=str,
help='Path to where results should be stored')
args = parser.parse_args()
...
# Create a LineageBuilder instance to specify the details of the step
# to the lineage API.
builder = LineageBuilder()\\
.as_script_step()\\
.with_parameters({'gamma':args.gamma})\\
.with_input_path(args.input_data_dir)\\
.as_results_step(args.results_dir)
# builder.eval() will construct the lineage object. We call it within a
# with statement to get automatic save/cleanup when we leave the
# with block.
with builder.eval() as lineage:
... do your work here ...
# all done, write the results
lineage.write_results({'accuracy':accuracy,
'precision':precision,
'recall':recall,
'roc_auc':roc_auc})
# When leaving the with block, the lineage is automatically saved to the
# workspace. If an exception is thrown, the lineage is not saved, but the
# outputs are marked as being in an unknown state.
return 0
# boilerplate to call our main function if this is called as a script.
    if __name__ == '__main__':
sys.exit(main())
"""
import sys
from abc import ABC, abstractmethod
import contextlib
from collections import OrderedDict
import datetime
from typing import List, Union, Any, Type, Iterable, Dict, Optional, cast
from os.path import curdir, join, isabs, abspath, expanduser, exists, basename
from argparse import ArgumentParser, Namespace
from copy import copy
from dataworkspaces.errors import ConfigurationError
from dataworkspaces.workspace import (
Workspace,
load_workspace,
FileResourceMixin,
PathNotAResourceError,
SnapshotWorkspaceMixin,
ResourceRoles,
_find_containing_workspace,
)
from dataworkspaces.utils.lineage_utils import (
ResourceRef,
StepLineage,
infer_step_name,
infer_script_path,
LineageError,
)
##########################################################################
# Main lineage API
##########################################################################
class Lineage(contextlib.AbstractContextManager):
"""This is the main object for tracking the execution of a step.
Rather than instantiating it directly, use the :class:`~LineageBuilder`
class to construct your :class:`~Lineage` instance.
"""
def __init__(
self,
step_name: str,
start_time: datetime.datetime,
parameters: Dict[str, Any],
inputs: List[Union[str, ResourceRef]],
code: List[Union[str, ResourceRef]],
workspace: Workspace,
command_line: Optional[List[str]] = None,
current_directory: Optional[str] = None,
):
self.workspace = workspace # type: Workspace
self.instance = workspace.get_instance()
# if not isinstance(workspace, SnapshotWorkspaceMixin) or not workspace.supports_lineage():
# raise ConfigurationError("Backend for workspace %s does not support lineage" % workspace.name)
self.store = cast(SnapshotWorkspaceMixin, workspace).get_lineage_store()
input_resource_refs = [] # type: List[ResourceRef]
for r_or_p in inputs:
if isinstance(r_or_p, ResourceRef):
workspace.validate_resource_name(r_or_p.name, r_or_p.subpath)
input_resource_refs.append(r_or_p)
else:
ref = workspace.map_local_path_to_resource(r_or_p)
input_resource_refs.append(ref)
code_resource_refs = [] # type: List[ResourceRef]
for r_or_p in code:
if isinstance(r_or_p, ResourceRef):
self.workspace.validate_resource_name(
r_or_p.name, r_or_p.subpath, expected_role=ResourceRoles.CODE
)
code_resource_refs.append(r_or_p)
else:
ref = workspace.map_local_path_to_resource(r_or_p, expecting_a_code_resource=True)
# For now, we will resolve code paths at the resource level.
# We drop the subpath, unless the user provided it explicitly
# through a ResourceRef.
crr = ResourceRef(ref.name, None)
if crr not in code_resource_refs:
code_resource_refs.append(crr)
# The run_from_directory can be either a resource reference (best),
# a path on the local filesystem, or None
try:
if current_directory is not None:
if not isabs(current_directory):
                    current_directory = abspath(expanduser(current_directory))
run_from_directory = workspace.map_local_path_to_resource(
current_directory
) # type: Optional[ResourceRef]
else:
run_from_directory = None
except PathNotAResourceError:
run_from_directory = None
self.step = StepLineage.make_step_lineage(
workspace.get_instance(),
step_name,
start_time,
parameters,
input_resource_refs,
code_resource_refs,
self.store,
command_line=command_line,
run_from_directory=run_from_directory,
)
self.in_progress = True
def add_input_path(self, path: str) -> None:
if not exists(path):
raise LineageError("Path %s does not exist" % path)
ref = self.workspace.map_local_path_to_resource(path) # mypy: ignore
self.step.add_input(self.workspace.get_instance(), self.store, ref) # mypy: ignore
def add_input_ref(self, ref: ResourceRef) -> None:
self.step.add_input(self.workspace.get_instance(), self.store, ref)
def add_output_path(self, path: str) -> None:
"""Resolve the path to a resource name and subpath. Add
that to the lineage as an output of the step. From this point on,
if the step fails (:func:`~abort` is called), the associated resource
and subpath will be marked as being in an "unknown" state.
"""
ref = self.workspace.map_local_path_to_resource(path) # mypy: ignore
self.step.add_output(self.workspace.get_instance(), self.store, ref) # mypy: ignore
def add_output_ref(self, ref: ResourceRef):
"""Add the resource reference to the lineage as an output of the step.
From this point on, if the step fails (:func:`~abort` is called), the
associated resource and subpath will be marked as being in an
"unknown" state.
"""
self.step.add_output(self.workspace.get_instance(), self.store, ref)
def add_param(self, name: str, value) -> None:
"""Add or update one of the step's parameters.
"""
assert self.in_progress # should only do while step running
self.step.parameters[name] = value
def abort(self):
"""The step has failed, so we mark its outputs in an unknown state.
If you create the lineage via a "with" statement, then this will be
called for you automatically.
"""
if not self.in_progress:
print(
"WARNING: Lineage.abort() called after complete() or abort() call for %s"
% self.step.step_name,
file=sys.stderr,
)
else:
self.in_progress = False
for output_cert in self.step.output_resources:
self.store.clear_entry(self.instance, output_cert.ref)
def _set_execution_time(self):
"""If the execution time has not already been set, and the start timestamp
        was captured, compute and set the execution time. This may be called from
two places: :func:`ResultsResource.write_results` and from :func:`complete`,
which is called when exiting the lineage's context manager ("with") block.
Since the user could potentially call both, we only set it the first call.
Both calls should happen after the real work for the step, so that should
be ok.
"""
if self.step.execution_time_seconds is None and self.step.start_time is not None:
self.step.execution_time_seconds = (
datetime.datetime.now() - self.step.start_time
).total_seconds()
def complete(self):
"""The step has completed. Save the outputs.
If you create the lineage via a "with" statement, then this will be
called for you automatically.
"""
if not self.in_progress:
print(
"WARNING: Lineage.complete() called after complete() or abort() call for %s"
% self.step.step_name,
file=sys.stderr,
)
else:
self.in_progress = False
self._set_execution_time()
self.store.store_entry(self.instance, self.step)
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.complete()
else:
self.abort()
return False # don't suppress any exception
class ResultsLineage(Lineage):
"""Lineage for a results step. This subclass is returned by the
:class:`~LineageBuilder` when :func:`~LineageBuilder.as_results_step` is called.
This marks the :class:`~Lineage` object as generating results.
It adds the :func:`~write_results`
method for writing a JSON summary of the final results.
Results resources will also have a ``lineage.json`` file added
when the next snapshot is taken. This file contains the full
lineage graph collected for the resource.
"""
def __init__(
self,
step_name: str,
start_time: datetime.datetime,
parameters: Dict[str, Any],
inputs: List[Union[str, ResourceRef]],
code: List[Union[str, ResourceRef]],
results_dir_or_ref: Union[str, ResourceRef],
workspace: Workspace,
run_description: Optional[str] = None,
command_line: Optional[List[str]] = None,
current_directory: Optional[str] = None,
):
super().__init__(
step_name,
start_time,
parameters,
inputs,
code,
workspace,
command_line,
current_directory,
)
if isinstance(results_dir_or_ref, str):
self.results_ref = self.workspace.map_local_path_to_resource(results_dir_or_ref)
else:
self.results_ref = cast(ResourceRef, results_dir_or_ref)
self.results_resource = self.workspace.get_resource(self.results_ref.name)
self.add_output_ref(self.results_ref)
self.run_description = run_description
if not isinstance(self.results_resource, FileResourceMixin):
raise ConfigurationError(
"Resource '%s' does not support a file API and thus won't support writing results."
% self.results_ref.name
)
def write_results(self, metrics: Dict[str, Any]):
"""Write a ``results.json`` file to the results directory
specified when creating the lineage object (e.g. via
:func:`~LineageBuilder.as_results_step`).
This json file contains information
about the step execution (e.g. start time), parameters,
and the provided metrics.
"""
self._set_execution_time()
data = {
"step": self.step.step_name,
"start_time": self.step.start_time.isoformat(),
"execution_time_seconds": self.step.execution_time_seconds,
"parameters": self.step.parameters,
"run_description": self.run_description,
"metrics": metrics,
}
if self.results_ref.subpath is not None:
results_relpath = join(self.results_ref.subpath, "results.json")
else:
results_relpath = "results.json"
cast(FileResourceMixin, self.results_resource).add_results_file(data, results_relpath)
print("Wrote results to %s:%s" % (self.results_ref.name, results_relpath))
class LineageBuilder:
"""Use this class to declaratively build :class:`~Lineage` objects. Instantiate
a LineageBuilder instance, and call a sequence of configuration methods
to specify your inputs, parameters, your workspace (if the script is not
already inside the workspace), and whether this is a results step. Each
configuration method returns the builder, so you can chain them together.
Finally, call :func:`~eval` to instantiate the :class:`~Lineage` object.
**Configuration Methods**
To specify the workflow step's name, call one of:
* :func:`~as_script_step` - the script's name will be used to infer the step
and the associated code resource
    * :func:`~with_step_name` - explicitly specify the step name
To specify the parameters of the step (e.g. command line arguments), use the
:func:`~with_parameters` method.
To specify the input of the step call one or more of:
* :func:`~with_input_path` - resolve the local filesystem path to a resource and
      subpath and add it to the lineage as an input. May be called more than once.
* :func:`~with_input_paths` - resolve a list of local filesystem paths to
resources and subpaths and add them to the lineage as inputs. May be called
more than once.
* :func:`~with_input_ref` - add the resource and subpath to the lineage as an input.
May be called more than once.
* :func:`~with_no_inputs` - mutually exclusive with the other input methods. This
signals that there are no inputs to this step.
To specify code resource dependencies for the step, you can call
:func:`~with_code_ref`. For command-line Python scripts, the
main code resource is handled automatically in :func:`~as_script_step`.
Other subclasses of the LineageBuilder may provide similar functionality
(e.g. the LineageBuilder for JupyterNotebooks will try to figure out the resource
containing your notebook and set it in the lineage).
If you need to specify the workspace's root directory, use the
:func:`~with_workspace_directory` method. Otherwise, the lineage API will attempt
to infer the workspace directory by looking at the path of the script.
Call :func:`~as_results_step` to indicate that this step is producing results.
This will add a method :func:`~ResultsLineage.write_results` to the :class:`~Lineage` object
returned by :func:`~eval`. The method :func:`~as_results_step` takes two parameters:
`results_dir` and, optionally, `run_description`. The results directory should
correspond to either the root directory of a results resource or a subdirectory
within the resource. If you have multiple steps of your workflow that produce results,
you can create separate subdirectories for each results-producing step.
**Example**
Here is an example where we build a :class:`~Lineage` object for a script,
that has one input, and that produces results::
lineage = LineageBuilder()\\
.as_script_step()\\
.with_parameters({'gamma':0.001})\\
.with_input_path(args.intermediate_data)\\
.as_results_step('../results').eval()
**Methods**
"""
def __init__(self):
self.step_name = None # type: Optional[str]
self.command_line = None # type: Optional[List[str]]
self.current_directory = None # type: Optional[str]
self.parameters = None # type: Optional[Dict[str, Any]]
self.inputs = None # type: Optional[List[Union[str, ResourceRef]]]
self.no_inputs = False # type: Optional[bool]
self.code = [] # type: List[Union[str, ResourceRef]]
self.workspace_dir = None # type: Optional[str]
self.results_dir = None # type: Optional[str]
self.run_description = None # type: Optional[str]
def as_script_step(self) -> "LineageBuilder":
assert self.step_name is None, "attempting to set step name twice!"
self.step_name = infer_step_name()
self.command_line = [sys.executable] + sys.argv
self.current_directory = curdir
self.code.append(infer_script_path())
return self
def with_step_name(self, step_name: str) -> "LineageBuilder":
assert self.step_name is None, "attempting to set step name twice!"
self.step_name = step_name
return self
def with_parameters(self, parameters: Dict[str, Any]) -> "LineageBuilder":
        assert self.parameters is None, "attempting to specify parameters twice"
self.parameters = parameters
return self
def with_input_path(self, path: str) -> "LineageBuilder":
assert self.no_inputs is False, "Cannot specify both inputs and no inputs"
if self.inputs is None:
self.inputs = [path]
else:
self.inputs.append(path)
return self
def with_input_paths(self, paths: List[str]) -> "LineageBuilder":
assert self.no_inputs is False, "Cannot specify both inputs and no inputs"
if self.inputs is None:
self.inputs = cast(Optional[List[Union[str, ResourceRef]]], copy(paths))
else:
self.inputs.extend(paths)
return self
def with_input_ref(self, ref: ResourceRef) -> "LineageBuilder":
assert self.no_inputs is False, "Cannot specify both inputs and no inputs"
if self.inputs is None:
self.inputs = [ref]
else:
self.inputs.append(ref)
return self
def with_no_inputs(self) -> "LineageBuilder":
assert self.inputs is None, "Cannot specify inputs and with_no_inputs()"
self.no_inputs = True
return self
def with_code_path(self, path: str) -> "LineageBuilder":
self.code.append(path)
return self
def with_code_ref(self, ref: ResourceRef) -> "LineageBuilder":
self.code.append(ref)
return self
def with_workspace_directory(self, workspace_dir: str) -> "LineageBuilder":
        load_workspace("git:" + workspace_dir, False, False)  # does validation
        self.workspace_dir = workspace_dir
return self
def as_results_step(
self, results_dir: str, run_description: Optional[str] = None
) -> "LineageBuilder":
assert self.results_dir is None, "attempting to specify results directory twice"
self.results_dir = results_dir
self.run_description = run_description
return self
def eval(self) -> Lineage:
"""Validate the current configuration, making sure all required
properties have been specified, and return a :class:`~Lineage` object
with the requested configuration.
"""
assert self.parameters is not None, "Need to specify parameters"
assert self.no_inputs or (
self.inputs is not None
), "Need to specify either inputs or no inputs"
inputs = self.inputs if self.inputs is not None else [] # type: List[Union[str, Any]]
if self.workspace_dir is None:
self.workspace_dir = _find_containing_workspace()
if self.workspace_dir is None:
raise ConfigurationError("Could not find a workspace, starting at %s" % curdir)
if self.step_name is None and len(self.code) > 0:
# try to infer step name from code
if isinstance(self.code[0], ResourceRef) and self.code[0].subpath is not None:
self.step_name = (
basename(self.code[0].subpath).replace(".py", "").replace(".ipynb", "")
)
elif isinstance(self.code[0], str):
self.step_name = basename(self.code[0]).replace(".py", "").replace(".ipynb", "")
assert self.step_name is not None, (
"Unable to infer the step name. Please specify step name directly via with_step_name() "
+ "or indirectly through with_code_path()."
)
# TODO: need to make this handle other backends as well.
workspace = load_workspace("git:" + self.workspace_dir, False, False)
if self.results_dir is not None:
return ResultsLineage(
self.step_name,
datetime.datetime.now(),
self.parameters,
inputs,
self.code,
self.results_dir,
workspace=workspace,
run_description=self.run_description,
command_line=self.command_line,
current_directory=self.current_directory,
)
else:
return Lineage(
self.step_name,
datetime.datetime.now(),
self.parameters,
inputs,
self.code,
workspace,
self.command_line,
self.current_directory,
)
##########################################################################
# Helper classes for defining program parameters
##########################################################################
class LineageParameter(ABC):
def __init__(self, name: str, default: Any):
self.name = name
self.default = default
@abstractmethod
def get_value(self, parsed_args: Namespace):
pass
@abstractmethod
def add_to_arg_parser(self, arg_parser: ArgumentParser):
pass
class CmdLineParameter(LineageParameter):
def __init__(self, name: str, default: Any, type: Type, help: str):
super().__init__(name, default)
self.type = type
self.help = help
def get_arg_name(self) -> str:
return "--" + self.name.replace("_", "-")
def add_to_arg_parser(self, arg_parser: ArgumentParser):
arg_parser.add_argument(
self.get_arg_name(), type=self.type, default=self.default, help=self.help
)
def get_value(self, parsed_args: Namespace):
return getattr(parsed_args, self.name)
class BooleanParameter(CmdLineParameter):
def __init__(self, name: str, default: bool, help: str):
super().__init__(name, default, bool, help)
if self.default:
self.action = "store_false"
else:
self.action = "store_true"
def get_arg_name(self) -> str:
if self.default:
return "--no-" + self.name.replace("_", "-")
else:
return "--" + self.name.replace("_", "-")
def add_to_arg_parser(self, arg_parser: ArgumentParser):
arg_parser.add_argument(
self.get_arg_name(),
default=self.default,
action=self.action,
help=self.help,
dest=self.name,
)
class ChoiceParameter(CmdLineParameter):
def __init__(self, name: str, choices: Iterable[Any], default: Any, type: Type, help: str):
super().__init__(name, default, type, help)
self.choices = choices
assert default in choices
def add_to_arg_parser(self, arg_parser: ArgumentParser):
arg_parser.add_argument(
self.get_arg_name(),
type=self.type,
default=self.default,
choices=self.choices,
help=self.help,
)
class ConstantParameter(LineageParameter):
def get_value(self, parsed_args: Namespace):
return self.default
def add_lineage_parameters_to_arg_parser(
parser: ArgumentParser, params: Iterable[LineageParameter]
):
for param in params:
param.add_to_arg_parser(parser)
def get_lineage_parameter_values(params: Iterable[LineageParameter], parsed_args: Namespace):
values = OrderedDict() # type: Dict[str,Any]
for param in params:
values[param.name] = param.get_value(parsed_args)
return values
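# --- Illustrative usage (not part of the library API) -----------------------
# A minimal sketch showing how the LineageParameter helpers above can drive an
# argparse-based script. The parameter names, defaults and choices are made-up
# example values, not assumptions about any real workflow.
if __name__ == "__main__":
    _params = [
        CmdLineParameter("gamma", default=0.01, type=float, help="Regularization parameter"),
        BooleanParameter("verbose", default=False, help="Enable verbose output"),
        ChoiceParameter("solver", choices=["sgd", "adam"], default="sgd", type=str, help="Optimizer to use"),
    ]  # type: List[LineageParameter]
    _parser = ArgumentParser(description="Demo of the LineageParameter helpers")
    add_lineage_parameters_to_arg_parser(_parser, _params)
    _args = _parser.parse_args()
    # Collect the parsed values into an ordered dict, ready to be passed to
    # LineageBuilder.with_parameters(...)
    print(get_lineage_parameter_values(_params, _args))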
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 19 12:36:25 2022
@author: fatimamh
"""
import os
import pandas as pd
from nltk.tokenize import sent_tokenize
'''----------------------------------------------------------------
'''
def get_folders(root):
folders = list(filter(lambda x: os.path.isdir(os.path.join(root, x)), os.listdir(root)))
#print(folders)
return folders
'''----------------------------------------------------------------
'''
def get_pairfolder(f):
parts = f.split('_')
print(parts)
parts[1] = 'cross'
print(parts)
out_f = '_'.join(parts)
return out_f
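# e.g. (hypothetical folder name) get_pairfolder('exp06_mono_en') -> 'exp06_cross_en':
# the second '_'-separated part of the folder name is replaced by 'cross'.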
'''----------------------------------------------------------------
'''
if __name__ == '__main__':
"""
    Open simple_summaries from each mono folder (these hold the simplified summaries).
    Merge the cross reference summaries with the simplified ones so they can be used for scoring;
    scoring for mono still has to be done separately, and the mono system summaries are
    split into sentence lists here. Only the merging is done in this script.
    (11-9: simple summaries for mono have already been converted.)
"""
root = "/hits/basement/nlp/fatimamh/outputs/hipo/exp06"
folders = get_folders(root)
subs = 'mono'
file = 'summaries.csv'
new_file = 'simple_summaries.csv'
mono_df = pd.DataFrame()
cross_df = pd.DataFrame()
count = 0
for f in folders:
if subs in f:
count = count + 1
mono_folder = os.path.join(root, f)
cross_folder = get_pairfolder(f)
cross_folder = os.path.join(root, cross_folder)
if os.path.isdir(mono_folder):
print("mono folder: {}".format(mono_folder))
mono_file = os.path.join(mono_folder, new_file)
if os.path.isfile(mono_file):
print("mono file: {}\n".format(mono_file))
mono_df = pd.read_csv(mono_file, index_col= False)
if os.path.isdir(cross_folder):
print("cross folder: {}".format(cross_folder))
cross_file = os.path.join(cross_folder, file)
if os.path.isfile(cross_file):
print("cross file: {}\n".format(cross_file))
cross_df = pd.read_csv(cross_file, index_col= False)
cross_df.drop(cross_df.columns[cross_df.columns.str.contains('unnamed',case = False)],
axis = 1, inplace = True)
if 'meta' in cross_df.columns:
cross_df.drop('meta', axis= 1, inplace=True)
print('\n===========================================================================')
print(mono_df.head(3))
print(cross_df.head(3))
print('\n===========================================================================')
cross_df['system'] = mono_df['system']
mono_df['system'] = mono_df['system'].apply(lambda x: sent_tokenize(x))
print(mono_df.head(5))
print(cross_df.head(5))
print('\n===========================================================================')
out_file = os.path.join(cross_folder, new_file)
print("new cross file: {}\n".format(out_file))
cross_df.to_csv(out_file, index=False)
out_file = os.path.join(mono_folder, 'sim_summaries.csv')
print("mono file: {}\n".format(out_file))
mono_df.to_csv(out_file, index=False)
print('total folders processed: {}'.format(count))
|
import threading
import time
from random import random
class Proxy:
def __init__(self, object, object_pool):
self._object = object
self._object_pool = object_pool
def __enter__(self):
return self._object
def __exit__(self, exc_type, exc_val, exc_tb):
self._object_pool._release_object(self._object)
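# Proxy is a context manager: __enter__ hands out the pooled object and __exit__
# returns it to the pool, so "with pool.allocate_object() as server:" releases the
# server even if the body of the with-block raises.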
class Object_Pool:
_instance = None
def __init__(self):
# Object Pool should be a singleton class
if Object_Pool._instance is not None:
            print("This class has already been created:", self.getInstance())
self._list = []
self._server_number = 0
@classmethod
def getInstance(cls):
if cls._instance is None:
cls._instance = Object_Pool()
return cls._instance
def allocate_object(self):
# Wait until object resource is ready
if (len(self._list) <= 0):
self._list.append(Server(self._server_number))
self._server_number += 1
# Get the object instance from the queue.
return Proxy(self._list.pop(), self)
def _release_object(self, object):
# Put the object instance back to the queue.
self._list.append(object)
class Server():
def __init__(self, id):
self.id = id
def get_id(self):
return self.id
class Client(threading.Thread):
    def __init__(self, id, obj_pool):
        threading.Thread.__init__(self)
        self.id = id
        self.obj_pool = obj_pool
    def run(self):
        # Try to play around with the pre-run time to see how many servers are needed to support all clients.
        time.sleep(random() * 5)
        with self.obj_pool.allocate_object() as server:
            print('Client %d is using server %d' % (self.id, server.get_id()))
            time.sleep(random())  # Pretend to work for a second
            print('Client %d released server %d' % (self.id, server.get_id()))
if __name__ == '__main__':
client_number = 10
object_pool = Object_Pool()
for i in range(client_number):
client = Client(i, object_pool)
client.start()
|
import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from Analytics.app import create_app
import sqlalchemy
sql = 'mysql+pymysql'
psql = 'postgresql+psycopg2'
app = create_app()
db_uri = '%s://%s:%s@%s/' % (psql, app.config['DB_USERNAME'], app.config['DB_PASSWORD'], app.config['DB_HOST'])
engine = sqlalchemy.create_engine(db_uri)
conn = engine.connect()
conn.execute("commit")
# PostgreSQL has no 'SHOW DATABASES'; query the catalog instead
databases = engine.execute("SELECT datname FROM pg_database;")
for d in databases:
if d[0] == app.config['DATABASE_NAME']:
conn.execute("drop database " + app.config['DATABASE_NAME'])
conn.execute("create database " + app.config['DATABASE_NAME'])
conn.close()
|
import tensorflow as tf
from mlp import mlp_layer
"""
An MLP generator
"""
def generator_head(dimZ, dimH, n_layers, name):
fc_layer_sizes = [dimZ] + [dimH for i in range(n_layers)]
layers = []
N_layers = len(fc_layer_sizes) - 1
for i in range(N_layers):
d_in = fc_layer_sizes[i]
d_out = fc_layer_sizes[i + 1]
name_layer = name + '_head_l%d' % i
layers.append(mlp_layer(d_in, d_out, 'relu', name_layer))
print('decoder head MLP of size', fc_layer_sizes)
def apply(x):
for layer in layers:
x = layer(x)
return x
return apply
def generator_shared(dimX, dimH, n_layers, last_activation, name):
# now construct a decoder
fc_layer_sizes = [dimH for i in range(n_layers)] + [dimX]
layers = []
N_layers = len(fc_layer_sizes) - 1
for i in range(N_layers):
d_in = fc_layer_sizes[i]
d_out = fc_layer_sizes[i + 1]
if i < N_layers - 1:
activation = 'relu'
else:
activation = last_activation
name_layer = name + '_shared_l%d' % i
layers.append(mlp_layer(d_in, d_out, activation, name_layer))
print('decoder shared MLP of size', fc_layer_sizes)
def apply(x):
for layer in layers:
x = layer(x)
return x
return apply
def generator(head_net, shared_net):
def apply(x, sampling=True):
x = head_net(x)
x = shared_net(x)
return x
return apply
def construct_gen(gen, dimZ, sampling=True):
def gen_data(N):
# start from sample z_0, generate data
z = tf.random.normal(shape=(N, dimZ))
return gen(z)
return gen_data
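# --- Illustrative usage (not part of the module) ----------------------------
# A minimal sketch of wiring the builders above into a sampler. The dimensions,
# layer counts and 'sigmoid' output activation are arbitrary example values, and
# it assumes mlp_layer returns a callable mapping (N, d_in) tensors to (N, d_out).
if __name__ == '__main__':
    dimZ, dimH, dimX = 32, 128, 784
    head = generator_head(dimZ, dimH, n_layers=2, name='gen')
    shared = generator_shared(dimX, dimH, n_layers=2, last_activation='sigmoid', name='gen')
    gen = generator(head, shared)
    sample_fn = construct_gen(gen, dimZ)
    x = sample_fn(16)  # generate 16 samples of dimension dimX
    print(x.shape)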
|
from unittest import TestCase
from app import create_app
from config import HEADERS
app = create_app("testing")
class BaseTest(TestCase):
"""Base class which is inherited by all system test classes."""
request_headers = HEADERS
@classmethod
def setUpClass(cls):
pass
    def setUp(self):
        """Create the test client and push an application context before each test."""
        self.client = app.test_client()
        self.app_context = app.app_context()
        self.app_context.push()
    def tearDown(self):
        """Pop the application context after each test."""
        self.app_context.pop()
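# --- Illustrative usage (not part of the base class) -------------------------
# A minimal sketch of a system test inheriting from BaseTest. The '/health'
# endpoint and expected status code are hypothetical and only show how a subclass
# uses self.client and the shared request headers.
class ExampleSystemTest(BaseTest):
    def test_health_endpoint(self):
        response = self.client.get("/health", headers=self.request_headers)
        self.assertEqual(response.status_code, 200)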
|
# Copyright 2016 Brocade Communications Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
# pylint: disable=no-member
import re
import socket
import ipaddress
from enum import Enum
import requests.exceptions
import pyswitchlib.asset
import pyswitchlib.exceptions
import pyswitch.device
import pyswitch.utilities
from pyswitch.exceptions import InvalidInterfaceName
from pyswitch.exceptions import InvalidInterfaceType
from pyswitch.exceptions import InvalidVlanId
from pyswitch.AbstractDevice import DeviceCommError
from st2common.runners.base_action import Action
class NosDeviceAction(Action):
def __init__(self, config=None, action_service=None):
super(
NosDeviceAction,
self).__init__(
config=config,
action_service=action_service)
self.result = {'changed': False, 'changes': {}}
self.pmgr = pyswitch.device.Device
self.host = None
self.conn = None
self.auth = None
self.auth_snmp = None
self.rest_proto = None
self.asset = pyswitchlib.asset.Asset
self.RestInterfaceError = pyswitchlib.exceptions.RestInterfaceError
self.ConnectionError = requests.exceptions.ConnectionError
def setup_connection(self, host, user=None, passwd=None):
self.host = host
self.rest_proto = self._get_rest_proto(host=host)
self.conn = (host, '22', self.rest_proto)
# check if device is registered
check_user = self._lookup_st2_store('user')
if not check_user:
            raise ValueError('Device is not registered. '
                             'Register using register_device_credentials action.')
self.auth_snmp = self._get_auth(host=host, user=user, passwd=passwd)
def _lookup_st2_store(self, key, decrypt=False):
"""
API to retrieve from st2 store lookup
"""
lookup_key = self._get_lookup_key(host=self.host, lookup=key)
user_kv = self.action_service.get_value(name=lookup_key, local=False,
decrypt=decrypt)
"""
if not user_kv:
lookup_key = self._get_user_default_lookup_key(lookup=key)
user_kv = self.action_service.get_value(name=lookup_key, local=False,
decrypt=decrypt)
"""
return user_kv
def _get_snmp_credentials(self, host):
"""
API to retrieve snmp credentials from st2 store.
SNMP port, SNMP community and SNMP version are
retrieved here.
"""
snmpconfig = {}
os_type = 'unknown'
lookup_key = self._get_lookup_key(host=self.host, lookup='ostype')
os_kv = self.action_service.get_value(name=lookup_key, local=False,
decrypt=False)
if os_kv:
os_type = os_kv
ver_kv = self._lookup_st2_store('snmpver')
if not ver_kv:
if os_type == 'unknown' or os_type == 'ni':
snmpconfig['version'] = 2
else:
snmpconfig['version'] = 0
elif ver_kv == 'v2':
snmpconfig['version'] = 2
elif ver_kv == 'v3':
snmpconfig['version'] = 3
else:
snmpconfig['version'] = 0
port_kv = self._lookup_st2_store('snmpport')
if not port_kv:
snmpconfig['snmpport'] = 161
else:
snmpconfig['snmpport'] = int(port_kv)
v2c_kv = self._lookup_st2_store('snmpv2c', decrypt=True)
if not v2c_kv:
snmpconfig['snmpv2c'] = 'public'
else:
snmpconfig['snmpv2c'] = v2c_kv
snmpconfig['authpass'] = ''
snmpconfig['privpass'] = ''
if snmpconfig['version'] == 3:
v3_user = self._lookup_st2_store('v3user')
if not v3_user:
snmpconfig['v3user'] = 'user'
else:
snmpconfig['v3user'] = v3_user
v3auth = self._lookup_st2_store('v3auth')
if not v3auth or v3auth == 'noauth':
snmpconfig['v3auth'] = 'noauth'
snmpconfig['authpass'] = ''
else:
snmpconfig['v3auth'] = v3auth
authpass = self._lookup_st2_store('authpass', decrypt=True)
if not authpass:
snmpconfig['authpass'] = ''
else:
snmpconfig['authpass'] = authpass
v3priv = self._lookup_st2_store('v3priv')
if not v3priv or v3priv == 'nopriv':
snmpconfig['v3priv'] = 'nopriv'
snmpconfig['privpass'] = ''
else:
snmpconfig['v3priv'] = v3priv
privpass = self._lookup_st2_store('privpass', decrypt=True)
if not privpass:
snmpconfig['privpass'] = ''
else:
snmpconfig['privpass'] = privpass
else:
snmpconfig['v3user'] = ''
snmpconfig['v3auth'] = 'noauth'
            snmpconfig['authpass'] = ''
snmpconfig['v3priv'] = 'nopriv'
snmpconfig['privpass'] = ''
return snmpconfig
def _get_auth(self, host, user, passwd):
"""
Method to retrieve username, password,
enable password and snmp credentials.
"""
if not user:
user = self._lookup_st2_store('user')
if not user:
user = 'admin'
if not passwd:
passwd = self._lookup_st2_store('passwd', decrypt=True)
if not passwd:
passwd = 'password'
enablepass = self._lookup_st2_store('enablepass', decrypt=True)
if not enablepass:
enablepass = None
snmpconfig = self._get_snmp_credentials(host=host)
return (user, passwd, enablepass, snmpconfig)
def _get_rest_proto(self, host):
"""
Method to retrieve rest protocol from st2 persistent store.
"""
rest_proto = self._lookup_st2_store('restproto')
return rest_proto
def _get_lookup_key(self, host, lookup):
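        # e.g. host '10.0.0.1' and lookup 'user' (hypothetical values) -> 'switch.10.0.0.1.user'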
return 'switch.%s.%s' % (host, lookup)
def _get_user_default_lookup_key(self, lookup):
return 'switch.USER.DEFAULT.%s' % (lookup)
def get_device(self):
try:
# pylint: disable=unexpected-keyword-arg
device = self.asset(ip_addr=self.host, auth_snmp=self.auth_snmp,
rest_proto=self.rest_proto)
self.logger.info('successfully connected to %s',
self.host)
return device
except AttributeError as e:
self.logger.error("Failed to connect to %s due to %s",
self.host, e.message)
raise self.ConnectionError(
'Failed to connect to %s due to %s', self.host, e.message)
except ValueError as verr:
self.logger.error("Error while logging in to %s due to %s",
self.host, verr.message)
raise self.ConnectionError("Error while logging in to %s due to %s",
self.host, verr.message)
except IndexError as ierr:
self.logger.error("Error while logging in to %s due to wrong Username/Password",
self.host)
raise self.ConnectionError("Error while logging in to %s due to %s",
self.host, ierr.message)
except self.ConnectionError as cerr:
self.logger.error("Connection failed while logging in to %s due to %s",
self.host, cerr.message)
raise self.ConnectionError("Connection failed while logging in to %s due to %s",
self.host, cerr.message)
except self.RestInterfaceError as rierr:
self.logger.error("Failed to get a REST response while logging in "
"to %s due to %s", self.host, rierr.message)
raise self.ConnectionError("Failed to get a REST response while logging in "
"to %s due to %s", self.host, rierr.message)
def check_int_description(self, intf_description):
"""
Check for valid interface description
"""
err_code = len(intf_description)
if err_code < 1:
            self.logger.info('Please specify a valid description')
return False
elif err_code <= 63:
return True
else:
self.logger.info(
'Length of the description is more than the allowed size')
return False
def expand_vlan_range(self, vlan_id, device):
"""Fail the task if vlan id is zero or one or above 4096 .
"""
re_pattern1 = r"^(\d+)$"
re_pattern2 = r"^(\d+)\-?(\d+)$"
re_pattern3 = r"^(\d+)\,?(\d+)$"
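        # Accepted forms: a single id ("100"), a dash range ("100-110"), or a
        # comma-separated pair ("100,200"); anything else is rejected below.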
if re.search(re_pattern1, vlan_id):
try:
vlan_id = (int(vlan_id),)
except ValueError:
self.logger.info("Could not convert data to an integer.")
return None
elif re.search(re_pattern2, vlan_id):
try:
vlan_id = re.match(re_pattern2, vlan_id)
except ValueError:
self.logger.info("Not in valid range format.")
return None
if int(vlan_id.groups()[0]) == int(vlan_id.groups()[1]):
self.logger.warning("Use range command only for diff vlans")
vlan_id = range(int(vlan_id.groups()[0]), int(
vlan_id.groups()[1]) + 1)
elif re.search(re_pattern3, vlan_id):
vlan_id = vlan_id.split(",")
vlan_id = map(int, vlan_id)
else:
self.logger.info("Invalid vlan format")
return None
for vid in vlan_id:
if device.os_type == 'NI':
if vid > 4090:
self.logger.error("VLAN %s is out of range", vid)
return None
if vid > 4096:
extended = "true"
else:
extended = "false"
tmp_vlan_id = pyswitch.utilities.valid_vlan_id(vid, extended=extended)
if not tmp_vlan_id:
                self.logger.error("Not a valid vlan %s", vid)
return None
# this reserved vlan is only for NOS and not for SLX/NI devices
if device.os_type == 'nos':
reserved_vlan_list = range(4087, 4096)
if vid in reserved_vlan_list:
self.logger.info(
"User provided vlans contains reserved vlans %s", vid)
return vlan_id
def expand_interface_range(self, intf_type, intf_name, rbridge_id):
msg = None
int_list = intf_name
re_pattern1 = r"^(\d+)$"
re_pattern2 = r"^(\d+)\-?(\d+)$"
re_pattern3 = r"^(\d+)\/(\d+)\/(\d+)$|^\d+/\d+$"
re_pattern4 = r"^(\d+)\/(\d+)\/(\d+)\-?(\d+)$|^(\d+)/(\d+)-(\d+)$"
re_pattern5 = r"^(\d+)\/(\d+)\/(\d+)(:(\d+))?$"
re_pattern6 = r"^(\d+)\/(\d+)(:(\d+))?$"
re_pattern7 = r"^(\d+)\/(\d+)\/(\d+)(:(\d+))-(\d+)$"
re_pattern8 = r"^(\d+)\/(\d+)(:(\d+))-(\d+)$"
intTypes = ["port_channel", "gigabitethernet", "tengigabitethernet", "fortygigabitethernet",
"hundredgigabitethernet", "ve"]
if re.search(re_pattern1, int_list):
int_list = ((int_list),)
elif re.search(re_pattern2, int_list):
try:
int_list = re.match(re_pattern2, int_list)
except Exception:
return None
if int(int_list.groups()[0]) == int(int_list.groups()[1]):
self.logger.info("Use range command only for unique values")
int_list = range(int(int_list.groups()[0]), int(
int_list.groups()[1]) + 1)
elif re.search(re_pattern3, int_list):
int_list = ((int_list),)
elif re.search(re_pattern4, int_list):
try:
temp_list = re.match(re_pattern4, int_list)
except Exception:
return None
int_list = []
try:
if int(temp_list.groups()[0]) == int(temp_list.groups()[1]):
self.logger.info("Use range command only for unique values")
intList = range(int(temp_list.groups()[2]), int(
temp_list.groups()[3]) + 1)
for intf in intList:
int_list.append(temp_list.groups()[0] + '/' + # noqa: W504
temp_list.groups()[1] + '/' + str(intf))
except Exception:
intList = range(int(temp_list.groups()[5]), int(
temp_list.groups()[6]) + 1)
for intf in intList:
int_list.append(temp_list.groups()[4] + '/' + str(intf))
int_list = int_list
elif re.search(re_pattern5, int_list):
int_list = ((int_list),)
elif re.search(re_pattern6, int_list):
int_list = ((int_list),)
elif re.search(re_pattern7, int_list):
try:
temp_list = re.match(re_pattern7, int_list)
except Exception:
return None
int_list = []
try:
if int(temp_list.groups()[4]) == int(temp_list.groups()[5]):
self.logger.info("Use range command only for unique values")
intList = range(int(temp_list.groups()[4]), int(
temp_list.groups()[5]) + 1)
for intf in intList:
int_list.append(temp_list.groups()[0] + '/' + # noqa: W504
temp_list.groups()[1] + '/' + # noqa: W504
temp_list.groups()[2] + ':' + str(intf))
except Exception:
msg = "Invalid interface format"
elif re.search(re_pattern8, int_list):
try:
temp_list = re.match(re_pattern8, int_list)
except Exception:
return None
int_list = []
try:
if int(temp_list.groups()[3]) == int(temp_list.groups()[4]):
self.logger.info("Use range command only for unique values")
intList = range(int(temp_list.groups()[3]), int(
temp_list.groups()[4]) + 1)
for intf in intList:
int_list.append(temp_list.groups()[0] + '/' + # noqa: W504
temp_list.groups()[1] + ':' + str(intf))
except Exception:
msg = "Invalid interface format"
else:
msg = 'Invalid interface format'
if msg is not None:
self.logger.info(msg)
return None
for intf in int_list:
intTypes = ["ve", "loopback"]
if intf_type not in intTypes:
tmp_vlan_id = pyswitch.utilities.valid_interface(
intf_type, name=str(intf))
if not tmp_vlan_id:
self.logger.info(
"Not a valid interface type %s or name %s", intf_type, intf)
return None
return int_list
def extend_interface_range(self, intf_type, intf_name):
msg = None
int_list = intf_name
re_pattern1 = r"^(\d+)\-?(\d+)$"
re_pattern2 = r"^(\d+)\/(\d+)\-?(\d+)$"
re_pattern3 = r"^(\d+)\/(\d+)\/(\d+)\-?(\d+)$"
if re.search(re_pattern1, int_list):
try:
int_list = re.match(re_pattern1, int_list)
except Exception:
return None
if int(int_list.groups()[0]) == int(int_list.groups()[1]):
self.logger.info("Use range command only for unique values")
int_list = range(int(int_list.groups()[0]), int(
int_list.groups()[1]) + 1)
elif re.search(re_pattern2, int_list):
try:
temp_list = re.match(re_pattern2, int_list)
except Exception:
return None
if int(temp_list.groups()[1]) == int(temp_list.groups()[2]):
self.logger.info("Use range command only for unique values")
intList = range(int(temp_list.groups()[1]), int(
temp_list.groups()[2]) + 1)
int_list = []
for intf in intList:
int_list.append(temp_list.groups()[0] + '/' + str(intf))
int_list = int_list
elif re.search(re_pattern3, int_list):
try:
temp_list = re.match(re_pattern3, int_list)
except Exception:
return None
if int(temp_list.groups()[2]) == int(temp_list.groups()[3]):
self.logger.info("Use range command only for unique values")
intList = range(int(temp_list.groups()[2]), int(
temp_list.groups()[3]) + 1)
int_list = []
for intf in intList:
int_list.append(temp_list.groups()[0] + '/' + temp_list.groups()[1] + # noqa: W504
'/' + str(intf))
int_list = int_list
else:
msg = 'Invalid interface format'
if msg is not None:
self.logger.error(msg)
return None
return int_list
@staticmethod
def is_valid_mac(mac):
"""
This will only validate the HHHH.HHHH.HHHH MAC format. Will need to be expanded to
validate other formats of MAC.
:param mac:
:return:
"""
if re.match('[0-9A-Fa-f]{4}[.][0-9A-Fa-f]{4}[.][0-9A-Fa-f]{4}$', mac):
return True
else:
return False
@staticmethod
def is_valid_ip(ip):
try:
ipaddress.ip_address(ip.decode('utf-8'))
return True
except ValueError:
return False
except AttributeError:
return False
@staticmethod
def mac_converter(old_mac):
"""
This method converts MAC from xxxx.xxxx.xxxx to xx:xx:xx:xx:xx:xx. This
helps provide consistency across persisting MACs in the DB.
Args:
old_mac: MAC in a format xxxx.xxxx.xxxx
Returns:
dict: updated MAC in the xx:xx:xx:xx:xx:xx format
"""
new_mac = old_mac.replace('.', '')
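        # e.g. '0024.3859.1ab0' -> '002438591ab0' -> '00:24:38:59:1a:b0' (example value, not from the source)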
newer_mac = ':'.join([new_mac[i:i + 2]
for i in range(0, len(new_mac), 2)])
return newer_mac
def get_rbridge_id(self, intf_name):
"""
This method fetches rbridge_id from single interface name.This
helps user not to pass the rbridge_id as input.
Args:
intf_name: Name of the interface
Returns:
rbridge_id: rbridge id of the interface
"""
re_pattern1 = r"^(\d+)\/(\d+)\/(\d+)$"
if not intf_name:
self.logger.info('Input for `intf_name` is empty')
return False
elif re.search(re_pattern1, intf_name):
try:
intf_name = re.match(re_pattern1, intf_name)
except Exception:
return False
rbridge_id = int(intf_name.groups()[0])
if rbridge_id < 1 or rbridge_id > 239:
self.logger.info('Invalid Rbridge_id %s', rbridge_id)
return False
else:
self.logger.info('Invalid Interface Name %s', intf_name)
return False
return rbridge_id
def _validate_ip_network(self, addr):
try:
ipaddress.ip_network(addr)
return True
        except ValueError:
return False
def _validate_ip_(self, addr):
try:
socket.inet_aton(addr)
return True
except socket.error:
return False
def _validate_ipv6_(self, addr):
try:
socket.inet_pton(socket.AF_INET6, addr)
return True
except socket.error:
return False
def validate_interface(self, intf_type, intf_name, rbridge_id=None, os_type=None):
msg = None
# int_list = intf_name
re_pattern1 = r"^(\d+)$"
re_pattern2 = r"^(\d+)\/(\d+)\/(\d+)(:(\d+))?$"
re_pattern3 = r"^(\d+)\/(\d+)(:(\d+))?$"
intTypes = ["port_channel", "gigabitethernet", "tengigabitethernet",
"fortygigabitethernet", "hundredgigabitethernet", "ethernet"]
NosIntTypes = [
"gigabitethernet",
"tengigabitethernet",
"fortygigabitethernet",
"hundredgigabitethernet"
]
if os_type is None or os_type == "nos":
if rbridge_id is None and 'loopback' in intf_type:
msg = 'Must specify `rbridge_id` when specifying a `loopback`'
elif rbridge_id is None and 've' in intf_type:
msg = 'Must specify `rbridge_id` when specifying a `ve`'
elif rbridge_id is not None and intf_type in intTypes:
msg = 'Should not specify `rbridge_id` when specifying a ' + intf_type
elif re.search(re_pattern1, intf_name):
intf = intf_name
elif re.search(re_pattern2, intf_name) and intf_type in NosIntTypes:
intf = intf_name
elif re.search(re_pattern3, intf_name) and 'ethernet' in intf_type:
intf = intf_name
else:
msg = 'Invalid interface format'
elif os_type == "slxos" or os_type == "NI":
if re.search(re_pattern1, intf_name):
intf = intf_name
elif re.search(re_pattern2, intf_name) and intf_type in NosIntTypes:
intf = intf_name
elif re.search(re_pattern3, intf_name) and 'ethernet' in intf_type:
intf = intf_name
else:
msg = 'Invalid interface format'
if msg is not None:
self.logger.error(msg)
return False
intTypes = ["ve", "loopback", "ethernet"]
if intf_type not in intTypes:
tmp_vlan_id = pyswitch.utilities.valid_interface(
intf_type, name=str(intf))
if not tmp_vlan_id:
self.logger.error(
"Not a valid interface type %s or name %s", intf_type, intf)
return False
return True
def _get_acl_type_(self, device, acl_name):
acl_type = {}
try:
get = device.ip_access_list_standard_get(acl_name)
acl_type['type'] = str(get[1][0][self.host]['response']['json']['output'].keys()[0])
acl_type['protocol'] = 'ip'
return acl_type
except Exception:
pass
try:
get = device.ip_access_list_extended_get(acl_name)
acl_type['type'] = str(get[1][0][self.host]['response']['json']['output'].keys()[0])
acl_type['protocol'] = 'ip'
return acl_type
except Exception:
pass
try:
get = device.mac_access_list_standard_get(acl_name)
acl_type['type'] = str(get[1][0][self.host]['response']['json']['output'].keys()[0])
acl_type['protocol'] = 'mac'
return acl_type
except Exception:
pass
try:
get = device.mac_access_list_extended_get(acl_name)
acl_type['type'] = str(get[1][0][self.host]['response']['json']['output'].keys()[0])
acl_type['protocol'] = 'mac'
return acl_type
except Exception:
pass
try:
get = device.ipv6_access_list_standard_get(acl_name)
acl_type['type'] = str(get[1][0][self.host]['response']['json']['output'].keys()[0])
acl_type['protocol'] = 'ipv6'
return acl_type
except Exception:
pass
try:
get = device.ipv6_access_list_extended_get(acl_name)
acl_type['type'] = str(get[1][0][self.host]['response']['json']['output'].keys()[0])
acl_type['protocol'] = 'ipv6'
return acl_type
except Exception:
self.logger.error('Cannot get acl-type for %s', acl_name)
return None
def _get_seq_id_(self, device, acl_name, acl_type, ip_type=None):
if ip_type is None:
get = device.ip_access_list_extended_get if acl_type == 'extended' else \
device.ip_access_list_standard_get
elif ip_type == 'ipv6':
get = device.ipv6_access_list_extended_get if acl_type == 'extended' else \
device.ipv6_access_list_standard_get
elif ip_type == 'mac':
get = device.mac_access_list_extended_get if acl_type == 'extended' else \
device.mac_access_list_standard_get
try:
get_output = get(acl_name)[1][0][
self.host]['response']['json']['output']
if acl_type in get_output:
acl_dict = get_output[acl_type]
else:
self.logger.error(
'%s access list %s does not exist', acl_type, acl_name)
return None
if 'seq' in acl_dict:
seq_list = acl_dict['seq']
if isinstance(seq_list, list):
last_seq_id = int(seq_list[len(seq_list) - 1]['seq-id'])
else:
last_seq_id = int(seq_list['seq-id'])
if last_seq_id % 10 == 0: # divisible by 10
seq_id = last_seq_id + 10
else:
# rounding up to the nearest 10
seq_id = (last_seq_id + 9) // 10 * 10
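                    # e.g. last_seq_id 23 -> seq_id 30, 29 -> 30; a multiple of 10 is handled above (30 -> 40)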
else:
seq_id = 10
return seq_id
except KeyError:
return None
def _get_seq_(self, device, acl_name, acl_type, seq_id, address_type=None):
if address_type == 'ipv6':
get = device.ipv6_access_list_extended_get if acl_type == 'extended' else \
device.ipv6_access_list_standard_get
elif address_type == 'mac':
get = device.mac_access_list_extended_get if acl_type == 'extended' else \
device.mac_access_list_standard_get
else:
get = device.ip_access_list_extended_get if acl_type == 'extended' else \
device.ip_access_list_standard_get
try:
get_output = get(acl_name, resource_depth=3)
acl_dict = get_output[1][0][self.host][
'response']['json']['output'][acl_type]
if isinstance(acl_dict, list):
acl_dict = acl_dict[0]
if 'seq' in acl_dict:
seq_list = acl_dict['seq']
seq_list = seq_list if isinstance(
seq_list, list) else [seq_list, ]
for seq in seq_list:
if seq['seq-id'] == str(seq_id):
return seq
else:
self.logger.error('No seq present in acl %s', acl_name)
return None
except Exception:
self.logger.error('cannot get seq in acl %s', acl_name)
return None
def _get_port_channel_members(self, device, portchannel_num):
members = []
results = []
port_channel_exist = False
keys = ['interface-type', 'rbridge-id', 'interface-name', 'sync']
port_channel_get = self._get_port_channels(device)
if port_channel_get:
for port_channel in port_channel_get:
if port_channel['aggregator-id'] == str(portchannel_num):
port_channel_exist = True
if 'aggr-member' in port_channel:
members = port_channel['aggr-member']
else:
self.logger.info('Port Channel %s does not have any members',
str(portchannel_num))
return results
else:
return None
if not port_channel_exist:
self.logger.info('Port Channel %s is not configured on the device',
str(portchannel_num))
return results
if isinstance(members, dict):
members = [members, ]
for member in members:
result = {}
for key, value in member.iteritems():
if key in keys:
result[key] = value
results.append(result)
return results
def _get_port_channels(self, device):
connected = False
for _ in range(5):
get = device.get_port_channel_detail_rpc()
if get[0]:
output = get[1][0][self.host]['response']['json']['output']
connected = True
break
if not connected:
self.logger.error(
'Cannot get Port Channels')
raise self.ConnectionError(
get[1][0][self.host]['response']['json']['output'])
if 'lacp' in output:
port_channel_get = output['lacp']
else:
self.logger.info(
'Port Channel is not configured on the device')
return None
if isinstance(port_channel_get, dict):
port_channel_get = [port_channel_get, ]
return port_channel_get
def _get_switchport(self, device):
connected = False
for _ in range(5):
get = device.get_interface_switchport_rpc()
if get[0]:
output = get[1][0][self.host]['response']['json']['output']
connected = True
break
if not connected:
self.logger.error(
'Cannot get switchport')
raise self.ConnectionError(
get[1][0][self.host]['response']['json']['output'])
if 'switchport' in output:
switchport_get = output['switchport']
else:
self.logger.info(
'Switchport is not configured on the device')
return None
if isinstance(switchport_get, dict):
switchport_get = [switchport_get, ]
return switchport_get
def _interface_update(self, device, intf_type, intf_name,
ifindex=None, description=None, shutdown=None, mtu=None):
if intf_type == 'ethernet':
update = device.interface_ethernet_update
elif intf_type == 'gigabitethernet':
update = device.interface_gigabitethernet_update
elif intf_type == 'tengigabitethernet':
update = device.interface_tengigabitethernet_update
elif intf_type == 'fortygigabitethernet':
update = device.interface_fortygigabitethernet_update
elif intf_type == 'hundredgigabitethernet':
update = device.interface_hundredgigabitethernet_update
elif intf_type == 'port-channel':
update = device.interface_port_channel_update
else:
self.logger.error('intf_type %s is not supported',
intf_type)
return False
try:
result = update(intf_name, ifindex=ifindex,
description=description, shutdown=shutdown,
mtu=mtu)
if result[0]:
self.logger.info('Updating %s %s interface is done',
intf_type, intf_name)
return True
else:
self.logger.error('Updating %s %s interface failed because %s',
intf_type, intf_name,
result[1][0][self.host]['response']['json']['output'])
return False
except (TypeError, AttributeError, ValueError) as e:
self.logger.error('Interface update failed because %s', e.message)
return False
def _get_interface_admin_state(self, device, intf_type, intf_name):
last_rcvd_interface = None
while True:
admin_state = None
connected = False
for _ in range(5):
get = device.get_interface_detail_rpc(
last_rcvd_interface=last_rcvd_interface)
if get[0]:
output = get[1][0][self.host]['response']['json']['output']
connected = True
break
if not connected:
self.logger.error(
'Cannot get interface details')
raise self.ConnectionError()
if 'interface' in output:
intf_dict = output['interface']
if isinstance(intf_dict, dict):
intf_dict = [intf_dict, ]
for out in intf_dict:
if intf_name in out[
'if-name'] and intf_type == out['interface-type']:
admin_state = out['line-protocol-state']
return admin_state
last_rcvd_interface = (
out['interface-type'], out['interface-name'])
if output['has-more']:
continue
else:
self.logger.info("No interfaces found in host %s", self.host)
return admin_state
def _get_os_type(self, device):
os_name = None
try:
get = device.show_firmware_version_rpc()[1][0][
self.host]['response']['json']['output']['show-firmware-version']['os-name']
if 'Network' in get:
os_name = 'NOS'
elif 'SLX' in get:
os_name = 'SLX-OS'
except (TypeError, KeyError, AttributeError):
self.logger.error("Cannot get OS version")
return os_name
def _get_interface_address(
self, device, intf_type, intf_name, ip_version, rbridge_id=None):
if ip_version == 4:
ip = 'ip'
elif ip_version == 6:
ip = 'ipv6'
else:
return None
method = 'rbridge_id_interface_{}_get'. \
format(intf_type) if rbridge_id \
else 'interface_{}_get'.format(intf_type)
get_intf = getattr(device, method)
get = get_intf(
rbridge_id,
intf_name) if rbridge_id else get_intf(intf_name)
if get[0]:
output = get[1][0][self.host]['response']['json']['output']
else:
return None
if output is not None:
ip_intf = output.itervalues().next()[ip]
while True:
if 'address' not in ip_intf:
try:
ip_intf = ip_intf.pop()
except Exception:
return None
else:
ip_intf = ip_intf['address']
break
if ip == 'ip':
while True:
if 'address' not in ip_intf:
try:
ip_intf = ip_intf.pop()
except Exception:
return None
else:
return ip_intf['address']
elif ip == 'ipv6':
while True:
if 'ipv6-address' not in ip_intf:
try:
ip_intf = ip_intf.pop()
except Exception:
return None
else:
ip_intf = ip_intf['ipv6-address']
break
while True:
if 'address' not in ip_intf:
try:
ip_intf = ip_intf.pop()
except Exception:
return None
else:
return ip_intf['address']
else:
return None
def _get_ip_intf(self, device, intf_type=None):
connected = False
for _ in range(5):
get = device.get_ip_interface_rpc()
if get[0]:
output = get[1][0][self.host]['response']['json']['output']
connected = True
break
if not connected:
self.logger.error(
'Cannot get interface details')
raise self.ConnectionError(
get[1][0][self.host]['response']['json']['output'])
if 'interface' in output:
ip_intf = output['interface']
if isinstance(ip_intf, dict):
ip_intf = [ip_intf, ]
else:
self.logger.info("No interfaces found in host %s", self.host)
return None
if intf_type is None:
return [x['if-name'] for x in ip_intf]
else:
return [x['if-name']
for x in ip_intf if intf_type in x['if-name'].lower()]
def vlag_pair(self, device):
""" Fetch the RB list if VLAG is configured"""
rb_list = []
result = device.vcs.vcs_nodes
for each_rb in result:
if each_rb['node-status'] in ('Co-ordinator', 'Connected to Cluster'):
rb_list.append(each_rb['node-rbridge-id'])
if len(rb_list) >= 3:
raise ValueError('VLAG PAIR must be <= 2 leaf nodes')
return list(set(rb_list))
def extract_port_list(self, device, intf_type, port_list):
interface_list = []
for intf in port_list:
if "-" not in str(intf):
interface_list.append(str(intf))
else:
ex_intflist = self.extend_interface_range(intf_type=intf_type,
intf_name=intf)
for ex_intf in ex_intflist:
interface_list.append(str(ex_intf))
for intf in interface_list:
if not self.validate_interface(intf_type, intf, os_type=device.os_type):
msg = "Input is not a valid Interface"
self.logger.error(msg)
raise ValueError(msg)
return interface_list
def validate_supports_rbridge(self, device, rbridge_id):
if device.suports_rbridge:
if rbridge_id is None:
self.logger.info('Device requires rbridge-id')
raise ValueError('Device requires rbridge-id')
return True
if rbridge_id is not None:
self.logger.info('Device does not support rbridge')
raise ValueError('Device does not support rbridge')
def get_vlan_list(self, vlan_id, device):
""" Expand the vlan_id values into a list """
vlan_list = []
vlanlist = vlan_id.split(',')
for val in vlanlist:
temp = self.expand_vlan_range(vlan_id=val, device=device)
if temp is None:
raise ValueError('Reserved/Control/Invalid vlans passed in args `vlan_id`')
vlan_list.append(temp)
vlan_list = list(itertools.chain.from_iterable(vlan_list))
return vlan_list
def expand_ve_range(self, ve_id, device):
"""Fail the task if vlan id is zero or one or above 4096 .
"""
re_pattern1 = r"^(\d+)$"
re_pattern2 = r"^(\d+)\-?(\d+)$"
re_pattern3 = r"^(\d+)\,?(\d+)$"
vlan_id = ve_id
if re.search(re_pattern1, vlan_id):
try:
vlan_id = (int(vlan_id),)
except ValueError:
self.logger.info("Could not convert data to an integer.")
return None
elif re.search(re_pattern2, vlan_id):
try:
vlan_id = re.match(re_pattern2, vlan_id)
except ValueError:
self.logger.info("Not in valid range format.")
return None
if int(vlan_id.groups()[0]) == int(vlan_id.groups()[1]):
self.logger.warning("Use range command only for diff vlans")
vlan_id = range(int(vlan_id.groups()[0]), int(
vlan_id.groups()[1]) + 1)
elif re.search(re_pattern3, vlan_id):
vlan_id = vlan_id.split(",")
vlan_id = map(int, vlan_id)
else:
self.logger.info("Invalid VE format")
return None
for vid in vlan_id:
if device.os_type == 'slxos' and vid > 4096:
self.logger.error("VE %s is out of range."
" Valid range is 1-4096", vid)
return None
if device.os_type == 'nos' and vid > 8191:
self.logger.error("VE %s is out of range."
" Valid range is 1-4096/8191", vid)
return None
if device.os_type == 'NI' and vid > 255:
self.logger.error("VE %s is out of range."
" Valid range is 1-255", vid)
return None
return vlan_id
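# Illustrative examples of the parsing above (device OS only affects the upper bound):
#   expand_ve_range("10", device)    -> (10,)
#   expand_ve_range("10-12", device) -> the IDs 10, 11, 12
#   expand_ve_range("10,20", device) -> the IDs 10 and 20
# Any ID above the per-OS maximum (4096 for slxos, 8191 for nos, 255 for NI)
# makes the call return None.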
def get_ve_list(self, ve_id, device):
""" Expand the vlan_id values into a list """
ve_list = []
velist = ve_id.split(',')
for val in velist:
temp = self.expand_ve_range(ve_id=val, device=device)
if temp is None:
raise ValueError('Invalid VE IDs passed in args `ve_id`')
ve_list.append(temp)
ve_list = list(itertools.chain.from_iterable(ve_list))
return ve_list
# log_exceptions decorator
def log_exceptions(func):
def wrapper(*args, **kwds):
logger = args[0].logger
host = args[0].host
try:
return func(*args, **kwds)
except AttributeError as e:
logger.error(
'Failed to connect to %s due to %s'
% (host,
e.message))
raise
except ValueError as verr:
logger.error("Error encountered on %s due to %s"
% (host, verr.message))
raise
except requests.exceptions.ConnectionError as cerr:
# pylint: disable=no-member
logger.error("Connection failed while logging in to %s "
"due to %s"
% (host, cerr.message.reason))
raise
except pyswitchlib.asset.RestInterfaceError as rierr:
logger.error(
"Failed to get a REST response on "
"%s due to %s" % (host, rierr.message))
raise
except Exception as ex:
logger.error(
"Error while logging in to %s due to %s"
% (host, ex.message))
raise
return wrapper
def check_status_code(self, operation, device_ip):
status_code = operation[1][0][device_ip]['response']['status_code']
self.logger.debug("Operation returned %s", status_code)
if status_code >= 400:
error_msg = operation[1][0][device_ip]['response']['text']
self.logger.debug(
"REST Operation failed with status code %s",
status_code)
raise ValueError(error_msg)
class ValidateErrorCodes(Enum):
SUCCESS = 0
INVALID_USER_INPUT = 1
DEVICE_CONNECTION_ERROR = 2
DEVICE_VALIDATION_ERROR = 3
# Add new error codes here
UNKNOWN_ERROR = 255
def capture_exceptions(func):
def wrapper(*args, **kwds):
changes = {}
try:
return func(*args, **kwds)
except AttributeError as e:
reason_code = ValidateErrorCodes.INVALID_USER_INPUT
changes['reason_code'] = reason_code.value
changes['reason'] = e.message
return (False, changes)
except ValueError as e:
reason_code = ValidateErrorCodes.DEVICE_VALIDATION_ERROR
changes['reason_code'] = reason_code.value
changes['reason'] = e.message
return (False, changes)
except requests.exceptions.ConnectionError as e:
reason_code = ValidateErrorCodes.DEVICE_CONNECTION_ERROR
changes['reason_code'] = reason_code.value
changes['reason'] = e.message
return (False, changes)
except pyswitchlib.asset.RestInterfaceError as e:
reason_code = ValidateErrorCodes.DEVICE_CONNECTION_ERROR
changes['reason_code'] = reason_code.value
changes['reason'] = e.message
return (False, changes)
except pyswitchlib.asset.InvalidAuthenticationCredentialsError as e:
reason_code = ValidateErrorCodes.DEVICE_CONNECTION_ERROR
changes['reason_code'] = reason_code.value
changes['reason'] = e.message
return (False, changes)
except DeviceCommError as e:
reason_code = ValidateErrorCodes.DEVICE_CONNECTION_ERROR
changes['reason_code'] = reason_code.value
changes['reason'] = e.message
return (False, changes)
except InvalidInterfaceName as e:
reason_code = ValidateErrorCodes.INVALID_USER_INPUT
changes['reason_code'] = reason_code.value
changes['reason'] = e.message
return (False, changes)
except InvalidInterfaceType as e:
reason_code = ValidateErrorCodes.INVALID_USER_INPUT
changes['reason_code'] = reason_code.value
changes['reason'] = e.message
return (False, changes)
except InvalidVlanId as e:
reason_code = ValidateErrorCodes.INVALID_USER_INPUT
changes['reason_code'] = reason_code.value
changes['reason'] = e.message
return (False, changes)
except Exception as e:
reason_code = ValidateErrorCodes.DEVICE_VALIDATION_ERROR
changes['reason_code'] = reason_code.value
changes['reason'] = e.message
return (False, changes)
return wrapper
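# A sketch of what a decorated action returns on failure (the reason string is
# hypothetical): capture_exceptions maps the raised exception to a
# ValidateErrorCodes member and returns a tuple such as
#   (False, {'reason_code': 2, 'reason': 'Connection refused'})
# where 2 is ValidateErrorCodes.DEVICE_CONNECTION_ERROR.value; on success the
# wrapped function's own return value is passed through unchanged.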
|
__version__ = '4.0.0.r1'
|
import os
import xml.etree.ElementTree as ET
import shutil
import ntpath
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import argparse
import csv
import unidecode
import collections
fileDir = os.path.dirname(os.path.abspath(__file__))
parentDir = os.path.dirname(fileDir)
def trim_name(name):
unaccent_name = unidecode.unidecode(name)
for i, ch in enumerate(reversed(unaccent_name)):
if ('a' <= ch <= 'z') or ('A' <= ch <= 'Z'):
if i == 0:
return name
else:
return name[:-1 * i]
def get_allinfo(xml_files, filter=False, move=True):
"""
:param xml_files: input files
:param filter: Select/filter the files that have all needed headers (important_headers.txt in Data directory) [True/False]
:param move: if move is True, copy the selected files (based on the filter option) to a new directory
:return:
dictOfFiles: a dictionary mapping each file to the list of headers it contains
dictOfHeaders: a dictionary mapping each header to the list of files that contain it
header_cooccurrences: a dictionary mapping each header to a dictionary of its co-occurring headers and their co-occurrence counts
dictOfHeaders_childs: a dictionary mapping each header to the list of variants of that header
"""
header_list = os.path.join(parentDir, "data/headers.txt")
dictOfHeaders = dict()
dictOfHeaders_childs = dict()
with open(header_list) as f:
for i in f:
head = i.strip().split("\t")
if len(head) >= 2:
h = head[1].strip()
if dictOfHeaders.get(h) is None:
dictOfHeaders_childs[h] = []
dictOfHeaders[h] = []
content = []
if filter:
content = get_importantheaders()
dictOfFiles = dict()
header_cooccurrences = dict()
count = 0
for file in xml_files:
filename = ntpath.basename(file)
tags = []
try:
root = ET.parse(file).getroot()
pre = ""
name = ""
counter = 1
pre_header = ""
for type_tag in root.findall('Section'):
for type_child in type_tag.findall('name'):
name = type_child.text
x = str(type_tag.get('id')).strip()
# pure_name_eq = name.split("=",2)
# pure_name = pure_name_eq[0].split("-!-",2)
#
# name = pure_name[0]
if (x != "DEFAULT_HEADER") and (pre_header != x):
counter += 1
if pre == "":
pre = x
else:
new = x
if header_cooccurrences.get(pre) is not None:
co_occoure_pre = header_cooccurrences.get(pre)
if co_occoure_pre.get(new) is not None:
co_occoure_pre[new] = co_occoure_pre.get(new) + 1
else:
co_occoure_pre[new] = 1
header_cooccurrences[pre] = co_occoure_pre
else:
header_cooccurrences[pre] = {new: 1}
pre = new
if x not in tags:
tags.append(x)
if dictOfHeaders_childs.get(x) is not None:
listchilds = dictOfHeaders_childs.get(x)
trimedname = trim_name(name)
if trimedname not in listchilds:
listchilds.append(trimedname)
updated = {x: listchilds}
dictOfHeaders_childs.update(updated)
pre_header = x
if filter == True:
acceptable = True
for cont in content:
if cont not in tags:
acceptable = False
break
if acceptable:
for val in tags:
if dictOfHeaders.get(val) is not None:
list_headers = dictOfHeaders.get(val)
list_headers.append(filename)
updated = {val: list_headers}
dictOfHeaders.update(updated)
else:
print("This tag in XML file is not exist in HEADER list: " + val)
dictOfFiles[filename] = tags
if move:
pathdir = ntpath.dirname(file).replace("XML_SECTION", "SELECTED_XML")
os.makedirs(pathdir,exist_ok=True)
shutil.copy(file, pathdir)
else:
dictOfFiles[filename] = tags
for val in tags:
if dictOfHeaders.get(val) is not None:
list_headers = dictOfHeaders.get(val)
list_headers.append(filename)
updated = {val: list_headers}
dictOfHeaders.update(updated)
else:
print("This tag in XML file is not exist in HEADER list: " + val)
except Exception:
print("ERROR", filename, sys.exc_info())
return dictOfFiles, dictOfHeaders, header_cooccurrences, dictOfHeaders_childs
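# A sketch of the structures returned above (file and header names are hypothetical):
#   dictOfFiles            -> {"report1.xml": ["SECCION_A", "SECCION_B"]}
#   dictOfHeaders          -> {"SECCION_A": ["report1.xml", "report2.xml"]}
#   header_cooccurrences   -> {"SECCION_A": {"SECCION_B": 3}}
#   dictOfHeaders_childs   -> {"SECCION_A": ["Seccion A", "SECCION A:"]}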
def showbasicinfo(x, y):
"""
:param x: a list of header names
:param y: the number of files containing each header (in order of x)
:return:
PLOT the data
"""
plt_dir = os.path.join(parentDir, "analysis_headers/PLOT/")
os.makedirs(plt_dir, exist_ok=True)
plot_file = os.path.join(plt_dir, "Fiq" + ".png")
d = {"Headers": x, "Filesnumber": y}
data = pd.DataFrame(d)
data.set_index('Headers', inplace=True)
colors = plt.get_cmap()(
np.linspace(0.15, 0.85, len(y)))
ax1 = data.sort_values(by='Filesnumber').plot(kind='barh', figsize=(30, 20), color='#86bf91', fontsize=8,
legend=False)
ax1.set_alpha(0.4)
ax1.set_xlabel("Number of Files", labelpad=20, fontsize=12)
ax1.set_ylabel("Name of Headers", labelpad=20, fontsize=12)
totals = []
for i in ax1.patches:
totals.append(i.get_width())
total = sum(totals)
for i in ax1.patches:
ax1.text(i.get_width() + .3, i.get_y() + .10,
" " + str(i.get_width()), fontsize=10,
color='dimgrey')
plt.margins(0.1)
plt.subplots_adjust(left=0.25)
plt.savefig(str(plot_file), bbox_inches='tight')
plt.show()
def print_csv(dict_of_files, x, y, yy, header_cooccurrences, dict_of_headers_childs):
"""
:param dict_of_files: a dictionary mapping each file to the list of headers it contains
:param x: a list of header names
:param y: the number of files containing each header (in order of x)
:param yy: the comma-joined names of the files containing each header (in order of x)
:param header_cooccurrences: a dictionary mapping each header to a dictionary of its co-occurring headers and their co-occurrence counts
:param dict_of_headers_childs: a dictionary mapping each header to the list of variants of that header
:return:
Save analysis on csv files
"""
csv_dir = os.path.join(parentDir, "analysis_headers/CSV/")
os.makedirs(csv_dir, exist_ok=True)
csv_files = os.path.join(csv_dir, "analysis_files.csv")
csv_headers = os.path.join(csv_dir, "analysis_headers.csv")
csv_headers_number = os.path.join(csv_dir, "analysis_headers-number.csv")
csv_header_cooccurrences = os.path.join(csv_dir, "analysis_header_co-occurrences.csv")
csv_header_children = os.path.join(csv_dir, "analysis_original_headers_in_report.csv")
csv_top_10_cooccurrences = os.path.join(csv_dir, "top_10_header_co-occurrences.csv")
d = {"Headers": x, "Filesnumber": y}
data = pd.DataFrame(d)
data_sorted = data.sort_values(by=["Filesnumber"], ascending=False)
data_sorted.to_csv(csv_headers_number, index=False, sep='\t')
with open(csv_headers, mode='w+') as csv_headers_f:
for key, value in zip(x, yy):
csv_headers_f.write(key + "\t" + value)
csv_headers_f.write('\n')
with open(csv_header_cooccurrences, 'w+') as f:
csv_writer = csv.writer(f, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(["\t"] + x)
temp = {}
for r in x:
values = {}
if header_cooccurrences.get(r) is not None:
values = header_cooccurrences.get(r)
output = []
output.append(r)
for c in x:
if values.get(c) is not None:
sum_r_c = values.get(c)
if header_cooccurrences.get(c) is not None and c != r:
if header_cooccurrences.get(c).get(r) is not None:
sum_r_c += header_cooccurrences.get(c).get(r)
output.append(sum_r_c)
temp_rc = r.replace("SECCION_", "") + "\t" + c.replace("SECCION_", "")
temp_cr = c.replace("SECCION_", "") + "\t" + r.replace("SECCION_", "")
if temp_rc not in temp.keys() and temp_cr not in temp.keys():
temp[temp_rc] = sum_r_c
else:
sum_r_c = 0
if header_cooccurrences.get(c) is not None and c != r:
if header_cooccurrences.get(c).get(r) is not None:
sum_r_c += header_cooccurrences.get(c).get(r)
output.append(sum_r_c)
csv_writer.writerow(output)
sorted_x = sorted(temp.items(), key=lambda kv: kv[1], reverse=True)
sorted_dict = collections.OrderedDict(sorted_x)
row = 0
w = open(csv_top_10_cooccurrences,"w")
csv_writer = csv.writer(w, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
for key, value in sorted_dict.items():
if row == 10:
break
csv_writer.writerow(key.split("\t") + [str(value)])
row += 1
w.close()
with open(csv_files, mode='w') as csv_f:
csv_writer = csv.writer(csv_f, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(["\t"] + x)
for keys, values in dict_of_files.items():
output = []
output.append(keys)
for val in x:
if val in values:
output.append(1)
else:
output.append(0)
csv_writer.writerow(output)
with open(csv_header_children, mode='w') as csv_f:
csv_writer = csv.writer(csv_f, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for keys, values in dict_of_headers_childs.items():
output = []
output.append(keys.replace("SECCION_",""))
output.append(len(values))
for val in values:
output.append(val)
csv_writer.writerow(output)
def get_importantheaders():
important_list = os.path.join(parentDir, "data/important_headers.txt")
with open(important_list) as f:
content = f.readlines()
content = [x.strip() for x in content]
return content
def analysis(**kwargs):
"""
:param kwargs: the arguments given by the user
:return:
save the analysis data on csv file (see the README) and plot the number of headers
"""
filter = kwargs['filter']
strict = kwargs['strict']
xml_files = kwargs['xml_files']
dictOfFiles, dictOfHeaders, header_cooccurrences, dictOfHeaders_childs = get_allinfo(xml_files, filter)
if len(dictOfHeaders)== 0:
if filter:
print("No file has the requested headers in important_headers.txt file")
else:
print("No file has been found")
importantHeaders = get_importantheaders()
x = []
y = []
yy = []
for key, value in dictOfHeaders.items():
if len(value) > 0:
if strict:
if key in importantHeaders:
x.append(key)
y.append(len(value))
yy.append(",".join(value))
# print("Header: " + key + "\tFiles: " + "\t".join(value))
else:
x.append(key)
y.append(len(value))
yy.append(",".join(value))
# else:
# print("The files do not have any section about: " + key)
print_csv(dictOfFiles, x, y, yy, header_cooccurrences, dictOfHeaders_childs)
if len(x) > 0:
showbasicinfo(x, y)
# else:
# print("No files have been found")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="analysis")
parser.add_argument('-f', '--filter',
help="filter files that have all headers at important_headers.txt (in data directory), and move the filterred file into 'SELECTED_XML' directory ",
action="store_true")
parser.add_argument('-s', '--strict',
help="Analysis headers at important_headers.txt ",
action="store_true")
args = parser.parse_args()
list_files = []
list_file_names = []
main_root = os.path.join(parentDir, "documents", "XML_SECTION")
for xml_files in os.listdir(main_root):
if xml_files.endswith(".xml"):
list_files.append(os.path.join(main_root, xml_files))
analysis(filter=args.filter, strict=args.strict, xml_files=list_files)
|
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import numpy as np
import common.utils as utils
class MyDataset(Dataset):
def __init__(self, shuffle_seed=0, is_train=True):
idx_0 = 0
idx_100 = 100
if not is_train:
idx_0 += 1
idx_100 += 1
print(f"loading exp_1/obs_data_{idx_0}.npy...")
x_0 = np.load(f"exp_1/obs_data_{idx_0}.npy")
# x_0 = np.zeros(shape=[10,len_obs], dtype=np.float32)
y_0 = np.zeros(shape=(len(x_0)))
print(f"loading exp_1/obs_data_{idx_100}.npy...")
x_1 = np.load(f"exp_1/obs_data_{idx_100}.npy")
# x_1 = np.ones(shape=[10,len_obs], dtype=np.float32)
x = np.concatenate((x_0, x_1))
y = np.zeros(shape=(len(x)), dtype=np.int64)
y[len(x_0):] = 1
idx = np.arange(start=0, stop=len(x))
np.random.seed(shuffle_seed)
p = np.random.permutation(len(x))
self.x, self.y, self.idx = x[p], y[p], idx[p]
self.y = self.y.astype(np.int64)
if args.exclude_z:
self.x = self.x[:,1:]
def __len__(self):
return self.x.shape[0]
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample = (self.x[idx], self.y[idx], self.idx[idx])
return sample
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
len_obs = 28
if args.exclude_z:
len_obs -= 1
self.fc1 = nn.Linear(len_obs, 2)
# self.fc2 = nn.Linear(2, 2)
def forward(self, x):
x = self.fc1(x)
# x = F.relu(x)
# x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target, _) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print(f'Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} ({100. * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}')
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target, idx in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
wrong_items, _ = np.where(pred.eq(target.view_as(pred)).numpy()==False)
idx = idx.numpy()
print("wrong predictions usually happen at the beginning of an episode.")
print(sorted(idx[wrong_items]))
test_loss /= len(test_loader.dataset)
print(f'\nTest set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{ len(test_loader.dataset)} ({100. * correct / len(test_loader.dataset):.0f}%)\n')
if __name__ == "__main__":
args = utils.args
torch.manual_seed(args.seed)
train_kwargs = {'batch_size': args.batch_size}
# test_kwargs = {'batch_size': args.test_batch_size}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
ds_train = MyDataset(shuffle_seed=0, is_train=True)
ds_test = MyDataset(shuffle_seed=0, is_train=False)
train_loader = DataLoader(ds_train, shuffle=True, **train_kwargs)
test_loader = DataLoader(ds_test, shuffle=False, batch_size=len(ds_test))
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(model, device, test_loader)
scheduler.step()
for p in model.parameters():
data = p.detach().cpu().numpy()
print("="*10)
print("[data]")
print(data)
print("[most important dimension]")
print(np.argsort(np.sum(np.abs(data),axis=0))[::-1])
print("[shape]")
print(data.shape)
|
# Copyright (c) 2020. Robin Thibaut, Ghent University
import os
import subprocess
import time
import uuid
import warnings
from os.path import join as jp
from loguru import logger
from pysgems.utils.sgutils import joinlist
class Sgems:
def __init__(
self,
project_name: str = "sgems_test",
project_wd: str = "",
res_dir: str = "",
script_dir: str = "",
exe_name: str = "",
nodata: int = -9966699, # sgems default value, do not change this
check_env: bool = True,
**kwargs,
):
logger.add(jp(project_wd, f"{project_name}.log"), rotation="100 MB")
logger.info(f"Project {project_name} initiated")
if check_env:
# First check if sgems installation files are in the user environment variables
gstl_home = os.environ.get("GSTLAPPLIHOME")
if not gstl_home:
msg = "GSTLAPPLIHOME environment variable does not exist"
warnings.warn(msg)
logger.warning(msg)
else:
msg = "GSTLAPPLIHOME environment variable found"
logger.info(msg)
path = os.getenv("Path")
if gstl_home not in path:
msg = f"Variable {gstl_home} does not exist in Path environment variable"
warnings.warn(msg)
logger.warning(msg)
if not exe_name: # If no sgems exe file name is provided,
# checks for sgems exe file in the GSTLAPPLIHOME path
for file in os.listdir(gstl_home):
if (
file.endswith(".exe")
and ("sgems" in file)
and ("uninstall" not in file)
):
exe_name = file
msg = f"sgems exe file : {exe_name} in {gstl_home}"
logger.info(msg)
# Project name
self.project_name = project_name
# Working directory
self.project_wd = project_wd
if not self.project_wd:
self.project_wd = os.getcwd()
# Results directory
self.res_dir = res_dir
# result directory generated according to project and algorithm name
if not self.res_dir:
# Generate result directory if none is given
self.res_dir = jp(
self.project_wd,
"results",
"_".join([self.project_name, uuid.uuid1().hex]),
)
if not os.path.exists(self.res_dir):
os.makedirs(self.res_dir)
# Exe name
self.exe_name = exe_name
self.dis = None # Discretization instance
self.point_set = None # Point set manager instance
self.algo = None # XML manipulation instance
self.nodata = nodata
self.object_file_names = [] # List of features name needed for the algorithm
self.command_name = ""
if not script_dir:
dir_path = os.path.abspath(__file__ + "/../../")
# Python template file path
self.template_file = jp(dir_path, "script_templates", "script_template.py")
def write_command(self):
"""
Write python script that sgems will run.
"""
self.command_name = jp(self.res_dir, f"{self.project_name}_commands.py")
# This empty str will replace the '#' in front of the commands meant to
# execute sgems within its python environment
run_algo_flag = ""
try:
name = self.algo.root.find("algorithm").attrib["name"] # Algorithm name
try:
# When performing simulations, sgems automatically adds '__realn'
# to the name of the nth generated property.
nr = int(self.algo.root.find("Nb_Realizations").attrib["value"])
name_op = "::".join([name + "__real" + str(i) for i in range(nr)])
except AttributeError:
name_op = name
with open(self.algo.op_file) as alx: # Remove unwanted \n
algo_xml = alx.read().strip("\n")
except (AttributeError, FileNotFoundError):
name = "None"
name_op = name
algo_xml = "None"
run_algo_flag = "#" # If no algorithm loaded, then just loads the data
sgrid = [
self.dis.ncol,
self.dis.nrow,
self.dis.nlay,
self.dis.dx,
self.dis.dy,
self.dis.dz,
self.dis.xo,
self.dis.yo,
self.dis.zo,
] # Grid information
grid = joinlist("::", sgrid) # Grid in sgems format
sgems_files = [f"{sf}.sgems" for sf in self.object_file_names]
# The list below is the list of flags that will be replaced in the sgems python script
# TODO: add option to change output file name (now default 'results.grid')
params = [
[run_algo_flag, "#~"],
# for sgems convention...
[self.res_dir.replace("\\", "//"), "RES_DIR"],
[grid, "GRID"],
[self.project_name, "PROJECT_NAME"],
["results", "FEATURE_OUTPUT"], # results.grid = output file
[name, "ALGORITHM_NAME"],
[name_op, "OUTPUT_LIST"],
[algo_xml, "ALGORITHM_XML"],
[str(sgems_files), "OBJECT_FILES"],
]
with open(self.template_file) as sst:
template = sst.read()
for i in range(len(params)): # Replaces the parameters
template = template.replace(params[i][1], params[i][0])
with open(self.command_name, "w") as sstw: # Write sgems python file
sstw.write(template)
def script_file(self):
"""Create script file"""
run_script = jp(self.res_dir, "sgems.script")
rscpt = open(run_script, "w")
rscpt.write(" ".join(["RunScript", self.command_name]))
rscpt.close()
def bat_file(self):
"""Create bat file"""
if not os.path.isfile(jp(self.res_dir, "sgems.script")):
self.script_file()
batch = jp(self.res_dir, "RunSgems.bat")
bat = open(batch, "w")
bat.write(" ".join(["cd", self.res_dir, "\n"]))
bat.write(" ".join([self.exe_name, "sgems.script"]))
bat.close()
def run(self):
"""Call bat file, run sgems"""
batch = jp(self.res_dir, "RunSgems.bat")
if not os.path.isfile(batch):
self.bat_file()
start = time.time()
try:
os.remove(self.algo.op_file)
except FileNotFoundError:
pass
subprocess.call([batch]) # Opens the BAT file
logger.info(f"ran algorithm in {time.time() - start} s")
|
# https://www.hackerrank.com/challenges/computing-the-correlation
import pytest
def pearson_correlation(x,y):
n = len(x)
sxy = 0
sx = 0
sy = 0
sx2 = 0
sy2 = 0
for xi,yi in zip(x,y):
sxy += xi*yi
sx += xi
sy += yi
sx2 += pow(xi,2)
sy2 += pow(yi,2)
top = (n*sxy) - (sx*sy)
m1 = (n*sx2) - pow(sx,2)
m2 = (n*sy2) - pow(sy,2)
bot = pow(m1*m2,0.5)
r = 1.0*top/bot
return r
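# The loop above accumulates the sums needed for the standard computational
# formula of the Pearson coefficient:
#   r = (n*sum(x*y) - sum(x)*sum(y)) /
#       sqrt((n*sum(x^2) - sum(x)^2) * (n*sum(y^2) - sum(y)^2))
# e.g. x = [1, 2, 3], y = [2, 4, 6] gives r = 1.0 (perfect positive correlation).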
def test_pc_1():
math = [73,48,95,95,33,47,98,91,95,93,70,85,33,47,95,84,43,95,54,72]
physics = [72,67,92,95,59,58,95,94,84,83,70,79,67,73,87,86,63,92,80,76]
chemistry = [76,76,95,96,79,74,97,97,90,90,78,91,76,90,95,95,75,100,87,90]
rmp = pearson_correlation(math,physics)
rpc = pearson_correlation(physics,chemistry)
rcm = pearson_correlation(chemistry,math)
assert round(rmp,2) == 0.89
assert round(rpc,2) == 0.92
assert round(rcm,2) == 0.81
if __name__ == "__main__":
N = int(input())
math = []
physics = []
chemistry = []
for i in range(N):
(m, p, c) = input().split()
math.append(int(m))
physics.append(int(p))
chemistry.append(int(c))
r1 = pearson_correlation(math,physics)
r2 = pearson_correlation(physics,chemistry)
r3 = pearson_correlation(chemistry,math)
print "{0:.2f}\n{1:.2f}\n{2:.2f}".format(round(r1,2),round(r2,2),round(r3,2))
|
import os
from pandas.testing import assert_frame_equal
import pandas as pd
import numpy as np
from yaetos.pandas_utils import load_csvs, load_df, save_pandas_local
# TODO: check to remove .reset_index(drop=True), using assert_frame_equal(d1, d2, check_index_type=False) instead
def test_load_csvs():
# Test multiple file option
path = 'tests/fixtures/data_sample/wiki_example/input/'
actual = load_csvs(path, read_kwargs={}).sort_values('uuid').reset_index(drop=True)
expected = pd.DataFrame([
{'uuid': 'u1', 'timestamp': 2.0, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u2', 'timestamp': 2.0, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u3', 'timestamp': 2.0, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
{'uuid': 'u4', 'timestamp': 2.0, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u5', 'timestamp': 2.0, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u6', 'timestamp': 2.0, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
]).reset_index(drop=True)
assert_frame_equal(actual, expected)
# Test single file option
path = 'tests/fixtures/data_sample/wiki_example/input/part1.csv'
actual = load_csvs(path, read_kwargs={}).reset_index(drop=True)
expected = pd.DataFrame([
{'uuid': 'u1', 'timestamp': 2.0, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u2', 'timestamp': 2.0, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u3', 'timestamp': 2.0, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
]).reset_index(drop=True)
assert_frame_equal(actual, expected)
def test_load_df():
# Test multiple file option
path = 'tests/fixtures/data_sample/wiki_example/input/'
actual = load_df(path, file_type='csv', read_func='read_csv', read_kwargs={}).sort_values('uuid').reset_index(drop=True)
expected = pd.DataFrame([
{'uuid': 'u1', 'timestamp': 2.0, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u2', 'timestamp': 2.0, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u3', 'timestamp': 2.0, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
{'uuid': 'u4', 'timestamp': 2.0, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u5', 'timestamp': 2.0, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u6', 'timestamp': 2.0, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
]).reset_index(drop=True)
assert_frame_equal(actual, expected)
# Test single file option, csv
path = 'tests/fixtures/data_sample/wiki_example/input/part1.csv'
actual = load_df(path, file_type='csv', read_func='read_csv', read_kwargs={}).reset_index(drop=True)
expected = pd.DataFrame([
{'uuid': 'u1', 'timestamp': 2.0, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u2', 'timestamp': 2.0, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u3', 'timestamp': 2.0, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
]).reset_index(drop=True)
assert_frame_equal(actual, expected)
# Test single file option, parquet
path = 'tests/fixtures/data_sample/wiki_example/input_parquet/part1.parquet'
actual = load_df(path, file_type='parquet', read_func='read_parquet', read_kwargs={}).reset_index(drop=True)
expected = pd.DataFrame([
{'uuid': 'u1', 'timestamp': 2.0, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u2', 'timestamp': 2.0, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u3', 'timestamp': 2.0, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
]).reset_index(drop=True)
assert_frame_equal(actual, expected)
# Test single file option, excel
path = 'tests/fixtures/data_sample/wiki_example/input_excel/parts.xlsx'
actual = load_df(path, file_type='xlsx', read_func='read_excel', read_kwargs={'engine': 'openpyxl', 'sheet_name': 0, 'header': 1}).reset_index(drop=True)
expected = pd.DataFrame([
{'uuid': 'u1', 'timestamp': 2, 'session_id': 's1', 'group': 'g1', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p1', 'n_results': 5.0, 'result_position': np.nan},
{'uuid': 'u2', 'timestamp': 2, 'session_id': 's2', 'group': 'g2', 'action': 'searchResultPage', 'checkin': np.nan, 'page_id': 'p2', 'n_results': 9.0, 'result_position': np.nan},
{'uuid': 'u3', 'timestamp': 2, 'session_id': 's3', 'group': 'g3', 'action': 'checkin', 'checkin': 30, 'page_id': 'p3', 'n_results': np.nan, 'result_position': np.nan},
]).reset_index(drop=True)
assert_frame_equal(actual, expected)
# TODO Add test 'test_load_multiple_csvs())', for completeness. functionality already tested above..
def test_save_pandas_local():
# Test save to csv
path = 'tests/tmp/output.csv'
df_in = pd.DataFrame([
{'uuid': 'u1', 'timestamp': 2.0, 'session_id': 's1'},
{'uuid': 'u2', 'timestamp': 2.0, 'session_id': 's2'},
{'uuid': 'u3', 'timestamp': 2.0, 'session_id': 's3'},
])
save_pandas_local(df_in, path, save_method='to_csv', save_kwargs={})
assert os.path.exists(path)
# Test save to parquet
path = 'tests/tmp/output.parquet'
save_pandas_local(df_in, path, save_method='to_parquet', save_kwargs={})
assert os.path.exists(path)
|
import psi4
import XC_Inversion
import matplotlib.pyplot as plt
import libcubeprop
import numpy as np
if __name__ == "__main__":
psi4.set_num_threads(3)
psi4.set_memory('4 GB')
functional = 'svwn'
basis = "cc-pvdz"
vxc_basis = None
ortho_basis = False
svd = 1e-5
opt_method="trust-krylov"
method = "WuYangMN"
v0 = "FermiAmaldi"
title = method +"_"+ opt_method +"_"+v0+ "_" + basis+"_"+ \
str(vxc_basis) + "_"\
+ str(ortho_basis) + "_" + str(svd)
print(title)
psi4.set_output_file("Be.psi4")
Full_Molec = psi4.geometry("""
nocom
noreorient
Be
units bohr
symmetry c1
""")
Full_Molec.set_name("Be")
# Exact
Be = np.genfromtxt('/home/yuming/PDFT/pdft/pdft/data/Atom0/be.new8/Data')
Be_xyz = np.concatenate((-np.flip(Be[:, 1]), Be[:, 1]))
Be_vxc = np.concatenate((np.flip(Be[:, 3]), Be[:, 3]))
#Psi4 Options:
psi4.set_options({
'DFT_SPHERICAL_POINTS': 302,
'DFT_RADIAL_POINTS': 77,
'MAXITER': 1000,
"opdm": True,
"tpdm": True,
'REFERENCE': 'RHF'
})
# Get wfn for target density
_, input_density_wfn = psi4.properties("CISD"+"/"+basis, molecule=Full_Molec,
return_wfn=True, properties=["dipole"])
print("Target Density Calculation Finished.")
#Psi4 Options:
# psi4.set_options({
# 'REFERENCE' : 'UHF'
# })
mol = XC_Inversion.Molecule(Full_Molec, basis, functional)
mol.scf(100)
if vxc_basis is not None:
vxc_basis = XC_Inversion.Molecule(Full_Molec, vxc_basis, functional)
vxc_basis.scf(100)
else:
vxc_basis = mol
print("Number of Basis: ", mol.nbf, vxc_basis.nbf)
inverser = XC_Inversion.Inverser(mol, input_density_wfn,
ortho_basis=ortho_basis,
vxc_basis=vxc_basis,
v0=v0,
# eHOMO=-0.5792,
# v0_wfn=v0_wfn
)
# if method == "WuYangScipy":
# inverser.find_vxc_scipy_WuYang(opt_method=opt_method, find_vxc_grid=False)
# elif method == "WuYangMN":
# hess, jac = inverser.find_vxc_manualNewton(svd_rcond=svd, line_search_method="StrongWolfe")
# elif method == "COScipy":
# inverser.find_vxc_scipy_constrainedoptimization(opt_method="L-BFGS-B")
#
# f,ax = plt.subplots(1,1,dpi=200)
# ax.plot(Be_xyz, Be_vxc, label="Exact")
# XC_Inversion.pdft.plot1d_x(inverser.vxc_a_grid, vxc_basis.Vpot, ax=ax, label="WuYang", ls='--')
# ax.legend()
# ax.set_xlim(1e-3, 14)
# ax.set_xscale("log")
# f.show()
# inverser.find_vxc_manualNewton(svd_rcond=1e-3, line_search_method="StrongWolfe", find_vxc_grid=False)
# L = [3, 0, 0]
# D = [0.1, 0.5, 0.2]
# O = [-2.1, 0, 0]
# N = [100, 1, 1]
# inverser.v_output_a = inverser.v_output[:vxc_basis.nbf]
# vout_cube_a, xyzw = libcubeprop.basis_to_cubic_grid(inverser.v_output_a, inverser.vp_basis.wfn, L, D, O, N)
# vout_cube_a.shape = 100
# xyzw[0].shape = 100
# xyzw[1].shape = 100
# xyzw[2].shape = 100
# xyzw[3].shape = 100
# mark_y = np.isclose(xyzw[1], 0)
# mark_z = np.isclose(xyzw[2], 0)
# grid = np.array([xyzw[0][mark_y&mark_z], xyzw[1][mark_y&mark_z], xyzw[2][mark_y&mark_z]])
# grid = grid.T
# inverser.get_esp4v0(grid=grid)
# inverser.get_vH_vext(grid)
# nocc = mol.ndocc
# if v0 == "FermiAmaldi":
# inverser.vxc_a_grid = vout_cube_a[mark_z&mark_y] -1 / nocc * inverser.vH4v0
# elif v0 == "Hartree":
# inverser.vxc_a_grid = vout_cube_a[mark_z&mark_y]
# grid = grid.T
#
# f,ax = plt.subplots(1,1,dpi=200)
# ax.plot(Be_xyz, Be_vxc, label="Exact")
# XC_Inversion.pdft.plot1d_x(inverser.vxc_a_grid, xyz=grid, ax=ax, label="TSVD", ls="--")
# ax.set_xlim(-2.1, 8.1)
#
# inverser.find_vxc_manualNewton(svd_rcond="GL", line_search_method="StrongWolfe", find_vxc_grid=False)
#
# inverser.v_output_a = inverser.v_output[:vxc_basis.nbf]
# vout_cube_a, _ = libcubeprop.basis_to_cubic_grid(inverser.v_output_a, inverser.vp_basis.wfn, L, D, O, N)
# vout_cube_a.shape = 100
# nocc = mol.ndocc
# if v0 == "FermiAmaldi":
# inverser.vxc_a_grid = vout_cube_a[mark_z&mark_y] -1 / nocc * inverser.vH4v0
# elif v0 == "Hartree":
# inverser.vxc_a_grid = vout_cube_a[mark_z&mark_y]
# XC_Inversion.pdft.plot1d_x(inverser.vxc_a_grid, xyz=grid, ax=ax, label="TSVD+GL", ls="--")
# ax.set_xlim(-2.1, 8.1)
# ax.legend()
# f.show()
|
from planetmint.backend.tarantool.transaction import tools
|
"""
@author: magician
@date: 2019/11/25
@file: method_demo.py
"""
import math
class MyClass:
"""
MyClass
"""
def method(self):
return 'instance method called', self
@classmethod
def classmethod(cls):
return 'class method called', cls
@staticmethod
def staticmethod():
return 'static method called'
class Pizza:
"""
Pizza
"""
def __init__(self, ingredients, radius=4):
self.ingredients = ingredients
self.radius = radius
def __repr__(self):
return 'Pizza({0}, {1})'.format(self.ingredients, self.radius)
def area(self):
return self.circle_area(self.radius)
@staticmethod
def circle_area(r):
return r ** 2 * math.pi
@classmethod
def margherita(cls):
return cls(['mozzarella', 'tomatoes'])
@classmethod
def prosciutto(cls):
return cls(['mozzarella', 'tomatoes', 'ham'])
if __name__ == '__main__':
# Instance Method
pass
# Class Method
pass
# Static Method
pass
# Let’s See Them in Action
obj = MyClass()
print(obj.method())
print(MyClass.method(obj))
print(obj.classmethod())
print(obj.staticmethod())
print(MyClass.classmethod())
print(MyClass.staticmethod())
try:
print(MyClass.method())
except Exception as e:
print(e)
# Delicious Pizza Factories With @classmethod
print(Pizza(['cheese', 'tomatoes']))
print(Pizza(['mozzarella', 'tomatoes']))
print(Pizza(['mozzarella', 'tomatoes', 'ham', 'mushrooms']))
print(Pizza(['mozzarella'] * 4))
print(Pizza.margherita())
print(Pizza.prosciutto())
# When To Use Static Method
p = Pizza(['mozzarella', 'tomatoes'], 4)
print(p)
print(p.area())
print(Pizza.circle_area(4))
|
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import json
import os
from glob import glob
from os.path import join, relpath, splitext, dirname, basename
import numpy as np
from pylab import *
from skimage.color.colorconv import rgb2gray, gray2rgb
from skimage.transform._geometric import AffineTransform, warp
from skimage.util.dtype import img_as_float
from pyfacades.labelme.annotation import Annotation
from pyfacades.models.independant_12_layers.import_labelme import SingleLayerLabels, LABEL_POSITIVE
from pyfacades.rectify import Homography
LABELME_ROOT = "../from_labelme"
if 'LABELME_ROOT' in os.environ:
LABELME_ROOT = os.environ['LABELME_ROOT']
today = datetime.datetime.now()
ts = today.strftime("%Y-%m-%d")
# Command Line Arguments
p = argparse.ArgumentParser()
p.add_argument("--labelme",
help="the root location of LabelMe data",
default=LABELME_ROOT)
# the blacklist may be the 'annotationCache/Dirlist/xxxx.txt' files from past runs.
p.add_argument("--since",
help="the earliest date of an annotation to process. Format is YYYY-MM-DD",
default='2001-01-01')
p.add_argument("--output", "-o",
help="output folder",
default="./output-{}".format(ts))
p.add_argument("--collection", "-c",
help="The name of the collection (dirlist used by LabelMe)",
default="facades-{}.txt".format(ts))
p.add_argument("--ofolder",
help="the output filder (all facades are copied into a single output folder)",
default="facades-{}".format(ts))
p.add_argument("--pad", type=int,
help="The amount to padd each side of the facade by when rectifying. "
"Padding will include some features of the input that may extend past "
"the main facade",
default=100)
p.add_argument("--use-quads",
help="Use the quads to rectify, instead of using Lama's automatic approach",
action='store_true')
p.add_argument("--start-at", type=int, default=0)
p.add_argument("--list",
help="A list of XML files to process, if they exist under the LabelMe"
" annotations folder")
args = p.parse_args()
images_root = join(args.labelme, "Images")
try:
os.makedirs(args.output)
except OSError:
pass
if args.list is None:
xml_files = glob(join(args.labelme, 'Annotations', '*/*.xml'))
else:
print("Reading files from a list")
with open(args.list) as f:
xml_files = [join(args.labelme, 'Annotations', filename.strip()) for filename in f]
results = []
since = datetime.datetime.strptime(args.since, "%Y-%m-%d")
total_new_facades = 0
collection = []
for i, xml in enumerate(xml_files):
if i < args.start_at:
print (i + 1, ":", xml, "[SKIPPED]")
continue
if not os.path.isfile(xml):
print(i+1, ":", "Missing file", xml)
continue
stem = splitext(relpath(xml, join(args.labelme, 'Annotations')))[0]
folder = os.path.dirname(stem)
stem = basename(stem)
img = join(args.labelme, 'Images', folder, stem + '.jpg')
xml = join(args.labelme, 'Annotations', folder, stem + '.xml')
print(i + 1, ":", stem)
a = Annotation(xml, images_root=images_root)
assert isinstance(a, Annotation)
a.remove_deleted()
all_facades = [o for o in a.objects if o.name.lower() == 'facade']
facades = [o for o in all_facades if o.date > since]
print(" ", len(facades), "of", len(all_facades), "since ", since)
total_new_facades += len(facades)
if len(facades) == 0:
# Nothing else to do!
continue
a.update_image_size()
data = a.get_image()
data_array = np.asarray(data)
use_quad = args.use_quads
if not use_quad:
masker = SingleLayerLabels(a, nrows=a.imagesize.nrows)
assert isinstance(masker, SingleLayerLabels)
masker.mark_positives([o.polygon for o in a if o.name == 'occlusion'])
masker.mark_positives([o.polygon for o in a if o.name == 'tree'])
masker.mark_positives([o.polygon for o in a if o.name == 'sky'])
mask = np.asarray(masker.image) == LABEL_POSITIVE
for j, f in enumerate(all_facades):
if f.date <= since:
continue
in_quad = f.polygon.points
x0 = int(in_quad[:, 0].min())
y0 = int(in_quad[:, 1].min())
x1 = int(in_quad[:, 0].max())
y1 = int(in_quad[:, 1].max())
if x0 < 20 or (data.width-x1) < 20:
#Skip facades at the ends
continue
i0 = np.argmin([np.hypot(x - x0, y - y0) for (x, y) in in_quad])
i1 = np.argmin([np.hypot(x - x1, y - y0) for (x, y) in in_quad])
i2 = np.argmin([np.hypot(x - x1, y - y1) for (x, y) in in_quad])
i3 = np.argmin([np.hypot(x - x0, y - y1) for (x, y) in in_quad])
in_quad = in_quad[(i0, i1, i2, i3), :]
pad = args.pad
width = in_quad[:, 0].max() - in_quad[:, 0].min()
height = in_quad[:, 1].max() - in_quad[:, 1].min()
out_quad = array([(0, 0), (width, 0), (width, height), (0, height)]) + pad
# import ipdb; ipdb.set_trace()
metadata = dict(folder=folder, stem=stem)
metadata['polygon'] = f.polygon.points.tolist()
highlight = np.zeros((data.height, data.width), dtype=np.uint8)
f.draw(highlight, fill=255, outline=128)
if use_quad:
P = AffineTransform()
P.estimate(out_quad, in_quad)
output = warp(data, P, output_shape=(height + 2 * pad, width + 2 * pad))
sub_highlight = warp(highlight, P, output_shape=(height + 2 * pad, width + 2 * pad))
projection_matrix = P.params
metadata['use_quad'] = True
metadata['projection'] = projection_matrix.tolist()
metadata['subimage'] = None
else:
# import ipdb; ipdb.set_trace()
data_array = img_as_float(data_array)
ptop = max(0, y0 - pad)
pbottom = min(data.height, y1 + pad)
pright = min(data.width, x1 + pad)
pleft = max(0, x0 - pad)
sub_image = data_array[ptop:pbottom, pleft:pright, :].copy()
sub_mask = mask[ptop:pbottom, pleft:pright]
sub_highlight = highlight[ptop:pbottom, pleft:pright]
H = Homography(sub_image, mask=sub_mask)
output = H.rectified
sub_highlight = warp(sub_highlight,
AffineTransform(H.H),
preserve_range=True)
gs = gray2rgb(rgb2gray(output))
highlighted = output.copy()
highlighted[sub_highlight==0] = 0.5*gs[sub_highlight==0]
highlighted[sub_highlight==128] = (255, 0, 0)
projection_matrix = H.H
metadata['use_quad'] = False
metadata['projection'] = projection_matrix.tolist()
metadata['subimage'] = dict(top=ptop, bottom=pbottom, right=pright, left=pleft)
out_folder = args.ofolder
out_basename = stem + '-facade-{:02}'.format(j + 1)
fname = join(args.output, 'Images', out_folder, out_basename + '.jpg')
try:
os.makedirs(dirname(fname))
except OSError:
pass
imsave(splitext(fname)[0]+'-original.jpg', output)
imsave(splitext(fname)[0]+'-highlighted.jpg', highlighted)
imsave(splitext(fname)[0]+'-mask.jpg', sub_highlight)
with open(splitext(fname)[0] + '.json', 'w') as mdf:
json.dump(metadata, mdf)
collection.append(','.join([out_folder, out_basename + '-highlighted' + '.jpg']) + '\n')
try:
os.makedirs(join(args.output, 'annotationCache', 'DirLists'))
except OSError:
pass
with open(join(args.output, 'annotationCache', 'DirLists', args.collection), 'w') as f:
f.writelines(collection)
|
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
import sys
import argparse
import colorsys
import re
# def _get_colors(num_colors):
# colors=[]
# for i in np.arange(0., 360., 360. / num_colors):
# hue = i/360.
# lightness = (50 + np.random.rand() * 10)/100.
# saturation = (90 + np.random.rand() * 10)/100.
# rgb = colorsys.hls_to_rgb(hue, lightness, saturation)
# rgba = str(tuple([round(x,4) for x in rgb])).strip('()') + ', 0.3'
# colors.append(rgba)
# return colors, rgb
def _get_colors(num_colors):
colors = (
"0.9019 0.0980 0.2941 0.3",
"0.2352 0.7058 0.2941 0.3",
"1.0 0.8823 0.0980 0.3",
"0.2627 0.3882 0.8470 0.3",
"0.9607 0.5098 0.1921 0.3",
"0.5686 0.1176 0.7058 0.3",
"0.2745 0.9411 0.9411 0.3",
"0.9411 0.1960 0.9019 0.3",
"0.7372 0.9647 0.0470 0.3",
"0.9803 0.7450 0.7450 0.3",
"0.0 0.5019 0.5019 0.3",
"0.9019 0.7450 1.0 0.3",
"0.6039 0.3882 0.1411 0.3",
"1.0 0.9803 0.7843 0.3",
"0.5019 0.0 0.0 0.3",
"0.6666 1.0 0.7647 0.3",
"0.5019 0.5019 0.0 0.3",
"1.0 0.8470 0.6941 0.3",
"0.0 0.0 0.4588 0.3",
"0.5019 0.5019 0.5019 0.3",
"1.0 1.0 1.0 0.3",
"0.0 0.0 0.0 0.3",
)
return colors[0:num_colors]
def str2bool(v):
return v.lower() == "true"
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--path",
type=str,
default="../models/assets/objects/in_progress/complete/bench_bjoderna_0208/bench_bjoderna_0208.xml",
)
parser.add_argument("--add_welds", type=str2bool, default=True)
parser.add_argument("--add_colors", type=str2bool, default=True)
config, unparsed = parser.parse_known_args()
tree = ET.parse(config.path) # Path to input file
root = tree.getroot()
equality = root.find("equality")
print(config.path)
# get count of conn_sites, and get map of groups->bodies
bodymap = dict()
connections = set()
# find group pairs
num_colors = 0
for body in root.find("worldbody"):
for child in body.iter():
if child.tag == "site" and re.search("conn_site", child.attrib["name"]):
num_colors += 1
groupPair = child.attrib["name"].split(",")[0]
groupNames = groupPair.split("-")
group1 = groupNames[0]
group2 = groupNames[1]
groupPair2 = group2 + "-" + group1
if group1 not in bodymap.keys():
bodies = set()
bodies.add(body)
bodymap[group1] = bodies
else:
bodymap[group1].add(body)
if groupPair not in connections and groupPair2 not in connections:
connections.add(groupPair)
if config.add_welds == True:
for groupPair in connections:
groupNames = groupPair.split("-")
group1 = groupNames[0]
group2 = groupNames[1]
# n*m welds needed for n bodies in group1 and m bodies in group2
for body1 in bodymap[group1]:
for body2 in bodymap[group2]:
weld = ET.SubElement(equality, "weld")
weld.set("active", "false")
weld.set("body1", body1.attrib["name"])
weld.set("body2", body2.attrib["name"])
weld.set("solimp", "1 1 0.5")
weld.set("solref", "0.01 0.3")
if config.add_colors == True:
num_colors = int(num_colors / 2)
colors = _get_colors(num_colors)
# for color in colors:
# print(color)
i = 0
colormap = dict()
for body in root.find("worldbody"):
for child in body.iter():
if child.tag == "site" and re.search("conn_site", child.attrib["name"]):
groupPair = child.attrib["name"].split(",")[0]
if groupPair not in colormap:
groupNames = groupPair.split("-")
group1 = groupNames[0]
group2 = groupNames[1]
colormap[groupPair] = colors[i]
groupPair2 = group2 + "-" + group1
colormap[groupPair2] = colors[i]
i += 1
# change color of conn_site
child.set("rgba", colormap[groupPair])
tree.write(config.path, encoding="UTF-8")
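# For illustration, each weld written above ends up in the <equality> section
# looking roughly like (body names here are hypothetical):
#   <weld active="false" body1="leg_left" body2="seat" solimp="1 1 0.5" solref="0.01 0.3"/>
# One such element is emitted for every body pair across the two connected groups.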
if __name__ == "__main__":
main()
|
"""
EmailManager - a helper class to login, search for, and delete emails.
"""
import email
import htmlentitydefs
import imaplib
import quopri
import re
import time
import types
from seleniumbase.config import settings
class EmailManager:
""" A helper class to interface with an Email account. These imap methods
can search for and fetch messages without needing a browser.
Example:
em = EmailManager()
result = em.check_for_recipient(
"[GMAIL.USER]+[SOME CODE OR TIMESTAMP KEY]@gmail.com")
"""
HTML = "text/html"
PLAIN = "text/plain"
TIMEOUT = 1800
def __init__(self, uname=settings.EMAIL_USERNAME,
pwd=settings.EMAIL_PASSWORD,
imap_string=settings.EMAIL_IMAP_STRING,
port=settings.EMAIL_IMAP_PORT):
self.uname = uname
self.pwd = pwd
self.imap_string = imap_string
self.port = port
def imap_connect(self):
"""
Connect to the IMAP mailbox.
"""
self.mailbox = imaplib.IMAP4_SSL(self.imap_string, self.port)
self.mailbox.login(self.uname, self.pwd)
self.mailbox.select()
def imap_disconnect(self):
"""
Disconnect from the IMAP mailbox.
"""
self.mailbox.close()
self.mailbox.logout()
def __imap_search(self, ** criteria_dict):
""" Searches for query in the given IMAP criteria and returns
the message numbers that match as a list of strings.
Criteria without values (eg DELETED) should be keyword args
with KEY=True, or else not passed. Criteria with values should
be keyword args of the form KEY="VALUE" where KEY is a valid
IMAP key.
IMAP default is to AND all criteria together. We don't support
other logic quite yet.
All valid keys: ALL, ANSWERED, BCC <string>, BEFORE <string>,
BODY <string>, CC <string>, DELETED, DRAFT, FLAGGED, FROM
<string>, HEADER <field-name> <string> (UNTESTED), KEYWORD
<flag>, LARGER <n>, NEW, NOT <search-key>, OLD, ON <date>,
OR <search-key1> <search-key2> (UNTESTED), RECENT, SEEN,
SENTBEFORE <date>, SENTON <date>, SENTSINCE <date>, SINCE <date>,
SMALLER <n>, SUBJECT <string>, TEXT <string>, TO <string>,
UID <sequence set>, UNANSWERED, UNDELETED, UNDRAFT, UNFLAGGED,
UNKEYWORD <flag>, UNSEEN.
For details on keys and their values, see
http://tools.ietf.org/html/rfc3501#section-6.4.4
:param criteria_dict: dictionary of search criteria keywords
:raises: EmailException if something in IMAP breaks
:returns: List of message numbers as strings matched by given criteria
"""
self.imap_connect()
criteria = []
for key in criteria_dict:
if criteria_dict[key] is True:
criteria.append('(%s)' % key)
else:
criteria.append('(%s "%s")' % (key, criteria_dict[key]))
# If any of these criteria are not valid IMAP keys, IMAP will tell us.
status, msg_nums = self.mailbox.search('UTF-8', * criteria)
self.imap_disconnect()
if 0 == len(msg_nums):
msg_nums = []
if 'OK' in status:
return self.__parse_imap_search_result(msg_nums)
else:
raise EmailException("IMAP status is " + str(status))
def remove_formatting(self, html):
"""
Clean out any whitespace
@Params
html - String of html to remove whitespace from
@Returns
Cleaned string
"""
return ' '.join(html.split())
def __parse_imap_search_result(self, result):
"""
This takes the result of imap_search and returns SANE results
@Params
result - result from an imap_search call
@Returns
List of IMAP search results
"""
if isinstance(result, types.ListType):
# Above is same as "type(result) == types.ListType"
if len(result) == 1:
return self.__parse_imap_search_result(result[0])
else:
return result
elif isinstance(result, types.StringType):
# Above is same as "type(result) == types.StringType"
return result.split()
else:
# Fail silently assuming tests will fail if emails are not found
return []
def fetch_html(self, msg_nums):
"""
Given a message number that we found with imap_search,
get the text/html content.
@Params
msg_nums - message number to get html message for
@Returns
HTML content of message matched by message number
"""
if not msg_nums:
raise Exception("Invalid Message Number!")
return self.__imap_fetch_content_type(msg_nums, self.HTML)
def fetch_plaintext(self, msg_nums):
"""
Given a message number that we found with imap_search,
get the text/plain content.
@Params
msg_nums - message number to get message for
@Returns
Plaintext content of message matched by message number
"""
if not msg_nums:
raise Exception("Invalid Message Number!")
return self.__imap_fetch_content_type(msg_nums, self.PLAIN)
def __imap_fetch_content_type(self, msg_nums, content_type):
"""
Given a message number that we found with imap_search, fetch the
whole source, dump that into an email object, and pick out the part
that matches the content type specified. Return that, if we got
multiple emails, return dict of all the parts.
@Params
msg_nums - message number to search for
content_type - content type of email message to return
@Returns
Specified content type string or dict of all content types of matched
email.
"""
if not msg_nums:
raise Exception("Invalid Message Number!")
if not content_type:
raise Exception("Need a content type!")
contents = {}
self.imap_connect()
for num in msg_nums:
status, data = self.mailbox.fetch(num, "(RFC822)")
for response_part in data:
if isinstance(response_part, tuple):
msg = email.message_from_string(response_part[1])
for part in msg.walk():
if str(part.get_content_type()) == content_type:
content = str(part.get_payload(decode=True))
contents[int(num)] = content
self.imap_disconnect()
return contents
def fetch_html_by_subject(self, email_name):
"""
Get the html of an email, searching by subject.
@Params
email_name - the subject to search for
@Returns
HTML content of the matched email
"""
if not email_name:
raise EmailException("Subject cannot be null")
results = self.__imap_search(SUBJECT=email_name)
sources = self.fetch_html(results)
return sources
def fetch_plaintext_by_subject(self, email_name):
"""
Get the plain text of an email, searching by subject.
@Params
email_name - the subject to search for
@Returns
Plaintext content of the matched email
"""
if not email_name:
raise EmailException("Subject cannot be null")
results = self.__imap_search(SUBJECT=email_name)
sources = self.fetch_plaintext(results)
return sources
def search_for_recipient(self, email, timeout=None, content_type=None):
"""
Get content of emails, sent to a specific email address.
@Params
email - the recipient email address to search for
timeout - seconds to try before timing out
content_type - type of email string to return
@Returns
Content of the matched email in the given content type
"""
return self.search(timeout=timeout,
content_type=content_type, TO=email)
def search_for_subject(self, subject, timeout=None, content_type=None):
"""
Get content of emails, sent to a specific email address.
@Params
email - the recipient email address to search for
timeout - seconds to try beore timing out
content_type - type of email string to return
@Returns
Content of the matched email in the given content type
"""
return self.search(timeout=timeout,
content_type=content_type, SUBJECT=subject)
def search_for_count(self, ** args):
"""
A search that keeps searching up until timeout for a
specific number of matches to a search. If timeout is not
specified we use the default. If count= is not specified we
will fail. Return values are the same as search(), except for count=0,
where we will return an empty list. Use this if you need to wait for a
number of emails other than 1.
@Params
args - dict of arguments to use in search:
count - number of emails to search for
timeout - seconds to try search before timing out
@Returns
List of message numbers matched by search
"""
if "timeout" not in args.keys():
timeout = self.TIMEOUT
elif args["timeout"]:
timeout = args["timeout"]
args["timeout"] = timeout / 15
if "count" not in args.keys():
raise EmailException("Count param not defined!")
else:
count = int(args["count"])
del args["count"]
results = None
timer = timeout
elapsed = 0
while elapsed < timer:
try:
results = self.search(** args)
except EmailException:
if count == 0:
return []
if results and len(results) == count:
return results
else:
time.sleep(15)
elapsed += 15
if elapsed >= timer:
raise EmailException("Failed to match criteria %s in %s minutes" %
(args, timeout / 60))
def __check_msg_for_headers(self, msg, ** email_headers):
"""
Checks an Email.Message object for the headers in email_headers.
Following are acceptable header names: ['Delivered-To',
'Received', 'Return-Path', 'Received-SPF',
'Authentication-Results', 'DKIM-Signature',
'DomainKey-Signature', 'From', 'To', 'Message-ID',
'Subject', 'MIME-Version', 'Content-Type', 'Date',
'X-Sendgrid-EID', 'Sender'].
@Params
msg - the Email.message object to check
email_headers - list of headers to check against
@Returns
Boolean whether all the headers were found
"""
all_headers_found = False
email_headers['Delivered-To'] = email_headers['To']
email_headers.pop('To')
all_headers_found = all(k in msg.keys() for k in email_headers)
return all_headers_found
def fetch_message(self, msgnum):
"""
Given a message number, return the Email.Message object.
@Params
msgnum - message number to find
@Returns
Email.Message object for the given message number
"""
self.imap_connect()
status, data = self.mailbox.fetch(msgnum, "(RFC822)")
self.imap_disconnect()
for response_part in data:
if isinstance(response_part, tuple):
return email.message_from_string(response_part[1])
def get_content_type(self, msg, content_type="HTML"):
"""
Given an Email.Message object, gets the content-type payload
as specified by @content_type. This is the actual body of the
email.
@Params
msg - Email.Message object to get message content for
content_type - Type of content to get from the email
@Return
String content of the email in the given type
"""
if "HTML" in content_type.upper():
content_type = self.HTML
elif "PLAIN" in content_type.upper():
content_type = self.PLAIN
for part in msg.walk():
if str(part.get_content_type()) == content_type:
return str(part.get_payload(decode=True))
def search(self, ** args):
"""
Checks email inbox every 15 seconds that match the criteria
up until timeout.
Search criteria should be keyword args eg
TO="selenium@gmail.com". See __imap_search docstring for list
of valid criteria. If content_type is not defined, will return
a list of msg numbers.
Options:
- fetch: will return a dict of Message objects, keyed on msgnum,
which can be used to look at headers and other parts of the complete
message. (http://docs.python.org/library/email.message.html)
- timeout: will replace the default module timeout with the
value in SECONDS.
- content_type: should be either "PLAIN" or
"HTML". If defined returns the source of the matched messages
as a dict of msgnum:content. If not defined we return a list
of msg nums.
"""
if "content_type" not in args.keys():
content_type = None
elif "HTML" in args["content_type"]:
content_type = self.HTML
del args["content_type"]
elif "PLAIN" in args["content_type"]:
content_type = self.PLAIN
del args["content_type"]
elif args["content_type"]:
content_type = args['content_type']
del args["content_type"]
if "timeout" not in args.keys():
timeout = self.TIMEOUT
elif "timeout" in args:
timeout = args["timeout"]
del args["timeout"]
fetch = False
if "fetch" in args.keys():
fetch = True
del args["fetch"]
results = None
timer = timeout
count = 0
while count < timer:
results = self.__imap_search(** args)
if len(results) > 0:
if fetch:
msgs = {}
for msgnum in results:
msgs[msgnum] = self.fetch_message(msgnum)
return msgs
elif not content_type:
return results
else:
return self.__imap_fetch_content_type(results,
content_type)
else:
time.sleep(15)
count += 15
if count >= timer:
raise EmailException(
"Failed to find message for criteria %s in %s minutes" %
(args, timeout / 60))
def remove_whitespace(self, html):
"""
Clean whitespace from html
@Params
html - html source to remove whitespace from
@Returns
String html without whitespace
"""
# Does python have a better way to do exactly this?
clean_html = html
for char in ("\r", "\n", "\t"):
clean_html = clean_html.replace(char, "")
return clean_html
def remove_control_chars(self, html):
"""
Clean control characters from html
@Params
html - html source to remove control characters from
@Returns
String html without control characters
"""
return self.remove_whitespace(html)
def replace_entities(self, html):
"""
Replace htmlentities with unicode characters
@Params
html - html source to replace entities in
@Returns
String html with entities replaced
"""
def fixup(text):
"""replace the htmlentities in some text"""
text = text.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return chr(int(text[3:-1], 16))
else:
return chr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = chr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub(r"&#?\w+;", fixup, html)
def decode_quoted_printable(self, html):
"""
Decoding from Quoted-printable, or QP encoding, that uses ASCII 7bit
chars to encode 8 bit chars, resulting in =3D to represent '='. Python
supports UTF-8 so we decode. Also removes the soft line breaks (lines
ending with '=').
@Params
html - html source to decode
@Returns
String decoded HTML source
"""
return self.replace_entities(quopri.decodestring(html))
def html_bleach(self, html):
"""
Cleanup and get rid of all extraneous stuff for better comparison
later. Turns formatted HTML into a single-line string.
@Params
html - HTML source to clean up
@Returns
String cleaned up HTML source
"""
return self.decode_quoted_printable(html)
class EmailException(Exception):
"""Raised when we have an Email-related problem."""
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
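if __name__ == "__main__":
    # Minimal offline usage sketch (illustrative only, not part of the
    # original module): exercises the pure text-cleanup helpers without
    # connecting to an IMAP server. Credentials come from the seleniumbase
    # settings defaults and are never used here.
    em = EmailManager()
    sample = "<p>Caf&eacute;   menu &#38; prices</p>\r\n"
    print(em.remove_formatting(sample))  # collapses runs of whitespace
    print(em.replace_entities(sample))   # "&eacute;" -> e-acute, "&#38;" -> "&"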
|
import komand
from .schema import GetSubdomainsInput, GetSubdomainsOutput
# Custom imports below
class GetSubdomains(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="get_subdomains",
description="Get subdomains https://api.passivetotal.org/api/docs/#api-Enrichment-GetV2EnrichmentSubdomains",
input=GetSubdomainsInput(),
output=GetSubdomainsOutput(),
)
def run(self, params={}):
results = self.connection.enrichment.get_subdomains(query=[params["query"]])
subdomains = results.get("subdomains") or []
count = len(subdomains)
return {"count": count, "subdomains": subdomains}
def test(self):
"""Test action"""
results = self.connection.enrichment.get_subdomains(query=["*.passivetotal.org"])
subdomains = results.get("subdomains") or []
count = len(subdomains)
return {"count": count, "subdomains": subdomains}
|
# Copyright 2019 Verily Life Sciences LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds the model input_fn for training and eval."""
from __future__ import absolute_import
from __future__ import division
import functools
import os.path
import random
from classifaedes import metadata
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_list('positive_labels', ['males', 'multiple-males'],
'Comma separated list of positive labels.')
flags.DEFINE_integer('eval_batch_size', 32,
'Batch size for eval (for consistent tuning eval).')
flags.DEFINE_integer('read_q_capacity', 16, 'Read queue capacity.')
flags.DEFINE_integer('read_q_threads', 2, 'Number of data-read threads.')
flags.DEFINE_integer('shuffle_q_capacity', 512, 'Shuffle queue capacity.')
flags.DEFINE_integer('shuffle_q_min_after_deq', 128,
'Minimum number of examples in the shuffle queue.')
flags.DEFINE_integer('shuffle_q_threads', 4,
'Number of queue runner threads for the shuffle queue. '
'These threads perform image pre-processing.')
def prep_image(img, image_shape, is_training=False):
"""Perform image preprocessing for training and serving."""
h, w = image_shape
img = tf.image.convert_image_dtype(img, tf.float32)
if is_training:
img = _distort_image(img)
# Pad after distorting so the distortion effects are not revealed in the padding.
img = tf.image.pad_to_bounding_box(img, 0, 0, h, w)
return img
def build_input_fn(hps, input_md, data_dir, split):
"""Returns input_fn for tf.learn Estimator.
Args:
hps: HParams.
input_md: Input metadata. See metadata.py for details.
data_dir: Path to directory containing train, test data.
split: "train" or "test".
Returns:
Estimator input_fn - callable input graph builder.
"""
assert split in ('train', 'test')
filepath = os.path.join(data_dir, split, 'Examples-?????-of-?????')
decoder = _build_decoder()
image_shape = metadata.shape_from_metadata(input_md)
tf.logging.info('Using image shape: %s', image_shape)
is_training = split == 'train'
def input_fn():
"""Builds input ops for the train / eval graph.
Returns:
A 2-tuple of (inputs, targets).
inputs: Dictionary of input Tensors.
* images: Float Tensor of shape [N, H, W, 1].
targets: Boolean Tensor of shape [N].
"""
data = slim.dataset.Dataset(
data_sources=[filepath],
reader=tf.TFRecordReader,
decoder=decoder,
num_samples=None,
items_to_descriptions={
'image': 'Grayscale image.',
'label': 'String label.',
},
)
dataprovider = slim.dataset_data_provider.DatasetDataProvider(
data,
num_readers=FLAGS.read_q_threads,
common_queue_capacity=FLAGS.read_q_capacity,
common_queue_min=(FLAGS.read_q_capacity // 2),
shuffle=is_training,
)
img, str_label = dataprovider.get(['image', 'label'])
label = tf.reduce_any([tf.equal(s, str_label)
for s in FLAGS.positive_labels], axis=0)
img = prep_image(img, image_shape, is_training=is_training)
img, label = _batch_examples(hps, [img, label], is_training)
tf.logging.error('Image shape: %s', img.get_shape().as_list())
tf.summary.image('positives', tf.boolean_mask(img, label))
tf.summary.image('negatives', tf.boolean_mask(img, tf.logical_not(label)))
return {'images': img}, label
return input_fn
#
# Private functions.
#
def _build_decoder():
"""Builds the TFExampleDecoder for reading train / eval data."""
keys_to_features = {
'image/encoded': tf.FixedLenFeature([], tf.string),
'image/format': tf.FixedLenFeature([], tf.string, default_value='png'),
'label': tf.FixedLenFeature([], tf.string),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(channels=1),
'label': slim.tfexample_decoder.Tensor('label'),
}
return slim.tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
def _batch_examples(hps, tensors, is_training):
"""Enqueue preprocessed input tensors for batching ahead of training path."""
batch_size = FLAGS.eval_batch_size
batch_fn = tf.train.batch
# For eval, use deterministic batching and fixed batch_size.
if is_training:
batch_size = hps.batch_size
batch_fn = functools.partial(
tf.train.shuffle_batch,
min_after_dequeue=FLAGS.shuffle_q_min_after_deq)
return batch_fn(
tensors,
batch_size=batch_size,
capacity=FLAGS.shuffle_q_capacity,
num_threads=FLAGS.shuffle_q_threads,
)
def _distort_image(img):
"""Randomly distort the image."""
with tf.name_scope('image_distortions'):
img = tf.image.random_flip_left_right(img)
img = tf.image.random_flip_up_down(img)
pixel_xforms = [
functools.partial(tf.image.random_brightness, max_delta=0.3),
functools.partial(tf.image.random_contrast, lower=0.5, upper=1.5)
]
random.shuffle(pixel_xforms)
for xform in pixel_xforms:
img = xform(img)
return tf.clip_by_value(img, 0.0, 1.0)
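if __name__ == '__main__':
  # Minimal usage sketch (illustrative only, not part of the original
  # module): feeds a dummy grayscale image through prep_image to show the
  # dtype conversion and zero-padding described above. The shapes and the
  # session-based execution are assumptions for illustration.
  dummy = tf.zeros([32, 32, 1], dtype=tf.uint8)
  padded = prep_image(dummy, image_shape=(64, 64), is_training=False)
  with tf.Session() as sess:
    print(sess.run(padded).shape)  # expected: (64, 64, 1)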
|
# -*- coding: utf-8 -*-
"""Download data from Florida Automated Weather Network (FAWN)."""
import mando
import pandas as pd
try:
from mando.rst_text_formatter import RSTHelpFormatter as HelpFormatter
except ImportError:
from argparse import RawTextHelpFormatter as HelpFormatter
import xarray as xr
from tstoolbox import tsutils
_vars = {
"precip": "precipitation_amount",
"rhmin": "min_relative_humidity",
"rhmax": "max_relative_humidity",
"sph": "specific_humidity",
"srad": "surface_downwelling_shortwave_flux_in_air",
"tmin": "min_air_temperature",
"tmax": "max_air_temperature",
"winds": "wind_speed",
}
@mando.command("metdata", formatter_class=HelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def metdata_cli(
lat,
lon,
variables=None,
start_date=None,
end_date=None,
):
r"""Download daily data from METDATA.
This archive contains daily surface meteorological (METDATA) data for the
Continental United States at 4-km (1/24-deg) resolution. The meteorological
variables are maximum/minimum temperature, precipitation amount and duration,
maximum/minimum relative humidity, downward shortwave solar radiation, wind speed and
direction, and specific humidity. The method utilized here combines desirable
spatial attributes of gridded climate data from PRISM and desirable temporal
attributes of regional-scale reanalysis and daily gauge-based precipitation from
NLDAS-2 to derive a spatially and temporally complete high resolution gridded
dataset of surface meteorological variables for the continental US for 1979-present.
Validation of this data suggests that it can serve as a suitable surrogate for
landscape-scale ecological modeling across vast unmonitored areas of the US.
Whenever you publish research based on data from this archive, please reference this
data by using the phrase "daily gridded meteorological data (METDATA) for the
continental US" and by citing the article (Abatzoglou,2012). Further, appropriately
acknowledge the National Science Foundation (NSF), Idaho EPSCoR and the individual
investigators responsible for the data set.
Citation:
Abatzoglou, J.T., 2013, Development of gridded surface meteorological data for
ecological applications and modeling, International Journal of Climatology, DOI:
10.1002/joc.3413
geospatial_bounds_crs:
EPSG:4326
Conventions:
CF-1.0
geospatial_bounds:
POLYGON((-124.7666666333333 49.400000000000000, -124.7666666333333
25.066666666666666, -67.058333300000015 25.066666666666666, -67.058333300000015
49.400000000000000, -124.7666666333333 49.400000000000000))
geospatial_lat_min:
25.0631
geospatial_lat_max:
49.3960
geospatial_lon_min:
-124.7722
geospatial_lon_max:
-67.0648
geospatial_lon_resolution:
0.041666666666666
geospatial_lat_resolution:
0.041666666666666
geospatial_lat_units:
decimal_degrees north
geospatial_lon_units:
decimal_degrees east
coordinate_system:
EPSG:4326
author:
John Abatzoglou - University of Idaho, jabatzoglou @ uidaho.edu
date:
02 July 2019
note1:
The projection information for this file is: GCS WGS 1984.
note3:
Data in slices after last_permanent_slice (1-based) are considered provisional and subject to change with subsequent updates
note4:
Data in slices after last_provisional_slice (1-based) are considered early and subject to change with subsequent updates
note5:
Days correspond approximately to calendar days ending at midnight, Mountain Standard Time (7 UTC the next calendar day)
Metadata_Conventions:
Unidata Dataset Discovery v1.0
title:
Daily Meteorological data for continental US
keywords:
daily precipitation, daily precipitation duration, daily maximum temperature, daily minimum temperature, daily downward shortwave solar radiation, daily specific humidity, daily maximum relative humidity, daily minimum relative humidity, daily wind speed, daily wind direction, ClimatologyMeteorologyAtmosphere, Gridded Meteorological Data, EPSCoR Data
id:
UofIMETDATA
naming_authority:
cida.usgs.gov
cdm_data_type:
Grid
date_created:
2012-08-16
creator_name:
Dr. John Abatzoglou
creator_url:
http://nimbus.cos.uidaho.eud/METDATA/
creator_email:
jabatzoglou @ uidaho.edu
publisher_name:
Center for Integrated Data Analytics
publisher_url:
https://www.cida.usgs.gov/
publisher_email:
dblodgett @ usgs.gov
institution:
University of Idaho
date_issued:
2012-08-16
project:
METDATA
processing_level:
Gridded Meteorogolical Data
contributors:
Dr. John Abatzoglou
time_coverage_start:
1979-01-01T00:00
time_coverage_resolution:
P1D
license:
Freely available
Parameters
----------
{lat}
{lon}
variables : str
At the command line you can supply a comma separated list of variable
names. Using the Python API it needs to be a Python list of strings.
The current list of available METDATA variables is in the following table;
you can use either the "Short" or "Long" names.
+--------+-------------------------------------------+-------+
| Short | Long | Units |
+========+===========================================+=======+
| precip | precipitation_amount | mm |
+--------+-------------------------------------------+-------+
| rhmax | max_relative_humidity | |
+--------+-------------------------------------------+-------+
| rhmin | min_relative_humidity | |
+--------+-------------------------------------------+-------+
| sph | specific_humidity | kg/kg |
+--------+-------------------------------------------+-------+
| srad | surface_downwelling_shortwave_flux_in_air | W/m2 |
+--------+-------------------------------------------+-------+
| tmin | min_air_temperature | degK |
+--------+-------------------------------------------+-------+
| tmax | max_air_temperature | degK |
+--------+-------------------------------------------+-------+
| winds | wind_speed | m/s |
+--------+-------------------------------------------+-------+
{start_date}
{end_date}
"""
tsutils._printiso(
metdata(
lat,
lon,
variables=variables,
start_date=start_date,
end_date=end_date,
)
)
def opendap(variables, lat, lon, start_date=None, end_date=None):
url = "https://cida.usgs.gov/thredds/dodsC/UofIMETDATA"
if not variables:
variables = _vars.keys()
variables = tsutils.make_list(variables)
nvars = [_vars.get(i, i) for i in variables]
# Get and subset the data.
dataset = (
xr.open_dataset(url, engine="pydap", cache=True, mask_and_scale=True)
.sel(lat=lat, lon=lon, method="nearest")[nvars]
.drop_vars(["lat", "lon"])
.sel(day=slice(start_date, end_date))
)
# Rename the columns to include units of the form "name:unit".
rename = {}
for i in nvars:
if i in ["min_air_temperature", "max_air_temperature"]:
unit_label = "degK"
else:
unit_label = dataset[i].attrs["units"]
rename[i] = "{}:{}".format(i, unit_label)
ndf = dataset.to_dataframe().rename(rename, axis="columns")
ndf.index.name = "Datetime"
return ndf
def metdata(
lat,
lon,
variables=None,
start_date=None,
end_date=None,
):
r"""Download METDATA data from CIDA."""
if variables is None:
variables = _vars.keys()
df = opendap(variables, lat, lon, start_date=start_date, end_date=end_date)
if len(df.dropna(how="all")) == 0:
if start_date is None:
start_date = "beginning of record"
if end_date is None:
end_date = "end of record"
raise ValueError(
tsutils.error_wrapper(
"""
USGS-CIDA returned no METDATA data for lat/lon "{lat}/{lon}", variables "{variables}"
between {start_date} and {end_date}.
""".format(
**locals()
)
)
)
return df
metdata.__doc__ = metdata_cli.__doc__
if __name__ == "__main__":
r = metdata(29.6, -82.3, "precipitation_amount")
print(r)
|
import core
import lang
def loadAll():
return [core.loadAll(), lang.loadAll()]
|
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
helps['feedback'] = """
type: command
short-summary: Send feedback to the Azure CLI Team.
long-summary: >-
This command is interactive. If possible, it launches the default
web browser to open GitHub issue creation page with the body auto-generated and pre-filled.
You will have a chance to edit the issue body before submitting it.
"""
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.schema import Activity
from botbuilder.schema.teams import (
NotificationInfo,
TeamsChannelData,
TeamInfo,
TeamsMeetingInfo,
)
def teams_get_channel_data(activity: Activity) -> TeamsChannelData:
if not activity:
return None
if activity.channel_data:
return TeamsChannelData().deserialize(activity.channel_data)
return None
def teams_get_channel_id(activity: Activity) -> str:
if not activity:
return None
if activity.channel_data:
channel_data = TeamsChannelData().deserialize(activity.channel_data)
return channel_data.channel.id if channel_data.channel else None
return None
def teams_get_team_info(activity: Activity) -> TeamInfo:
if not activity:
return None
if activity.channel_data:
channel_data = TeamsChannelData().deserialize(activity.channel_data)
return channel_data.team
return None
def teams_notify_user(
activity: Activity, alert_in_meeting: bool = None, external_resource_url: str = None
):
if not activity:
return
if not activity.channel_data:
activity.channel_data = {}
channel_data = TeamsChannelData().deserialize(activity.channel_data)
channel_data.notification = NotificationInfo(alert=True)
channel_data.notification.alert_in_meeting = alert_in_meeting
channel_data.notification.external_resource_url = external_resource_url
activity.channel_data = channel_data
def teams_get_meeting_info(activity: Activity) -> TeamsMeetingInfo:
if not activity:
return None
if activity.channel_data:
channel_data = TeamsChannelData().deserialize(activity.channel_data)
return channel_data.meeting
return None
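if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original
    # module; the channel id is made up): builds an Activity whose
    # channel_data carries a Teams channel id and reads it back with the
    # helper above.
    sample_activity = Activity(
        channel_data={"channel": {"id": "19:example@thread.tacv2"}}
    )
    print(teams_get_channel_id(sample_activity))  # -> 19:example@thread.tacv2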
|
# -*- coding: utf-8 -*-
# Created by apple on 2017/2/21.
from datetime import datetime
from ..log import log
class Date:
@staticmethod
def time2datetime(t) -> datetime:
"""
时间戳转datetime
:param t: 1970开始的秒数
:return:
"""
if t:
try:
return datetime.fromtimestamp(float(t))
except ValueError:
log.debug('t is not a number')
else:
log.debug('param t is empty or not provided')
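# Illustrative examples (not part of the original module):
#   Date.time2datetime(0)      -> datetime(1970, 1, 1, ...) in the local timezone
#   Date.time2datetime('abc')  -> logs a debug message and returns None
#   Date.time2datetime(None)   -> logs a debug message and returns None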
|
r'''
Author : PiKaChu_wcg
Date : 2021-10-05 14:27:47
LastEditors : PiKachu_wcg
LastEditTime : 2022-01-05 15:19:22
FilePath : \pikachu_wcgd:\blog\md.py
'''
import os
from datetime import datetime
import argparse
import time
datetime.now().strftime('%Y-%m-%d')
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--name",type=str,required=True,help="文件名")
args=parser.parse_args()
return args
if __name__=="__main__":
args=get_args()
filename = datetime.now().strftime('%Y-%m-%d')+"-"+args.name+".md"
filename = os.path.join('D:\\blog\\_posts', filename)
f = open(filename, "w",encoding='utf-8')
f.write(
f"""---
layout: post
title: "{args.name}"
subtitle:
date: {datetime.now().strftime('%Y-%m-%d')}
categories: [paper]
tags: []
pinned: false
toc: true
author: pikachu-wcg
---
"""
)
f.close()
time.sleep(3)
os.system('C:\\"Program Files"\\Typora\\typora.exe {}'.format(
filename))
|
import numpy as np
from .activation import ActivationFunc
class ReLU(ActivationFunc):
"""Relu activation function
"""
def __init__(self):
super().__init__()
def forward(self, x):
"""forward pass
"""
out = np.maximum(0, x)
self.f_val = out
return out
@property
def grad(self):
"""compute grad
"""
assert self.f_val is not None
return (self.f_val>0).astype(float)
def backward(self, grad):
"""Compute gradient
Relu's gradient is 1 if the input is >0, else gradient is 0.
This means given the upstream gradient grad, we simply threshold it
by checking whether the corresponding forward pass was >0 or not
"""
g = grad*self.grad
self.f_val = None
return g
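if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original
    # module; run via "python -m" because of the relative import above):
    relu = ReLU()
    x = np.array([-1.0, 0.5, 2.0])
    print(relu.forward(x))                 # -> [0.  0.5 2. ]
    print(relu.backward(np.ones_like(x)))  # -> [0. 1. 1.]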
|
# Generated by Django 3.1.13 on 2022-03-24 09:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('predictions_manager', '0005_auto_20220318_1531'),
]
operations = [
migrations.RemoveField(
model_name='prediction',
name='provenance',
),
migrations.AddField(
model_name='provenance',
name='prediction',
field=models.OneToOneField(blank=True, default=None, null=True, on_delete=django.db.models.deletion.PROTECT, to='predictions_manager.prediction'),
),
]
|
#!/usr/bin/env python3
from lxml import etree
from urllib.request import urlopen
#Constants
#NS sets up various XML namespaces and loads them
#into a dictionary for reference later.
NS = dict(md="urn:oasis:names:tc:SAML:2.0:metadata",
ds='http://www.w3.org/2000/09/xmldsig#',
mdui="urn:oasis:names:tc:SAML:metadata:ui",
mdattr="urn:oasis:names:tc:SAML:metadata:attribute",
mdrpi="urn:oasis:names:tc:SAML:metadata:rpi",
shibmd="urn:mace:shibboleth:metadata:1.0",
xrd='http://docs.oasis-open.org/ns/xri/xrd-1.0',
pyff='http://pyff.io/NS',
xml='http://www.w3.org/XML/1998/namespace',
saml="urn:oasis:names:tc:SAML:2.0:assertion",
xs="http://www.w3.org/2001/XMLSchema",
xsi="http://www.w3.org/2001/XMLSchema-instance",
ser="http://eidas.europa.eu/metadata/servicelist",
eidas="http://eidas.europa.eu/saml-extensions",
remd="http://refeds.org/metadata",
icmd="http://id.incommon.org/metadata")
MDOBJ = 'https://mds.edugain.org/edugain-v1.xml'
#MDOBJ = 'http://md.incommon.org/InCommon/InCommon-metadata.xml'
#MDOBJ = "./edugain-v1.xml"
try:
root = etree.parse(urlopen(MDOBJ))
#root = etree.parse(MDOBJ)
print("Retrieved MD from ", MDOBJ)
except:
print("unable to retrieve MD from ", MDOBJ)
exit()
for i in root.findall(".//{%s}EntityDescriptor" % NS['md']):
entityID = i.get('entityID')
for r in i.findall(".//{%s}RegistrationInfo" % NS['mdrpi']):
RegBy = r.get('registrationAuthority')
OrgID = i.find(".//{%s}OrganizationName" % NS['md']).text
for z in i.findall(".//{%s}ContactPerson" % NS['md']):
technical = []
administrative = []
support = []
if z is not None:
contactType = z.get("contactType")
if contactType == "technical":
try:
address = z.find(".//{%s}EmailAddress" % NS['md']).text
address = address.replace('mailto:', '')
technical.append(address)
except:
pass
elif contactType == "administrative":
try:
address = z.find(".//{%s}EmailAddress" % NS['md']).text
address = address.replace('mailto:', '')
administrative.append(address)
except:
pass
elif contactType == "support":
try:
address = z.find(".//{%s}EmailAddress" % NS['md']).text
address = address.replace('mailto:', '')
support.append(address)
except:
pass
elif contactType == "other":
pass
if len(technical) > 0:
print(entityID, RegBy, OrgID.replace(',', ''), "technical", ','.join(technical), sep=',')
break
elif len(administrative) > 0:
print(entityID, RegBy, OrgID.replace(',', ''), "administrative", ','.join(administrative), sep=',')
break
elif len(support) > 0:
print(entityID, RegBy, OrgID.replace(',', ''), "support", ','.join(support), sep=',')
break
#print(entityID, len(technical), len(administrative), len(support))
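#Example output line (illustrative; the values are made up):
#https://idp.example.org/idp/shibboleth,http://federation.example.org,Example University,technical,admin@example.org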
|
import json
import logging
import math
import random
import time
from datetime import datetime
from pgoapi.utilities import f2i
from s2sphere import CellId, LatLng
from pokedata import Pokedata, parse_map
logger = logging.getLogger(__name__)
REQ_SLEEP = 1
MAX_NUM_RETRIES = 10
#Constants for Hex Grid
#Gap between vertical and horizontal "rows"
lat_gap_meters = 150
lng_gap_meters = 86.6
#111111m is approx 1 degree Lat, which is close enough for this
meters_per_degree = 111111
lat_gap_degrees = float(lat_gap_meters) / meters_per_degree
def calculate_lng_degrees(lat):
return float(lng_gap_meters) / (meters_per_degree * math.cos(math.radians(lat)))
class Pokesearch:
def __init__(self, api, auth_service, username, password, position):
self.api = api
self.auth_service = auth_service
self.username = username
self.password = password
self.position = position
self.visible_range_meters = 70
def login(self):
logger.info('login start with service: %s', self.auth_service)
self.api.set_position(*self.position)
while not self.api.login(self.auth_service, self.username, self.password):
logger.warn('failed to login to pokemon go, retrying...')
time.sleep(REQ_SLEEP)
self._update_download_settings()
logger.info('login successful')
def search(self, position, num_steps):
if self.api._auth_provider and self.api._auth_provider._ticket_expire:
if isinstance(self.api._auth_provider._ticket_expire, (int, long)):
remaining_time = self.api._auth_provider._ticket_expire / 1000.0 - time.time()
if remaining_time > 60:
logger.info("Skipping Pokemon Go login process since already logged in for another {:.2f} seconds".format(remaining_time))
else:
self.login()
else:
logger.warn("skipping login since _ticket_expire was a token.")
else:
self.login()
all_pokemon = {}
num_retries = 0
for step, coord in enumerate(generate_location_steps(position, num_steps, self.visible_range_meters), 1):
lat = coord[0]
lng = coord[1]
self.api.set_position(*coord)
cell_ids = get_cell_ids(lat, lng)
timestamps = [0,] * len(cell_ids)
response_dict = None
while not response_dict:
try:
self.api.get_map_objects(latitude = f2i(lat), longitude = f2i(lng), since_timestamp_ms = timestamps, cell_id = cell_ids)
response_dict = self.api.call()
except:
logging.warn('exception happened on get_map_objects api call', exc_info=True)
if not response_dict:
if num_retries < MAX_NUM_RETRIES:
num_retries += 1
logger.warn('get_map_objects failed, retrying in %s seconds, %s retries', REQ_SLEEP, num_retries)
time.sleep(REQ_SLEEP)
else:
logger.warn('MAX_NUM_RETRIES exceeded, retrying login...')
self.login()
return
# try:
pokemons = parse_map(response_dict)
# except KeyError as e:
# logger.error('failed to parse map with key error: %s', e)
for key in pokemons.keys():
if not key in all_pokemon:
pokemon = pokemons[key]
all_pokemon[key] = pokemon
yield pokemon
# else:
# logger.info("have duplicate poke: %s", key)
total_steps = (3 * (num_steps**2)) - (3 * num_steps) + 1
logger.info('Completed {:5.2f}% of scan.'.format(float(step) / total_steps * 100))
time.sleep(REQ_SLEEP)
def _update_download_settings(self):
visible_range_meters = 0
while visible_range_meters == 0:
try:
logger.info('fetching download settings...')
self.api.download_settings(hash="05daf51635c82611d1aac95c0b051d3ec088a930")
response_dict = self.api.call()
visible_range_meters = response_dict['responses']['DOWNLOAD_SETTINGS']['settings']['map_settings']['pokemon_visible_range']
self.visible_range_meters = float(visible_range_meters)
except:
logging.warn('exception happened on download_settings api call', exc_info=True)
logger.info('download settings[pokemon_visible_range]: %s', self.visible_range_meters)
def generate_location_steps(position, num_steps, visible_range_meters):
#Bearing (degrees)
NORTH = 0
EAST = 90
SOUTH = 180
WEST = 270
pulse_radius = visible_range_meters / 1000.0 # km - the player's visible scan range converted to km
xdist = math.sqrt(3)*pulse_radius # dist between column centers
ydist = 3*(pulse_radius/2) # dist between row centers
yield (position[0], position[1], 0) #insert initial location
ring = 1
loc = position
while ring < num_steps:
#Set loc to start at top left
loc = get_new_coords(loc, ydist, NORTH)
loc = get_new_coords(loc, xdist/2, WEST)
for direction in range(6):
for i in range(ring):
if direction == 0: # RIGHT
loc = get_new_coords(loc, xdist, EAST)
if direction == 1: # DOWN + RIGHT
loc = get_new_coords(loc, ydist, SOUTH)
loc = get_new_coords(loc, xdist/2, EAST)
if direction == 2: # DOWN + LEFT
loc = get_new_coords(loc, ydist, SOUTH)
loc = get_new_coords(loc, xdist/2, WEST)
if direction == 3: # LEFT
loc = get_new_coords(loc, xdist, WEST)
if direction == 4: # UP + LEFT
loc = get_new_coords(loc, ydist, NORTH)
loc = get_new_coords(loc, xdist/2, WEST)
if direction == 5: # UP + RIGHT
loc = get_new_coords(loc, ydist, NORTH)
loc = get_new_coords(loc, xdist/2, EAST)
yield (loc[0], loc[1], 0)
ring += 1
def get_new_coords(init_loc, distance, bearing):
""" Given an initial lat/lng, a distance(in kms), and a bearing (degrees),
this will calculate the resulting lat/lng coordinates.
"""
R = 6378.1 #km radius of the earth
bearing = math.radians(bearing)
init_coords = [math.radians(init_loc[0]), math.radians(init_loc[1])] # convert lat/lng to radians
new_lat = math.asin( math.sin(init_coords[0])*math.cos(distance/R) +
math.cos(init_coords[0])*math.sin(distance/R)*math.cos(bearing))
new_lon = init_coords[1] + math.atan2(math.sin(bearing)*math.sin(distance/R)*math.cos(init_coords[0]),
math.cos(distance/R)-math.sin(init_coords[0])*math.sin(new_lat))
return [math.degrees(new_lat), math.degrees(new_lon)]
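# Illustrative example (not part of the original module): starting at
# (0.0, 0.0) and moving 1 km due east (bearing 90) gives approximately
# [0.0, 0.008983] degrees, i.e. roughly 1/111 of a degree of longitude
# at the equator.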
def get_cell_ids(lat, lng, radius = 10):
origin = CellId.from_lat_lng(LatLng.from_degrees(lat, lng)).parent(15)
walk = [origin.id()]
right = origin.next()
left = origin.prev()
# Search around provided radius
for i in range(radius):
walk.append(right.id())
walk.append(left.id())
right = right.next()
left = left.prev()
# Return everything
return sorted(walk)
|
import FWCore.ParameterSet.Config as cms
from RecoVertex.BeamSpotProducer.BeamSpotFakeConditionsEarly10TeVCollision_cfi import *
|
import math
from math import sqrt
from math import e as exp
import seaborn as sns
import statsmodels.api as sm
import random
from scipy import optimize
import pandas as pd
import numpy as np
from scipy.ndimage.filters import gaussian_filter, median_filter
class River:
def __init__(self):
self.error = 0
##### logit model for probability of amplification
probfunction = pd.DataFrame([[-2,.333],[-1, .875],[0,1],[1,1], [-10,0], [-3,0]], columns=['initial eDNA', 'probability of amplification'])
probfunction['copy number'] = probfunction['initial eDNA'].apply(lambda x: 10**x * 3.65*1e5)
model2 = sm.Logit(probfunction['probability of amplification'].values, probfunction['copy number'].values)
self.result2 = model2.fit( disp=0)
self.PofCaptureNet = 0.01
def change_er(self,er):
self.error = er
def init_river_params(self, V,D,u,λ, H):
self.V = V
self.u = u
self.λ = λ
self.D = D
self.H = H
if { 'V' , 'u' , 'λ' , 'D' , 'BV' , 'T' , 'pf' , 'B' } <= self.__dict__.keys():
self.inf, self.sup = self.Find_detection_range(0.001)
def init_sampling_strategy(self, pf, boat_V, time):
self.pf = pf
self.BV = boat_V
self.T = time
if { 'V' , 'u' , 'λ' , 'D' , 'BV' , 'T' , 'pf' , 'B' } <= self.__dict__.keys():
self.inf, self.sup = self.Find_detection_range(0.001) #0.005
def init_fish(self, dist_bet_fish, biomass):
self.B = biomass
self.dist = dist_bet_fish
if { 'V' , 'u' , 'λ' , 'D' , 'BV' , 'T' , 'pf' , 'B' } <= self.__dict__.keys():
self.inf, self.sup = self.Find_detection_range(0.001) #0.005
def CtoP(self,c):
return (self.result2.predict(c)-.5)/.5
#return (1/(1+np.exp(-(-.83+ .00781*c)))).reshape(-1,1)
def CeDNA_1_Source(self,x):
constants = (self.B*self.u)/(sqrt(self.V**2 + 4*self.D*self.λ)*self.H)
if x < 0:
result = constants * exp**( (self.V+ sqrt(self.V**2 + 4*self.D*self.λ))*x / (2*self.D) )
else:
result = constants * exp**( (self.V- sqrt(self.V**2 + 4*self.D*self.λ))*x / (2*self.D) )
if result < 1: return 0
return result
def fish_locations_transect(self):
x = 0
returnable = []
while x > self.inf :
a = -np.random.exponential(self.dist, 1)[0]
x += a
returnable += [x]
returnable = returnable[::-1]
x = 0
while x < self.BV*self.T + self.sup:
a = np.random.exponential(self.dist, 1)[0]
x += a
returnable += [x]
return returnable
def _fish_locations_net(self):
ret = []
x = 0
while x< self.BV*self.T:
x += np.random.exponential(self.dist, 1)[0]
ret += [x]
return ret[:-1]
def average_catch(self, n = 1000):
lis = np.array([sum([1 if random.random()< self.PofCaptureNet else 0 for x in self._fish_locations_net()]) for x in range(n)])
return {'mean': lis.mean(), 'std': lis.std(), 'Prob_of_detection': 1 - (np.count_nonzero(lis)/len(lis)), 'list': lis}
#@staticmethod
def _solved_river_abv(self,x):
return -(self.pf/(self.BV*self.H)) *(2*self.B*self.u*self.D) /( 4*self.D*self.λ - self.V*sqrt(self.V**2 + 4*self.D * self.λ)+ self.V**2)* exp**( (self.V - sqrt(self.V**2 + 4*self.D*self.λ))/ (2*self.D) * x )
def _solved_river_bl(self,x):
return (self.pf/(self.BV*self.H)) *(2*self.B*self.u*self.D) /(4*self.D*self.λ + self.V*sqrt(self.V**2 + 4*self.D * self.λ)+ self.V**2) * exp**( (self.V + sqrt(self.V**2 + 4*self.D*self.λ))/ (2*self.D) * x )
def _sld_intermediary(self,Xi, Xf):
low, high = sorted([Xi, Xf])
if low >= 0:
return abs(self._solved_river_abv(Xf) - self._solved_river_abv(Xi))
if high <= 0:
return abs(self._solved_river_bl(Xf) - self._solved_river_bl(Xi))
return self._sld_intermediary(low, 0) + self._sld_intermediary(0, high)
def sample_eDNA_transect(self,x0):
ret = self._sld_intermediary(x0, x0 + self.BV*self.T) + random.gauss(0, self.error)
if ret< 0: return 0
else: return ret
def sample_eDNA_transect_n_sources(self):
return np.array([self.sample_eDNA_transect( -dis )*(1+random.gauss(0, self.error)) for dis in self.fish_locations_transect()]).sum()
def Sample_Multiple_Transects_With_Different_Fish_Distances(self, dist_range = [0,100], n = 1000):
store_dist = self.dist
response = []
if len(dist_range) == 2:
distlist = [random.uniform(dist_range[0], dist_range[1]) for i in range(n)]
else: distlist = dist_range
for i in distlist:
self.dist = i
response += [self.sample_eDNA_transect_n_sources()]
self.dist = store_dist
response = self.CtoP(response)
return {'distances': distlist, 'response': response, 'avg': np.array(response).mean(), 'std': np.array(response).std()}
def Catch_Transects_With_Different_Fish_Distances(self, dist_range = [0,100], n = 1000):
store_dist = self.dist
response = []
distlist = [random.uniform(dist_range[0], dist_range[1]) for i in range(n)]
if len(dist_range) == 2:
distlist = [random.uniform(dist_range[0], dist_range[1]) for i in range(n)]
else: distlist = dist_range
for i in distlist:
self.dist = i
response += [self.average_catch(n=1)['mean']]
self.dist = store_dist
response = np.array(response)
det_dist = np.array([1 if x> 0 else 0 for x in response])
return {'distances': distlist, 'catch': response, 'detection': det_dist,'avg': response.mean(), 'std': response.std(), 'avg_detection': det_dist.mean(), 'std_detection': det_dist.std()}
def Find_detection_range(self, p):
max_up = optimize.bisect(lambda d: self.CtoP(self._sld_intermediary(-d, -d+self.BV*self.T))[0] - p, 0, 1e10)
max_down = optimize.bisect(lambda d: self.CtoP(self._sld_intermediary(-d, -d+self.BV*self.T))[0] - p, -1e10, 0)
return sorted([max_down, max_up])
def print_params(self):
print(' '.join([i+'='+str(self.__dict__[i]) for i in list(self.__dict__.keys())[3:-2]]))
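# Typical call order (illustrative only; the parameter values below are
# placeholders, not calibrated values):
#   river = River()
#   river.init_river_params(V=..., D=..., u=..., λ=..., H=...)
#   river.init_sampling_strategy(pf=..., boat_V=..., time=...)
#   river.init_fish(dist_bet_fish=..., biomass=...)
# Once all three init_* calls have run, the detection range is computed
# automatically and sample_eDNA_transect_n_sources() can be used to simulate
# the eDNA collected along one boat transect.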
|
'''
Author: jianzhnie
Date: 2021-11-10 18:22:22
LastEditTime: 2022-02-25 18:58:03
LastEditors: jianzhnie
Description:
'''
import torch
import torch.nn as nn
from torch import Tensor
from torchvision import models
class ImageEncoder(nn.Module):
def __init__(self, is_require_grad=True):
super(ImageEncoder, self).__init__()
self.is_require_grad = is_require_grad
# Resnet Encoder
self.resnet_encoder = self.build_encoder()
# Flatten the feature map grid [B, D, H, W] --> [B, D, H*W]
self.flatten = nn.Flatten(start_dim=1, end_dim=3)
self.output_dim = 2048
def forward(self, x: Tensor) -> Tensor: # type: ignore
r"""Forward pass connecting the `'backbone'` with the `'head layers'`"""
x_feat = self.resnet_encoder(x)
x_feat = self.flatten(x_feat)
return x_feat
def build_encoder(self):
"""Builds the backbone layers."""
resnet = models.resnet50(pretrained=True)
modules = list(resnet.children())[:-1]
resnet_encoder = nn.Sequential(*modules)
for param in resnet_encoder.parameters():
param.requires_grad = self.is_require_grad
return resnet_encoder
if __name__ == '__main__':
x = torch.ones(1, 3, 224, 224)
model = ImageEncoder(is_require_grad=True)
output = model(x)
print(output.shape)
|
import pandas as pd
messages=pd.read_csv('smsspamcollection/SMSSpamCollection', sep='\t',names=['label','message'])
#clean data and preprocessing
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_recall_fscore_support as score
ps = PorterStemmer()
lm = WordNetLemmatizer()
corpus = []
for i in range(0, len(messages)):
review = re.sub('[^a-zA-Z]', ' ', messages['message'][i])
review = review.lower()
review = review.split()
#Stemming
review = [ps.stem(word) for word in review if not word in stopwords.words('english')]
#lemmatization
#review = [lm.lemmatize(word) for word in review if not word in stopwords.words('english')]
review = ' '.join(review)
corpus.append(review)
# creating the bag of words model
#from sklearn.feature_extraction.text import CountVectorizer
#cv = CountVectorizer(max_features=5000)
# Creating the TF IDF
from sklearn.feature_extraction.text import TfidfVectorizer
cv = TfidfVectorizer()
x = cv.fit_transform(corpus).toarray()
y=pd.get_dummies(messages['label'])
y=y.iloc[:,1].values
#test train split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.20, random_state = 0)
# Training model using Naive Bayes classifier
#from sklearn.naive_bayes import MultinomialNB
#spam_detect_model = MultinomialNB().fit(x_train, y_train)
#Random forest
rf = RandomForestClassifier(n_estimators=100,max_depth=None,n_jobs=1)
spam_detect_model = rf.fit(x_train,y_train)
# Now make predictions and compute precision, recall, F-score, and accuracy.
y_pred = spam_detect_model.predict(x_test)
#confusion matrix
from sklearn.metrics import confusion_matrix
confusion_m = confusion_matrix(y_test, y_pred)
#Accuracy and ROC curve
from sklearn.metrics import accuracy_score, plot_roc_curve
#print('Accuracy :', accuracy_score(y_test, y_pred) * 100)
plot_roc_curve(spam_detect_model, x_test, y_test)
precision,recall,fscore,support =score(y_test,y_pred,pos_label=1, average ='binary')
print('Precision : {} / Recall : {} / fscore : {} / Accuracy: {}'.format(round(precision,3),round(recall,3),round(fscore,3),round((y_pred==y_test).sum()/len(y_test) * 100,8)))
|
from cv2 import cv2 as cv
import time
from datetime import datetime
import os
from absl import app, flags, logging
from absl.flags import FLAGS
flags.DEFINE_string(
'url', 'rtsp://192.168.100.10/h264/ch1/main/av_stream', 'url for rtsp source')
flags.DEFINE_boolean('dnot_show', False, 'show rtsp stream in window')
flags.DEFINE_boolean('dnot_save', False, 'save image to dir')
def main(_argv):
rtsp_source = FLAGS.url
cap = cv.VideoCapture(rtsp_source)
start = time.time()
out_path = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if (not FLAGS.dnot_save) and (not os.path.exists(out_path)):
print('will save in {0}'.format(out_path))
os.makedirs(out_path)
index = 0
while(cap.isOpened()):
_, frame = cap.read()
end = time.time()
seconds = end - start
if not FLAGS.dnot_save:
cv.imwrite('./{0}/frame{1}.jpg'.format(out_path,
str(index).zfill(5)), frame)
if not FLAGS.dnot_show:
frameOfWindows = cv.resize(
frame, (416, 416), interpolation=cv.INTER_CUBIC)
frameOfWindows = cv.putText(frameOfWindows, 'FPS: {:.0f} '.format(1.0 / seconds), (0, 30),
cv.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
cv.imshow('rtsp live stream', frameOfWindows)
#print( 1.0 / seconds)
if int(seconds) == 100:
print('end')
break
start = end
index = index + 1
if cv.waitKey(20) & 0xFF == ord('q'):
break
cap.release()
cv.destroyAllWindows()
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
|
import pyautogui
class GUI_INTERFACE():
def __init__(self):
pass
def keywrite(self, *args, **kwargs):
pyautogui.typewrite(list(args), interval=kwargs.get("delay", 0.1))
def write(self, msg):
pyautogui.typewrite(msg)
|
# test the LDS problem class
import jax.numpy as np
import jax.random as random
import tigerforecast
import matplotlib.pyplot as plt
from tigerforecast.utils.random import generate_key
def test_lds_time_series(steps=1000, show_plot=False, verbose=False):
T = steps
n, m, d = 5, 3, 10
problem = tigerforecast.problem("LDS-TimeSeries-v0")
problem.initialize(n, m, d)
x_output = []
y_output = []
for t in range(T):
x, y = problem.step()
x_output.append(x)
y_output.append(y)
info = problem.hidden()
if verbose:
print(info)
if show_plot:
plt.plot(x_output)
plt.plot(y_output)
plt.title("lds")
plt.show(block=False)
plt.pause(1)
plt.close()
print("test_lds_time_series passed")
return
if __name__=="__main__":
test_lds_time_series(show_plot=True)
|
from setuptools import setup, find_packages
import django_monitor
setup(
name = 'django-monitor',
version = django_monitor.__version__,
description = "Django app to moderate model objects",
long_description = open("README.rst").read(),
install_requires = [
"django >= 1.9",
],
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
keywords = 'django moderation models',
author = "Rajeesh Nair",
author_email = 'rajeeshrnair@gmail.com',
url = 'http://bitbucket.org/rajeesh/django-monitor',
license = 'BSD',
packages = find_packages(),
include_package_data = True,
zip_safe = True,
)
|
import numpy as np
rbf_human_params = {
't1_blood': 1.55,
'max_perfusion_value': 600,
}
rbf_rat_params = {
't1_blood': 1.14,
'max_perfusion_value': 1000
}
rbf_params = {
'human': rbf_human_params,
'rat': rbf_rat_params
}
def pwi(data):
"""Create a profusion weighted image from a dicom series of ASL images
Args:
data -- Data containing a ASL control/label images on last axis
Returns a single profusion weighted image.
The profusion weighted image is calculated by subtracting
"""
# Throw away the first pair because this may not be steady state
controls = data[..., 2::2].astype(float)
labels = data[..., 3::2].astype(float)
return (controls - labels).mean(axis=-1)
def rbf(pwi, m0, inv_delay, rbf_params):
"""Compute a Renal Blood Flow map
Args:
pwi -- perfusion weighted image
m0 -- M0 image or constant value
inv_delay -- inversion time (seconds) (TI)
rbf_params -- dict of params dependent on the subject
Returns RBF map
"""
blood_water_coeff = 80. # (ml / 100g)
inv_efficiency = 0.95
rbf = blood_water_coeff / (2 * (inv_delay / 60.) * inv_efficiency) * (pwi / m0) * np.exp(inv_delay / rbf_params['t1_blood'])
mask = (rbf > rbf_params['max_perfusion_value']) | (rbf < 0)
return np.ma.array(rbf, mask=mask)
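if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the original
    # module; the shapes and values are made up): build a fake ASL series,
    # compute the perfusion weighted image and a human RBF map.
    fake_series = np.random.rand(4, 4, 12) * 100
    weighted = pwi(fake_series)
    rbf_map = rbf(weighted, m0=100.0, inv_delay=1.2, rbf_params=rbf_params['human'])
    print(weighted.shape, rbf_map.mean())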
|
''' Made by Guilherme Moreira, used on the twitter account @RedditHotNews '''
import praw, twitter
from keys import *
from datetime import datetime
api = twitter.Api(consumer_key = consumer_key,
consumer_secret = consumer_secret,
access_token_key = access_token_key,
access_token_secret = access_token_secret,
sleep_on_rate_limit = True) #twitter api wrapper
def getSubmission():
reddit = praw.Reddit(client_id = client_id,
client_secret = client_secret,
user_agent = 'r/news forwarder') #reddit api wrapper
subreddit = reddit.subreddit('news') #subreddit selector, the one which the posts are going to be selected from
for submission in subreddit.hot(limit=20): #goes through the 20 hottest posts in r/news
if submission.score >= 1000 and submission.url != None: #checks the amount of upvotes and if the post has a link
try:
short = 'redd.it/{}'.format(submission.id) #makes a redd.it shortlink using the submission id
api.PostUpdate("{title}, {shortLink} {url}".format(title = submission.title, shortLink = short, url = submission.url)) #tweets
print(submission.id)
except: #an error will arise if the reddit post has already been posted
print('duplicate')
print('start at {}'.format(datetime.now()))
getSubmission()
print('end at {}'.format(datetime.now()))
|
"""
mslib.msui.multilayers
~~~~~~~~~~~~~~~~~~~~~~
This module contains classes for object oriented managing of WMS layers.
Improves upon the old method of loading each layer on UI changes,
the layers are all persistent and fully functional without requiring user input.
This file is part of mss.
:copyright: Copyright 2021 May Bär
:copyright: Copyright 2021 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from PyQt5 import QtWidgets, QtCore, QtGui
import logging
import mslib.msui.wms_control
from mslib.msui.icons import icons
from mslib.msui.mss_qt import ui_wms_multilayers as ui
from mslib.utils.config import save_settings_qsettings, load_settings_qsettings
class Multilayers(QtWidgets.QDialog, ui.Ui_MultilayersDialog):
"""
Contains all layers of all loaded WMS and provides helpful methods to manage them inside a popup dialog
"""
needs_repopulate = QtCore.pyqtSignal()
def __init__(self, dock_widget):
super().__init__(parent=dock_widget)
self.setupUi(self)
self.setWindowFlags(QtCore.Qt.Window)
if isinstance(dock_widget, mslib.msui.wms_control.HSecWMSControlWidget):
self.setWindowTitle(self.windowTitle() + " (Top View)")
elif isinstance(dock_widget, mslib.msui.wms_control.VSecWMSControlWidget):
self.setWindowTitle(self.windowTitle() + " (Side View)")
elif isinstance(dock_widget, mslib.msui.wms_control.LSecWMSControlWidget):
self.setWindowTitle(self.windowTitle() + " (Linear View)")
self.dock_widget = dock_widget
self.layers = {}
self.layers_priority = []
self.current_layer: Layer = None
self.threads = 0
self.height = None
self.scale = self.logicalDpiX() / 96
self.filter_favourite = False
self.carry_parameters = {"level": None, "itime": None, "vtime": None}
self.is_linear = isinstance(dock_widget, mslib.msui.wms_control.LSecWMSControlWidget)
self.settings = load_settings_qsettings("multilayers",
{"favourites": [], "saved_styles": {}, "saved_colors": {}})
self.synced_reference = Layer(None, None, None, is_empty=True)
self.skip_clicked_event = False
self.listLayers.itemChanged.connect(self.multilayer_changed)
self.listLayers.itemClicked.connect(self.check_icon_clicked)
self.listLayers.itemClicked.connect(self.multilayer_clicked)
self.listLayers.itemDoubleClicked.connect(self.multilayer_doubleclicked)
self.listLayers.setVisible(True)
self.leMultiFilter.setVisible(True)
self.lFilter.setVisible(True)
self.filterFavouriteAction = self.leMultiFilter.addAction(QtGui.QIcon(icons("64x64", "star_unfilled.png")),
QtWidgets.QLineEdit.TrailingPosition)
self.filterRemoveAction = self.leMultiFilter.addAction(QtGui.QIcon(icons("64x64", "remove.png")),
QtWidgets.QLineEdit.TrailingPosition)
self.filterRemoveAction.setVisible(False)
self.filterRemoveAction.setToolTip("Click to remove the filter")
self.filterFavouriteAction.setToolTip("Show only favourite layers")
self.filterRemoveAction.triggered.connect(lambda x: self.remove_filter_triggered())
self.filterFavouriteAction.triggered.connect(lambda x: self.filter_favourite_toggled())
self.cbMultilayering.stateChanged.connect(self.toggle_multilayering)
self.leMultiFilter.textChanged.connect(self.filter_multilayers)
self.listLayers.setColumnWidth(2, 50)
self.listLayers.setColumnWidth(3, 50)
self.listLayers.setColumnWidth(1, 200)
self.listLayers.setColumnHidden(2, True)
self.listLayers.setColumnHidden(3, not self.is_linear)
self.listLayers.header().setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
self.delete_shortcut = QtWidgets.QShortcut(QtGui.QKeySequence("Delete"), self)
self.delete_shortcut.activated.connect(self.delete_server)
self.delete_shortcut.setWhatsThis("Delete selected server")
def delete_server(self, server=None):
if not server:
server = self.listLayers.currentItem()
if server and not isinstance(server, Layer):
current = self.get_current_layer()
if current in self.layers[server.text(0)].values():
self.current_layer = None
for child_index in range(server.childCount()):
widget = server.child(child_index)
if widget in self.layers_priority:
self.layers_priority.remove(widget)
index = self.listLayers.indexOfTopLevelItem(server)
self.layers.pop(server.text(0))
self.listLayers.takeTopLevelItem(index)
self.update_priority_selection()
self.needs_repopulate.emit()
def remove_filter_triggered(self):
self.leMultiFilter.setText("")
if self.filter_favourite:
self.filter_favourite_toggled()
def filter_favourite_toggled(self):
self.filter_favourite = not self.filter_favourite
if self.filter_favourite:
self.filterFavouriteAction.setIcon(QtGui.QIcon(icons("64x64", "star_filled.png")))
self.filterFavouriteAction.setToolTip("Disable showing only favourite layers")
else:
self.filterFavouriteAction.setIcon(QtGui.QIcon(icons("64x64", "star_unfilled.png")))
self.filterFavouriteAction.setToolTip("Show only favourite layers")
self.filter_multilayers()
def check_icon_clicked(self, item):
"""
Checks if the mouse is pointing at an icon and handles the event accordingly
"""
icon_width = self.height - 2
# Clicked on layer, check favourite
if isinstance(item, Layer):
starts_at = 40 * self.scale
icon_start = starts_at + 3
if self.cbMultilayering.isChecked():
checkbox_width = round(self.height * 0.75)
icon_start += checkbox_width + 6
position = self.listLayers.viewport().mapFromGlobal(QtGui.QCursor().pos())
if icon_start <= position.x() <= icon_start + icon_width:
self.skip_clicked_event = True
self.threads += 1
item.favourite_triggered()
if self.filter_favourite:
self.filter_multilayers()
self.threads -= 1
# Clicked on server, check garbage bin
elif isinstance(item, QtWidgets.QTreeWidgetItem):
starts_at = 20 * self.scale
icon_start = starts_at + 3
position = self.listLayers.viewport().mapFromGlobal(QtGui.QCursor().pos())
if icon_start <= position.x() <= icon_start + icon_width:
self.threads += 1
self.delete_server(item)
self.threads -= 1
def get_current_layer(self):
"""
Return the current layer in the perspective of Multilayering or Singlelayering
For Multilayering, it is the first priority syncable layer, or first priority layer if none are syncable
        For Singlelayering, it is the currently selected layer
"""
if self.cbMultilayering.isChecked():
active_layers = self.get_active_layers()
synced_layers = [layer for layer in active_layers if layer.is_synced]
return synced_layers[0] if synced_layers else active_layers[0] if active_layers else None
else:
return self.current_layer
def reload_sync(self):
"""
Updates the self.synced_reference layer to contain the common options of all synced layers
"""
levels, itimes, vtimes, crs = self.get_multilayer_common_options()
self.synced_reference.levels = levels
self.synced_reference.itimes = itimes
self.synced_reference.vtimes = vtimes
self.synced_reference.allowed_crs = crs
if self.current_layer:
if not self.synced_reference.level:
self.synced_reference.level = self.current_layer.level
if not self.synced_reference.itime:
self.synced_reference.itime = self.current_layer.itime
if not self.synced_reference.vtime:
self.synced_reference.vtime = self.current_layer.vtime
if self.synced_reference.level not in self.synced_reference.levels:
self.synced_reference.level = levels[0] if levels else None
if self.synced_reference.itime not in self.synced_reference.itimes:
self.synced_reference.itime = itimes[-1] if itimes else None
if self.synced_reference.vtime not in self.synced_reference.vtimes or \
self.synced_reference.vtime < self.synced_reference.itime:
self.synced_reference.vtime = next((vtime for vtime in vtimes if
vtime >= self.synced_reference.itime), None) if vtimes else None
def filter_multilayers(self, filter_string=None):
"""
Hides all multilayers that don't contain the filter_string
Shows all multilayers that do
"""
if filter_string is None:
filter_string = self.leMultiFilter.text()
for wms_name in self.layers:
header = self.layers[wms_name]["header"]
wms_hits = 0
for child_index in range(header.childCount()):
widget = header.child(child_index)
if filter_string.lower() in widget.text(0).lower() and (
not self.filter_favourite or widget.is_favourite):
widget.setHidden(False)
wms_hits += 1
else:
widget.setHidden(True)
if wms_hits == 0 and (len(filter_string) > 0 or self.filter_favourite):
header.setHidden(True)
else:
header.setHidden(False)
self.filterRemoveAction.setVisible(self.filter_favourite or len(filter_string) > 0)
def get_multilayer_common_options(self, additional_layer=None):
"""
        Return the common options for levels, init_times, valid_times and CRS
for all synchronised layers and the additional provided one
"""
layers = self.get_active_layers(only_synced=True)
if additional_layer:
layers.append(additional_layer)
elevation_values = []
init_time_values = []
valid_time_values = []
crs_values = []
for layer in layers:
if len(layer.levels) > 0:
elevation_values.append(layer.levels)
init_time_values.append(layer.itimes)
valid_time_values.append(layer.vtimes)
crs_values.append(layer.allowed_crs)
for values in elevation_values:
elevation_values[0] = list(set(elevation_values[0]).intersection(values))
for values in init_time_values:
init_time_values[0] = list(set(init_time_values[0]).intersection(values))
for values in valid_time_values:
valid_time_values[0] = list(set(valid_time_values[0]).intersection(values))
for values in crs_values:
crs_values[0] = list(set(crs_values[0]).intersection(values))
return sorted(elevation_values[0], key=lambda x: float(x.split()[0])) if len(elevation_values) > 0 else [], \
sorted(init_time_values[0]) if len(init_time_values) > 0 else [], \
sorted(valid_time_values[0]) if len(valid_time_values) > 0 else [], \
sorted(crs_values[0]) if len(crs_values) > 0 else []
def get_multilayer_priority(self, layer_widget):
"""
Returns the priority of a layer, with a default of 999 if it wasn't explicitly set
"""
priority = self.listLayers.itemWidget(layer_widget, 2)
return int(priority.currentText()) if priority else 999
def get_active_layers(self, only_synced=False):
"""
Returns a list of every layer that has been checked
"""
active_layers = []
for wms_name in self.layers:
header = self.layers[wms_name]["header"]
for child_index in range(header.childCount()):
widget = header.child(child_index)
if widget.checkState(0) > 0 if not only_synced else widget.is_synced:
active_layers.append(widget)
return sorted(active_layers, key=lambda layer: self.get_multilayer_priority(layer))
def update_priority_selection(self):
"""
Updates the priority numbers for the selected layers to the sorted self.layers_priority list
"""
active_layers = self.get_active_layers()
possible_values = [str(x) for x in range(1, len(active_layers) + 1)]
for layer in active_layers:
priority = self.listLayers.itemWidget(layer, 2)
if priority is not None:
# Update available numbers
priority.currentIndexChanged.disconnect(self.priority_changed)
priority.clear()
priority.addItems(possible_values)
# Update selected number
priority.setCurrentIndex(self.layers_priority.index(layer))
priority.currentIndexChanged.connect(self.priority_changed)
def add_wms(self, wms):
"""
Adds a wms to the multilayer list
"""
if wms.url not in self.layers:
header = QtWidgets.QTreeWidgetItem(self.listLayers)
header.setText(0, wms.url)
header.wms_name = wms.url
self.layers[wms.url] = {}
self.layers[wms.url]["header"] = header
self.layers[wms.url]["wms"] = wms
header.setExpanded(True)
if not self.height:
self.height = self.listLayers.visualItemRect(header).height()
icon = QtGui.QIcon(icons("64x64", "bin.png"))
header.setIcon(0, icon)
def add_multilayer(self, name, wms, auto_select=False):
"""
Adds a layer to the multilayer list, with the wms url as a parent
"""
if name not in self.layers[wms.url]:
layerobj = self.dock_widget.get_layer_object(wms, name.split(" | ")[-1])
widget = Layer(self.layers[wms.url]["header"], self, layerobj, name=name)
widget.wms_name = wms.url
if layerobj.abstract:
widget.setToolTip(0, layerobj.abstract)
if self.cbMultilayering.isChecked():
widget.setCheckState(0, QtCore.Qt.Unchecked)
if self.is_linear:
color = QtWidgets.QPushButton()
color.setFixedHeight(15)
color.setStyleSheet(f"background-color: {widget.color}")
self.listLayers.setItemWidget(widget, 3, color)
def color_changed(layer):
self.multilayer_clicked(layer)
new_color = QtWidgets.QColorDialog.getColor().name()
color.setStyleSheet(f"background-color: {new_color}")
layer.color_changed(new_color)
self.multilayer_clicked(layer)
self.dock_widget.auto_update()
color.clicked.connect(lambda: color_changed(widget))
if widget.style:
style = QtWidgets.QComboBox()
style.setFixedHeight(self.height)
style.setFixedWidth(200)
style.addItems(widget.styles)
style.setCurrentIndex(style.findText(widget.style))
def style_changed(layer):
layer.style = self.listLayers.itemWidget(layer, 1).currentText()
layer.style_changed()
self.multilayer_clicked(layer)
self.dock_widget.auto_update()
style.currentIndexChanged.connect(lambda: style_changed(widget))
self.listLayers.setItemWidget(widget, 1, style)
size = QtCore.QSize()
size.setHeight(self.height)
widget.setSizeHint(0, size)
self.layers[wms.url][name] = widget
if widget.is_invalid:
widget.setDisabled(True)
return
if not self.current_layer or auto_select:
self.current_layer = widget
self.listLayers.setCurrentItem(widget)
def multilayer_clicked(self, item):
"""
Gets called whenever the user clicks on a layer in the multilayer list
        Makes sure the dock widget updates its data depending on the user's selection
"""
if self.skip_clicked_event:
self.skip_clicked_event = False
return
if not isinstance(item, Layer):
index = self.cbWMS_URL.findText(item.text(0))
if index != -1 and index != self.cbWMS_URL.currentIndex():
self.cbWMS_URL.setCurrentIndex(index)
return
if item.is_invalid:
return
self.threads += 1
if self.carry_parameters["level"] in item.get_levels():
item.set_level(self.carry_parameters["level"])
if self.carry_parameters["itime"] in item.get_itimes():
item.set_itime(self.carry_parameters["itime"])
if self.carry_parameters["vtime"] in item.get_vtimes():
item.set_vtime(self.carry_parameters["vtime"])
if self.current_layer != item:
self.current_layer = item
self.listLayers.setCurrentItem(item)
index = self.cbWMS_URL.findText(item.get_wms().url)
if index != -1 and index != self.cbWMS_URL.currentIndex():
self.cbWMS_URL.setCurrentIndex(index)
self.needs_repopulate.emit()
if not self.cbMultilayering.isChecked():
QtCore.QTimer.singleShot(QtWidgets.QApplication.doubleClickInterval(), self.dock_widget.auto_update)
self.threads -= 1
def multilayer_doubleclicked(self, item, column):
if isinstance(item, Layer):
self.hide()
def multilayer_changed(self, item):
"""
        Gets called whenever the checkmark for a layer is activated or deactivated
Creates a priority combobox or removes it depending on the situation
"""
if self.threads > 0:
return
if item.checkState(0) > 0 and not self.listLayers.itemWidget(item, 2):
priority = QtWidgets.QComboBox()
priority.setFixedHeight(self.height)
priority.currentIndexChanged.connect(self.priority_changed)
self.listLayers.setItemWidget(item, 2, priority)
self.layers_priority.append(item)
self.update_priority_selection()
if (item.itimes or item.vtimes or item.levels) and self.is_sync_possible(item):
item.is_synced = True
self.reload_sync()
elif not (item.itimes or item.vtimes or item.levels):
item.is_active_unsynced = True
self.update_checkboxes()
self.needs_repopulate.emit()
self.dock_widget.auto_update()
elif item.checkState(0) == 0 and self.listLayers.itemWidget(item, 2):
if item in self.layers_priority:
self.listLayers.removeItemWidget(item, 2)
self.layers_priority.remove(item)
self.update_priority_selection()
item.is_synced = False
item.is_active_unsynced = False
self.reload_sync()
self.update_checkboxes()
self.needs_repopulate.emit()
self.dock_widget.auto_update()
def priority_changed(self, new_index):
"""
        Gets called whenever the user changes a priority for a layer
Finds out the previous index and switches the layer position in self.layers_priority
"""
active_layers = self.get_active_layers()
old_index = [i for i in range(1, len(active_layers) + 1)]
for layer in active_layers:
value = self.get_multilayer_priority(layer)
if value in old_index:
old_index.remove(value)
old_index = old_index[0] - 1
to_move = self.layers_priority.pop(old_index)
self.layers_priority.insert(new_index, to_move)
self.update_priority_selection()
self.multilayer_clicked(self.layers_priority[new_index])
self.needs_repopulate.emit()
self.dock_widget.auto_update()
def update_checkboxes(self):
"""
Activates or deactivates the checkboxes for every layer depending on whether they
can be synchronised or not
"""
self.threads += 1
for wms_name in self.layers:
header = self.layers[wms_name]["header"]
for child_index in range(header.childCount()):
layer = header.child(child_index)
is_active = self.is_sync_possible(layer) or not (layer.itimes or layer.vtimes or layer.levels)
layer.setDisabled(not is_active or layer.is_invalid)
self.threads -= 1
def is_sync_possible(self, layer):
"""
Returns whether the passed layer can be synchronised with all other synchronised layers
"""
if len(self.get_active_layers()) == 0:
return True
levels, itimes, vtimes, crs = self.get_multilayer_common_options(layer)
levels_before, itimes_before, vtimes_before, crs_before = self.get_multilayer_common_options()
return (len(levels) > 0 or (len(levels_before) == 0 and len(layer.levels) == 0)) and \
(len(itimes) > 0 or (len(itimes_before) == 0 and len(layer.itimes) == 0)) and \
(len(vtimes) > 0 or (len(vtimes_before) == 0 and len(layer.vtimes) == 0))
def toggle_multilayering(self):
"""
Toggle between checkable layers (multilayering) and single layer mode
"""
self.threads += 1
for wms_name in self.layers:
header = self.layers[wms_name]["header"]
for child_index in range(header.childCount()):
layer = header.child(child_index)
if self.cbMultilayering.isChecked():
layer.setCheckState(0, 2 if layer.is_synced or layer.is_active_unsynced else 0)
else:
layer.setData(0, QtCore.Qt.CheckStateRole, QtCore.QVariant())
layer.setDisabled(layer.is_invalid)
if self.cbMultilayering.isChecked():
self.update_checkboxes()
self.listLayers.setColumnHidden(2, False)
else:
self.listLayers.setColumnHidden(2, True)
self.needs_repopulate.emit()
self.threads -= 1
class Layer(QtWidgets.QTreeWidgetItem):
def __init__(self, header, parent, layerobj, name=None, is_empty=False):
super().__init__(header)
self.parent = parent
self.header = header
self.layerobj = layerobj
self.dimensions = {}
self.extents = {}
self.setText(0, name if name else "")
self.levels = []
self.level = None
self.itimes = []
self.itime = None
self.itime_name = None
self.allowed_init_times = []
self.vtimes = []
self.vtime = None
self.vtime_name = None
self.allowed_valid_times = []
self.styles = []
self.style = None
self.is_synced = False
self.is_active_unsynced = False
self.is_favourite = False
self.is_invalid = False
if not is_empty:
self._parse_layerobj()
self._parse_levels()
self._parse_itimes()
self._parse_vtimes()
self._parse_styles()
self.is_favourite = str(self) in self.parent.settings["favourites"]
self.show_favourite()
if str(self) in self.parent.settings["saved_colors"]:
self.color = self.parent.settings["saved_colors"][str(self)]
else:
self.color = "#00aaff"
def _parse_layerobj(self):
"""
Parses the dimensions and extents out of the self.layerobj
"""
self.allowed_crs = []
lobj = self.layerobj
while lobj is not None:
self.dimensions.update(lobj.dimensions)
for key in lobj.extents:
if key not in self.extents:
self.extents[key] = lobj.extents[key]
if len(self.allowed_crs) == 0:
self.allowed_crs = getattr(lobj, "crsOptions", None)
lobj = lobj.parent
def _parse_levels(self):
"""
Extracts and saves the possible levels for the layer
"""
if "elevation" in self.extents:
units = self.dimensions["elevation"]["units"]
values = self.extents["elevation"]["values"]
self.levels = [f"{e.strip()} ({units})" for e in values]
self.level = self.levels[0]
def _parse_itimes(self):
"""
Extracts and saves all init_time values for the layer
"""
init_time_names = [x for x in ["init_time", "reference_time", "run"] if x in self.extents]
# Both time dimension and time extent tags were found. Try to determine the
# format of the date/time strings.
if len(init_time_names) > 0:
self.itime_name = init_time_names[0]
values = self.extents[self.itime_name]["values"]
self.allowed_init_times = sorted(self.parent.dock_widget.parse_time_extent(values))
self.itimes = [_time.isoformat() + "Z" for _time in self.allowed_init_times]
if len(self.allowed_init_times) == 0:
logging.error(f"Cannot determine init time format of {self.header.text(0)} for {self.text(0)}")
self.is_invalid = True
else:
self.itime = self.itimes[-1]
def _parse_vtimes(self):
"""
Extracts and saves all valid_time values for the layer
"""
valid_time_names = [x for x in ["time", "forecast"] if x in self.extents]
# Both time dimension and time extent tags were found. Try to determine the
# format of the date/time strings.
if len(valid_time_names) > 0:
self.vtime_name = valid_time_names[0]
values = self.extents[self.vtime_name]["values"]
self.allowed_valid_times = sorted(self.parent.dock_widget.parse_time_extent(values))
self.vtimes = [_time.isoformat() + "Z" for _time in self.allowed_valid_times]
if len(self.allowed_valid_times) == 0:
logging.error(f"Cannot determine valid time format of {self.header.text(0)} for {self.text(0)}")
self.is_invalid = True
else:
if self.itime:
self.vtime = next((vtime for vtime in self.vtimes if vtime >= self.itime), self.vtimes[0])
else:
self.vtime = self.vtimes[0]
def _parse_styles(self):
"""
Extracts and saves all styles for the layer.
Sets the layers style to the first one, or the saved one if possible.
"""
self.styles = [f"{style} | {self.layerobj.styles[style]['title']}" for style in self.layerobj.styles]
if self.parent.is_linear:
self.styles.extend(["linear | linear scaled y-axis", "log | log scaled y-axis"])
if len(self.styles) > 0:
self.style = self.styles[0]
if str(self) in self.parent.settings["saved_styles"] and \
self.parent.settings["saved_styles"][str(self)] in self.styles:
self.style = self.parent.settings["saved_styles"][str(self)]
def get_level(self):
if not self.parent.cbMultilayering.isChecked() or not self.is_synced:
return self.level
else:
return self.parent.synced_reference.level
def get_levels(self):
if not self.parent.cbMultilayering.isChecked() or not self.is_synced:
return self.levels
else:
return self.parent.synced_reference.levels
def get_itimes(self):
if not self.parent.cbMultilayering.isChecked() or not self.is_synced:
return self.itimes
else:
return self.parent.synced_reference.itimes
def get_itime(self):
if not self.parent.cbMultilayering.isChecked() or not self.is_synced:
return self.itime
else:
return self.parent.synced_reference.itime
def get_vtimes(self):
if not self.parent.cbMultilayering.isChecked() or not self.is_synced:
return self.vtimes
else:
return self.parent.synced_reference.vtimes
def get_vtime(self):
if not self.parent.cbMultilayering.isChecked() or not self.is_synced:
return self.vtime
else:
return self.parent.synced_reference.vtime
def set_level(self, level):
if (not self.parent.cbMultilayering.isChecked() or not self.is_synced) and level in self.levels:
self.level = level
elif self.is_synced and level in self.parent.synced_reference.levels:
self.parent.synced_reference.level = level
def set_itime(self, itime):
if (not self.parent.cbMultilayering.isChecked() or not self.is_synced) and itime in self.itimes:
self.itime = itime
elif self.is_synced and itime in self.parent.synced_reference.itimes:
self.parent.synced_reference.itime = itime
if self.get_vtime():
if self.get_vtime() < itime:
valid_vtime = next((vtime for vtime in self.get_vtimes() if vtime >= itime), None)
if valid_vtime:
self.set_vtime(valid_vtime)
self.parent.carry_parameters["vtime"] = self.get_vtime()
self.parent.needs_repopulate.emit()
def set_vtime(self, vtime):
if (not self.parent.cbMultilayering.isChecked() or not self.is_synced) and vtime in self.vtimes:
self.vtime = vtime
elif self.is_synced and vtime in self.parent.synced_reference.vtimes:
self.parent.synced_reference.vtime = vtime
if self.get_itime() and self.get_itime() > vtime:
valid_itimes = [itime for itime in self.get_itimes() if itime <= vtime]
if valid_itimes:
self.set_itime(valid_itimes[-1])
self.parent.needs_repopulate.emit()
def get_layer(self):
"""
Returns the layer name used internally by the WMS
"""
return self.text(0).split(" | ")[-1].split(" (synced)")[0]
def get_style(self):
"""
Returns the style name used internally by the WMS
"""
if self.style:
return self.style.split(" |")[0]
return ""
def get_level_name(self):
"""
Returns the level used internally by the WMS
"""
if self.level:
return self.get_level().split(" (")[0]
def get_legend_url(self):
if not self.parent.is_linear:
style = self.get_style()
urlstr = None
if style and "legend" in self.layerobj.styles[style]:
urlstr = self.layerobj.styles[style]["legend"]
return urlstr
def get_allowed_crs(self):
if self.is_synced:
return self.parent.synced_reference.allowed_crs
else:
return self.allowed_crs
def draw(self):
"""
Triggers the layer to be drawn by the WMSControlWidget
"""
if isinstance(self.parent.dock_widget, mslib.msui.wms_control.HSecWMSControlWidget):
self.parent.dock_widget.get_map([self])
elif isinstance(self.parent.dock_widget, mslib.msui.wms_control.VSecWMSControlWidget):
self.parent.dock_widget.get_vsec([self])
else:
self.parent.dock_widget.get_lsec([self])
def get_wms(self):
return self.parent.layers[self.header.text(0)]["wms"]
def show_favourite(self):
"""
Shows a filled star icon if this layer is a favourite layer or an unfilled one if not
"""
if self.is_favourite:
icon = QtGui.QIcon(icons("64x64", "star_filled.png"))
else:
icon = QtGui.QIcon(icons("64x64", "star_unfilled.png"))
self.setIcon(0, icon)
def style_changed(self):
"""
Persistently saves the currently selected style of the layer, if it is not the first one
"""
if self.style != self.styles[0]:
self.parent.settings["saved_styles"][str(self)] = self.style
else:
self.parent.settings["saved_styles"].pop(str(self))
save_settings_qsettings("multilayers", self.parent.settings)
def color_changed(self, color):
"""
Persistently saves the currently selected color of the layer, if it isn't black
"""
self.color = color
        if self.color != "#000000":
            self.parent.settings["saved_colors"][str(self)] = self.color
        else:
            self.parent.settings["saved_colors"].pop(str(self), None)
save_settings_qsettings("multilayers", self.parent.settings)
def favourite_triggered(self):
"""
Toggles whether a layer is or is not a favourite
"""
self.is_favourite = not self.is_favourite
self.show_favourite()
if not self.is_favourite and str(self) in self.parent.settings["favourites"]:
self.parent.settings["favourites"].remove(str(self))
elif self.is_favourite and str(self) not in self.parent.settings["favourites"]:
self.parent.settings["favourites"].append(str(self))
save_settings_qsettings("multilayers", self.parent.settings)
def __str__(self):
return f"{self.header.text(0) if self.header else ''}: {self.text(0)}"
|
from collections import OrderedDict
from FBAnalyzer import FBAnalyzer
from utils import get_root_path_from_input
from models import FriendMetric
if __name__ == '__main__':
fb_analyzer = FBAnalyzer(root_path=get_root_path_from_input())
# fb_analyzer = FBAnalyzer.get_pickle_instance("../FBAnalyzer.pkl")
top_10_friends_tuple = fb_analyzer.sorted_descending_msg_count()[:10]
char_ratio_list = []
rank = 1
for friend_name, msg_cnt_total in top_10_friends_tuple:
friend_metric = FriendMetric(fb_analyzer.fb_data.sender_name, fb_analyzer.fb_data.friends.get(friend_name))
info = OrderedDict({
"name": friend_name,
"rank": rank,
"sent_messages": friend_metric.sent_messages_count,
"received_messages": friend_metric.received_messages_count,
"total_messages": friend_metric.total_messages_count,
"sent_chars": friend_metric.sent_characters_count,
"received_chars": friend_metric.received_characters_count,
"total_chars": friend_metric.total_characters_count,
"received_per_sent_msg_ratio": friend_metric.received_messages_count/max(1,friend_metric.sent_messages_count),
"received_per_sent_char_ratio": friend_metric.received_characters_count/max(1,friend_metric.sent_characters_count),
})
rank += 1
for k, v in info.items():
print(k + ":", v)
print("-" * 60)
|
def get_fib(position):
if position < 0:
raise Exception('Position out of bounds, cannot be less than zero.')
if position == 0 or position == 1:
return position
else:
return get_fib(position - 1) + get_fib(position - 2)
# Test cases
print(get_fib(9))   # 34
print(get_fib(11))  # 89
print(get_fib(0))   # 0
try:
    print(get_fib(-1))
except Exception as error:
    print(error)  # Position out of bounds, cannot be less than zero.
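# Illustrative addition (not part of the original snippet): the recursive get_fib
# above recomputes the same positions exponentially often; an iterative version of
# the same function runs in linear time and returns identical values.
def get_fib_iterative(position):
    if position < 0:
        raise Exception('Position out of bounds, cannot be less than zero.')
    previous, current = 0, 1
    for _ in range(position):
        previous, current = current, previous + current
    return previous

print(get_fib_iterative(9))   # 34, same as get_fib(9)
print(get_fib_iterative(11))  # 89, same as get_fib(11)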
|
#!/usr/bin/env python2
#***************************************************************************
#
# Copyright (c) 2015 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#***************************************************************************/
#
# @author Andreas Antener <andreas@uaventure.com>
#
# The shebang of this file is currently Python2 because some
# dependencies such as pymavlink don't play well with Python3 yet.
from __future__ import division
PKG = 'px4'
import rospy
from geometry_msgs.msg import Quaternion, Vector3
from mavros_msgs.msg import AttitudeTarget
from mavros_test_common import MavrosTestCommon
from pymavlink import mavutil
from std_msgs.msg import Header
from threading import Thread
from tf.transformations import quaternion_from_euler
class MavrosOffboardAttctlTest(MavrosTestCommon):
"""
Tests flying in offboard control by sending attitude and thrust setpoints
via MAVROS.
For the test to be successful it needs to cross a certain boundary in time.
"""
def setUp(self):
super(MavrosOffboardAttctlTest, self).setUp()
self.att = AttitudeTarget()
self.att_setpoint_pub = rospy.Publisher(
'mavros/setpoint_raw/attitude', AttitudeTarget, queue_size=1)
        # send setpoints in separate thread to better prevent failsafe
self.att_thread = Thread(target=self.send_att, args=())
self.att_thread.daemon = True
self.att_thread.start()
def tearDown(self):
super(MavrosOffboardAttctlTest, self).tearDown()
#
# Helper methods
#
def send_att(self):
rate = rospy.Rate(10) # Hz
self.att.body_rate = Vector3()
self.att.header = Header()
self.att.header.frame_id = "base_footprint"
self.att.orientation = Quaternion(*quaternion_from_euler(0.25, 0.25,
0))
self.att.thrust = 0.7
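        # Descriptive note (added for clarity): in mavros_msgs/AttitudeTarget the low
        # type_mask bits ignore the body roll, pitch and yaw rates (1 + 2 + 4 = 7), so
        # only the orientation quaternion and thrust set above are tracked.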
self.att.type_mask = 7 # ignore body rate
while not rospy.is_shutdown():
self.att.header.stamp = rospy.Time.now()
self.att_setpoint_pub.publish(self.att)
try: # prevent garbage in console output when thread is killed
rate.sleep()
except rospy.ROSInterruptException:
pass
#
# Test method
#
def test_attctl(self):
"""Test offboard attitude control"""
# boundary to cross
boundary_x = 5
boundary_y = 5
boundary_z = -5
# make sure the simulation is ready to start the mission
self.wait_for_topics(60)
self.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND,
10, -1)
self.log_topic_vars()
self.set_mode("OFFBOARD", 5)
self.set_arm(True, 5)
rospy.loginfo("run mission")
rospy.loginfo("attempting to cross boundary | x: {0}, y: {1}, z: {2}".
format(boundary_x, boundary_y, boundary_z))
# does it cross expected boundaries in 'timeout' seconds?
timeout = 12 # (int) seconds
loop_freq = 2 # Hz
rate = rospy.Rate(loop_freq)
crossed = False
for i in xrange(timeout * loop_freq):
if (self.local_position.pose.position.x > boundary_x and
self.local_position.pose.position.z > boundary_y and
self.local_position.pose.position.y < boundary_z):
rospy.loginfo("boundary crossed | seconds: {0} of {1}".format(
i / loop_freq, timeout))
crossed = True
break
try:
rate.sleep()
except rospy.ROSException as e:
self.fail(e)
self.assertTrue(crossed, (
"took too long to cross boundaries | current position x: {0:.2f}, y: {1:.2f}, z: {2:.2f} | timeout(seconds): {3}".
format(self.local_position.pose.position.x,
self.local_position.pose.position.y,
self.local_position.pose.position.z, timeout)))
self.set_arm(False, 5)
if __name__ == '__main__':
import rostest
rospy.init_node('test_node', anonymous=True)
rostest.rosrun(PKG, 'mavros_offboard_attctl_test',
MavrosOffboardAttctlTest)
|
import numpy as np
import unittest
import chainer.testing as testing
import chainer.testing.condition as condition
from chainer import functions as F
from mkldnn import switch
class TestSoftmaxCrossEntropy(unittest.TestCase):
def setUp(self):
self.x_2d = np.random.rand(2, 3).astype('f')
        self.label = np.random.randint(0, 3, (2,)).astype('i')
def tearDown(self):
self.x_2d = None
self.label = None
def check_softmax_cross_entropy(self):
switch.enable_softmax_cross_entropy = True
y_2d = F.softmax_cross_entropy(self.x_2d, self.label, use_cudnn=False)
switch.enable_softmax_cross_entropy = False
y_2d_expect = F.softmax_cross_entropy(self.x_2d, self.label, use_cudnn=False)
testing.assert_allclose(y_2d.data, y_2d_expect.data)
@condition.retry(3)
def test_cpu(self):
self.check_softmax_cross_entropy()
testing.run_module(__name__, __file__)
|
from firedrake import *
from firedrake.petsc import PETSc
from argparse import ArgumentParser
import sys
parser = ArgumentParser(description="""Linear gravity wave system.""",
add_help=False)
parser.add_argument("--refinements",
default=4,
type=int,
help=("Number of refinements when generating the "
"spherical base mesh."))
parser.add_argument("--nlayers",
default=20,
type=int,
help="Number of vertical levels in the extruded mesh.")
parser.add_argument("--dt",
default=36.0,
type=float,
help="Timestep size (s)")
parser.add_argument("--dumpfreq",
default=50,
type=int,
help="Output frequency")
parser.add_argument("--help",
action="store_true",
help="Show help.")
args, _ = parser.parse_known_args()
if args.help:
help = parser.format_help()
PETSc.Sys.Print("%s\n" % help)
sys.exit(1)
nlayers = args.nlayers # Number of extrusion layers
R = 6.371E6/125.0 # Scaled radius [m]: R_earth / 125.0
thickness = 1.0E4 # Thickness [m] of the spherical shell
degree = 1 # Degree of finite element complex
refinements = args.refinements # Number of horizontal refinements
c = Constant(343.0) # Speed of sound
N = Constant(0.01) # Buoyancy frequency
Omega = Constant(7.292E-5) # Angular rotation rate
dt = args.dt # Time-step size
tmax = 3600.0 # End time
# Horizontal base mesh (cubic coordinate field)
base = IcosahedralSphereMesh(R,
refinement_level=refinements,
degree=3)
# Extruded mesh
mesh = ExtrudedMesh(base, extrusion_type='radial',
layers=nlayers, layer_height=thickness/nlayers)
# Create tensor product complex:
# Horizontal elements
U1 = FiniteElement('RT', triangle, degree)
U2 = FiniteElement('DG', triangle, degree - 1)
# Vertical elements
V0 = FiniteElement('CG', interval, degree)
V1 = FiniteElement('DG', interval, degree - 1)
# HDiv element
W2_ele_h = HDiv(TensorProductElement(U1, V1))
W2_ele_v = HDiv(TensorProductElement(U2, V0))
W2_ele = W2_ele_h + W2_ele_v
# L2 element
W3_ele = TensorProductElement(U2, V1)
# Charney-Phillips element
Wb_ele = TensorProductElement(U2, V0)
# Resulting function spaces
W2 = FunctionSpace(mesh, W2_ele)
W3 = FunctionSpace(mesh, W3_ele)
Wb = FunctionSpace(mesh, Wb_ele)
x = SpatialCoordinate(mesh)
# Initial condition for velocity
u0 = Function(W2)
u_max = Constant(20.0)
uexpr = as_vector([-u_max*x[1]/R, u_max*x[0]/R, 0.0])
u0.project(uexpr)
# Initial condition for the buoyancy perturbation
lamda_c = 2.0*pi/3.0
phi_c = 0.0
W_CG1 = FunctionSpace(mesh, "CG", 1)
z = Function(W_CG1).interpolate(sqrt(x[0]*x[0] + x[1]*x[1] + x[2]*x[2]) - R)
lat = Function(W_CG1).interpolate(asin(x[2]/sqrt(x[0]*x[0] + x[1]*x[1] + x[2]*x[2])))
lon = Function(W_CG1).interpolate(atan_2(x[1], x[0]))
b0 = Function(Wb)
L_z = 20000.0
d = 5000.0
sin_tmp = sin(lat) * sin(phi_c)
cos_tmp = cos(lat) * cos(phi_c)
q = R*acos(sin_tmp + cos_tmp*cos(lon-lamda_c))
s = (d**2)/(d**2 + q**2)
bexpr = s*sin(2*pi*z/L_z)
b0.interpolate(bexpr)
# Initial condition for pressure
p0 = Function(W3).assign(0.0)
# Set up linear variational solver for u-p
# (After eliminating buoyancy)
W = W2 * W3
u, p = TrialFunctions(W)
w, phi = TestFunctions(W)
# Coriolis term
fexpr = 2*Omega*x[2]/R
Vcg = FunctionSpace(mesh, "CG", 1)
f = interpolate(fexpr, Vcg)
# radial unit vector
khat = interpolate(x/sqrt(dot(x, x)), mesh.coordinates.function_space())
a_up = (dot(w, u) + 0.5*dt*dot(w, f*cross(khat, u))
- 0.5*dt*p*div(w)
# Appears after eliminating b
+ (0.5*dt*N)**2*dot(w, khat)*dot(u, khat)
+ phi*p + 0.5*dt*c**2*phi*div(u))*dx
L_up = (dot(w, u0) + 0.5*dt*dot(w, f*cross(khat, u0))
+ 0.5*dt*dot(w, khat*b0)
+ phi*p0)*dx
bcs = [DirichletBC(W.sub(0), 0.0, "bottom"),
DirichletBC(W.sub(0), 0.0, "top")]
w = Function(W)
up_problem = LinearVariationalProblem(a_up, L_up, w, bcs=bcs)
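# Brief reading of the PETSc options below (descriptive note, not part of the original
# script): the coupled u-p system is solved with GMRES preconditioned by a full
# Schur-complement fieldsplit; the velocity block (fieldsplit_0) uses block-Jacobi/ILU,
# while the pressure Schur complement, approximated via 'selfp', is handled by
# smoothed-aggregation multigrid (GAMG) with Chebyshev/block-Jacobi smoothers.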
solver_parameters = {
'pc_type': 'fieldsplit',
'pc_fieldsplit_type': 'schur',
'ksp_type': 'gmres',
'ksp_monitor_true_residual': True,
'pc_fieldsplit_schur_fact_type': 'FULL',
'pc_fieldsplit_schur_precondition': 'selfp',
'fieldsplit_0': {'ksp_type': 'preonly',
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'},
'fieldsplit_1': {'ksp_type': 'cg',
'pc_type': 'gamg',
'pc_gamg_sym_graph': True,
'mg_levels': {'ksp_type': 'chebyshev',
'ksp_chebyshev_esteig': True,
'ksp_max_it': 5,
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'}}
}
up_solver = LinearVariationalSolver(up_problem,
solver_parameters=solver_parameters)
# Buoyancy solver
gamma = TestFunction(Wb)
b = TrialFunction(Wb)
a_b = gamma*b*dx
L_b = dot(gamma*khat, u0)*dx
b_update = Function(Wb)
b_problem = LinearVariationalProblem(a_b, L_b, b_update)
b_solver = LinearVariationalSolver(b_problem,
solver_parameters={'ksp_type': 'cg',
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'})
# Time-loop
t = 0
state = Function(W2 * W3 * Wb, name="state")
un1, pn1, bn1 = state.split()
un1.assign(u0)
pn1.assign(p0)
bn1.assign(b0)
output = File("results/gravity_waves.pvd")
output.write(un1, pn1, bn1)
count = 1
dumpfreq = args.dumpfreq
while t < tmax:
t += dt
# Solve for velocity and pressure updates
up_solver.solve()
un1.assign(w.sub(0))
pn1.assign(w.sub(1))
u0.assign(un1)
p0.assign(pn1)
# Reconstruct buoyancy
b_solver.solve()
bn1.assign(assemble(b0 - 0.5*dt*N**2*b_update))
b0.assign(bn1)
count += 1
if count > dumpfreq:
# Write output
output.write(un1, pn1, bn1)
count -= dumpfreq
|
default_app_config = 'demo.periods.apps.PeriodsConfig'
|
def extractBcnovelsCom(item):
'''
Parser for 'bcnovels.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if 'Manga' in item['tags']:
return None
tagmap = [
('When the Protagonist of a Fanfiction', 'When a Fanfic Protagonist Transmigrated into the Original Novel', 'translated'),
('Reader and Protagonist', 'The Reader and Protagonist Definitely Have to Be in True Love', 'translated'),
('Everyday the Protagonist Wants to Capture Me', 'Everyday the Protagonist Wants to Capture Me', 'translated'),
('Prince\'s Loyal Lover', 'Prince\'s Loyal Lover', 'translated'),
('The Scum Villain’s Self-Saving System', 'The Scum Villain\'s Self-Saving System', 'translated'),
('I Can’t Write Any ‘Below the Neck’ Love Scenes', 'I Can\'t Write Any \'Below the Neck\' Love Scenes', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
import htcondor
import os
import shutil
import subprocess
import sys
from datetime import datetime, timedelta
from pathlib import Path
from .conf import *
from .dagman import DAGMan
JobStatus = [
"NONE",
"IDLE",
"RUNNING",
"REMOVED",
"COMPLETED",
"HELD",
"TRANSFERRING_OUTPUT",
"SUSPENDED",
"JOB_STATUS_MAX"
]
schedd = htcondor.Schedd()
class Job:
"""
A :class:`Job` holds all operations related to HTCondor jobs
"""
@staticmethod
def submit(file, options=None):
# Make sure the specified submit file exists and is readable!
if os.access(file, os.R_OK) is False:
print(f"Error: could not read file {file}")
sys.exit(1)
# If no resource specified, submit job to the local schedd
if "resource" not in options:
submit_file = open(file)
submit_data = submit_file.read()
submit_file.close()
submit_description = htcondor.Submit(submit_data)
# The Job class can only submit a single job at a time
submit_qargs = submit_description.getQArgs()
if submit_qargs != "" and submit_qargs != "1":
print("Error: can only submit one job at a time")
sys.exit(1)
with schedd.transaction() as txn:
try:
cluster_id = submit_description.queue(txn, 1)
print(f"Job {cluster_id} was submitted.")
except Exception as error:
print(f"Error submitting job: {error}")
sys.exit(1)
elif options["resource"] == "slurm":
if "runtime" not in options:
print("Error: Slurm resources must specify a --runtime argument")
sys.exit(1)
if "node_count" not in options:
print("Error: Slurm resources must specify a --node_count argument")
sys.exit(1)
            # Verify that we have Slurm access; if not, run bosco_cluster to create it
try:
subprocess.check_output(["bosco_cluster", "--status", "hpclogin1.chtc.wisc.edu"])
except:
print(f"You need to install support software to access the Slurm cluster. Please run the following command in your terminal:\n\nbosco_cluster --add hpclogin1.chtc.wisc.edu slurm\n")
sys.exit(1)
Path(TMP_DIR).mkdir(parents=True, exist_ok=True)
DAGMan.write_slurm_dag(file, options["runtime"], options["node_count"], options["email"])
os.chdir(TMP_DIR) # DAG must be submitted from TMP_DIR
submit_description = htcondor.Submit.from_dag(str(TMP_DIR / "slurm_submit.dag"))
submit_description["+ResourceType"] = "\"Slurm\""
# The Job class can only submit a single job at a time
submit_qargs = submit_description.getQArgs()
if submit_qargs != "" and submit_qargs != "1":
print("Error: can only submit one job at a time. See the job-set syntax for submitting multiple jobs.")
sys.exit(1)
with schedd.transaction() as txn:
try:
cluster_id = submit_description.queue(txn, 1)
print(f"Job {cluster_id} was submitted.")
except Exception as error:
print(f"Error submitting job: f{error}")
sys.exit(1)
elif options["resource"] == "ec2":
if "runtime" not in options:
print("Error: EC2 resources must specify a --runtime argument")
sys.exit(1)
if "node_count" not in options:
print("Error: EC2 resources must specify a --node_count argument")
sys.exit(1)
Path(TMP_DIR).mkdir(parents=True, exist_ok=True)
DAGMan.write_ec2_dag(file, options["runtime"], options["node_count"], options["email"])
os.chdir(TMP_DIR) # DAG must be submitted from TMP_DIR
submit_description = htcondor.Submit.from_dag("ec2_submit.dag")
submit_description["+ResourceType"] = "\"EC2\""
# The Job class can only submit a single job at a time
submit_qargs = submit_description.getQArgs()
if submit_qargs != "" and submit_qargs != "1":
print("Error: can only submit one job at a time. See the job-set syntax for submitting multiple jobs.")
sys.exit(1)
with schedd.transaction() as txn:
try:
cluster_id = submit_description.queue(txn, 1)
print(f"Job {cluster_id} was submitted.")
except Exception as error:
print(f"Error submitting job: f{error}")
sys.exit(1)
@staticmethod
def status(id, options=None):
"""
Displays the status of a job
"""
job = None
job_status = "IDLE"
resource_type = "htcondor"
try:
job = schedd.query(
constraint=f"ClusterId == {id}",
projection=["JobStartDate", "JobStatus", "LastVacateTime", "ResourceType"]
)
except IndexError:
print(f"No job found for ID {id}.")
sys.exit(0)
except:
print(f"Error looking up job status: {sys.exc_info()[0]}")
sys.exit(1)
if len(job) == 0:
print(f"No job found for ID {id}.")
sys.exit(0)
if "ResourceType" in job[0]:
resource_type = job[0]["ResourceType"].lower()
# Now, produce job status based on the resource type
if resource_type == "htcondor":
            if JobStatus[job[0]['JobStatus']] == "RUNNING":
                job_running_time = datetime.now() - datetime.fromtimestamp(job[0]["JobStartDate"])
                print(f"Job is {JobStatus[job[0]['JobStatus']]} since {job_running_time.seconds//3600}h{(job_running_time.seconds//60)%60}m{job_running_time.seconds%60}s")
            elif JobStatus[job[0]['JobStatus']] == "HELD":
                job_held_time = datetime.now() - datetime.fromtimestamp(job[0]["LastVacateTime"])
                print(f"Job is {JobStatus[job[0]['JobStatus']]} since {job_held_time.seconds//3600}h{(job_held_time.seconds//60)%60}m{job_held_time.seconds%60}s")
else:
print(f"Job is {JobStatus[job[0]['JobStatus']]}")
# Jobs running on provisioned Slurm resources need to retrieve
# additional information from the provisioning DAGMan log
elif resource_type == "slurm" or resource_type == "ec2":
# Variables specific to jobs running on Slurm clusters
jobs_running = 0
job_started_time = None
provisioner_cluster_id = None
provisioner_job_submitted_time = None
slurm_cluster_id = None
slurm_nodes_requested = None
slurm_runtime = None
dagman_dag, dagman_out, dagman_log = DAGMan.get_files(id)
if dagman_dag is None:
print(f"No {resource_type} job found for ID {id}.")
sys.exit(0)
# Parse the .dag file to retrieve some user input values
dagman_dag_file = open(dagman_dag, "r")
for line in dagman_dag_file.readlines():
if "annex_node_count =" in line:
slurm_nodes_requested = line.split("=")[1].strip()
if "annex_runtime =" in line:
slurm_runtime = int(line.split("=")[1].strip())
# Parse the DAGMan event log for useful information
dagman_events = htcondor.JobEventLog(dagman_log)
for event in dagman_events.events(0):
if "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: B":
provisioner_cluster_id = event.cluster
provisioner_job_submitted_time = datetime.fromtimestamp(event.timestamp)
job_status = "PROVISIONING REQUEST PENDING"
elif "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: C":
slurm_cluster_id = event.cluster
elif event.cluster == slurm_cluster_id and event.type == htcondor.JobEventType.EXECUTE:
job_status = "RUNNING"
jobs_running += 1
if job_started_time is None:
job_started_time = datetime.fromtimestamp(event.timestamp)
elif event.cluster == slurm_cluster_id and event.type == htcondor.JobEventType.JOB_TERMINATED:
jobs_running -= 1
if jobs_running == 0:
job_status = "COMPLETE"
elif event.type == htcondor.JobEventType.JOB_HELD or event.type == htcondor.JobEventType.EXECUTABLE_ERROR:
job_status = "ERROR"
# Now that we have all the information we want, display it
current_time = datetime.now()
time_diff = None
if job_status is "PROVISIONING REQUEST PENDING":
time_diff = current_time - provisioner_job_submitted_time
elif job_status is "RUNNING":
time_diff = current_time - job_started_time
print(f"Job is {job_status}", end='')
if time_diff is not None:
print(f" since {round(time_diff.seconds/60)}m{(time_diff.seconds%60)}s")
else:
print("")
else:
print(f"Error: The 'job status' command does not support {resource_type} resources.")
sys.exit(1)
@staticmethod
def resources(id, options=None):
"""
Displays the resources used by a specified job
"""
# If no resource specified, assume job is running on local pool
if "resource" not in options:
try:
job = schedd.query(
constraint=f"ClusterId == {id}",
projection=["RemoteHost"]
)
except IndexError:
print(f"No jobs found for ID {id}.")
sys.exit(0)
except:
print(f"Unable to look up job resources")
sys.exit(1)
if len(job) == 0:
print(f"No jobs found for ID {id}.")
sys.exit(0)
            # TODO: Make this work correctly for jobs that haven't started running yet
job_host = job[0]["RemoteHost"]
print(f"Job is using resource {job_host}")
# Jobs running on provisioned Slurm resources need to retrieve
# additional information from the provisioning DAGMan log
elif options["resource"] == "slurm":
# Internal variables
dagman_cluster_id = None
provisioner_cluster_id = None
slurm_cluster_id = None
# User-facing variables (all values set below are default/initial state)
provisioner_job_submitted_time = None
provisioner_job_scheduled_end_time = None
job_status = "NOT SUBMITTED"
job_started_time = None
jobs_running = 0
slurm_nodes_requested = None
slurm_runtime = None
dagman_dag, dagman_out, dagman_log = DAGMan.get_files(id)
if dagman_dag is None:
print(f"No Slurm job found for ID {id}.")
sys.exit(0)
# Parse the .dag file to retrieve some user input values
dagman_dag_file = open(dagman_dag, "r")
for line in dagman_dag_file.readlines():
if "annex_node_count =" in line:
slurm_nodes_requested = line.split("=")[1].strip()
if "annex_runtime =" in line:
slurm_runtime = int(line.split("=")[1].strip())
# Parse the DAGMan event log for useful information
dagman_events = htcondor.JobEventLog(dagman_log)
for event in dagman_events.events(0):
if "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: B":
provisioner_cluster_id = event.cluster
provisioner_job_submitted_time = datetime.fromtimestamp(event.timestamp)
provisioner_job_scheduled_end_time = datetime.fromtimestamp(event.timestamp + slurm_runtime)
job_status = "PROVISIONING REQUEST PENDING"
if event.cluster == provisioner_cluster_id and event.type == htcondor.JobEventType.EXECUTE:
provisioner_job_started_time = datetime.fromtimestamp(event.timestamp)
provisioner_job_scheduled_end_time = datetime.fromtimestamp(event.timestamp + slurm_runtime)
if "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: C":
slurm_cluster_id = event.cluster
job_started_time = datetime.fromtimestamp(event.timestamp)
if event.cluster == slurm_cluster_id and event.type == htcondor.JobEventType.EXECUTE:
job_status = "RUNNING"
jobs_running += 1
if event.cluster == slurm_cluster_id and (event.type == htcondor.JobEventType.JOB_TERMINATED or event.type == htcondor.JobEventType.JOB_EVICTED):
jobs_running -= 1
if jobs_running == 0:
job_status = "COMPLETE"
if event.type == htcondor.JobEventType.JOB_HELD or event.type == htcondor.JobEventType.EXECUTABLE_ERROR:
job_status = "ERROR"
# Now that we have all the information we want, display it
if job_status is "PROVISIONING REQUEST PENDING":
print(f"Job is still waiting for {slurm_nodes_requested} Slurm nodes to provision")
elif job_status is "RUNNING":
print(f"Job is running on {jobs_running}/{slurm_nodes_requested} requested Slurm nodes")
elif job_status is "ERROR":
print(f"An error occurred provisioning Slurm resources")
# Show information about time remaining
if job_status is "RUNNING" or job_status is "COMPLETE":
current_time = datetime.now()
if current_time < provisioner_job_scheduled_end_time:
time_diff = provisioner_job_scheduled_end_time - current_time
print(f"Slurm resources are reserved for another {round(time_diff.seconds/60)}m{(time_diff.seconds%60)}s")
else:
time_diff = current_time - provisioner_job_scheduled_end_time
print(f"Slurm resources were terminated since {round(time_diff.seconds/60)}m{(time_diff.seconds%60)}s")
|
# Package imports
import numpy as np
import matplotlib.pyplot as plt
from testCases_v2 import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
# %matplotlib inline  (IPython/Jupyter magic; keep commented out when running as a plain script)
np.random.seed(1)
X, Y = load_planar_dataset()
# Train the logistic regression classifier
clf = sklearn.linear_model.LogisticRegressionCV();
clf.fit(X.T, Y.T);
# Plot the decision boundary for logistic regression
plot_decision_boundary(lambda x: clf.predict(x), X, Y)
plt.title("Logistic Regression")
# Print accuracy
LR_predictions = clf.predict(X.T)
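# Note on the accuracy expression below: np.dot(Y, LR_predictions) counts the true
# positives (label and prediction both 1) and np.dot(1 - Y, 1 - LR_predictions) the
# true negatives (both 0); their sum divided by Y.size, times 100, is percent accuracy.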
print ('Accuracy of logistic regression: %d ' % float((np.dot(Y,LR_predictions) + np.dot(1-Y,1-LR_predictions))/float(Y.size)*100) +
'% ' + "(percentage of correctly labelled datapoints)")
|
# Copyright © 2021 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the business-filing end-point.
Test-Suite to ensure that the /businesses endpoint is working as expected.
"""
import copy
import datetime
from http import HTTPStatus
from registry_schemas.example_data import (
COURT_ORDER_FILING_TEMPLATE,
REGISTRARS_NOTATION_FILING_TEMPLATE,
REGISTRARS_ORDER_FILING_TEMPLATE,
)
from legal_api.models import Business, Filing
from legal_api.services.authz import STAFF_ROLE
from tests import integration_payment
from tests.unit.models import factory_business, factory_business_mailing_address
from tests.unit.services.utils import create_header
@integration_payment
def test_filing_court_order(client, jwt, session):
"""Assert that a valid court order filing can be posted."""
identifier = 'BC1156638'
b = factory_business(identifier, datetime.datetime.utcnow(), None, Business.LegalTypes.COMP.value)
factory_business_mailing_address(b)
filing = copy.deepcopy(COURT_ORDER_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.CREATED
assert rv.json['filing']['header']['name'] == 'courtOrder'
filing = Filing.get_filing_by_payment_token(rv.json['filing']['header']['paymentToken'])
assert filing
assert filing.status == Filing.Status.PENDING.value
def test_filing_court_order_validation(client, jwt, session):
"""Assert that a court order filing can be validated."""
identifier = 'BC1156638'
b = factory_business(identifier, datetime.datetime.utcnow(), None, Business.LegalTypes.COMP.value)
factory_business_mailing_address(b)
filing = copy.deepcopy(COURT_ORDER_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
filing['filing']['courtOrder']['fileNumber'] = ''
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.UNPROCESSABLE_ENTITY
filing = copy.deepcopy(COURT_ORDER_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
filing['filing']['courtOrder']['orderDetails'] = ''
filing['filing']['courtOrder']['effectOfOrder'] = ''
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.BAD_REQUEST
assert rv.json['errors'] == [{'error': 'Court Order is required.', 'path': '/filing/courtOrder/orderDetails'}]
filing = copy.deepcopy(COURT_ORDER_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
filing['filing']['courtOrder']['effectOfOrder'] = 'invalid'
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.BAD_REQUEST
assert rv.json['errors'] == [{'error': 'Invalid effectOfOrder.', 'path': '/filing/courtOrder/effectOfOrder'}]
@integration_payment
def test_filing_registrars_notation(client, jwt, session):
"""Assert that a valid registrars notation filing can be posted."""
identifier = 'BC1156638'
b = factory_business(identifier, datetime.datetime.utcnow(), None, Business.LegalTypes.COMP.value)
factory_business_mailing_address(b)
filing = copy.deepcopy(REGISTRARS_NOTATION_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.CREATED
assert rv.json['filing']['header']['name'] == 'registrarsNotation'
filing = Filing.get_filing_by_payment_token(rv.json['filing']['header']['paymentToken'])
assert filing
assert filing.status == Filing.Status.PENDING.value
filing = copy.deepcopy(REGISTRARS_NOTATION_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
filing['filing']['registrarsNotation']['effectOfOrder'] = ''
filing['filing']['registrarsNotation']['fileNumber'] = ''
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.CREATED
def test_filing_registrars_notation_validation(client, jwt, session):
"""Assert that a registrars notation filing can be validated."""
identifier = 'BC1156638'
b = factory_business(identifier, datetime.datetime.utcnow(), None, Business.LegalTypes.COMP.value)
factory_business_mailing_address(b)
filing = copy.deepcopy(REGISTRARS_NOTATION_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
filing['filing']['registrarsNotation']['orderDetails'] = ''
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.UNPROCESSABLE_ENTITY
filing = copy.deepcopy(REGISTRARS_NOTATION_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
filing['filing']['registrarsNotation']['effectOfOrder'] = 'planOfArrangement'
filing['filing']['registrarsNotation']['fileNumber'] = ''
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.BAD_REQUEST
assert rv.json['errors'] == [{
'error': 'Court Order Number is required when this filing is pursuant to a Plan of Arrangement.',
'path': '/filing/registrarsNotation/fileNumber'}]
filing = copy.deepcopy(REGISTRARS_NOTATION_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
filing['filing']['registrarsNotation']['effectOfOrder'] = 'invalid'
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.BAD_REQUEST
assert rv.json['errors'] == [{'error': 'Invalid effectOfOrder.',
'path': '/filing/registrarsNotation/effectOfOrder'}]
@integration_payment
def test_filing_registrars_order(client, jwt, session):
"""Assert that a valid registrars order filing can be posted."""
identifier = 'BC1156638'
b = factory_business(identifier, datetime.datetime.utcnow(), None, Business.LegalTypes.COMP.value)
factory_business_mailing_address(b)
filing = copy.deepcopy(REGISTRARS_ORDER_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.CREATED
assert rv.json['filing']['header']['name'] == 'registrarsOrder'
filing = Filing.get_filing_by_payment_token(rv.json['filing']['header']['paymentToken'])
assert filing
assert filing.status == Filing.Status.PENDING.value
filing = copy.deepcopy(REGISTRARS_ORDER_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
filing['filing']['registrarsOrder']['effectOfOrder'] = ''
filing['filing']['registrarsOrder']['fileNumber'] = ''
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.CREATED
def test_filing_registrars_order_validation(client, jwt, session):
"""Assert that a registrars order filing can be validated."""
identifier = 'BC1156638'
b = factory_business(identifier, datetime.datetime.utcnow(), None, Business.LegalTypes.COMP.value)
factory_business_mailing_address(b)
filing = copy.deepcopy(REGISTRARS_ORDER_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
filing['filing']['registrarsOrder']['orderDetails'] = ''
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.UNPROCESSABLE_ENTITY
filing = copy.deepcopy(REGISTRARS_ORDER_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
filing['filing']['registrarsOrder']['effectOfOrder'] = 'planOfArrangement'
filing['filing']['registrarsOrder']['fileNumber'] = ''
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.BAD_REQUEST
assert rv.json['errors'] == [{
'error': 'Court Order Number is required when this filing is pursuant to a Plan of Arrangement.',
'path': '/filing/registrarsOrder/fileNumber'}]
filing = copy.deepcopy(REGISTRARS_ORDER_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
filing['filing']['registrarsOrder']['effectOfOrder'] = 'invalid'
rv = client.post(f'/api/v1/businesses/{identifier}/filings',
json=filing,
headers=create_header(jwt, [STAFF_ROLE], None))
assert rv.status_code == HTTPStatus.BAD_REQUEST
assert rv.json['errors'] == [{'error': 'Invalid effectOfOrder.', 'path': '/filing/registrarsOrder/effectOfOrder'}]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('users', '0013_userprofile_timezone'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='task_last_view_date',
field=models.DateTimeField(default=datetime.datetime(2018, 11, 12, 11, 17, 44, 268976, tzinfo=utc), auto_now_add=True),
preserve_default=False,
),
]
|
# Taiwo Kareem
# 05/04/2018 04:09 AM
import sys
import requests
import re
import time
import os
import json
from urllib.parse import urljoin # Python3
if len(sys.argv) != 2:
sys.exit('Usage: python %s "<config_filename>"' % __file__)
config_file = sys.argv[1]
config = {}
if os.path.isfile(config_file):
with open(config_file) as f:
try:
config = json.load(f)
except json.decoder.JSONDecodeError as err:
sys.exit("ERROR: Invalid json data in file. %s" % err)
else:
sys.exit("Config file: '%s' not found" % config_file)
URL = config.get("domain")
filename = config.get("output_filename")
URL_REGEX = config.get("path_regex")
KEYWORD_REGEX = config.get("keyword_regex")
AUTH = config.get("login")
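# Hypothetical example of the expected config file layout, inferred only from the
# keys read above; the values below are placeholders, not from a real setup:
# {
#     "domain": "https://example.com/",
#     "output_filename": "matches.txt",
#     "path_regex": "contact|about|staff",
#     "keyword_regex": "([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+)",
#     "login": "username:password"
# }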
s = requests.Session()
s.headers.update({
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:27.0) Gecko/20100101 Firefox/27.0',
})
if AUTH and AUTH != "username:password":
AUTH = tuple(AUTH.split(":"))
s.auth = AUTH
s.post(URL, auth=AUTH)
opened=[]
visited=[]
hits=0
mode="w"
MATCHES = []
if os.path.isfile(filename):
with open(filename) as f:
visited = f.read().split("\n")
mode = "a"
with open(filename, mode) as f:
def process(url, visited=visited, hits=hits, s=s):
LINKS = []
        if url not in opened:
            opened.append(url)
text = s.get(url).text
for link in re.findall(r'href="(.*?)"', text):
link = urljoin(url, link).split("#")[0]#.split("?")[0]
LINKS.append(link.lower())
for link in list(set(LINKS)):
if link.startswith(URL):
if link not in visited:
if re.search(URL_REGEX, link, re.I):
source = s.get(link).text
# ([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)
matches = set(re.findall(r"{0}".format(KEYWORD_REGEX), source, re.I))
if matches:
hits += 1
print("\n[%s] (%s/%s) -> %s" % (len(matches), hits, len(visited), link))
else:
matches = []
for email in matches:
if email not in MATCHES:
print(email.lower())
f.write(email.lower() + "\n")
f.flush()
MATCHES.append(email)
try:
visited.append(link)
except:
time.sleep(3)
else:
print(".", end="", flush=True)
try:
process(link, hits=hits)
except Exception as e:
time.sleep(3)
print("\n--", e)
try:
process(URL, hits=hits)
except Exception as e:
print(e)
time.sleep(3)
|
import gobject
import logging
import os
import traceback
from glob import glob
# Victron packages
from ve_utils import exit_on_error
from delegates.base import SystemCalcDelegate
class RelayState(SystemCalcDelegate):
RELAY_GLOB = '/dev/gpio/relay_*'
FUNCTION_ALARM = 0
FUNCTION_MANUAL = 2
FUNCTION_BMS_STOPCHARGE = 4
FUNCTION_BMS_STOPDISCHARGE = 5
def __init__(self):
SystemCalcDelegate.__init__(self)
self._relays = {}
def get_input(self):
return [
('com.victronenergy.settings', [
'/Settings/Relay/Function',
'/Settings/Relay/1/Function'])] # Managed by the gui
def get_settings(self):
return [
('/Relay/0/State', '/Settings/Relay/0/InitialState', 0, 0, 1),
('/Relay/1/State', '/Settings/Relay/1/InitialState', 0, 0, 1)
]
@property
def relay_function(self):
return self._dbusmonitor.get_value('com.victronenergy.settings',
'/Settings/Relay/Function')
@property
def relay1_function(self):
return self._dbusmonitor.get_value('com.victronenergy.settings',
'/Settings/Relay/1/Function')
def set_sources(self, dbusmonitor, settings, dbusservice):
SystemCalcDelegate.set_sources(self, dbusmonitor, settings, dbusservice)
relays = sorted(glob(self.RELAY_GLOB))
if len(relays) == 0:
logging.info('No relays found')
return
self._relays.update({'/Relay/{}/State'.format(i): os.path.join(r, 'value') \
for i, r in enumerate(relays) })
gobject.idle_add(exit_on_error, self._init_relay_state)
for dbus_path in self._relays.iterkeys():
self._dbusservice.add_path(dbus_path, value=None, writeable=True,
onchangecallback=self._on_relay_state_changed)
logging.info('Relays found: {}'.format(', '.join(self._relays.values())))
def set_relay(self, dbus_path, state):
self._dbusservice[dbus_path] = state
self.__on_relay_state_changed(dbus_path, state)
def set_function(self, func, state):
""" Find a relay bound to the relevant function, and set state. """
for p, r in ((self.relay_function, '/Relay/0/State'),
(self.relay1_function, '/Relay/1/State')):
if p == func and self._dbusservice[r] != state:
self.set_relay(r, state)
def _init_relay_state(self):
if self.relay_function is None:
return True # Try again on the next idle event
for dbus_path, path in self._relays.iteritems():
if dbus_path == '/Relay/0/State' and self.relay_function != self.FUNCTION_MANUAL:
continue # Skip primary relay if function is not manual
if dbus_path == '/Relay/1/State' and self.relay1_function != self.FUNCTION_MANUAL:
continue # Skip secondary relay if function is not manual
try:
state = self._settings[dbus_path]
except KeyError:
pass
else:
self.set_relay(dbus_path, state)
# Sync state back to dbus
self._update_relay_state()
# Watch changes and update dbus. Do we still need this?
gobject.timeout_add(5000, exit_on_error, self._update_relay_state)
return False
def _update_relay_state(self):
# @todo EV Do we still need this? Maybe only at startup?
for dbus_path, file_path in self._relays.items():
try:
with open(file_path, 'rt') as r:
state = int(r.read().strip())
self._dbusservice[dbus_path] = state
except (IOError, ValueError):
traceback.print_exc()
return True
def __on_relay_state_changed(self, dbus_path, state):
try:
path = self._relays[dbus_path]
with open(path, 'wt') as w:
w.write(str(state))
except IOError:
traceback.print_exc()
return False
return True
def _on_relay_state_changed(self, dbus_path, value):
try:
state = int(bool(value))
except ValueError:
traceback.print_exc()
return False
try:
return self.__on_relay_state_changed(dbus_path, state)
finally:
# Remember the state to restore after a restart
self._settings[dbus_path] = state
|
"""
Session project IFT780
Date:
Authors: Alexandre Turpin, Quentin Levieux and Adrien Verdier
License: Opensource, free to use
Other: This class is used to create our different transforms to
modify our dataset (for data augmentation, for example)
"""
import torchvision.transforms as transforms
class DataTransforms():
"""
Class used to create our different transforms for data augmentation
"""
def __init__(self, data_aug=False, model_name="", dataset_name=""):
"""
Args:
            data_aug: Boolean that says if data augmentation is activated
            model_name: Name of the model the transforms are built for (e.g. "LeNet")
            dataset_name: Name of the dataset ("mnist" or "cifar10")
"""
self.data_aug=data_aug
self.model_name=model_name
self.dataset_name=dataset_name
def get_transforms(self):
"""
This method defines the transform that we will use on our datasets
Args:
Returns:
train_transform : Transforms used on the train data
test_transform : Transforms used on the test data
"""
if self.data_aug :
# We create the transforms if data augmentation is activated
if self.dataset_name == "mnist":
if self.model_name == "AlexNet":
test_transform = transforms.Compose([
transforms.Resize((227, 227)),
transforms.ToTensor(),
transforms.Normalize((0.5), (0.5))
])
train_transform = transforms.Compose([
transforms.Resize((227, 227)),
transforms.ToTensor(),
transforms.Normalize((0.5), (0.5)),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(10),
transforms.RandomCrop((227,227), padding=4)
])
elif self.model_name == "VGGNet" or self.model_name == "ResNet" or self.model_name == "ResNext":
test_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize((0.5), (0.5))
])
train_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize((0.5), (0.5)),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(10),
transforms.RandomCrop((224,224), padding=4)
])
elif self.model_name == "LeNet":
test_transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.5), (0.5))
])
train_transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.5), (0.5)),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(10),
transforms.RandomCrop((32, 32), padding=4)
])
elif self.dataset_name == "cifar10":
if self.model_name == "AlexNet":
test_transform = transforms.Compose([
transforms.Resize((227, 227)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
train_transform = transforms.Compose([
transforms.Resize((227, 227)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=.20),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(10),
transforms.RandomCrop((227,227), padding=4)
])
elif self.model_name == "VGGNet" or self.model_name == "ResNet" or self.model_name == "ResNext":
test_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
train_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=.20),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(10),
transforms.RandomCrop((224,224), padding=4)
])
elif self.model_name == "LeNet":
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
train_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(10),
transforms.RandomCrop((32, 32), padding=4)
])
return train_transform, test_transform
else :
# We create the transforms if data augmentation is not activated
if self.dataset_name == "mnist":
if self.model_name == "AlexNet":
base_transform = transforms.Compose([
transforms.Resize((227, 227)),
transforms.ToTensor(),
transforms.Normalize((0.5), (0.5))
])
elif self.model_name == "VGGNet" or self.model_name == "ResNet" or self.model_name == "ResNext":
base_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize((0.5), (0.5))
])
elif self.model_name == "LeNet":
base_transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize(mean=0.5, std=0.5)
])
elif self.dataset_name == "cifar10":
if self.model_name == "AlexNet":
base_transform = transforms.Compose([
transforms.Resize((227, 227)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
elif self.model_name == "VGGNet" or self.model_name == "ResNet" or self.model_name == "ResNext":
base_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
elif self.model_name == "LeNet":
base_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
return base_transform, base_transform
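# Minimal usage sketch (not part of the original project): build the LeNet/MNIST
# transforms defined above and apply them to a torchvision dataset. The dataset
# root "./data" is an assumption for illustration, and the random transforms are
# composed after ToTensor, so a torchvision version that accepts tensors is assumed.
if __name__ == "__main__":
    from torchvision.datasets import MNIST
    train_transform, test_transform = DataTransforms(
        data_aug=True, model_name="LeNet", dataset_name="mnist").get_transforms()
    train_set = MNIST(root="./data", train=True, download=True,
                      transform=train_transform)
    image, label = train_set[0]
    print(image.shape, label)  # expected: a 1x32x32 tensor and an int label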
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interface and registry for jobsub "forms".
"""
import logging
LOG = logging.getLogger(__name__)
class UnimplementedException(Exception):
pass
registry = {}
class JobSubForm(object):
"""
JobSubForms should inherit from this class so that
they can be registered. This follows the pattern from
http://effbot.org/zone/metaclass-plugins.htm
"""
class __metaclass__(type):
def __init__(cls, clsname, bases, dict):
global registry
type.__init__(cls, clsname, bases, dict)
if clsname == "JobSubForm":
return
name = dict["name"]
if name in registry:
raise Exception("Multiply defined form type %s: %s." % (clsname, name))
LOG.info("Registered jobsub plugin: %s->%s" % (name, clsname))
registry[name] = cls
class JobSubFormInterface(object):
"""
A JobSubForm allows developers to create UIs for their
Hadoop applications. It is responsible for
rendering an HTML form, and preparing a submission
to the jobsubd daemon.
The general flow for editing and saving is:
1) Present the form
(new) __init__() -> render_edit()
(edit) __init__(string_repr=...) -> render_edit()
2) Handle the POST
__init__() -> is_valid_edit(post_data) -> serialize_to_string()
\_-> render_edit()
And the flow for submission is
1) Present the parameterization
__init__(string_repr) -> render_parameterization()
2) Handle the POST
__init__(string_repr) -> is_valid_parameterization(post_data) -> submit()
\_-> render_parameterization()
Note that both flows may be implemented by mixing in with
DjangoFormBasedEditForm and BasicParameterizationForm,
in which case all you need to implement is render() and
to_job_submission_plan().
"""
def render(self):
"""
Renders an HTML snippet corresponding to the form.
This does not include the <form> tag, nor the submit
buttons.
"""
raise UnimplementedException()
    def post(self, data):
"""
Updates its state according to form data.
Returns True if the form is valid, False otherwise.
"""
raise UnimplementedException()
def serialize_to_string(self):
"""
Saves its internal state to a string.
"""
raise UnimplementedException()
def deserialize_from_string(self):
"""
Restores its internal state from a string.
"""
raise UnimplementedException()
def parameterization_form(self):
"""
Returns an HTML snippet corresponding to
the parameterization necessary for job submission.
"""
raise UnimplementedException()
def to_job_submission_steps(self, job_design_name):
"""
Creates a JobSubmission from itself.
Data is the post data from parameterization_form.
"""
raise UnimplementedException()
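# Hypothetical illustration (not part of the original module) of how a plugin
# hooks into the registry described above: any subclass of JobSubForm that
# defines a "name" attribute is registered by the metaclass at class-creation
# time, e.g.
#
#   class EchoForm(JobSubForm):
#       name = "echo"              # key under which the metaclass registers it
#
#   assert registry["echo"] is EchoForm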
|
# Copyright 2018 qyou@nlpr.ia.ac.cn
import os
from deeppavlov.core.common.registry import register
from deeppavlov.core.models.component import Component
@register('segment')
class Segment(Component):
def __init__(self,
user_dict_path=None,
vocab=None,
**kwargs):
self.user_dict_path = user_dict_path
self.vocab = vocab
def exists_user_dict(self):
return self.user_dict_path and os.path.exists(self.user_dict_path)
def __call__(self, text, *args, **kwargs):
        raise NotImplementedError('You should use a concrete word segmentation class')
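# Hypothetical concrete segmenter (not part of the original module), sketched
# only to show how the base class above is meant to be subclassed; it simply
# splits on whitespace.
@register('whitespace_segment')
class WhitespaceSegment(Segment):
    def __call__(self, text, *args, **kwargs):
        # Accept either a single string or a batch (list) of strings.
        if isinstance(text, str):
            return text.split()
        return [t.split() for t in text]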
|
#!/usr/bin/python
import re
import smbus
import logging
# ===========================================================================
# Adafruit_I2C Class
# ===========================================================================
logger = logging.getLogger(__name__)
class Adafruit_I2C(object):
def __init__(self, address, busnum=1, debug=False):
self.address = address
self.bus = smbus.SMBus(busnum)
logger.debug(f"I2C: init bus {busnum}; address {address}")
def reverseByteOrder(self, data):
"Reverses the byte order of an int (16-bit) or long (32-bit) value"
# Courtesy Vishal Sapre
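        # Worked example (illustrative): reverseByteOrder(0x1234) -> 0x3412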
byteCount = len(hex(data)[2:].replace('L','')[::2])
val = 0
for i in range(byteCount):
val = (val << 8) | (data & 0xff)
data >>= 8
return val
def errMsg(self):
logger.error(f"Error accessing 0x{self.address:02X} : Check your I2C address" )
return -1
def write8(self, reg, value):
"Writes an 8-bit value to the specified register/address"
try:
self.bus.write_byte_data(self.address, reg, value)
logger.debug(f"I2C: Wrote 0x{value:02X} to register 0x{reg:02X}02X")
except IOError as err:
return self.errMsg()
def write16(self, reg, value):
"Writes a 16-bit value to the specified register/address pair"
try:
self.bus.write_word_data(self.address, reg, value)
logger.debug(f"I2C: Wrote 0x{value:02X} to register pair 0x{reg:02X} 0x{reg+1:02X}")
except IOError as err:
return self.errMsg()
def writeRaw8(self, value):
"Writes an 8-bit value on the bus"
try:
self.bus.write_byte(self.address, value)
logger.debug(f"I2C: Wrote 0x{value:02X}")
except IOError as err:
return self.errMsg()
def writeList(self, reg, list):
"Writes an array of bytes using I2C format"
try:
logger.debug (f"I2C: Writing list to register 0x{reg:02X}")
logger.debug (list)
self.bus.write_i2c_block_data(self.address, reg, list)
except IOError as err:
return self.errMsg()
def readList(self, reg, length):
"Read a list of bytes from the I2C device"
try:
results = self.bus.read_i2c_block_data(self.address, reg, length)
logger.debug (f"I2C: Device 0x{self.address:02X} returned the following from reg 0x{reg:02X}" )
logger.debug (results)
return results
except IOError as err:
return self.errMsg()
def readU8(self, reg):
"Read an unsigned byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
logger.debug (f"I2C: Device 0x{self.address:02X} returned 0x{result & 0xFF:02X} from reg 0x{reg:02X}" )
return result
except IOError as err:
return self.errMsg()
def readS8(self, reg):
"Reads a signed byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if result > 127:
result -= 256
logger.debug (f"I2C: Device 0x{self.address:02X} returned 0x{result & 0xFF:02X} from reg 0x{reg:02X}" )
return result
except IOError as err:
return self.errMsg()
def readU16(self, reg, little_endian=True):
"Reads an unsigned 16-bit value from the I2C device"
try:
result = self.bus.read_word_data(self.address,reg)
# Swap bytes if using big endian because read_word_data assumes little
# endian on ARM (little endian) systems.
if not little_endian:
result = ((result << 8) & 0xFF00) + (result >> 8)
logger.debug (f"I2C: Device 0x{self.address:02X} returned 0x{result & 0xFFFF:02X} from reg 0x{reg:02X}" )
return result
except IOError as err:
return self.errMsg()
def readS16(self, reg, little_endian=True):
"Reads a signed 16-bit value from the I2C device"
try:
result = self.readU16(reg,little_endian)
if result > 32767: result -= 65536
return result
except IOError as err:
return self.errMsg()
if __name__ == '__main__':
try:
bus = Adafruit_I2C(address=0)
print ("Default I2C bus is accessible")
except:
print ("Error accessing default I2C bus")
|
"""
Calculate light propagation through optical fibers.
For full documentation see <https://ofiber.readthedocs.io>
Info about calculating simple optical fibers parameters::
help(ofiber.basics)
Info about modes and other characteristics of cylindrical fibers::
help(ofiber.cylinder_step)
Info about material and waveguide dispersion::
help(ofiber.dispersion)
Info about noise calculations relevant to optical communications::
    help(ofiber.noise)
Info about modes in planar waveguides with step index profiles::
help(ofiber.planar_step)
Info about modes in planar waveguides with parabolic index profiles::
help(ofiber.planar_parabolic)
Info about refractive index of glasses::
help(ofiber.refraction)
"""
from .basics import *
from .cylinder_step import *
from .dispersion import *
from .graded_index import *
from .noise import *
from .planar_parabolic import *
from .planar_step import *
from .refraction import *
|
from nesbi.core import Nesbi
def get_monitoring_interfaces(device):
monitoring_interfaces = list()
interfaces_list = device.get('interface_list')
for interface in interfaces_list:
if device.get(interface).get('description') == "##UPLINK##":
monitoring_interfaces.append(interface)
return monitoring_interfaces
def main():
config_file = 'config.yaml'
attr_functions = [get_monitoring_interfaces]
nesbi = Nesbi(config_file, attr_functions=attr_functions, nesbi_dry_run=True,
                  nesbi_username="usr", nesbi_password="pw")
nesbi.generate_import_data()
nesbi.process_import_data()
if __name__ == "__main__":
main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Sn1pebot
#
# Initialization
#
# Sn1pebot Module Initialization
#
# Copyright (c) 2014 Geoffrey "GEOFBOT" Mon (User:Sn1per)
# Distributed under the MIT License
#
# Requires Pywikibot framework (core version)
import pywikibot
from sn1pebot.templatedata import *
from sn1pebot.dupecite import *
class Bot():
def __init__(self, site, username):
self.site = site
self.username = username
def isRun(self, page=""):
runpage = ""
if page != "":
runpage = pywikibot.Page(self.site, "User:" + self.username + "/Run/" + page + ".js")
else:
runpage = pywikibot.Page(self.site, "User:" + self.username + "/Run.js")
        return runpage.exists() and runpage.get() == "true"
|
from . import _init_playingwithfusion
from .version import version as __version__
# autogenerated by 'robotpy-build create-imports playingwithfusion playingwithfusion._playingwithfusion'
from ._playingwithfusion import CANVenom, TMD37003, TimeOfFlight
__all__ = ["CANVenom", "TMD37003", "TimeOfFlight"]
del _init_playingwithfusion
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField, DateField, IntegerField, DecimalField, TextAreaField
from wtforms.validators import DataRequired
class DbestParametersForm(FlaskForm):
#selecting time series
dataset_name = SelectField('Dataset', choices=[("NASA/GIMMS/3GV0", "GIMMS 8000m"), ("MODIS/006/MOD13Q1_NDVI", "MODIS NDVI 250 m"),
("MODIS/006/MOD13Q1_EVI", "MODIS EVI 250m")])
user_dataset_name = StringField('Own dataset/ GEE asset')
date_from = DateField('Date from', format='%Y-%m-%d', validators=[DataRequired()])
date_to = DateField('Date to', format='%Y-%m-%d', validators=[DataRequired()])
coordinates = DecimalField('Coordinates', validators=[DataRequired()])
#DBEST parameters
data_type = SelectField('Data type', choices=[('cyclical', 'cyclical'), ('non-cyclical', 'non-cyclical')])
algorithm = SelectField('Algorithm', choices=[('change detection', 'change detection'), ('generalization', 'generalization')])
breakpoint_no = IntegerField('Number of breakpoints', default=3)
seasonality = IntegerField('Seasonality', default=12)
first_level_shift = DecimalField('First level shift value', rounding=None, places=3, default=0.1)
second_level_shift = DecimalField('Second level shift value', rounding=None, places=3, default=0.2)
distance = StringField('Distance threshold', default='default')
duration = IntegerField('Duration', default=24)
alpha = DecimalField('Alpha', rounding=None, places=2, default=0.05)
save_ts_to_csv = SelectField('Save time series to file (time_series.csv)', choices=[(False, 'No'), (True, 'Yes')], default=False)
save_result_to_csv = SelectField('Save result to file (DBEST_result.csv)', choices=[(False, 'No'), (True, 'Yes')], default=False)
submit = SubmitField('Submit')
class PolyTrendParametersForm(FlaskForm):
#selecting time series
date_description = TextAreaField(u'For MODIS no earlier than 2000-03-01, for GIMMS 1981-07-01')
dataset_name = SelectField('Dataset', choices=[("NASA/GIMMS/3GV0", "GIMMS 8000m"), ("MODIS/006/MOD13Q1_NDVI", "MODIS NDVI 250 m"),
("MODIS/006/MOD13Q1_EVI", "MODIS EVI 250m")], validators=[DataRequired()])
user_dataset_name = StringField('Own dataset/ GEE asset')
date_from = DateField('Date from', format='%d-%m-%Y', validators=[DataRequired()])
date_to = DateField('Date to', format='%d-%m-%Y', validators=[DataRequired()])
coordinates = DecimalField('Coordinates', validators=[DataRequired()])
#PolyTrend parameters
alpha = DecimalField('Alpha', rounding=None, places=2, default=0.05)
save_ts_to_csv = SelectField('Save time series to file (time_series.csv)', choices=[(False, 'No'), (True, 'Yes')], default=False)
save_result_to_csv = SelectField('Save result to file PolyTrend_result.csv', choices=[(False, 'No'), (True, 'Yes')], default=False)
submit = SubmitField('Submit')
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for Kraus quantum channel representation class."""
import unittest
import numpy as np
from numpy.testing import assert_allclose
from qiskit import QiskitError
from qiskit.quantum_info.states import DensityMatrix
from qiskit.quantum_info import Kraus
from .channel_test_case import ChannelTestCase
class TestKraus(ChannelTestCase):
"""Tests for Kraus channel representation."""
def test_init(self):
"""Test initialization"""
# Initialize from unitary
chan = Kraus(self.UI)
assert_allclose(chan.data, [self.UI])
self.assertEqual(chan.dim, (2, 2))
# Initialize from Kraus
chan = Kraus(self.depol_kraus(0.5))
assert_allclose(chan.data, self.depol_kraus(0.5))
self.assertEqual(chan.dim, (2, 2))
# Initialize from Non-CPTP
kraus_l, kraus_r = [self.UI, self.UX], [self.UY, self.UZ]
chan = Kraus((kraus_l, kraus_r))
assert_allclose(chan.data, (kraus_l, kraus_r))
self.assertEqual(chan.dim, (2, 2))
# Initialize with redundant second op
chan = Kraus((kraus_l, kraus_l))
assert_allclose(chan.data, kraus_l)
self.assertEqual(chan.dim, (2, 2))
# Initialize from rectangular
kraus = [np.zeros((4, 2))]
chan = Kraus(kraus)
assert_allclose(chan.data, kraus)
self.assertEqual(chan.dim, (2, 4))
# Wrong input or output dims should raise exception
self.assertRaises(
QiskitError, Kraus, kraus, input_dims=4, output_dims=4)
def test_circuit_init(self):
"""Test initialization from a circuit."""
circuit, target = self.simple_circuit_no_measure()
op = Kraus(circuit)
target = Kraus(target)
self.assertEqual(op, target)
def test_circuit_init_except(self):
"""Test initialization from circuit with measure raises exception."""
circuit = self.simple_circuit_with_measure()
self.assertRaises(QiskitError, Kraus, circuit)
def test_equal(self):
"""Test __eq__ method"""
kraus = [self.rand_matrix(2, 2) for _ in range(2)]
self.assertEqual(Kraus(kraus), Kraus(kraus))
def test_copy(self):
"""Test copy method"""
mat = np.eye(4)
orig = Kraus(mat)
cpy = orig.copy()
cpy._data[0][0][0, 0] = 0.0
self.assertFalse(cpy == orig)
def test_is_cptp(self):
"""Test is_cptp method."""
self.assertTrue(Kraus(self.depol_kraus(0.5)).is_cptp())
self.assertTrue(Kraus(self.UX).is_cptp())
# Non-CPTP should return false
self.assertFalse(Kraus(([self.UI], [self.UX])).is_cptp())
self.assertFalse(Kraus(([self.UI, self.UX])).is_cptp())
def test_conjugate(self):
"""Test conjugate method."""
kraus_l, kraus_r = self.rand_kraus(2, 4, 4), self.rand_kraus(2, 4, 4)
# Single Kraus list
targ = Kraus([np.conjugate(k) for k in kraus_l])
chan1 = Kraus(kraus_l)
chan = chan1.conjugate()
self.assertEqual(chan, targ)
self.assertEqual(chan.dim, (2, 4))
# Double Kraus list
targ = Kraus(([np.conjugate(k) for k in kraus_l],
[np.conjugate(k) for k in kraus_r]))
chan1 = Kraus((kraus_l, kraus_r))
chan = chan1.conjugate()
self.assertEqual(chan, targ)
self.assertEqual(chan.dim, (2, 4))
def test_transpose(self):
"""Test transpose method."""
kraus_l, kraus_r = self.rand_kraus(2, 4, 4), self.rand_kraus(2, 4, 4)
# Single Kraus list
targ = Kraus([np.transpose(k) for k in kraus_l])
chan1 = Kraus(kraus_l)
chan = chan1.transpose()
self.assertEqual(chan, targ)
self.assertEqual(chan.dim, (4, 2))
# Double Kraus list
targ = Kraus(([np.transpose(k) for k in kraus_l],
[np.transpose(k) for k in kraus_r]))
chan1 = Kraus((kraus_l, kraus_r))
chan = chan1.transpose()
self.assertEqual(chan, targ)
self.assertEqual(chan.dim, (4, 2))
def test_adjoint(self):
"""Test adjoint method."""
kraus_l, kraus_r = self.rand_kraus(2, 4, 4), self.rand_kraus(2, 4, 4)
# Single Kraus list
targ = Kraus([np.transpose(k).conj() for k in kraus_l])
chan1 = Kraus(kraus_l)
chan = chan1.adjoint()
self.assertEqual(chan, targ)
self.assertEqual(chan.dim, (4, 2))
# Double Kraus list
targ = Kraus(([np.transpose(k).conj() for k in kraus_l],
[np.transpose(k).conj() for k in kraus_r]))
chan1 = Kraus((kraus_l, kraus_r))
chan = chan1.adjoint()
self.assertEqual(chan, targ)
self.assertEqual(chan.dim, (4, 2))
def test_compose_except(self):
"""Test compose different dimension exception"""
self.assertRaises(QiskitError,
Kraus(np.eye(2)).compose, Kraus(np.eye(4)))
self.assertRaises(QiskitError, Kraus(np.eye(2)).compose, 2)
def test_compose(self):
"""Test compose method."""
# Random input test state
rho = DensityMatrix(self.rand_rho(2))
# UnitaryChannel evolution
chan1 = Kraus(self.UX)
chan2 = Kraus(self.UY)
chan = chan1.compose(chan2)
targ = rho @ Kraus(self.UZ)
self.assertEqual(rho @ chan, targ)
# 50% depolarizing channel
chan1 = Kraus(self.depol_kraus(0.5))
chan = chan1.compose(chan1)
targ = rho @ Kraus(self.depol_kraus(0.75))
self.assertEqual(rho @ chan, targ)
# Compose different dimensions
kraus1, kraus2 = self.rand_kraus(2, 4, 4), self.rand_kraus(4, 2, 4)
chan1 = Kraus(kraus1)
chan2 = Kraus(kraus2)
targ = rho @ chan1 @ chan2
chan = chan1.compose(chan2)
self.assertEqual(chan.dim, (2, 2))
self.assertEqual(rho @ chan, targ)
chan = chan1 @ chan2
self.assertEqual(chan.dim, (2, 2))
self.assertEqual(rho @ chan, targ)
def test_dot(self):
"""Test dot method."""
# Random input test state
rho = DensityMatrix(self.rand_rho(2))
# UnitaryChannel evolution
chan1 = Kraus(self.UX)
chan2 = Kraus(self.UY)
targ = rho.evolve(Kraus(self.UZ))
self.assertEqual(rho.evolve(chan1.dot(chan2)), targ)
self.assertEqual(rho.evolve(chan1 * chan2), targ)
# 50% depolarizing channel
chan1 = Kraus(self.depol_kraus(0.5))
targ = rho @ Kraus(self.depol_kraus(0.75))
self.assertEqual(rho.evolve(chan1.dot(chan1)), targ)
self.assertEqual(rho.evolve(chan1 * chan1), targ)
# Compose different dimensions
kraus1, kraus2 = self.rand_kraus(2, 4, 4), self.rand_kraus(4, 2, 4)
chan1 = Kraus(kraus1)
chan2 = Kraus(kraus2)
targ = rho @ chan1 @ chan2
self.assertEqual(rho.evolve(chan2.dot(chan1)), targ)
self.assertEqual(rho.evolve(chan2 * chan1), targ)
def test_compose_front(self):
"""Test deprecated front compose method."""
# Random input test state
rho = DensityMatrix(self.rand_rho(2))
# UnitaryChannel evolution
chan1 = Kraus(self.UX)
chan2 = Kraus(self.UY)
chan = chan1.compose(chan2, front=True)
targ = rho @ Kraus(self.UZ)
self.assertEqual(rho @ chan, targ)
# 50% depolarizing channel
chan1 = Kraus(self.depol_kraus(0.5))
chan = chan1.compose(chan1, front=True)
targ = rho @ Kraus(self.depol_kraus(0.75))
self.assertEqual(rho @ chan, targ)
# Compose different dimensions
kraus1, kraus2 = self.rand_kraus(2, 4, 4), self.rand_kraus(4, 2, 4)
chan1 = Kraus(kraus1)
chan2 = Kraus(kraus2)
targ = rho @ chan1 @ chan2
chan = chan2.compose(chan1, front=True)
self.assertEqual(chan.dim, (2, 2))
self.assertEqual(rho @ chan, targ)
def test_expand(self):
"""Test expand method."""
rho0, rho1 = np.diag([1, 0]), np.diag([0, 1])
rho_init = DensityMatrix(np.kron(rho0, rho0))
chan1 = Kraus(self.UI)
chan2 = Kraus(self.UX)
# X \otimes I
chan = chan1.expand(chan2)
rho_targ = DensityMatrix(np.kron(rho1, rho0))
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init @ chan, rho_targ)
# I \otimes X
chan = chan2.expand(chan1)
rho_targ = DensityMatrix(np.kron(rho0, rho1))
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init @ chan, rho_targ)
# Completely depolarizing
chan_dep = Kraus(self.depol_kraus(1))
chan = chan_dep.expand(chan_dep)
rho_targ = DensityMatrix(np.diag([1, 1, 1, 1]) / 4)
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init @ chan, rho_targ)
def test_tensor(self):
"""Test tensor method."""
rho0, rho1 = np.diag([1, 0]), np.diag([0, 1])
rho_init = DensityMatrix(np.kron(rho0, rho0))
chan1 = Kraus(self.UI)
chan2 = Kraus(self.UX)
# X \otimes I
chan = chan2.tensor(chan1)
rho_targ = DensityMatrix(np.kron(rho1, rho0))
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init @ chan, rho_targ)
# I \otimes X
chan = chan1.tensor(chan2)
rho_targ = DensityMatrix(np.kron(rho0, rho1))
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init @ chan, rho_targ)
# Completely depolarizing
chan_dep = Kraus(self.depol_kraus(1))
chan = chan_dep.tensor(chan_dep)
rho_targ = DensityMatrix(np.diag([1, 1, 1, 1]) / 4)
self.assertEqual(chan.dim, (4, 4))
self.assertEqual(rho_init @ chan, rho_targ)
def test_power(self):
"""Test power method."""
# 10% depolarizing channel
rho = DensityMatrix(np.diag([1, 0]))
p_id = 0.9
chan = Kraus(self.depol_kraus(1 - p_id))
# Compose 3 times
p_id3 = p_id**3
chan3 = chan.power(3)
targ3a = rho @ chan @ chan @ chan
self.assertEqual(rho @ chan3, targ3a)
targ3b = rho @ Kraus(self.depol_kraus(1 - p_id3))
self.assertEqual(rho @ chan3, targ3b)
def test_power_except(self):
"""Test power method raises exceptions."""
chan = Kraus(self.depol_kraus(0.9))
# Non-integer power raises error
self.assertRaises(QiskitError, chan.power, 0.5)
def test_add(self):
"""Test add method."""
# Random input test state
rho = DensityMatrix(self.rand_rho(2))
kraus1, kraus2 = self.rand_kraus(2, 4, 4), self.rand_kraus(2, 4, 4)
# Random Single-Kraus maps
chan1 = Kraus(kraus1)
chan2 = Kraus(kraus2)
targ = (rho @ chan1) + (rho @ chan2)
chan = chan1._add(chan2)
self.assertEqual(rho @ chan, targ)
chan = chan1 + chan2
self.assertEqual(rho @ chan, targ)
        # Random Double-Kraus map
chan = Kraus((kraus1, kraus2))
targ = 2 * (rho @ chan)
chan = chan._add(chan)
self.assertEqual(rho @ chan, targ)
def test_subtract(self):
"""Test subtract method."""
# Random input test state
rho = DensityMatrix(self.rand_rho(2))
kraus1, kraus2 = self.rand_kraus(2, 4, 4), self.rand_kraus(2, 4, 4)
# Random Single-Kraus maps
chan1 = Kraus(kraus1)
chan2 = Kraus(kraus2)
targ = (rho @ chan1) - (rho @ chan2)
chan = chan1 - chan2
self.assertEqual(rho @ chan, targ)
        # Random Double-Kraus map
chan = Kraus((kraus1, kraus2))
targ = 0 * (rho @ chan)
chan = chan - chan
self.assertEqual(rho @ chan, targ)
def test_multiply(self):
"""Test multiply method."""
# Random initial state and Kraus ops
rho = DensityMatrix(self.rand_rho(2))
val = 0.5
kraus1, kraus2 = self.rand_kraus(2, 4, 4), self.rand_kraus(2, 4, 4)
# Single Kraus set
chan1 = Kraus(kraus1)
targ = val * (rho @ chan1)
chan = chan1._multiply(val)
self.assertEqual(rho @ chan, targ)
chan = val * chan1
self.assertEqual(rho @ chan, targ)
# Double Kraus set
chan2 = Kraus((kraus1, kraus2))
targ = val * (rho @ chan2)
chan = chan2._multiply(val)
self.assertEqual(rho @ chan, targ)
chan = val * chan2
self.assertEqual(rho @ chan, targ)
def test_multiply_except(self):
"""Test multiply method raises exceptions."""
chan = Kraus(self.depol_kraus(1))
self.assertRaises(QiskitError, chan._multiply, 's')
self.assertRaises(QiskitError, chan.__rmul__, 's')
self.assertRaises(QiskitError, chan._multiply, chan)
self.assertRaises(QiskitError, chan.__rmul__, chan)
def test_negate(self):
"""Test negate method"""
rho = DensityMatrix(np.diag([1, 0]))
targ = DensityMatrix(np.diag([-0.5, -0.5]))
chan = -Kraus(self.depol_kraus(1))
self.assertEqual(rho @ chan, targ)
if __name__ == '__main__':
unittest.main()
|
from django import forms
from django.core.exceptions import ValidationError
from django.db import models
from django.dispatch import receiver
from django.utils.functional import cached_property
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from phonenumber_field.modelfields import PhoneNumberField
from wagtail.admin.edit_handlers import (FieldPanel, InlinePanel,
MultiFieldPanel, PageChooserPanel,
StreamFieldPanel)
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Orderable, Page
from wagtail.core.signals import page_published
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.search import index
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from wagtail.snippets.models import register_snippet
from headlesspreview.models import HeadlessPreviewMixin
from tbx.core.blocks import StoryBlock
from tbx.core.models import ContactFields, RelatedLink
class PersonPageRelatedLink(Orderable, RelatedLink):
page = ParentalKey('people.PersonPage', related_name='related_links')
class PersonPage(HeadlessPreviewMixin, Page, ContactFields):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
role = models.CharField(max_length=255, blank=True)
is_senior = models.BooleanField(default=False)
short_intro = models.TextField(blank=True, null=True)
alt_short_intro = models.TextField(blank=True, null=True)
intro = RichTextField(blank=True)
biography = RichTextField(blank=True)
short_biography = models.CharField(
max_length=255, blank=True,
help_text='A shorter summary biography for including in other pages'
)
image = models.ForeignKey(
'torchbox.TorchboxImage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
feed_image = models.ForeignKey(
'torchbox.TorchboxImage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
search_fields = Page.search_fields + [
index.SearchField('first_name'),
index.SearchField('last_name'),
index.SearchField('intro'),
index.SearchField('biography'),
]
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('first_name'),
FieldPanel('last_name'),
FieldPanel('role'),
FieldPanel('is_senior'),
FieldPanel('short_intro', classname="full"),
FieldPanel('alt_short_intro', classname="full"),
FieldPanel('intro', classname="full"),
FieldPanel('biography', classname="full"),
FieldPanel('short_biography', classname="full"),
ImageChooserPanel('image'),
MultiFieldPanel(ContactFields.panels, "Contact"),
InlinePanel('related_links', label="Related links"),
]
promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
# Person index
class PersonIndexPage(HeadlessPreviewMixin, Page):
strapline = models.CharField(max_length=255)
@cached_property
def people(self):
return PersonPage.objects.exclude(is_senior=True).live().public()
@cached_property
def senior_management(self):
return PersonPage.objects.exclude(is_senior=False).live().public()
content_panels = Page.content_panels + [
FieldPanel('strapline', classname="full"),
]
class CulturePageLink(Orderable):
page = ParentalKey('people.CulturePage', related_name='links')
title = models.TextField()
description = models.TextField()
link = models.ForeignKey('wagtailcore.Page', on_delete=models.CASCADE, blank=True, null=True)
panels = [
FieldPanel('title', classname="full"),
FieldPanel('description', classname="full"),
PageChooserPanel('link')
]
class CulturePage(HeadlessPreviewMixin, Page):
strapline = models.TextField()
strapline_visible = models.BooleanField(
help_text='Hide strapline visually but leave it readable by screen '
'readers.'
)
hero_image = models.ForeignKey(
'torchbox.TorchboxImage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
intro = RichTextField(blank=True)
body = StreamField(StoryBlock())
contact = models.ForeignKey('people.Contact', on_delete=models.SET_NULL, null=True, blank=True, related_name='+')
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('strapline', classname="full"),
FieldPanel('strapline_visible'),
ImageChooserPanel('hero_image'),
FieldPanel('intro', classname="full"),
InlinePanel('links', label='Link'),
StreamFieldPanel('body'),
SnippetChooserPanel('contact'),
]
# An author snippet which keeps a copy of a person's details in case they leave and their page is unpublished
# Could also be used for external authors
@register_snippet
class Author(index.Indexed, models.Model):
person_page = models.OneToOneField('people.PersonPage', on_delete=models.SET_NULL, null=True, blank=True, related_name='+')
name = models.CharField(max_length=255, blank=True)
role = models.CharField(max_length=255, blank=True)
image = models.ForeignKey(
'torchbox.TorchboxImage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
def update_manual_fields(self, person_page):
self.name = person_page.title
self.role = person_page.role
self.image = person_page.image
def clean(self):
if not self.person_page and not self.name:
raise ValidationError({'person_page': "You must set either 'Person page' or 'Name'"})
if self.person_page:
self.update_manual_fields(self.person_page)
def __str__(self):
return self.name
search_fields = [
index.SearchField('name'),
]
panels = [
PageChooserPanel('person_page'),
MultiFieldPanel([
FieldPanel('name'),
FieldPanel('role'),
ImageChooserPanel('image'),
], "Manual fields"),
]
@receiver(page_published, sender=PersonPage)
def update_author_on_page_publish(instance, **kwargs):
author, created = Author.objects.get_or_create(person_page=instance)
author.update_manual_fields(instance)
author.save()
@register_snippet
class Contact(index.Indexed, models.Model):
name = models.CharField(max_length=255, blank=True)
role = models.CharField(max_length=255, blank=True)
image = models.ForeignKey(
'torchbox.TorchboxImage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
email_address = models.EmailField()
phone_number = PhoneNumberField()
default_contact = models.BooleanField(default=False, blank=True, null=True, unique=True)
def __str__(self):
return self.name
search_fields = [
index.SearchField('name'),
]
panels = [
FieldPanel('name'),
FieldPanel('role'),
FieldPanel('default_contact', widget=forms.CheckboxInput),
ImageChooserPanel('image'),
FieldPanel('email_address'),
FieldPanel('phone_number'),
]
class ContactReason(Orderable):
page = ParentalKey('people.ContactReasonsList', related_name='reasons')
title = models.CharField(max_length=255, blank=False)
description = models.TextField(blank=False)
@register_snippet
class ContactReasonsList(ClusterableModel):
name = models.CharField(max_length=255, blank=True)
heading = models.TextField(blank=False)
is_default = models.BooleanField(default=False, blank=True, null=True)
def __str__(self):
return self.name
panels = [
FieldPanel('name'),
FieldPanel('heading'),
FieldPanel('is_default', widget=forms.CheckboxInput),
InlinePanel('reasons', label='Reasons', max_num=3)
]
def clean(self):
if self.is_default:
qs = ContactReasonsList.objects.filter(is_default=True)
if self.pk:
qs = qs.exclude(pk=self.pk)
if qs.exists():
raise ValidationError({
'is_default': [
'There already is another default snippet.',
],
})
|
#!/usr/bin/env python
"""
Usage:
prepare_data_train.py [options]
Options:
-h --help Show this screen.
--data_path FILE Path to data file containing pairs of molecules
--dataset_name NAME Name of dataset (for use in output file naming)
--save_dir NAME Path to save directory
--reverse If true, add pairs in both orders
"""
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
from rdkit import Chem
from rdkit.Chem import rdmolops
from rdkit.Chem import rdFMCS
import glob
import json
import numpy as np
from utils import bond_dict, dataset_info, need_kekulize, to_graph, to_graph_mol, graph_to_adj_mat
import utils
import pickle
import random
from docopt import docopt
from align_molecules import align_smiles_by_MCS_it
dataset = 'zinc'
def read_file(file_path, reverse=False):
with open(file_path, 'r') as f:
lines = f.readlines()
num_lines = len(lines)
data = []
for i, line in enumerate(lines):
toks = line.strip().split()
if len(toks) == 1:
smiles_1, smiles_2 = toks[0], toks[0]
reverse=False # If only one molecule, don't allow pair in both orders
else:
smiles_1, smiles_2 = toks[0], toks[1]
data.append({'smiles_1': smiles_1, 'smiles_2': smiles_2})
if reverse:
data.append({'smiles_1': smiles_2, 'smiles_2': smiles_1})
if i % 2000 ==0:
print('Finished reading: %d / %d' % (i, num_lines), end='\r')
print('Finished reading: %d / %d' % (num_lines, num_lines))
return data
def preprocess(raw_data, dataset, name, save_dir=''):
print('Parsing smiles as graphs.')
processed_data = []
fails = 0
total = len(raw_data)
for i, (smiles_1, smiles_2) in enumerate([(mol['smiles_1'], mol['smiles_2']) for mol in raw_data]):
(mol_in, mol_out), _, nodes_to_keep = align_smiles_by_MCS_it(smiles_1, smiles_2)
if mol_out == []:
fails +=1
continue
nodes_in, edges_in = to_graph_mol(mol_in, dataset)
nodes_out, edges_out = to_graph_mol(mol_out, dataset)
if min(len(edges_in), len(edges_out)) <= 0:
fails +=1
continue
processed_data.append({
'graph_in': edges_in,
'graph_out': edges_out,
'node_features_in': nodes_in,
'node_features_out': nodes_out,
'smiles_out': smiles_2,
'smiles_in': smiles_1,
'v_to_keep': nodes_to_keep,
})
if i % 500 == 0:
print('Processed: %d / %d' % (i, total), end='\r')
print('Processed: %d / %d' % (total, total))
if fails >0:
print("Failed %d molecules" % fails)
print("Saving data.")
with open(save_dir+'molecules_%s.json' % name, 'w') as f:
json.dump(processed_data, f)
print('Length raw data: \t%d' % total)
print('Length processed data: \t%d' % len(processed_data))
if __name__ == "__main__":
# Parse args
args = docopt(__doc__)
reverse = args.get('--reverse')
if args.get('--data_path') and args.get('--dataset_name'):
data_paths = [args.get('--data_path')]
names = [args.get('--dataset_name')]
else:
data_paths = ['data_zinc_dekois_train.smi', 'data_zinc_dekois_valid.smi', 'data_zinc_dude_train.smi', 'data_zinc_dude_valid.smi']
names = ['zinc_dekois_train', 'zinc_dekois_valid', 'zinc_dude_train', 'zinc_dude_valid']
reverse=True
if args.get('--save_dir'):
save_dir = args.get('--save_dir')
else:
save_dir = ''
for data_path, name in zip(data_paths, names):
print("Preparing %s" % data_path)
raw_data = read_file(data_path, reverse=reverse)
preprocess(raw_data, dataset, name, save_dir=save_dir)
|
from flask import render_template, url_for, flash, redirect, request
from app.forms import RegistrationForm, LoginForm, TransacoesForm, ClientForm, ProductForm, MenuForm
from app import app, psycopg2
from app import cursor
import xml.etree.ElementTree as ET
from lxml import etree
from io import StringIO, BytesIO
IsUserLoggedIn = False
AdminNameLoggedIn = ""
@app.route('/')
@app.route('/home')
def home():
global IsUserLoggedIn
global AdminNameLoggedIn
return render_template("layout/home.html", title='Home', IsUserLoggedIn=IsUserLoggedIn,
AdminNameLoggedIn=AdminNameLoggedIn)
@app.route('/stats')
def stats():
global IsUserLoggedIn, result, xml
global AdminNameLoggedIn
try:
cursor.callproc('getTopTransacoes_XML')
xml = cursor.fetchall()
root = ET.fromstring(xml[0][0])
return render_template("stats/stats.html", title='Estatísticas', IsUserLoggedIn=IsUserLoggedIn,
AdminNameLoggedIn=AdminNameLoggedIn, result=root)
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
@app.route('/register', methods=['GET', 'POST'])
def register():
global IsUserLoggedIn, resultverifica, resultinsert
global AdminNameLoggedIn
form = RegistrationForm()
if form.validate_on_submit():
try:
cursor.callproc('verificaAdministrador',
[form.username.data, form.email.data, form.password.data, form.nome_restaurante.data])
resultverifica = cursor.fetchall()
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
print(resultverifica[0][0])
if (resultverifica[0][0]):
try:
cursor.callproc('createAdministrador',
[form.username.data, form.email.data, form.password.data, form.nome_restaurante.data])
resultinsert = cursor.fetchall()
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
print(resultinsert[0][0])
if resultinsert[0][0]:
flash(f'Account created!', 'success')
else:
flash(f'Account already exists!', 'danger')
return redirect(url_for('login'))
return render_template("layout/register.html", title='Register', form=form, IsUserLoggedIn=IsUserLoggedIn)
@app.route('/login', methods=['GET', 'POST'])
def login():
global IsUserLoggedIn
global AdminNameLoggedIn
form = LoginForm()
if form.validate_on_submit():
try:
cursor.callproc('verificarLogin', [form.email.data, form.password.data])
result = cursor.fetchall()
print(result[0][0])
IsUserLoggedIn = result
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
if (IsUserLoggedIn[0][0]):
AdminNameLoggedIn = form.email.data
try:
print(" Transacoes : sent " + AdminNameLoggedIn)
cursor.callproc('getAllTransacoes', [AdminNameLoggedIn])
alltransacoes = cursor.fetchall()
print("Transacoes got from bd")
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
return render_template('transacoes/tab_transacoes.html', title="Transações",
IsUserLoggedIn=IsUserLoggedIn, alltransacoes=alltransacoes)
else:
flash(f'Login unsuccessful', 'danger')
IsUserLoggedIn = False
return render_template("layout/login.html", title='Login', form=form, IsUserLoggedIn=IsUserLoggedIn)
return render_template("layout/login.html", title='Login', form=form)
@app.route("/logout")
def logout():
global IsUserLoggedIn
IsUserLoggedIn = False
return redirect(url_for('home'))
@app.route("/tab_transacoes")
def tab_transacoes():
global IsUserLoggedIn
global AdminNameLoggedIn
alltransacoes = None
print('Value of logged in = ')
print(IsUserLoggedIn)
try:
print(" Transacoes : sent " + AdminNameLoggedIn)
cursor.callproc('getAllTransacoes', [AdminNameLoggedIn])
alltransacoes = cursor.fetchall()
print("Transacoes got from bd")
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
return render_template('transacoes/tab_transacoes.html', title="Transações",
IsUserLoggedIn=IsUserLoggedIn, alltransacoes=alltransacoes)
@app.route("/new_transacoes", methods=['GET', 'POST'])
def new_transacoes():
global IsUserLoggedIn
global AdminNameLoggedIn
form = TransacoesForm()
if form.validate_on_submit():
try:
print("HELLO IT HAPPENED")
print(" create Transacoes : sent " + AdminNameLoggedIn, 1)
cursor.callproc('createTransacao',
[AdminNameLoggedIn, int(form.title.data), int(form.lugar.data), int(form.valor.data),
int(form.carne.data), int(form.peixe.data), int(form.entrada.data), int(form.bebida.data),
int(form.sobremesa.data)])
alltransacoes = cursor.fetchall()
print("Transacoes got from bd")
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
flash(f'Done!', 'success')
return redirect(url_for('tab_transacoes'))
return render_template('transacoes/new_transacoes.html', title="Nova Transação", form=form,
legend='Registar a nova transação', IsUserLoggedIn=IsUserLoggedIn)
@app.route("/tab_transacoes/<int:post_id>")
def transacoes(post_id):
global IsUserLoggedIn, transac
global AdminNameLoggedIn
try:
cursor.callproc('getTransacao', [post_id])
transac = cursor.fetchall()
print(transac)
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
return render_template('transacoes/transacoes.html', transac=transac[0], IsUserLoggedIn=IsUserLoggedIn)
@app.route('/transacoes/<int:post_id>/delete', methods=['POST'])
def delete_transacoes(post_id):
global IsUserLoggedIn
global AdminNameLoggedIn
try:
cursor.callproc('deletetransacao', [post_id])
transac = cursor.fetchall()
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
    flash(f'Delete successful', 'info')
return redirect(url_for('tab_transacoes'))
@app.route("/tab_client")
def tab_client():
global IsUserLoggedIn, result
global AdminNameLoggedIn
try:
cursor.callproc('getallClientesFromRestaurante', [AdminNameLoggedIn])
result = cursor.fetchall()
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
return render_template('client/tab_client.html', posts=result, title="Clientes", IsUserLoggedIn=IsUserLoggedIn)
@app.route("/new_client", methods=['GET', 'POST'])
def new_client():
global IsUserLoggedIn
global AdminNameLoggedIn
form = ClientForm()
if form.validate_on_submit():
try:
print(" Nome cliente enviado : " + form.NomeCliente.data)
cursor.callproc('createCliente', [form.NomeCliente.data, AdminNameLoggedIn])
result = cursor.fetchall()
print(result)
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
flash(f'Done!', 'success')
return redirect(url_for('tab_client'))
return render_template('client/new_client.html', title="Novo Cliente", form=form,
legend='Registrar novo cliente no seu restaurante', IsUserLoggedIn=IsUserLoggedIn)
@app.route("/tab_client/<int:post_id>")
def client(post_id):
global IsUserLoggedIn, client
global AdminNameLoggedIn
try:
cursor.callproc('getclientes', [post_id])
client = cursor.fetchall()
print(client)
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
return render_template('client/client.html', client=client[0], IsUserLoggedIn=IsUserLoggedIn)
@app.route('/client/<int:post_id>/delete', methods=['POST'])
def delete_client(post_id):
global IsUserLoggedIn
global AdminNameLoggedIn
try:
cursor.callproc('deletecartao', [AdminNameLoggedIn, post_id])
client = cursor.fetchall()
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
    flash('Delete successful', 'info')
return redirect(url_for('tab_client'))
@app.route("/tab_menu")
def tab_menu():
global IsUserLoggedIn, ementa, domingo, quinta, quarta, sabado, segunda, sexta, terca
global AdminNameLoggedIn
try:
print("EMENTA : DOMINGO : ")
cursor.callproc('getAllEmentas', [AdminNameLoggedIn, 0])
domingo = cursor.fetchall()
print(domingo)
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
try:
print("EMENTA : SEGUNDA : ")
cursor.callproc('getAllEmentas', [AdminNameLoggedIn, 1])
segunda = cursor.fetchall()
print(segunda)
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
try:
print("EMENTA : TERÇA : ")
cursor.callproc('getAllEmentas', [AdminNameLoggedIn, 2])
terca = cursor.fetchall()
print(terca)
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
try:
print("EMENTA : QUARTA : ")
cursor.callproc('getAllEmentas', [AdminNameLoggedIn, 3])
quarta = cursor.fetchall()
print(quarta)
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
try:
print("EMENTA : QUINTA : ")
cursor.callproc('getAllEmentas', [AdminNameLoggedIn, 4])
quinta = cursor.fetchall()
print(quinta)
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
try:
print("EMENTA : SEXTA : ")
cursor.callproc('getAllEmentas', [AdminNameLoggedIn, 5])
sexta = cursor.fetchall()
print(sexta)
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
try:
print("EMENTA : SABADO : ")
cursor.callproc('getAllEmentas', [AdminNameLoggedIn, 6])
sabado = cursor.fetchall()
print(sabado)
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
return render_template('menu/tab_menu.html', posts=domingo, segunda=segunda, terca=terca, quarta=quarta,
quinta=quinta, sexta=sexta, sabado=sabado, title="Ementas", IsUserLoggedIn=IsUserLoggedIn)
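# Illustrative sketch (hypothetical helper, not wired to any route): the seven nearly
# identical try/except blocks in tab_menu above differ only in the day index passed to
# the 'getAllEmentas' procedure, so they could be collapsed into a single loop that
# reuses the same global cursor.
def _get_week_ementas(admin_name):
    week = []
    for day in range(7):  # 0 = domingo .. 6 = sabado, as in tab_menu above
        try:
            cursor.callproc('getAllEmentas', [admin_name, day])
            week.append(cursor.fetchall())
        except (Exception, psycopg2.DatabaseError) as error:
            print("Error while connecting to PostgreSQL", error)
            week.append([])
    return week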
@app.route("/new_menu", methods=['GET', 'POST'])
def new_menu():
global IsUserLoggedIn
global AdminNameLoggedIn
form = MenuForm()
if form.validate_on_submit():
try:
print(" ementa enviado : " + AdminNameLoggedIn)
cursor.callproc('createEmenta',
[AdminNameLoggedIn, int(form.dia.data), int(form.carne.data), int(form.peixe.data),
int(form.entrada.data), int(form.bebida.data), int(form.sobremesa.data)])
result = cursor.fetchall()
print(result)
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
flash(f'Done!', 'success')
return redirect(url_for('tab_menu'))
return render_template('menu/new_menu.html', title="Ementa", form=form,
legend='Editar a ementa do dia desta semana',
IsUserLoggedIn=IsUserLoggedIn)
@app.route("/tab_product")
def tab_product():
global IsUserLoggedIn, result
global AdminNameLoggedIn
try:
cursor.callproc('getAllProdutos')
result = cursor.fetchall()
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
return render_template('product/tab_product.html', title="Produtos", IsUserLoggedIn=IsUserLoggedIn,
result=result)
@app.route("/new_product", methods=['GET', 'POST'])
def new_product():
global IsUserLoggedIn
global AdminNameLoggedIn
form = ProductForm()
if form.validate_on_submit():
try:
cursor.callproc('createProduto',
[int(form.tipo.data), form.nome.data, form.designacao.data, int(form.preco.data),
form.alergia.data, int(form.quantidade.data)])
result = cursor.fetchall()
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
flash(f'Done!', 'success')
return redirect(url_for('tab_product'))
return render_template('product/new_product.html', title="Novo produto", form=form, legend='Registar novo produto',
IsUserLoggedIn=IsUserLoggedIn)
@app.route("/product/<int:post_id>", methods=['GET', 'POST'])
def product(post_id):
global IsUserLoggedIn, resultprod
global AdminNameLoggedIn
try:
cursor.callproc('getAllProdutos')
resultprod = cursor.fetchall()
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
form = ProductForm()
if form.validate_on_submit():
try:
cursor.callproc('updateProduto',[post_id,form.nome.data,form.designacao.data,form.preco.data,form.alergia.data,form.quantidade.data])
updateprod = cursor.fetchall()
print("ON UPDATE:"+str(updateprod))
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
flash(f'Changes were updated successfully', 'success')
return redirect(url_for('tab_product'))
elif request.method == 'GET':
try:
cursor.callproc('getProduto',[post_id])
getprod = cursor.fetchall()
print("GET PROD:"+str(getprod))
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
form.choosetipo = [('0', 'Entradas'), ('1', 'Bebidas'), ('2', 'Sobremesas'), ('3', 'Carne'), ('4', 'Peixe')]
form.nome.data = getprod[0][2]
form.designacao.data = getprod[0][3]
form.alergia.data = getprod[0][5]
form.preco.data = getprod[0][4]
form.quantidade.data = getprod[0][6]
form.tipo.data = getprod[0][1]
return render_template('product/update_product.html', title="Editar produto", form=form,
legend='Editar produto', IsUserLoggedIn=IsUserLoggedIn, post_id=post_id)
@app.route('/product/<int:post_id>/delete', methods=['POST'])
def delete_product(post_id):
global IsUserLoggedIn
global AdminNameLoggedIn
try:
print("del here")
cursor.callproc('deleteProduto', [post_id])
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
    flash('Delete successful', 'info')
return redirect(url_for('tab_product'))
|
import json
import frappe
from frappe.utils.logger import get_logger
from datetime import timedelta, datetime
from frappe.utils import cint, cstr
from frappe import _
from spine.spine_adapter.redis_client.redis_client import submit_for_execution
module_name = __name__
logger = None
def date_diff_in_Seconds(dt2, dt1):
    delta = dt2 - dt1
    return delta.days * 24 * 3600 + delta.seconds
def poll_and_process_error_messages():
"""
Scheduled method that looks for any error messages to be processed with retry timelineconfigured.The messages will be picked based on it next schedule.
Currently only 5 messages are picked up for processing to ensure too many messages are not picked up. Messages are sorted by time they were received at, to
ensure processing in same order as receipt. However, this order is not guaranteed and handler implementations
should not assume this order.
:return: None
"""
global logger
get_module_logger()
config = frappe.get_cached_doc("Spine Consumer Config", "Spine Consumer Config").as_dict()
# Number of messages to pick up on one call.
window_size = config.get("msg_window_size")
if not window_size:
window_size = 5
messages = frappe.get_all("Message Log", filters={"status": "Error", "direction": "Received"},
fields=["*"], order_by="retrying_at", limit_page_length=window_size)
if messages and len(messages) > 0:
## Filter all messages from the messages list based on next retry timestamp
## Processing messages for only whose next retry timestamp have passed
logger.debug("Found {} error messages".format(len(messages)))
print("pending msgs found: {0}".format(len(messages)))
updated_msgs = []
for msg in messages:
print("name: {0}".format(msg.get("name")))
print("current date: {0}".format(datetime.now()))
print("retrying at: {0}".format(msg.get("retrying_at")))
print("time diff: {0}".format(date_diff_in_Seconds(datetime.now(), msg.get('retrying_at'))))
if date_diff_in_Seconds(datetime.now(), msg.get('retrying_at')) > 0:
# messages.remove(msg)
# else:
# Update status for all messages picked up for processing. This will ensure that later scheduled tasks will
# not pick up same messages.
updated_msgs.append(update_message_status(msg, "Processing"))
# Commit updates
frappe.db.commit()
for msg in updated_msgs:
status = None
retries_left = cint(msg.get("retries_left")) - 1
retry = json.loads(msg.get("retrying_timeline"))
if retries_left >= 0:
logger.debug("Processing new message log - {} of type {}".format(msg, type(msg)))
msg_value = msg.get("json_message")
logger.debug("Processing new Message - {}".format(msg_value))
# Process synchronously without submitting to redis queue. This can be changed later.
process_success = process_message(msg_value)
if process_success:
status = "Processed"
else:
status = "Error"
else:
status = "Failed"
update_message_status(msg, status, retry=retry, retries_left=retries_left)
# Commit DB updates.
frappe.db.commit()
else:
logger.info("SpineConsumer: No messages found for processing.")
def get_module_logger():
global logger
if logger is not None :
return logger
else:
logger = get_logger(module_name, with_more_info=False)
return logger
def preprocess_msg(msg):
    if msg and isinstance(msg, bytes):
msg = msg.decode("utf-8")
logger.debug("Message converted from bytes to string")
try:
#msg_dict = json.loads(msg)
msg_dict = msg
while type(msg_dict) is str:
msg_dict = json.loads(msg_dict)
logger.debug("Payload converted to dict - {} with type {}".format(msg_dict, type(msg_dict)))
except:
msg_dict = msg
logger.debug("Payload could not be converted to dict")
frappe.log_error(title="Message could not be converted")
return msg_dict
def filter_handlers_for_event(msg, conf):
global logger
get_module_logger()
if msg:
logger.debug("Event payload is - {}. Type - {}".format(msg, type(msg)))
doctype = None
msg_dict = msg
try:
client_id = None
doctype = None
logger.debug("msg_dict type - {}, msg_dict.get-Header - {}".format(type(msg_dict), msg_dict.get("Header")))
if msg_dict.get("Header") and msg_dict.get("Header").get("DocType"):
doctype = msg_dict.get("Header").get("DocType")
client_id = msg_dict.get("Header").get("Origin")
logger.debug(
"Header - {}. Doctype - {}".format(msg_dict.get("Header"), msg_dict.get("Header").get("DocType")))
logger.debug("DocType found from message - {}".format(doctype))
logger.debug("Client ID found from message - {}".format(client_id))
logger.debug("Own Client ID from Kafka Config - {}".format(conf.get("client.id")))
if client_id != conf.get("client.id"):
handlers = get_consumer_handlers(doctype)
else:
logger.info("Ignoring self generated message as client id is same in message and local configuration.")
handlers = []
except:
handlers = []
frappe.log_error(title="Spine Handler Lookup Error")
logger.debug("Found handlers for doctype {} = {}".format(doctype, handlers))
return handlers
def process_message(msg_value, queue = None):
global logger
get_module_logger()
process_success = False
consumer_conf = frappe.get_cached_doc("Spine Consumer Config", "Spine Consumer Config").as_dict()
# Send the received message for processing on the background queue for async execution using redis
# queue. This ensures that current event loop thread remains available for more incoming requests.
msg_value = preprocess_msg(msg_value)
handlers_to_enqueue = filter_handlers_for_event(msg_value, consumer_conf)
# if not handlers_to_enqueue or len(handlers_to_enqueue) == 0:
# # Submit a default logging handler to ensure event is logged for analysis
# handlers_to_enqueue = ["spine.spine_adapter.kafka_client.kafka_async_consumer.log_doctype_event"]
# provision for retrying.
# To Enable, capture the retry payload sent from handler function to set status of message and value for next retry.
if handlers_to_enqueue and len(handlers_to_enqueue) > 0:
# Process only if there are any handlers available. If not, do not commit.
for handler in handlers_to_enqueue:
try:
logger.info("Loading {} handler".format(handler))
func = frappe.get_attr(handler)
# If queue is provided, use async execution, else execute synchronously
if queue:
try:
submit_for_execution(queue, func, msg_value)
process_success = True
except:
frappe.log_error(title="Consumer Async Handler {} Submission Error".format(handler))
else:
try:
func(msg_value)
process_success = True
except Exception as exc:
frappe.log_error(title="Consumer Message Handler {} Error".format(handler))
logger.debug("Enqueued {} for processing of event.".format(handler))
except:
frappe.log_error(
title="Could not load handler {}. Ignoring.".format(handler))
else:
# No handlers defined. Consider this as success scenario.
process_success = True
logger.info("No handlers defined for doctype in the message - {}".format(msg_value.get("Header")))
return process_success
def get_consumer_handlers(doctype):
handlers = []
frappe.connect()
try:
# config = frappe.get_single("SpineConsumerConfig")
logger.debug("Retrieving configurations")
config = frappe.get_cached_doc("Spine Consumer Config", "Spine Consumer Config").as_dict()
logger.debug("Retrieved consumer configuration - {}".format(config))
configs = config.get("configs")
logger.debug("Retrieved configurations - {}".format(configs))
if configs:
logger.debug("Checking {} configurations".format(len(configs)))
for spine_config in configs:
logger.debug("Comparing spine config {}:{} with doctype {}".format(spine_config.document_type,
spine_config.event_handler, doctype))
if spine_config.document_type == doctype and spine_config.event_handler:
logger.debug("Found handlers - {}".format(spine_config.event_handler))
# value is expected to be comma separated list of handler functions
handlers = [x.strip() for x in spine_config.event_handler.split(',')]
except:
logger.debug("Error occurred while trying to get handlers for doctype {}.".format(doctype))
frappe.log_error(title="Could not get handlers for doctype {}".format(doctype))
pass
logger.debug("Found handlers - {} for doctype - {}".format(handlers, doctype))
return handlers
def update_message_status(msg_doc, status, retry=None, retries_left=None):
if status == 'Error':
retry_index = len(retry) - retries_left
if retry_index != len(retry):
retrying_at = datetime.strptime(retry[retry_index], '%Y-%m-%d %H:%M:%S')
msg_doc.update({"doctype": "Message Log", "status": status, "retrying_at": retrying_at, "retries_left": retries_left})
else:
status = 'Failed'
msg_doc.update({"doctype": "Message Log", "status": status, "retries_left": retries_left})
send_mail_for_failed_messages(msg_doc)
else:
msg_doc.update({"doctype": "Message Log", "status": status})
updated_msg = frappe.get_doc(msg_doc).save()
return updated_msg
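# Illustrative sketch of the retry bookkeeping used above (timestamps are made up):
# "retrying_timeline" is stored on the Message Log as a JSON list of timestamps in
# '%Y-%m-%d %H:%M:%S' format, and "retries_left" counts down from its length, so the
# slot consumed next is timeline[len(timeline) - retries_left].
def _example_next_retry_slot():
    timeline = ["2021-01-01 10:00:00", "2021-01-01 12:00:00"]  # hypothetical values
    retries_left = 2
    return timeline[len(timeline) - retries_left]  # -> "2021-01-01 10:00:00"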
def send_mail_for_failed_messages(msg_doc):
msg_doc = frappe.get_doc(msg_doc).as_dict()
json_message = json.loads(msg_doc.get("json_message"))
recipients = ["amit.anand@elastic.run"]
frappe.sendmail(recipients=recipients,
subject=_("Syncing of Message failed for document {0}".format(json_message.get("Header").get("DocType"))),
message="Please check with the support team.",
header=['Sync Failed Notification', 'red']
)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import os
__abs_file__ = os.path.abspath(__file__)
__abs_file_path__ = os.path.split(__abs_file__)[0]
default="lang_region"
all_files = [entry for entry in os.listdir(__abs_file_path__)
if entry !='__init__.py' and entry != '__pycache__' and not entry.endswith('~')]
all_conventions = [os.path.splitext(os.path.basename(entry))[0] for entry in all_files]
environment_creators = {}
for convention in all_conventions:
module = importlib.import_module('.'+convention, package='convention')
if 'test_environments' in dir(module):
logging.info('registering convention "{}"'.format(convention))
environment_creators[convention] = module.test_environments
def generate_environments(requested_conventions, files):
all_environments = []
for convention in requested_conventions:
create_fn = environment_creators.get(convention, None)
if create_fn is None:
raise ValueError('convention "{}" not implemented'.format(convention))
try:
all_environments.extend(create_fn(files))
except Exception as ex:
raise ValueError('could not create test environments for convention "{}": {}'.format(convention, repr(ex)))
return all_environments
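if __name__ == '__main__':
    # Minimal illustrative check (assumes this package is importable as 'convention'):
    # list the conventions that registered a 'test_environments' factory at import time.
    logging.basicConfig(level=logging.INFO)
    print(sorted(environment_creators))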
|
import pygame
import game
class Snake:
def __init__(self, game_w, surface, length):
self.game = game_w
self.surface = surface
self.length = length
self.length_add = 0
self.direction = 'right'
# get resources
self.body_block = pygame.image.load("resources/tile.png")
self.head_block = pygame.image.load("resources/tile_head.png")
self.move_sound = pygame.mixer.Sound("resources/sound_01.mp3")
self.eat_sound = pygame.mixer.Sound("resources/sound_02.mp3")
# build body by length
self.body = []
for i in range(self.length):
self.body.append((0, i))
def move(self, where):
if where == 'right':
if self.direction != 'left':
self.direction = 'right'
elif where == 'left':
if self.direction != 'right':
self.direction = 'left'
elif where == 'up':
if self.direction != 'down':
self.direction = 'up'
elif where == 'down':
if self.direction != 'up':
self.direction = 'down'
def walk(self):
# update body
body_pop = self.body.pop(0)
if self.length_add > 0:
self.length_add -= 1
self.body.insert(0, body_pop)
else:
body_pos = self.game.locate_xy(body_pop)
pygame.draw.rect(self.surface, game.TILE_COLOR, pygame.Rect(body_pos[0], body_pos[1], 20, 20))
# update head
head_p = self.body[len(self.body) - 1]
# print('head is {0}'.format(head_p))
# print(self.body)
head_x = head_p[1]
head_y = head_p[0]
if self.direction == 'left':
head_x -= 1
if self.direction == 'right':
head_x += 1
if self.direction == 'up':
head_y -= 1
if self.direction == 'down':
head_y += 1
# check for out of bounds
if head_x > 31:
head_x = 0
elif head_x < 0:
head_x = 31
if head_y > 17:
head_y = 0
elif head_y < 0:
head_y = 17
# add new part and draw
self.body.append((head_y, head_x))
self.draw()
pygame.mixer.Sound.play(self.move_sound)
def draw(self):
for i in range(len(self.body)):
body_p = self.body[i]
xy = self.game.locate_xy(body_p)
# only for head
if i == len(self.body) - 1:
if self.direction == 'down':
blit_rotate_center(self.surface, self.head_block, (xy[0], xy[1]), 90)
elif self.direction == 'up':
blit_rotate_center(self.surface, self.head_block, (xy[0], xy[1]), -90)
elif self.direction == 'right':
blit_rotate_center(self.surface, self.head_block, (xy[0], xy[1]), 180)
elif self.direction == 'left':
self.surface.blit(self.head_block, (xy[0], xy[1]))
# rest of the body
else:
self.surface.blit(self.body_block, (xy[0], xy[1]))
pygame.display.flip()
def increase_length(self, bait):
pygame.mixer.Sound.play(self.eat_sound)
self.length += bait.award
self.length_add = bait.award
def bait_hit(self, bait_position):
if bait_position in self.body:
return True
else:
return False
def body_hit(self):
if len(self.body) != len(set(self.body)):
return True
else:
return False
def blit_rotate_center(surf, image, top_left, angle):
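    # Rotate around the sprite's centre: pygame.transform.rotate expands the surface,
    # so re-centre the rotated image's rect on the original tile's centre to keep the
    # head from drifting when it turns.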
rotated_image = pygame.transform.rotate(image, angle)
new_rect = rotated_image.get_rect(center=image.get_rect(topleft=top_left).center)
surf.blit(rotated_image, new_rect)
|
#!/usr/bin/env python
import os
import sys
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'www.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
import pandas as pd
import numpy as np
def hazard_single(gm_prob: pd.Series, rec_prob: pd.Series):
"""
Calculates the exceedance probability given the specified
ground motion probabilities and rupture recurrence rates
Note: All ruptures specified in gm_prob have to exist
in rec_prob
Parameters
----------
gm_prob: pd.Series
The ground motion probabilities
format: index = rupture_name, values = probability
rec_prob: pd.Series
The recurrence probabilities of the ruptures
format: index = rupture_name, values = probability
Returns
-------
float
The exceedance probability
"""
ruptures = gm_prob.index.values
return np.sum(gm_prob[ruptures] * rec_prob[ruptures])
def hazard_curve(gm_prob_df: pd.DataFrame, rec_prob: pd.Series):
"""
Calculates the exceedance probabilities for the
specified IM values (via the gm_prob_df)
Note: All ruptures specified in gm_prob_df have to exist
in rec_prob
Parameters
----------
gm_prob_df: pd.DataFrame
The ground motion probabilities for every rupture
for every IM level.
format: index = rupture_name, columns = IM_levels
rec_prob: pd.Series
The recurrence probabilities of the ruptures
format: index = rupture_name, values = probability
Returns
-------
pd.Series
The exceedance probabilities for the different IM levels
format: index = IM_levels, values = exceedance probability
"""
data = np.sum(
gm_prob_df.values * rec_prob[gm_prob_df.index.values].values.reshape(-1, 1),
axis=0,
)
return pd.Series(index=gm_prob_df.columns.values, data=data)
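if __name__ == "__main__":
    # Minimal illustrative example with made-up numbers: three ruptures, two IM levels.
    rec_prob = pd.Series({"rup_1": 0.01, "rup_2": 0.002, "rup_3": 0.0005})
    gm_prob = pd.Series({"rup_1": 0.9, "rup_2": 0.5, "rup_3": 0.1})
    # 0.01 * 0.9 + 0.002 * 0.5 + 0.0005 * 0.1 = 0.01005
    print(hazard_single(gm_prob, rec_prob))
    gm_prob_df = pd.DataFrame(
        {"IM_level_1": [0.9, 0.5, 0.1], "IM_level_2": [0.2, 0.05, 0.01]},
        index=["rup_1", "rup_2", "rup_3"],
    )
    print(hazard_curve(gm_prob_df, rec_prob))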
|
import torch
from torch import nn
def _mean_abs_cosine_similarity(A):
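    # A is an (n, d) matrix of row vectors: denom holds each row's L2 norm, B is the
    # pairwise cosine-similarity matrix, and the penalty is the mean absolute similarity
    # over the strict upper triangle, i.e. over the n * (n - 1) / 2 distinct row pairs.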
denom = (A * A).sum(1, keepdim=True).sqrt()
B = A.mm(A.T) / (denom * denom.T)
penalty = B.triu(diagonal=1).abs().sum() / ((len(A) * (len(A) - 1)) / 2)
return penalty
class CosinePenalty():
def __init__(self, weight=1e-4):
self.weight = weight
def __call__(self, model):
penalty = 0.
for p in model.parameters():
if len(p.shape) > 1:
penalty += _mean_abs_cosine_similarity(p.flatten(1))
penalty *= self.weight
return penalty
if __name__ == "__main__":
from norm_resnet import resnet18
net = resnet18()
for p in net.parameters():
print(p[0])
break
cosine_penalty = CosinePenalty(weight=1.0)
for i in range(10):
penalty = cosine_penalty(net)
penalty.backward()
with torch.no_grad():
for p in net.parameters():
if p.grad is not None:
p -= p.grad.data
# for p in net.parameters():
# print(p[0])
# break
print(penalty)
pass
|
from __future__ import print_function
import os, sys
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import math
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from absl import flags
FLAGS = flags.FLAGS
class ARCH_celeba():
def __init__(self):
print("Creating CelebA architectures for base cases ")
return
def generator_model_celeba(self):
# init_fn = tf.random_normal_initializer(mean=0.0, stddev=0.05, seed=None)
init_fn = tf.keras.initializers.glorot_uniform()
init_fn = tf.function(init_fn, autograph=False)
inputs = tf.keras.Input(shape = (self.noise_dims,))
dec1 = tf.keras.layers.Dense(int(self.output_size/16)*int(self.output_size/16)*1024, kernel_initializer=init_fn, use_bias=False)(inputs)
dec1 = tf.keras.layers.LeakyReLU()(dec1)
un_flat = tf.keras.layers.Reshape([int(self.output_size/16),int(self.output_size/16),1024])(dec1) #4x4x1024
deconv1 = tf.keras.layers.Conv2DTranspose(512, (5, 5), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=init_fn)(un_flat) #8x8x512
deconv1 = tf.keras.layers.BatchNormalization()(deconv1)
deconv1 = tf.keras.layers.LeakyReLU()(deconv1)
deconv2 = tf.keras.layers.Conv2DTranspose(256, (5, 5), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=init_fn)(deconv1) #16x16x256
deconv2 = tf.keras.layers.BatchNormalization()(deconv2)
deconv2 = tf.keras.layers.LeakyReLU()(deconv2)
deconv4 = tf.keras.layers.Conv2DTranspose(128, (5, 5), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=init_fn)(deconv2) #32x32x128
deconv4 = tf.keras.layers.BatchNormalization()(deconv4)
deconv4 = tf.keras.layers.LeakyReLU()(deconv4)
out = tf.keras.layers.Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=init_fn, activation = 'sigmoid')(deconv4) #64x64x3
model = tf.keras.Model(inputs = inputs, outputs = out)
return model
def discriminator_model_celeba(self):
# init_fn = tf.random_normal_initializer(mean=0.0, stddev=0.05, seed=None)
init_fn = tf.keras.initializers.glorot_uniform()
init_fn = tf.function(init_fn, autograph=False)
model = tf.keras.Sequential() #64x64x3
model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_initializer=init_fn,input_shape=[self.output_size, self.output_size, 3])) #32x32x64
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same', kernel_initializer=init_fn)) #16x16x128
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2D(256, (5, 5), strides=(2, 2), padding='same', kernel_initializer=init_fn)) #8x8x256
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2D(512, (5, 5), strides=(2, 2), padding='same', kernel_initializer=init_fn)) #4x4x512
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Flatten()) #8192x1
model.add(layers.Dense(1)) #1x1
return model
def CelebA_Classifier(self):
self.FID_model = tf.keras.applications.inception_v3.InceptionV3(include_top=False, pooling='avg', weights='imagenet', input_tensor=None, input_shape=(80,80,3), classes=1000)
def FID_celeba(self):
def data_reader_faces(filename):
with tf.device('/CPU'):
print(tf.cast(filename[0],dtype=tf.string))
image_string = tf.io.read_file(tf.cast(filename[0],dtype=tf.string))
# Don't use tf.image.decode_image, or the output shape will be undefined
image = tf.image.decode_jpeg(image_string, channels=3)
image.set_shape([218,178,3])
image = tf.image.crop_to_bounding_box(image, 38, 18, 140,140)
image = tf.image.resize(image,[80,80])
# This will convert to float values in [0, 1]
image = tf.divide(image,255.0)
image = tf.scalar_mul(2.0,image)
image = tf.subtract(image,1.0)
# image = tf.image.convert_image_dtype(image, tf.float16)
return image
if self.FID_load_flag == 0:
### First time FID call setup
self.FID_load_flag = 1
if self.testcase in ['bald', 'hat']:
self.fid_train_images_names = self.fid_train_images
else:
random_points = tf.keras.backend.random_uniform([self.FID_num_samples], minval=0, maxval=int(self.fid_train_images.shape[0]), dtype='int32', seed=None)
print(random_points)
self.fid_train_images_names = self.fid_train_images[random_points]
## self.fid_train_images has the names to be read. Make a dataset with it
self.fid_image_dataset = tf.data.Dataset.from_tensor_slices(self.fid_train_images_names)
self.fid_image_dataset = self.fid_image_dataset.map(data_reader_faces,num_parallel_calls=int(self.num_parallel_calls))
self.fid_image_dataset = self.fid_image_dataset.batch(self.fid_batch_size)
self.CelebA_Classifier()
with tf.device(self.device):
for image_batch in self.fid_image_dataset:
noise = tf.random.normal([self.fid_batch_size, self.noise_dims],self.noise_mean, self.noise_stddev)
preds = self.generator(noise, training=False)
preds = tf.image.resize(preds, [80,80])
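                # Generator output is sigmoid-activated ([0, 1]); rescale to [-1, 1] so the
                # fake batch matches the range of the real images prepared in data_reader_faces.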
preds = tf.scalar_mul(2.,preds)
preds = tf.subtract(preds,1.0)
preds = preds.numpy()
act1 = self.FID_model.predict(image_batch)
act2 = self.FID_model.predict(preds)
try:
self.act1 = np.concatenate((self.act1,act1), axis = 0)
self.act2 = np.concatenate((self.act2,act2), axis = 0)
except:
self.act1 = act1
self.act2 = act2
self.eval_FID()
return
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, argparse
parser = argparse.ArgumentParser(description='Used to match IP addresses to their subnet ¯\_(ツ)_/¯')
parser.add_argument('-i', '--input', nargs='?',
help="Specifies the input file")
parser.add_argument("-o", "--output",
type=argparse.FileType('w'), nargs='?', const=sys.stdout,
help="Specifies the output file")
args = parser.parse_args()
if(args.output is not None):
sys.stdout = args.output
inputfile = args.input
def main():
with open(inputfile, 'r') if (args.input is not None) \
            else open(input("Enter the path of the file you want to check: "), 'r') as fp:
print("{:20} {:^20} {:>20}".format("Source IP","Destination Subnet","In Subnet?"))
for line in fp:
ip1, ip2 = map(str, line.split(","))
ip2 = ip2.rstrip("\n\r")
cidr = int((ip2.rsplit('/', 1))[1]) #split off the CIDR notation
ip2 = ip2.split('/', 1)[0] #split off dest IP
binip1 = bin(int(ip1,0))[2:].zfill(32) #conv hex src IP to bin and pad with zero
binip2 = ''.join([bin(int(x)+256)[3:] for x in ip2.split('.')]) #conv dest IP dotdec to bin
bincidr = bin(0xffffffff >> (32-cidr) << (32-cidr))[2:] #conv CIDR notation to bin
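            # e.g. for a /24: 0xffffffff >> 8 << 8 == 0xffffff00, whose binary form is
            # '11111111111111111111111100000000' (24 ones followed by 8 zeros)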
dotcidr = dotConvert(bincidr)
dotip1 = dotConvert(binip1)
netid1 = netCalc(bincidr,binip1)
netid2 = netCalc(bincidr,binip2)
if (netid1 == netid2) and (args.output is not None):
print("{:<20} {:^20} {:>17}".format(dotip1, dotcidr, " Yes "))
elif (netid1) == (netid2):
print("{:<20} {:^20} {:>31}".format(dotip1, dotcidr, "\x1b[0;32;40m Yes \x1b[0m"))
elif (netid1 != netid2) and (args.output is not None):
print("{:<20} {:^20} {:>16}".format(dotip1, dotcidr, "No"))
else:
print("\x1b[0;31;40m{:<20} {:^20} {:>15} \x1b[0m".format(dotip1, dotcidr, "No"))
def dotConvert(binaddr):  # converts a binary address string to dotted decimal
x = 8
binlist = [binaddr[i: i + x] for i in range(0, len(binaddr), x)]
dotdec = '.'.join([str(int(num, 2)) for num in binlist])
    return dotdec
def netCalc(cidr, addr): #used to perform bitwise AND to calc netmask
cidr = int(cidr, 2)
addr = int(addr, 2)
netid = cidr & addr
    return netid
if __name__ == '__main__':
main()
|
import json
import threading
import requests
import homeassistant.homeassistant
import homeassistant.light
import services.alarms
def check_if_lights_are_on_but_not_home(minutes, lights_id, person_id, mobile_app_id):
if not homeassistant.homeassistant.is_home(person_id):
if homeassistant.light.is_on(lights_id):
homeassistant.homeassistant.send_notification(mobile_app_id,
'Les lumières sont allumées.',
'Une ou plusieurs lampes sont toujours allumées alors que vous n\'êtes pas à la maison...',
action1=['TURNOFFLIGHTS', 'Éteindre'])
timer = threading.Timer(minutes * 60, check_if_lights_are_on_but_not_home,
[minutes, lights_id, person_id, mobile_app_id])
timer.start()
def check_if_there_is_an_alarm(minutes, person_id):
if homeassistant.homeassistant.is_home(person_id):
if services.alarms.check():
# play song
print("MUSIC MAESTRO")
timer = threading.Timer(minutes * 60, check_if_there_is_an_alarm, [minutes, person_id])
timer.start()
def check_temperature(minutes, entity_id, mobile_app_id):
entity_state = homeassistant.homeassistant.get_state(entity_id).state
temperature = int(float(entity_state['state']))
if temperature >= 85:
friendly_name = entity_state['attributes']['friendly_name']
homeassistant.homeassistant.send_notification(mobile_app_id,
'La température est trop haute',
'La température de ' + friendly_name + ' est trop haute (' +
str(temperature) + '°C)!')
    timer = threading.Timer(minutes * 60, check_temperature, [minutes, entity_id, mobile_app_id])
timer.start()
def check_if_eth_miner_is_offline(minutes, mobile_app_id):
json_data = json.loads(
requests.get("https://api.ethermine.org/miner/1C169a48601EC3D342Be36A26F5B387DC8d2155C/dashboard").text)
active_workers = json_data['data']['currentStatistics']['activeWorkers']
if active_workers == 1:
homeassistant.homeassistant.send_notification(mobile_app_id,
'Un PC de minage est éteint.',
'Un PC ne mine plus d\'ETH, à controller au plus vite.')
elif active_workers == 0:
homeassistant.homeassistant.send_notification(mobile_app_id,
'Les PC de minage sont éteint.',
'Les PC ne minent plus d\'ETH, à controller immédiatement!')
    timer = threading.Timer(minutes * 60, check_if_eth_miner_is_offline, [minutes, mobile_app_id])
timer.start()
def register():
check_if_lights_are_on_but_not_home(60, 'light.lumieres_chambre', 'person.mathieu', 'mobile_app_oneplus_8t')
check_if_there_is_an_alarm(1, 'person.mathieu')
check_if_eth_miner_is_offline(20, 'mobile_app_oneplus_8t')
check_temperature(2, 'sensor.processor_temperature', 'mobile_app_oneplus_8t')
check_temperature(2, 'sensor.tour_mathieu_amd_ryzen_7_3700x_temperatures_cpu_package', 'mobile_app_oneplus_8t')
check_temperature(2, 'sensor.tour_mathieu_nvidia_nvidia_geforce_rtx_3070_temperatures_gpu_core',
'mobile_app_oneplus_8t')
|
from typing import Optional
from dexp.processing.filters.sobel_filter import sobel_filter
from dexp.processing.utils.blend_images import blend_images
from dexp.processing.utils.element_wise_affine import element_wise_affine
from dexp.processing.utils.fit_shape import fit_to_shape
from dexp.utils import xpArray
from dexp.utils.backends import Backend, NumpyBackend
def fuse_tg_nd(
image_a: xpArray,
image_b: xpArray,
downscale: Optional[int] = 2,
sharpness: Optional[float] = 24,
tenengrad_smoothing: Optional[int] = 4,
blend_map_smoothing: Optional[int] = 10,
bias_axis: Optional[int] = None,
bias_exponent: Optional[float] = 3,
bias_strength: Optional[float] = 2,
clip: Optional[bool] = True,
internal_dtype=None,
_display_blend_map: bool = False,
):
"""
Fuses two images by picking regions from one or the other image based on the local image quality
measured by using the magnitude of the Sobel gradient -- similarly as in the Tenengrad focus metric.
A smooth blend map is computed that blends the two images based on local image quality. A bias can be
introduced to favor one side of an axis versus another.
Parameters
----------
image_a : First image to fuse
image_b : Second image to fuse
downscale : How much to downscale the two images in order to compute the blend map.
A value of 2 is good to achieve both coarse denoising and reduce compute load.
    sharpness : How 'sharp' the choice between the two images should be.
        A large value means that voxel values are mostly picked from one image or the other with very
        little mixing, even when the local image quality of the two images differs only slightly.
tenengrad_smoothing : How much to smooth the tenengrad map
blend_map_smoothing : How much to smooth the blend map
bias_axis : Along which axis should a bias be introduced in the blend map. None for no bias.
bias_exponent : Exponent for the bias
bias_strength : Bias strength -- zero means no bias
clip : clip output to input images min and max values.
internal_dtype : dtype for internal computation
_display_blend_map : For debugging purposes, we can display the images to fuse, the blend map and result.
    Returns
    -------
    Fused image.
    """
xp = Backend.get_xp_module()
sp = Backend.get_sp_module()
if not image_a.shape == image_b.shape:
raise ValueError("Arrays must have the same shape")
if not image_a.dtype == image_b.dtype:
raise ValueError("Arrays must have the same dtype")
if internal_dtype is None:
internal_dtype = image_a.dtype
if type(Backend.current()) is NumpyBackend:
internal_dtype = xp.float32
original_dtype = image_a.dtype
image_a = Backend.to_backend(image_a, dtype=internal_dtype)
image_b = Backend.to_backend(image_b, dtype=internal_dtype)
min_a, max_a = xp.min(image_a), xp.max(image_a)
min_b, max_b = xp.min(image_b), xp.max(image_b)
min_value = min(min_a, min_b)
    max_value = max(max_a, max_b)
del min_a, max_a, min_b, max_b
# downscale to speed up computation and reduce noise
d_image_a = sp.ndimage.interpolation.zoom(image_a, zoom=1 / downscale, order=0)
d_image_b = sp.ndimage.interpolation.zoom(image_b, zoom=1 / downscale, order=0)
# Denoise further:
d_image_a = sp.ndimage.gaussian_filter(d_image_a, sigma=1.5)
d_image_b = sp.ndimage.gaussian_filter(d_image_b, sigma=1.5)
# Compute Tenengrad filter:
t_image_a = sobel_filter(d_image_a, exponent=1, normalise_input=False, in_place_normalisation=True)
t_image_b = sobel_filter(d_image_b, exponent=1, normalise_input=False, in_place_normalisation=True)
del d_image_a, d_image_b
# Apply maximum filter:
t_image_a = sp.ndimage.maximum_filter(t_image_a, size=tenengrad_smoothing)
t_image_b = sp.ndimage.maximum_filter(t_image_b, size=tenengrad_smoothing)
# Apply smoothing filter:
t_image_a = sp.ndimage.uniform_filter(t_image_a, size=max(1, tenengrad_smoothing))
t_image_b = sp.ndimage.uniform_filter(t_image_b, size=max(1, tenengrad_smoothing))
# Normalise:
t_min_value = min(xp.min(t_image_a), xp.min(t_image_b))
t_max_value = max(xp.max(t_image_a), xp.max(t_image_b))
alpha = (1 / (t_max_value - t_min_value)).astype(internal_dtype)
beta = (-t_min_value / (t_max_value - t_min_value)).astype(internal_dtype)
t_image_a = element_wise_affine(t_image_a, alpha, beta, out=t_image_a)
t_image_b = element_wise_affine(t_image_b, alpha, beta, out=t_image_b)
del t_min_value, t_max_value
# Add bias:
if bias_axis is not None and bias_strength != 0:
length = t_image_a.shape[bias_axis]
bias_vector = xp.linspace(-1, 1, num=length)
bias_vector = xp.sign(bias_vector) * (xp.absolute(bias_vector) ** bias_exponent)
new_shape = tuple(s if i == bias_axis else 1 for i, s in enumerate(t_image_a.shape))
bias_vector = xp.reshape(bias_vector, newshape=new_shape)
t_image_a -= bias_strength * bias_vector
t_image_b += bias_strength * bias_vector
del bias_vector
# compute the absolute difference and sign:
diff = t_image_a
diff -= t_image_b
del t_image_b
sgn_diff = xp.sign(diff)
abs_diff = xp.absolute(diff, out=diff)
abs_diff **= 1 / sharpness
del diff
# compute blending map:
blend_map = abs_diff
blend_map *= sgn_diff
blend_map = element_wise_affine(blend_map, 0.5, 0.5, out=blend_map)
del sgn_diff
# Upscale blending map back to original size:
blend_map = sp.ndimage.zoom(blend_map, zoom=downscale, order=1)
# Padding to recover original image size:
blend_map = fit_to_shape(blend_map, shape=image_a.shape)
# Smooth blend map to have less seams:
blend_map = sp.ndimage.uniform_filter(blend_map, size=blend_map_smoothing)
# Fuse using blending map:
image_fused = blend_images(image_a, image_b, blend_map)
if not _display_blend_map:
del image_a, image_b, blend_map
if clip:
image_fused = xp.clip(image_fused, min_value, max_value, out=image_fused)
# Adjust type:
image_fused = image_fused.astype(original_dtype, copy=False)
if _display_blend_map:
from napari import Viewer, gui_qt
with gui_qt():
def _c(array):
return Backend.to_numpy(array)
viewer = Viewer()
viewer.add_image(_c(image_a), name="image_a", contrast_limits=(0, 600))
viewer.add_image(_c(image_b), name="image_b", contrast_limits=(0, 600))
viewer.add_image(_c(blend_map), name="blend_map")
viewer.add_image(_c(image_fused), name="image_fused", contrast_limits=(0, 600))
return image_fused
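if __name__ == "__main__":
    # Minimal usage sketch, not part of the library API: fuse two noisy views of the same
    # synthetic volume. Assumes NumpyBackend is usable as a context manager, as elsewhere
    # in dexp; shapes and noise levels are arbitrary illustrative values.
    import numpy

    rng = numpy.random.default_rng(0)
    truth = rng.normal(size=(64, 64, 64)).astype(numpy.float32)
    view_a = truth + rng.normal(scale=0.1, size=truth.shape).astype(numpy.float32)
    view_b = truth + rng.normal(scale=0.1, size=truth.shape).astype(numpy.float32)
    with NumpyBackend():
        fused = fuse_tg_nd(view_a, view_b)
    print(fused.shape, fused.dtype)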
|
import os
import pickle
from matplotlib import pyplot as plt
from matplotlib import rc
import settings
from compute import _compute_profile_data
from gtfspy.routing.node_profile_analyzer_time_and_veh_legs import NodeProfileAnalyzerTimeAndVehLegs
from util import get_data_or_compute
rc("text", usetex=True)
recompute = False
# change to settings.ITAKESKUS_ID to see the opposite direction:
destination_stop_id = settings.AALTO_UNIVERSITY_ID
destination_stop_I = settings.get_stop_I_by_stop_id(destination_stop_id)
# some setting up of filenames and origin stops
if destination_stop_id == settings.AALTO_UNIVERSITY_ID:
origin_stop_id = settings.ITAKESKUS_ID
profile_fname_prefix = "itakeskus_to_aalto"
ax1_title = 'From It\\"akeskus to Aalto University'
else:
origin_stop_id = settings.AALTO_UNIVERSITY_ID
profile_fname_prefix = "aalto_to_itakeskus"
ax1_title = 'From Aalto University to It\\"akeskus'
origin_stop_I = settings.get_stop_I_by_stop_id(origin_stop_id)
# Computing the profile and caching some of the data
profile_fname = os.path.join(settings.RESULTS_DIRECTORY, profile_fname_prefix + "_profile.pickle")
try:
if recompute:
raise RuntimeError("Recomputing!")
print(profile_fname)
profile = pickle.load(open(profile_fname, 'rb'))
except:
fname = os.path.join(settings.RESULTS_DIRECTORY, "long_profiles_" + profile_fname_prefix + ".pkl")
params = {
"targets": [destination_stop_I],
"routing_start_time_dep": settings.DAY_START,
"routing_end_time_dep": settings.DAY_END
}
data = get_data_or_compute(fname, _compute_profile_data, recompute=recompute, **params)
print(data["params"])
profiles = data["profiles"]
profile = profiles[origin_stop_I]
pickle.dump(profile, open(profile_fname, 'wb'), -1)
# Spawn an analyzer object, and plot the boarding-count-augmented temporal distance profile
npa = NodeProfileAnalyzerTimeAndVehLegs(profile,
settings.DAY_START + 6 * 3600,
settings.DAY_START + 21 * 3600)
fig = plt.figure(figsize=(11, 3.5))
subplot_grid = (1, 8)
ax1 = plt.subplot2grid(subplot_grid, (0, 0), colspan=6, rowspan=1)
npa.plot_new_transfer_temporal_distance_profile(timezone=settings.TIMEZONE,
format_string="%H:%M",
plot_journeys=False,
ax=ax1,
highlight_fastest_path=False,
default_lw=1.5,
ncol_legend=1)
ax1.set_ylabel("Temporal distance $\\tau$")
ax1.set_title(ax1_title)
ax1.set_xlabel("Departure time $t_{\\text{dep}}$ (min)")
ax1.set_ylabel("Temporal distance $\\tau$ (min)")
# Plot the boarding-count-augmented temporal distance distribution:
ax2 = plt.subplot2grid(subplot_grid, (0, 6), colspan=2, rowspan=1)
ax2 = npa.plot_temporal_distance_pdf_horizontal(use_minutes=True,
ax=ax2,
legend_font_size=11)
ax1.set_ylim(0, 80)
ax2.set_ylim(0, 80)
ax2.set_xticks([0.00, 0.05, 0.1])
ax2.set_ylabel("Temporal distance $\\tau$ (min)")
ax2.set_xlabel("Probability density $P(\\tau)$")
for ax, letter, x in zip([ax1, ax2], "AB", [0.01, 0.04]):
    ax.text(x, 0.98,
"\\textbf{" + letter + "}",
horizontalalignment="left",
verticalalignment="top",
transform=ax.transAxes,
fontsize=15,
color="black")
fig.tight_layout()
fig.savefig(os.path.join(settings.FIGS_DIRECTORY, "long_profile_with_transfers_" + profile_fname_prefix + ".pdf"))
# Print some statistics:
print("max_temporal_distance: ", npa.max_temporal_distance() / 60.0)
print("min_temporal_distance: ", npa.min_temporal_distance() / 60.0)
print("mean_temporal_distance: ", npa.mean_temporal_distance() / 60.0)
print("mean_temporal_distance_with_min_n_boardings: ", npa.mean_temporal_distance_with_min_n_boardings() / 60.0)
time_diff = npa.mean_temporal_distance_with_min_n_boardings() / 60.0 - npa.mean_temporal_distance() / 60.0
print("difference in mean t: ", time_diff)
print("mean_n_boardings: ", npa.mean_n_boardings_on_shortest_paths())
boarding_diff = npa.mean_n_boardings_on_shortest_paths() - npa.min_n_boardings()
print("difference in boardings: ", boarding_diff)
print("gain per boarding: ", time_diff / boarding_diff)
# Show the plot:
plt.show()
|
import numpy as np
import tensorflow as tf
class ImgPreprocess():
def __init__(self,is_training):
self.is_training=is_training
def img_resize(self, img_tensor, gtboxes_and_label, target_shortside_len, length_limitation):
'''
:param img_tensor:[h, w, c], gtboxes_and_label:[-1, 5]. gtboxes: [xmin, ymin, xmax, ymax]
:param target_shortside_len:
:param length_limitation: set max length to avoid OUT OF MEMORY
:return:
'''
def max_length_limitation(length, length_limitation):
return tf.cond(tf.less(length, length_limitation),
true_fn=lambda: length,
false_fn=lambda: length_limitation)
img_h,img_w=tf.shape(img_tensor)[0],tf.shape(img_tensor)[1]
img_new_h, img_new_w = tf.cond(tf.less(img_h, img_w),
true_fn=lambda: (target_shortside_len,
max_length_limitation(target_shortside_len * img_w // img_h,
length_limitation)),
false_fn=lambda: (
max_length_limitation(target_shortside_len * img_h // img_w, length_limitation),
target_shortside_len))
img_tensor = tf.expand_dims(img_tensor, axis=0)
img_tensor = tf.image.resize_bilinear(img_tensor, [img_new_h, img_new_w])
if gtboxes_and_label is not None and self.is_training:
xmin, ymin, xmax, ymax, label = tf.unstack(gtboxes_and_label, axis=1)
new_xmin, new_ymin = xmin * img_new_w // img_w, ymin * img_new_h // img_h
new_xmax, new_ymax = xmax * img_new_w // img_w, ymax * img_new_h // img_h
img_tensor = tf.squeeze(img_tensor, axis=0)
if gtboxes_and_label is not None and self.is_training:
return img_tensor, tf.transpose(tf.stack([new_xmin, new_ymin, new_xmax, new_ymax, label], axis=0))
else:
return img_tensor
def random_flip_left_right(self,img_tensor, gtboxes_and_label):
def flip_left_to_right(img_tensor, gtboxes_and_label):
h, w = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
img_tensor = tf.image.flip_left_right(img_tensor)
xmin, ymin, xmax, ymax, label = tf.unstack(gtboxes_and_label, axis=1)
new_xmax = w - xmin
new_xmin = w - xmax
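            # Mirroring the image horizontally swaps the roles of the box edges:
            # the new left edge is w - old right edge, and vice versa.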
return img_tensor, tf.transpose(tf.stack([new_xmin, ymin, new_xmax, ymax, label], axis=0))
img_tensor, gtboxes_and_label= tf.cond(tf.less(tf.random.uniform(shape=[], minval=0, maxval=1), 0.5),
lambda: flip_left_to_right(img_tensor, gtboxes_and_label),
lambda: (img_tensor, gtboxes_and_label))
return img_tensor, gtboxes_and_label
if __name__ == '__main__':
import os
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import tensorflow as tf
test=ImgPreprocess(is_training=True)
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3])
output=test.img_resize(img_plac,None,10,100)
with tf.Session() as sess:
img_out=sess.run(output, feed_dict={img_plac:np.random.uniform(low=0,high=255,size=(12,12,3))})
print(img_out)
|
from .models import *
from .module import *
from .tools import *
from .utils import *
|
import windows.rpc
from windows.rpc import ndr
class PLSAPR_OBJECT_ATTRIBUTES(ndr.NdrStructure):
MEMBERS = [ndr.NdrLong,
ndr.NdrUniquePTR(ndr.NdrWString),
               ndr.NdrUniquePTR(ndr.NdrLong),  # We don't care about the subtype as we will pass None
               ndr.NdrLong,
               ndr.NdrUniquePTR(ndr.NdrLong),  # We don't care about the subtype as we will pass None
               ndr.NdrUniquePTR(ndr.NdrLong)]  # We don't care about the subtype as we will pass None
## From: RPCVIEW
# long Proc44_LsarOpenPolicy2(
# [in][unique][string] wchar_t* arg_0,
# [in]struct Struct_364_t* arg_1,
# [in]long arg_2,
# [out][context_handle] void** arg_3);
# This function has a [out][context_handle] meaning it return a context_handle
# Context handle are represented by 5 NdrLong where the first one is always 0
# PythonForWindows represent context_handle using NdrContextHandle
class LsarOpenPolicy2Parameter(ndr.NdrParameters):
MEMBERS = [ndr.NdrUniquePTR(ndr.NdrWString),
PLSAPR_OBJECT_ATTRIBUTES,
ndr.NdrLong]
## From: RPCVIEW
# long Proc2_LsarEnumeratePrivileges(
# [in][context_handle] void* arg_0,
# [in][out]long *arg_1,
# [out]struct Struct_110_t* arg_2,
# [in]long arg_3);
# This function has a [in][context_handle] meaning it expect a context_handle
# We can pass the NdrContextHandle returned by Proc44_LsarOpenPolicy2
class LsarEnumeratePrivilegesParameter(ndr.NdrParameters):
MEMBERS = [ndr.NdrContextHandle,
ndr.NdrLong,
ndr.NdrLong]
class LSAPR_POLICY_PRIVILEGE_DEF(object):
@classmethod
def unpack(cls, stream):
size1 = ndr.NdrShort.unpack(stream)
ptr = ndr.NdrShort.unpack(stream)
size2 = ndr.NdrLong.unpack(stream)
luid = ndr.NdrHyper.unpack(stream)
return ptr, luid
class LSAPR_PRIVILEGE_ENUM_BUFFER(object):
@classmethod
def unpack(cls, stream):
entries = ndr.NdrLong.unpack(stream)
array_size = ndr.NdrLong.unpack(stream)
array_ptr = ndr.NdrLong.unpack(stream)
# Unpack pointed array
array_size2 = ndr.NdrLong.unpack(stream)
assert array_size == array_size2
x = []
# unpack each elements LSAPR_POLICY_PRIVILEGE_DEF
for i in range(array_size2):
ptr, luid = LSAPR_POLICY_PRIVILEGE_DEF.unpack(stream)
if ptr:
x.append(luid)
# unpack pointed strings
result = []
for luid in x:
name = ndr.NdrWcharConformantVaryingArrays.unpack(stream)
result.append((luid, name))
return result
# Actual code
## LSASS alpc endpoints is fixed, no need for the epmapper
client = windows.rpc.RPCClient(r"\RPC Control\lsasspirpc")
## Bind to the desired interface
iid = client.bind('12345778-1234-abcd-ef00-0123456789ab', version=(0,0))
## Craft parameters and call 'LsarOpenPolicy2'
params = LsarOpenPolicy2Parameter.pack([None, (0, None, None, 0, None, None), 0x20000000])
res = client.call(iid, 44, params)
## Unpack the resulting handle
handle = ndr.NdrContextHandle.unpack(ndr.NdrStream(res))
# As context handles carry 4 NdrLong of effective data, we can represent them as a GUID.
# NdrContextHandle is just a wrapper that packs/unpacks a GUID and takes care of the
# leading NdrLong(0) in the actual NDR representation of a context handle.
print("Context Handle is: {0}\n".format(handle))
## Craft parameters and call 'LsarEnumeratePrivileges'
x = LsarEnumeratePrivilegesParameter.pack([handle, 0, 10000])
res = client.call(iid, 2, x)
print("Privileges:")
## Unpack the resulting 'LSAPR_PRIVILEGE_ENUM_BUFFER'
priviledges = LSAPR_PRIVILEGE_ENUM_BUFFER.unpack(ndr.NdrStream(res))
for priv in priviledges:
    print(priv)
|
from datetime import datetime
from src import db, ma
class Review(db.Model):
__tablename__ = 'reviews'
id = db.Column(db.Integer, primary_key=True)
star = db.Column(db.Float, nullable=True)
title = db.Column(db.String(120), nullable=True)
review = db.Column(db.String(), nullable=False)
createdAt = db.Column(db.DateTime, nullable=True, default=datetime.utcnow)
updatedAt = db.Column(db.DateTime, nullable=True, default=datetime.utcnow)
reviewer_username = db.Column(db.String(120), nullable=False, unique=False)
movie_id = db.Column(db.String(120), nullable=False, unique=False)
def __init__(self,star,title,review,createdAt,updatedAt,reviewer_username,movie_id):
self.star = star
self.title = title
self.review = review
self.createdAt = createdAt
self.updatedAt = updatedAt
self.reviewer_username = reviewer_username
self.movie_id = movie_id
class ReviewSchema(ma.Schema):
class Meta:
fields = ('id','star','title','review','createdAt','updatedAt','reviewer_username','movie_id')
review_schema = ReviewSchema()
review_schemas = ReviewSchema(many=True)
|
import json
import logging
import re
from datetime import datetime, time
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.db import connection
from django.db.models import Count
from django.http import JsonResponse
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from urllib.request import urlopen
from config.utils import get_active_event
from events.models import Event, Ticket
def escape(text):
"""Escape slack control characters"""
    return text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
class ResponseType():
IN_CHANNEL = 'in_channel'
EPHEMERAL = 'ephemeral'
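    # Slack slash-command semantics: 'in_channel' responses are visible to everyone in the
    # channel, while 'ephemeral' responses are shown only to the user who invoked the command.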
class SlackView(View):
response_type = ResponseType.EPHEMERAL
def log_request(self, request):
logger = logging.getLogger('slack.requests')
logger.info("\n{} {}".format(request.method, request.path))
logger.info("GET:" + json.dumps(request.GET))
logger.info("POST:" + json.dumps(request.POST))
def authorize(self, request):
"""Checks the request contains a vaild slack authentication token"""
if not hasattr(settings, 'SLACK_VERIFICATION_TOKEN'):
raise ImproperlyConfigured("SLACK_VERIFICATION_TOKEN setting not set.")
expected_token = settings.SLACK_VERIFICATION_TOKEN
if not expected_token:
raise ImproperlyConfigured("SLACK_VERIFICATION_TOKEN setting is empty.")
token = request.POST.get("token")
if token != expected_token:
raise PermissionDenied("Invalid token")
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
self.log_request(request)
self.authorize(request)
return super().dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return JsonResponse({
"text": self.get_text(request),
"attachments": self.get_attachments(request),
"mrkdwn": True,
"response_type": self.response_type
})
def get_text(self, request):
return None
def get_attachments(self, request):
        return None
class TicketsView(SlackView):
response_type = ResponseType.IN_CHANNEL
def get_attachments(self, request):
categories, total = self.get_data()
lines = ["{} `{}`".format(t['category'], t['count']) for t in categories]
text = "\n".join(lines)
footer = "Total sold: {}".format(total)
return [{
"fallback": "Ticket sale overview:\n{}\n{}".format(text, footer),
"pretext": "*Ticket sale overview*",
"text": text,
"footer": footer,
"footer_icon": "https://2018.webcampzg.org/static/images/heart-16px.png",
"mrkdwn_in": ["text", "pretext"],
"color": "#9013FD"
}]
def get_data(self):
tickets = Ticket.objects.filter(event=get_active_event())
categories = (tickets
.values('category')
.annotate(count=Count('*'))
.order_by('-count'))
return categories, tickets.count()
class CommunityVoteView(SlackView):
response_type = ResponseType.IN_CHANNEL
def get_attachments(self, request):
rows = self.get_rows()
lines = ["{}: {} `{}`".format(*row) for row in rows]
text = "\n".join(lines)
if not text:
text = "No votes have been cast yet."
return [{
"fallback": "Community vote:\n{}".format(text),
"title": "Community vote",
"text": text,
"mrkdwn_in": ["text"],
"color": "#9013FD",
}]
def get_rows(self):
event = get_active_event()
sql = """
SELECT u.first_name || ' ' || u.last_name AS name, pa.title, count(*) AS count
FROM voting_communityvote cv
JOIN cfp_paperapplication pa ON pa.id = cv.application_id
JOIN cfp_callforpaper cfp ON cfp.id = pa.cfp_id
JOIN cfp_applicant a ON pa.applicant_id = a.id
JOIN people_user u ON a.user_id = u.id
WHERE cfp.event_id = {}
GROUP BY 1, 2
ORDER BY 3 DESC;
""".format(event.pk)
with connection.cursor() as cursor:
cursor.execute(sql)
return cursor.fetchall()
class TtlView(SlackView):
"""
Prints the time to the next WebCamp.
"""
response_type = ResponseType.IN_CHANNEL
def get_attachments(self, request):
msg = self.get_message()
return [{
"text": msg,
"mrkdwn_in": ["text"],
"color": "#9013FD",
}]
def format_delta(self, delta):
hours, rem = divmod(delta.seconds, 3600)
minutes, seconds = divmod(rem, 60)
return "{} days, {} hours, {} minutes, and {} seconds".format(
delta.days, hours, minutes, seconds)
def get_message(self):
today = timezone.now().date()
event = Event.objects.filter(begin_date__gte=today).order_by('begin_date').first()
if event:
event_start = datetime.combine(event.begin_date, time(9, 0))
delta = event_start - datetime.now()
return "{} starts in *{}*".format(
event.title, self.format_delta(delta))
return "There is no WebCamp scheduled :'("
class EntrioTicketCountView(SlackView):
response_type = ResponseType.IN_CHANNEL
def get_attachments(self, request):
key = self.kwargs.get('key')
try:
data = self.fetch_data(key)
except Exception:
return [{
"title": "Failed loading Entrio data :'(",
}]
return [{
"pretext": "*Workshop ticket sale overview*",
"text": self.get_tickets_text(data),
"mrkdwn_in": ["text", "pretext"],
"color": "#9013FD"
}]
def get_categories(self, data):
for category in data:
name = category['category_name'].strip()
name = re.sub(r"\([^)]+\)$", "", name).strip()
yield name, category['count']
def get_tickets_text(self, data):
categories = sorted(self.get_categories(data))
lines = ["{} `{}`".format(*t) for t in categories]
return "\n".join(lines)
def fetch_data(self, key):
url = "https://www.entrio.hr/api/get_ticket_count?key={}&format=json".format(key)
with urlopen(url) as f:
return json.loads(f.read().decode('utf-8'))
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tests.api import test_api
class TestAPIReleases(test_api.TestAPI):
def test_releases(self):
with test_api.make_runtime_storage(
{'releases': [
{'release_name': 'prehistory', 'end_date': 1365033600},
{'release_name': 'havana', 'end_date': 1381968000},
{'release_name': 'icehouse', 'end_date': 1397692800}]},
test_api.make_records(record_type=['commit'])):
response = self.app.get('/api/1.0/releases')
releases = json.loads(response.data)['data']
self.assertEqual(3, len(releases))
self.assertIn({'id': 'all', 'text': 'All'}, releases)
self.assertIn({'id': 'icehouse', 'text': 'Icehouse'}, releases)
|
"""
Functions to parse the HTML roster
"""
import re
from bs4 import BeautifulSoup
from hockeydata.constants import HTMLREPORTS
import hockeydata.scrape.common as common
def get_roster(game_id: str) -> dict:
"""
:param game_id:
:return: dict of players and coaches
"""
roster_html = get_raw_html(game_id)
players, coaches = parse_html(roster_html)
return {'players': players, 'coaches': coaches}
def get_raw_html(game_id: str):
"""
Gets the raw Roster HTML from htmlreports.
:param game_id:
:return:
"""
url = HTMLREPORTS + '{}{}/RO{}.HTM'.format(game_id[:4], int(game_id[:4]) + 1, game_id[4:])
res = common.get_page(url)
assert res is not None
return res
def parse_html(html) -> tuple:
    """
    Uses bs4 to parse the raw HTML. Falls back through 3 different parsers.
    :param html: raw roster HTML
    :return: tuple: (players, head_coaches)
    """
    soup = BeautifulSoup(html, "lxml")
    players = get_players(soup)
    head_coaches = get_coaches(soup)
    if not players:
        soup = BeautifulSoup(html, "html.parser")
        players = get_players(soup)
        head_coaches = get_coaches(soup)
    if not players:
        soup = BeautifulSoup(html, "html5lib")
        players = get_players(soup)
        head_coaches = get_coaches(soup)
    return players, head_coaches
def remove_captaincy(player: list) -> list:
    """
    Gets rid of (A) or (C) in the player's name.
    :param player:
    :return: the player entry with the captaincy tag stripped from the name
    """
    player[2] = re.sub(r'\(A\)|\(C\)', '', player[2]).strip()
    return player
def get_players(soup: BeautifulSoup) -> dict:
    tables = soup.find_all('table', {'align': 'center', 'border': '0', 'cellpadding': '0', 'cellspacing': '0', 'width': '100%'})
    # If it picks up nothing just return an empty dict so the caller can retry with another parser
    if not tables:
        return {}
del tables[0]
player_info = [table.find_all('td') for table in tables]
player_info = [[x.get_text() for x in group] for group in player_info]
player_info = [[group[i:i+3] for i in range(0, len(group), 3)] for group in player_info]
player_info = [[player for player in group if player[0] != '#'] for group in player_info]
# Add whether the player was a scratch
for i in range(len(player_info)):
for j in range(len(player_info[i])):
if i == 2 or i == 3:
player_info[i][j].append(True)
else:
player_info[i][j].append(False)
players = {'Away': player_info[0], 'Home': player_info[1]}
# Scratches aren't always included
if len(player_info) == 4:
players['Away'] += player_info[2]
players['Home'] += player_info[3]
    # Blank rows carry a non-breaking space in the number column; leave them untouched here and drop them below
players['Away'] = [remove_captaincy(i) if i[0] != u'\xa0' else i for i in players['Away']]
players['Home'] = [remove_captaincy(i) if i[0] != u'\xa0' else i for i in players['Home']]
# Filter out whitespace
players['Away'] = [i for i in players['Away'] if i[0] != u'\xa0']
players['Home'] = [i for i in players['Home'] if i[0] != u'\xa0']
return players
def get_coaches(soup: BeautifulSoup) -> dict:
    coaches = soup.find_all('tr', {'id': "HeadCoaches"})
    # If it picks up nothing just return an empty dict
    if not coaches:
        return {}
coaches = coaches[0].find_all('td')
return {
'Away': coaches[1].get_text(),
'Home': coaches[3].get_text()
}
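# Hedged usage sketch: this hits the NHL HTML reports site through
# common.get_page, so it is kept behind a __main__ guard. The game id is a
# hypothetical example; player entries follow the [number, position, name,
# scratched] layout built in get_players above.
if __name__ == "__main__":
    roster = get_roster("2019020001")
    print(roster['coaches'])                  # e.g. {'Away': '...', 'Home': '...'}
    for entry in roster['players']['Home'][:3]:
        print(entry)                          # e.g. ['11', 'C', 'PLAYER NAME', False]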
|
"""Module containing the logic for creating dynamicdict."""
import yaml
import json
from dynamicdict import DynamicDict
def create_from_json_file(filename, used_case_insensitive=False,
used_underscore_for_space=False,
used_underscore_for_hyphen=False,
**kwargs):
"""Create a dynamicdict instance from JSON filename.
Parameters:
filename (string): JSON filename.
used_case_insensitive (bool): the flag uses to get attribute
in lower case. Default is False.
used_underscore_for_space (bool): the flag uses to get attribute
if key has space, and space is translated as underscore.
Default is False.
used_underscore_for_hyphen (bool): the flag uses to get attribute
if key has hyphen, and hyphen is translated as underscore.
Default is False.
kwargs (dict): keyword arguments which would use for JSON instantiation.
"""
from io import IOBase
if isinstance(filename, IOBase):
obj = json.load(filename, **kwargs)
else:
with open(filename) as stream:
obj = json.load(stream, **kwargs)
ddict_obj = DynamicDict(
obj,
used_case_insensitive=used_case_insensitive,
used_underscore_for_space=used_underscore_for_space,
used_underscore_for_hyphen=used_underscore_for_hyphen,
)
return ddict_obj
def create_from_json_data(data, used_case_insensitive=False,
used_underscore_for_space=False,
used_underscore_for_hyphen=False,
**kwargs):
"""Create a dynamicdict instance from JSON data.
Parameters:
data (string): JSON data in string format.
        used_case_insensitive (bool): if True, attributes can also be
            accessed using lower-case names. Default is False.
        used_underscore_for_space (bool): if True, a space in a key is
            translated to an underscore for attribute access.
            Default is False.
        used_underscore_for_hyphen (bool): if True, a hyphen in a key is
            translated to an underscore for attribute access.
            Default is False.
        kwargs (dict): keyword arguments passed through to json.loads.
"""
obj = json.loads(data, **kwargs)
ddict_obj = DynamicDict(
obj,
used_case_insensitive=used_case_insensitive,
used_underscore_for_space=used_underscore_for_space,
used_underscore_for_hyphen=used_underscore_for_hyphen,
)
return ddict_obj
def create_from_yaml_file(filename, loader=yaml.SafeLoader,
used_case_insensitive=False,
used_underscore_for_space=False,
used_underscore_for_hyphen=False,):
"""Create a dynamicdict instance from YAML file.
Parameters:
filename (string): a YAML file.
loader (yaml.loader.Loader): a YAML loader.
used_case_insensitive (bool): the flag uses to get attribute
in lower case. Default is False.
used_underscore_for_space (bool): the flag uses to get attribute
if key has space, and space is translated as underscore.
Default is False.
used_underscore_for_hyphen (bool): the flag uses to get attribute
if key has hyphen, and hyphen is translated as underscore.
Default is False.
"""
with open(filename) as stream:
obj = yaml.load(stream, Loader=loader)
ddict_obj = DynamicDict(
obj,
used_case_insensitive=used_case_insensitive,
used_underscore_for_space=used_underscore_for_space,
used_underscore_for_hyphen=used_underscore_for_hyphen,
)
return ddict_obj
def create_from_yaml_data(data, loader=yaml.SafeLoader,
used_case_insensitive=False,
used_underscore_for_space=False,
used_underscore_for_hyphen=False):
"""Create a dynamicdict instance from YAML data.
Parameters:
        data (string): YAML data in string format.
        loader (yaml.loader.Loader): a YAML loader. Default is yaml.SafeLoader.
        used_case_insensitive (bool): if True, attributes can also be
            accessed using lower-case names. Default is False.
        used_underscore_for_space (bool): if True, a space in a key is
            translated to an underscore for attribute access.
            Default is False.
        used_underscore_for_hyphen (bool): if True, a hyphen in a key is
            translated to an underscore for attribute access.
            Default is False.
"""
obj = yaml.load(data, Loader=loader)
ddict_obj = DynamicDict(
obj,
used_case_insensitive=used_case_insensitive,
used_underscore_for_space=used_underscore_for_space,
used_underscore_for_hyphen=used_underscore_for_hyphen,
)
return ddict_obj
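# Hedged usage sketch: the attribute names below assume DynamicDict applies the
# documented key translations (lower-casing, space/hyphen to underscore); the
# exact behaviour lives in the DynamicDict class itself.
if __name__ == "__main__":
    cfg = create_from_yaml_data(
        "Server-Name: alpha\nIP Address: 10.0.0.1\n",
        used_case_insensitive=True,
        used_underscore_for_space=True,
        used_underscore_for_hyphen=True,
    )
    print(cfg.server_name)   # assumed: 'alpha'
    print(cfg.ip_address)    # assumed: '10.0.0.1'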
|