content stringlengths 5 1.05M |
|---|
"""API Client module for handling requests and responses."""
import requests
import datetime
from .exceptions import (
PygitaException,
ServerConnectionError,
BadRequestError,
UnauthorisedError,
RequestFailedError,
ServerError,
AuthorizationError,
)
from .constants import (
TOKEN_VALIDITY,
)
from .verse import Verse
from .chapter import Chapter
class Client:
    """API Client for the bhagavadgita.io REST API.

    Handles OAuth2 client-credentials token acquisition/refresh and
    exposes helpers to fetch verses and chapters.
    """

    def __init__(
        self,
        CLIENT_ID,
        CLIENT_SECRET,
        grant_type=None,
        scope=None,
    ):
        """
        Client object constructor.

        Parameters:
            -CLIENT_ID: Obtained from Account Dashboard
             after registering an app on https://bhagavadgita.io
            -CLIENT_SECRET: Obtained from Account Dashboard
             after registering an app on https://bhagavadgita.io.
            -grant_type: Grant type (optional).
             Default value: client_credentials.
            -scope: The resources that you would like to access (optional).
             Value: verse, chapter, verse chapter
             Default value: verse chapter

        Returns the Client object.
        """
        if grant_type is None:
            grant_type = "client_credentials"
        if scope is None:
            scope = "verse chapter"
        self.CLIENT_ID = CLIENT_ID
        self.CLIENT_SECRET = CLIENT_SECRET
        # Token state: both stay None until the first successful auth.
        self.token_expiry = None
        self.access_token = None
        self.grant_type = grant_type
        self.scope = scope
        self.__API__BASE_URL = "https://bhagavadgita.io"
        self.__API__END_POINT = "/api/v1/"
        self.__API__TOKEN_END_POINT = "/auth/oauth/token"

    def __apiRequest(self, url, params):
        """
        Used internally by the Client object to make calls to the API.

        Parameters:
            -url: the URL of the API endpoint.
            -params: parameters for the request.

        Returns the JSON response in the form of a Dictionary.
        Otherwise, an exception is raised.
        """
        params["access_token"] = self.get_token()
        try:
            response = requests.get(url, params)
        except requests.exceptions.RequestException:
            # Fixed message: the original contained a stray "/" and embedded
            # newlines left over from a malformed line continuation.
            raise ServerConnectionError("Failed to connect to bhagavadgita.io.")
        response_code = response.status_code
        if response_code == 400:
            raise BadRequestError("Wrong parameters are passed")
        elif response_code == 401:
            raise UnauthorisedError("Your access_token is not valid")
        elif response_code == 402:
            raise RequestFailedError("Request Failed")
        elif response_code == 500:
            raise ServerError("Server side error")
        else:
            try:
                response.raise_for_status()
                response = response.json()
            except ValueError:
                # Body was not valid JSON.
                raise PygitaException("Server returned invalid response.")
            except Exception:
                raise PygitaException(
                    "An unknown error occurred during the parsing of response."
                )
        return response

    def request_token(self):
        """
        Requests an access_token from the API.

        Returns token if access_token is successfully obtained.
        Otherwise, an AuthorizationError is raised.
        """
        # Build the token URL from the configured endpoints instead of
        # duplicating the hard-coded string.
        url = self.__API__BASE_URL + self.__API__TOKEN_END_POINT
        try:
            request = requests.post(
                url,
                data={
                    "client_id": self.CLIENT_ID,
                    "client_secret": self.CLIENT_SECRET,
                    "grant_type": self.grant_type,
                    "scope": self.scope,
                },
            )
            token = request.json()["access_token"]
        except Exception:
            # Covers connection failures, non-JSON bodies and a missing
            # "access_token" key alike.
            raise AuthorizationError("Unable to get access_token.")
        return token

    def is_token_valid(self):
        """Return True when a token exists and has not yet expired."""
        if self.access_token is None or self.token_expiry is None:
            return False
        current_time = datetime.datetime.now()
        if current_time >= self.token_expiry:
            return False
        return True

    def get_token(self):
        """Return a valid access token, requesting a fresh one if needed."""
        if self.is_token_valid():
            return self.access_token
        else:
            self.access_token = self.request_token()
            current_time = datetime.datetime.now()
            validity = datetime.timedelta(seconds=TOKEN_VALIDITY)
            self.token_expiry = current_time + validity
            return self.access_token

    def __request_verse(self, chapter_number, verse_number, language):
        """Fetch the raw JSON for one verse; 'hi' is the only non-default language."""
        if language == "hi":
            params = {"language": language}
        else:
            params = {}
        url = self.__API__BASE_URL + self.__API__END_POINT
        url += "chapters/{}/verses/{}".format(chapter_number, verse_number)
        return self.__apiRequest(url, params)

    def get_verse(self, chapter_number, verse_number, language="en"):
        """Return a Verse object for the given chapter/verse numbers."""
        json_data = self.__request_verse(
            chapter_number,
            verse_number,
            language,
        )
        return Verse(self, json_data)

    def __request_chapter(self, chapter_number, language):
        """Fetch the raw JSON for one chapter; 'hi' is the only non-default language."""
        if language == "hi":
            params = {"language": language}
        else:
            params = {}
        url = self.__API__BASE_URL + self.__API__END_POINT
        url += "chapters/{}".format(chapter_number)
        return self.__apiRequest(url, params)

    def get_chapter(self, chapter_number, language="en"):
        """Return a Chapter object for the given chapter number."""
        json_data = self.__request_chapter(
            chapter_number,
            language,
        )
        return Chapter(self, json_data)
|
# Print the first 10 terms of an arithmetic progression (PA) given its
# first term and common ratio, ending with a 'THE END' marker.
print('=' * 30)
print(' 10 TERMOS DE UMA PA ')
print('=' * 30)
termo_inicial = int(input('Primeiro termo: '))
passo = int(input('Razão: '))
# Tenth term of the progression: a1 + 9 * r.
ultimo = termo_inicial + 9 * passo
for termo in range(termo_inicial, ultimo + passo, passo):
    print(f'{termo}', end=' -> ')
print('THE END')
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from mephisto.data_model._db_backed_meta import (
MephistoDBBackedMeta,
MephistoDataModelComponentMixin,
)
from typing import Optional, Mapping, TYPE_CHECKING, Any
if TYPE_CHECKING:
from mephisto.abstractions.database import MephistoDB
from mephisto.utils.logger_core import get_logger
logger = get_logger(name=__name__)
# Comparator identifiers understood by the qualification system.
QUAL_GREATER = "GreaterThan"
QUAL_GREATER_EQUAL = "GreaterThanOrEqualTo"
QUAL_LESS = "LessThan"
QUAL_LESS_EQUAL = "LessThanOrEqualTo"
QUAL_EQUAL = "EqualTo"
QUAL_NOT_EQUAL = "NotEqualTo"
QUAL_EXISTS = "Exists"
QUAL_NOT_EXIST = "DoesNotExist"
QUAL_IN_LIST = "In"
QUAL_NOT_IN_LIST = "NotIn"

# Every comparator a qualification requirement may use.
SUPPORTED_COMPARATORS = [
    QUAL_GREATER,
    QUAL_GREATER_EQUAL,
    QUAL_LESS,
    QUAL_LESS_EQUAL,
    QUAL_EQUAL,
    QUAL_NOT_EQUAL,
    QUAL_EXISTS,
    QUAL_NOT_EXIST,
    QUAL_IN_LIST,
    QUAL_NOT_IN_LIST,
]

# Maps each value comparator to its predicate. QUAL_EXISTS and
# QUAL_NOT_EXIST are intentionally absent: they are presence checks,
# not value comparisons.
COMPARATOR_OPERATIONS = {
    QUAL_GREATER: lambda x, y: x > y,
    QUAL_GREATER_EQUAL: lambda x, y: x >= y,
    QUAL_LESS: lambda x, y: x < y,
    QUAL_LESS_EQUAL: lambda x, y: x <= y,
    QUAL_EQUAL: lambda x, y: x == y,
    QUAL_NOT_EQUAL: lambda x, y: x != y,  # idiomatic form of `not x == y`
    QUAL_IN_LIST: lambda x, y: x in y,
    QUAL_NOT_IN_LIST: lambda x, y: x not in y,
}
class Qualification(MephistoDataModelComponentMixin, metaclass=MephistoDBBackedMeta):
    """Simple convenience wrapper for Qualifications in the data model"""

    def __init__(
        self,
        db: "MephistoDB",
        db_id: str,
        row: Optional[Mapping[str, Any]] = None,
        _used_new_call: bool = False,
    ):
        # Guard against direct construction: instances must come from .get().
        if not _used_new_call:
            raise AssertionError(
                "Direct Qualification and data model access via Qualification(db, id) is "
                "now deprecated in favor of calling Qualification.get(db, id). "
            )
        self.db: "MephistoDB" = db
        # Fall back to a database lookup when the caller did not supply a row.
        resolved_row = row if row is not None else db.get_qualification(db_id)
        assert resolved_row is not None, f"Given db_id {db_id} did not exist in given db"
        self.db_id: str = resolved_row["qualification_id"]
        self.qualification_name: str = resolved_row["qualification_name"]
class GrantedQualification:
    """Convenience wrapper for tracking granted qualifications"""

    def __init__(
        self,
        db: "MephistoDB",
        qualification_id: str,
        worker_id: str,
        row: Optional[Mapping[str, Any]] = None,
    ):
        """Load the grant of `qualification_id` to `worker_id`, from `row` if given."""
        self.db: "MephistoDB" = db
        if row is None:
            row = db.get_granted_qualification(qualification_id, worker_id)
        # Plain string literal: the original was an f-string with no
        # placeholders (lint F541).
        assert row is not None, "Granted qualification did not exist in given db"
        self.worker_id: str = row["worker_id"]
        self.qualification_id: str = row["qualification_id"]
        self.value: str = row["value"]
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.context as context
from mindspore.common import dtype as mstype
from mindspore.common import Tensor, Parameter
# Maps each op name to the MindSpore ScatterNd* primitive under test.
func_map = {
    "mul": ops.ScatterNdMul,
    "add": ops.ScatterNdAdd,
    "sub": ops.ScatterNdSub,
}
# NumPy reference implementations of the same elementwise updates,
# used by scatter_nd_np to compute expected results.
np_func_map = {
    "mul": lambda a, b: a * b,
    "add": lambda a, b: a + b,
    "sub": lambda a, b: a - b,
}
class TestScatterNdNet(nn.Cell):
    """Cell that applies one ScatterNd* primitive to its parameters."""

    def __init__(self, func, lock, input_x, indices, updates):
        super(TestScatterNdNet, self).__init__()
        # Instantiate the primitive for `func` with the requested locking mode.
        scatter_cls = func_map.get(func)
        self.scatter_func = scatter_cls(use_locking=lock)
        self.input_x = Parameter(input_x, name="input_x")
        self.indices = Parameter(indices, name="indices")
        self.updates = Parameter(updates, name="updates")

    def construct(self):
        # The scatter op updates input_x in place; return the mutated parameter.
        self.scatter_func(self.input_x, self.indices, self.updates)
        return self.input_x
def scatter_nd_np(func, input_x, indices, updates):
    """NumPy reference implementation of the ScatterNd* in-place updates."""
    out = input_x.asnumpy().copy()
    idx_arr = indices.asnumpy().copy()
    upd_arr = updates.asnumpy().copy()
    op = np_func_map.get(func)
    # Enumerate every index tuple over all but the last indices dimension;
    # the last dimension holds the coordinates into `out`.
    for upd_idx, _ in np.ndenumerate(np.zeros(indices.shape[:-1])):
        source = tuple(upd_idx)
        target = tuple(idx_arr[source])
        out[target] = op(out[target], upd_arr[source])
    return out
def compare_with_numpy(func, lock, input_x, indices, updates):
    """Run the op in graph and pynative mode and compare with the NumPy reference."""
    expected = scatter_nd_np(func, input_x, indices, updates)
    # Exercise both execution modes on CPU against the same expectation.
    for mode in (context.GRAPH_MODE, context.PYNATIVE_MODE):
        context.set_context(mode=mode, device_target="CPU")
        output = TestScatterNdNet(func, lock, input_x, indices, updates)()
        np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('lock', [True, False])
@pytest.mark.parametrize('func', ['mul', 'sub', 'add'])
@pytest.mark.parametrize('data_type', [mstype.float32, mstype.float64])
@pytest.mark.parametrize('index_type', [mstype.int32])
def test_scatter_nd_small_float(lock, func, data_type, index_type):
    """
    Feature: ScatterNd* operators.
    Description: test cases for ScatterNd* operator
    Expectation: the result match numpy implementation.
    """
    x_np = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
    idx_np = np.array([[0, 0], [1, 1]])
    upd_np = np.array([1.0, 2.2])
    compare_with_numpy(
        func,
        lock,
        Tensor(x_np, data_type),
        Tensor(idx_np, index_type),
        Tensor(upd_np, data_type),
    )
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('lock', [True, False])
@pytest.mark.parametrize('func', ['mul', 'sub', 'add'])
@pytest.mark.parametrize('data_type', [mstype.int8, mstype.int16, mstype.int32, mstype.int64])
@pytest.mark.parametrize('index_type', [mstype.int32])
def test_scatter_nd_small_int(lock, func, data_type, index_type):
    """
    Feature: ScatterNd* operators.
    Description: test cases for ScatterNd* operator
    Expectation: the result match numpy implementation.
    """
    x_np = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    idx_np = np.array([[4], [3], [1], [7]])
    upd_np = np.array([9, 10, 11, 12])
    compare_with_numpy(
        func,
        lock,
        Tensor(x_np, data_type),
        Tensor(idx_np, index_type),
        Tensor(upd_np, data_type),
    )
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('lock', [True, False])
@pytest.mark.parametrize('func', ['mul', 'sub', 'add'])
@pytest.mark.parametrize('data_type', [mstype.int8, mstype.int16, mstype.int32, mstype.int64])
@pytest.mark.parametrize('index_type', [mstype.int32])
def test_scatter_nd_multi_dims(lock, func, data_type, index_type):
    """
    Feature: ScatterNd* operators.
    Description: test cases for ScatterNd* operator
    Expectation: the result match numpy implementation.
    """
    base = np.ones((4, 4, 4))
    idx_np = np.array([[0], [2]])
    # Both target slices receive the same 4x4 update block.
    block = [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]
    upd_np = np.array([block, block])
    compare_with_numpy(
        func,
        lock,
        Tensor(base, data_type),
        Tensor(idx_np, index_type),
        Tensor(upd_np, data_type),
    )
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('lock', [True, False])
@pytest.mark.parametrize('func', ['mul', 'sub', 'add'])
@pytest.mark.parametrize('data_type', [mstype.int8, mstype.int16, mstype.int32, mstype.int64])
@pytest.mark.parametrize('index_type', [mstype.int32])
def test_scatter_nd_one_value(lock, func, data_type, index_type):
    """
    Feature: ScatterNd* operators.
    Description: test cases for ScatterNd* operator
    Expectation: the result match numpy implementation.
    """
    x_np = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
    compare_with_numpy(
        func,
        lock,
        Tensor(x_np, data_type),
        Tensor(np.array([[0, 1]]), index_type),
        Tensor(np.array([1.0]), data_type),
    )
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('lock', [True])
@pytest.mark.parametrize('func', ['mul', 'sub', 'add'])
@pytest.mark.parametrize('data_type', [mstype.int64])
@pytest.mark.parametrize('index_type', [mstype.int32])
def test_scatter_nd_lock(lock, func, data_type, index_type):
    """
    Feature: ScatterNd* operators.
    Description: test cases for ScatterNd* operator with use_locking is true.
    Expectation: the result match numpy implementation.
    """
    # All 30 updates target index 0, so locking matters for correctness.
    x = Tensor(np.ones((5, 4, 4)), data_type)
    idx = Tensor(np.zeros((30, 1)), index_type)
    upd = Tensor(np.random.randint(low=1, high=3, size=(30, 4, 4)), data_type)
    compare_with_numpy(func, lock, x, idx, upd)
|
from .network import ServerSocket
import sys
import time
import threading
import json
from concurrent.futures import ThreadPoolExecutor
import logging
import argparse
lag = 0  # simulated per-request latency in seconds; set from the CLI in __main__


class Server(object):
    """Threaded request/response server built on ServerSocket.

    Accepts connections and dispatches each one to a worker thread from a
    bounded pool, tracking the number of live connections and refusing new
    clients once the count exceeds ``refuse``.
    """

    def __init__(self, host, port, max_con=100, refuse=200):
        self.host = host
        self.port = port
        self.sock = ServerSocket(host, port)
        self.con = 0  # live connection count, guarded by con_lock
        self.con_lock = threading.Lock()
        self.max_con = max_con  # worker threads in the pool
        self.refuse = refuse  # connection count above which clients are refused

    def handle_command(self, req):
        """Compute the response dict for a single decoded request dict."""
        global lag
        time.sleep(lag)  # optional simulated processing lag
        if req.get('cmd', None) == 'ping':
            return {'status': 'alive'}
        return {'status': 'ok'}

    def handle_client(self, sock):
        """Serve one client until it disconnects, then release its slot."""
        try:
            req = sock.recv()
            while req:
                logging.debug('Received: {}'.format(req))
                resp = self.handle_command(req)
                sock.send(resp)
                logging.debug('Sent: {}'.format(resp))
                req = sock.recv()
        except json.decoder.JSONDecodeError:
            # Disconnection from the other end
            pass
        finally:
            logging.debug("Closing connection")
            sock.disconnect()
            with self.con_lock:
                self.con -= 1

    def activate(self):
        """Accept connections forever, dispatching each to the thread pool."""
        self.sock.listen()
        with ThreadPoolExecutor(max_workers=self.max_con) as executor:
            while True:
                connection, client_address = self.sock.accept()
                logging.debug('connection from {}'.format(client_address))
                with self.con_lock:
                    refused = self.con > self.refuse
                    if not refused:
                        # Count the connection *before* the worker starts so
                        # the decrement in handle_client cannot race ahead of
                        # this increment (previously the count was bumped
                        # after executor.submit).
                        self.con += 1
                if refused:
                    connection.send({'msg': 'too many connections'})
                    connection.disconnect()
                    continue
                executor.submit(Server.handle_client, self, connection)
                logging.debug("Starting thread {}".format(client_address))
if __name__ == '__main__':
    # Configure debug logging, parse the CLI flags, then start the server.
    logging.basicConfig(
        format='%(asctime)s [%(levelname)s] - %(message)s',
        level=logging.DEBUG,
    )
    parser = argparse.ArgumentParser(description='Run a Hash Ring.')
    parser.add_argument('-H', '--host', dest='host', type=str,
                        default='localhost', help='Host to run the server on.')
    parser.add_argument('-p', '--port', dest='port', type=int,
                        default=5003, help='Port to run the server on.')
    parser.add_argument('-l', '--lag', dest='lag', type=int,
                        default=0, help='Simulate server lag')
    cli_args = parser.parse_args()
    lag = cli_args.lag
    server = Server(cli_args.host, cli_args.port)
    server.activate()
|
"""Tests for `flask_image_search` package."""
import logging
import os
import numpy as np
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # noqa
import pytest
import zarr
from sqlalchemy.sql.expression import func
# Dedicated test logger emitting timestamped "Testing:" lines to stderr.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s Testing: %(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# Absolute path of this test directory and the query image used by the searches.
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
IMAGE = os.path.join(BASE_PATH, "./test.jpg")
@pytest.mark.filterwarnings("ignore::DeprecationWarning:tensorflow")
@pytest.mark.parametrize(
    "image_search",
    ["default", "vgg16", "vgg19", "inception_v3"],
    ids=["default", "vgg16", "vgg19", "inception_v3"],
    indirect=["image_search"]
)
def test_index_image(Image, image_search, tmp_path):
    """Test that indexing images is working correctly"""
    def indexed_count(arr):
        # Number of feature rows containing at least one non-zero entry.
        return np.sum(np.any(arr != 0, axis=1))

    # Snapshot the current feature store into a temporary zarr array.
    features = image_search.storage['/image_features']
    tmp_storage = zarr.open(
        str(tmp_path / 'tmp.zarr'),
        mode='a',
        shape=features.shape,
        chunks=features.chunks,
        dtype=np.float32,
    )
    tmp_storage[:] = features[:]
    # choose a random image to delete from the image index
    victim = Image.query.order_by(func.random()).first()
    image_search.delete_index(victim)
    # Exactly one indexed row should have disappeared.
    assert indexed_count(tmp_storage[:]) - 1 == indexed_count(
        image_search.storage['/image_features'][:]
    )
    image_search.index_model(Image, threaded=False)  # index all missing images
    # Re-indexing restores the original count.
    assert indexed_count(tmp_storage[:]) == indexed_count(
        image_search.storage['/image_features'][:]
    )
@pytest.mark.parametrize(
    "image_search, expected",
    [
        ("vgg16", [4512, 2649, 4514, 4516, 2194]),
        ("vgg19", [2649, 4512, 4514, 2197, 4516]),
        ("inception_v3", [4512, 4516, 4514, 5171, 2649]),
    ],
    ids=["vgg16", "vgg19", "inception_v3"],
    indirect=["image_search"],
)
def test_search(Image, image_search, expected):
    """Search directly and verify the ranked result ids."""
    results = image_search.search(Image, IMAGE, limit=5)
    # check that the results are correct by checking the ids
    result_ids = [result[0] for result in results]
    assert result_ids == expected
@pytest.mark.parametrize(
    "image_search, expected",
    [
        ("vgg16", [4512, 2649, 4514, 4516, 2194]),
        ("vgg19", [2649, 4512, 4514, 2197, 4516]),
        ("inception_v3", [4512, 4516, 4514, 5171, 2649]),
    ],
    ids=["vgg16", "vgg19", "inception_v3"],
    indirect=["image_search"]
)
def test_query_search(Image, image_search, expected):
    """Order a model query by image similarity and verify the returned ids."""
    ranking = image_search.case(IMAGE, Image, limit=5)
    images = Image.query.order_by(ranking).limit(5)
    # check that the correct Images were returned
    assert [image.id for image in images] == expected
@pytest.mark.parametrize(
    "image_search, expected",
    [
        (
            "vgg16",
            {
                439: [4512, 2649, 2204, 4513, 5115, 5117, 5116],
                371: [4514, 4516, 4517, 4518, 1798, 1799, 4515, 4519, 1800],
                438: [2194, 2197, 2196, 2193, 2195]
            }
        ),
        (
            "vgg19",
            {
                439: [2649, 4512, 2204, 4513, 5115, 5117, 5116],
                371: [4514, 4516, 4517, 4518, 4515, 1798, 4519, 1799, 1800],
                438: [2197, 2194, 2196, 2195, 2193]
            }
        ),
        (
            "inception_v3",
            {
                439: [4512, 2649, 2204, 4513, 5116, 5117, 5115],
                371: [4516, 4514, 1798, 4519, 4515, 1800, 4518, 4517, 1799],
                1011: [5171, 5172, 5170, 5173, 5178, 5180, 5177, 5179, 5175, 5176, 5174, 5181]
            }
        ),
    ],
    ids=["vgg16", "vgg19", "inception_v3"],
    indirect=["image_search"]
)
def test_query_join_search(db, image_search, Image, Radio, expected):
    """Join Radios to Images, order by similarity, and verify the grouping."""
    query = (
        Radio.query
        .join(Image)  # join to images
        .options(db.contains_eager(Radio.images))
        .order_by(image_search.case(IMAGE, Image))
    )
    radios = query.all()[:3]
    # The three best radios appear in the expected order...
    assert [radio.id for radio in radios] == list(expected.keys())
    # ...and each radio's eager-loaded images are ranked as expected.
    for radio in radios:
        assert [image.id for image in radio.images] == expected[radio.id]
|
from django import forms
from .models import Customer
class DealerAddCustForm(forms.ModelForm):
    """Dealer-facing form for creating a Customer with vehicle details.

    Field declaration order determines the rendering order of the form.
    """
    # NOTE(review): every field is a plain CharField — cust_email presumably
    # should be forms.EmailField for address validation; confirm against the
    # Customer model before changing.
    cust_email = forms.CharField()
    cust_address = forms.CharField()
    fname = forms.CharField()
    lname = forms.CharField()
    phone = forms.CharField()
    notes = forms.CharField()
    vin = forms.CharField()
    car_make = forms.CharField()
    car_model = forms.CharField()

    class Meta:
        model = Customer
        fields = ('cust_email', 'cust_address', 'fname', 'lname', 'phone', 'notes', 'vin', 'car_make', 'car_model')
|
from django.shortcuts import redirect, render, HttpResponseRedirect, reverse
from twitteruser.models import Uzer
from authentication.forms import LoginForm, SignUpForm
from django.contrib.auth import login, logout, authenticate
# Create your views here.
def signup_view(request):
    """Create a new Uzer account.

    On GET, render an empty signup form. On POST, validate and create the
    user, then redirect home; when validation fails, re-render the *bound*
    form so its errors are displayed (previously a fresh unbound form was
    created, silently discarding the validation errors).
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            user = Uzer.objects.create_user(
                username=data['username'],
                password=data['password'],
                email=data['email'],
                first_name=data['first_name'],
                last_name=data['last_name'],
            )
            # NOTE: create_user already persists the user; this save is a
            # harmless no-op kept for parity with the original behavior.
            user.save()
            return HttpResponseRedirect(reverse('home'))
    else:
        form = SignUpForm()
    return render(request, 'signup.html', {'form': form})
def login_view(request):
    """Authenticate and log in a user.

    On GET, render an empty login form. On POST, authenticate and redirect
    home on success; on failure, re-render the *bound* form so validation
    errors are shown (previously a fresh unbound form replaced it).
    """
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            user = authenticate(
                username=data['username'], password=data['password'])
            if user:
                login(request, user)
                return HttpResponseRedirect(reverse('home'))
    else:
        form = LoginForm()
    return render(request, 'login.html', {'form': form})
def logout_view(request):
    """Log the current user out and redirect to the home page."""
    # logout() returns None; the original bound it to an unused variable.
    logout(request)
    return redirect('home')
|
# -*- coding: UTF-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette.by import By
from marionette.marionette import Actions
from marionette.wait import Wait
from gaiatest.apps.base import Base
class Keyboard(Base):
'''
There are two underlying strategies in this class;
* send() method which uses logic to traverse the keyboard to type the string sent to it.
Send should be used in tests where the layout of the keyboard is not tested and only string input is important
* tap_x() or anything not send() methods which do not use logic to change keyboard panels.
Tap should be used where the keyboard is expected to open with that key visible
The methods in this class employ a lot of aggressive frame switching to the keyboard and back to the
displayed app because it predominantly acts as a utility class and thus it works best when the main focus
of the test is on the web app rather than the keyboard itself.
'''
name = "Keyboard"
# special characters look-up table in English standard keyboard
lookup_table = {'0': 'º',
'?': '¿',
'$': '€£¥',
'!': '¡',
'a': 'áàâäåãāæ',
'c': 'çćč',
'e': 'éèêëēę€ɛ',
'i': 'įīîìíï',
'l': '£ł',
'n': 'ńñ',
'o': 'ɵøœōôòóö',
's': 'ßśš$',
'u': 'ūûùúü',
'y': '¥ÿ',
'z': 'žźż',
'A': 'ÁÀÂÄÅÃĀÆ',
'C': 'ÇĆČ',
'E': 'ÉÈÊËĒĘ€Ɛ',
'I': 'ĮĪÎÌÍÏ',
'L': '£Ł',
'N': 'ŃÑ',
'O': 'ƟØŒŌÔÒÓÖ',
'S': 'ŚŠŞ',
'U': 'ŪÛÙÚÜ',
'Y': '¥Ÿ',
'Z': 'ŽŹŻ'}
# keyboard table
keyboard_table = ['english',
'dvorak',
'otherlatins',
'cyrillic',
'arabic',
'hebrew',
'zhuyin',
'pinyin',
'greek',
'japanese',
'portuguese',
'spanish']
# special keys locators
_language_key_locator = (By.CSS_SELECTOR, ".keyboard-row button[data-keycode='-3']")
_dotcom_key_locator = (By.CSS_SELECTOR, ".keyboard-row button[data-compositekey='.com']")
_numeric_sign_key = '-2'
_alpha_key = '-1'
_backspace_key = '8'
_enter_key = '13'
_alt_key = '18'
_upper_case_key = '20'
_space_key = '32'
# keyboard app locators
_keyboards_locator = (By.ID, 'keyboards')
_keyboard_frame_locator = (By.CSS_SELECTOR, '#keyboards iframe:not([hidden])')
_button_locator = (By.CSS_SELECTOR, '.keyboard-type-container[data-active] button.keyboard-key[data-keycode="%s"], .keyboard-type-container[data-active] button.keyboard-key[data-keycode-upper="%s"]')
_highlight_key_locator = (By.CSS_SELECTOR, 'div.highlighted button')
_predicted_word_locator = (By.CSS_SELECTOR, '.autocorrect')
# find the key to long press and return
def _find_key_for_longpress(self, input_value):
for key_to_press, extended_values in self.lookup_table.iteritems():
if input_value in extended_values:
return key_to_press
# Try to switch to the correct layout. There are 3 keyboard layers:
# ABC (Default), 123 (Symbols_1) and ALT (Symbols_2)
def _switch_to_correct_layout(self, val):
layout_page = self._layout_page
current_input_type = self._current_input_type
if val.isspace():
# Space is available on every keyboard panel
pass
# Alphabetic keys available on the Default page
elif val.isalpha():
is_upper_case = self._is_upper_case
# If the key to press isalpha and the keyboard layout is not, go back to Default
if not layout_page == 'Default':
self._tap(self._alpha_key)
self.wait_for_condition(lambda m: self._layout_page == 'Default')
# If the key to press isupper and the keyboard is not (or vice versa) then press shift
if not val.isupper() == is_upper_case:
self._tap(self._upper_case_key)
self.wait_for_condition(lambda m: is_upper_case != self._is_upper_case)
# Numbers and symbols are in other keyboard panels
else:
# If it's not space or alpha then it must be in 123 or ALT.
# It can't be in Default so let's go into 123 and then try to find it
if not current_input_type == 'number' and layout_page == 'Default':
self._tap(self._numeric_sign_key)
self.wait_for_element_displayed(*self._key_locator(self._alpha_key))
# If it is not present here then it must be in one of the ALT section
if not self.is_element_present(*self._key_locator(val)):
layout_page = self._layout_page
self._tap(self._alt_key)
self.wait_for_condition(lambda m: layout_page != self._layout_page)
@property
def _is_upper_case(self):
return self.marionette.execute_script('return window.wrappedJSObject.isUpperCase;')
@property
def _is_upper_case_locked(self):
return self.marionette.execute_script('return window.wrappedJSObject.isUpperCaseLocked;')
@property
def _current_input_type(self):
return self.marionette.execute_script('return window.wrappedJSObject.currentInputType;')
@property
def _layout_page(self):
return self.marionette.execute_script('return window.wrappedJSObject.layoutPage;')
# this is to switch to the frame of keyboard
def switch_to_keyboard(self):
self.marionette.switch_to_frame()
keyboards = self.marionette.find_element(*self._keyboards_locator)
self.wait_for_condition(lambda m: 'hide' not in keyboards.get_attribute('class') and \
not keyboards.get_attribute('data-transition-in'),
message="Keyboard not interpreted as displayed. Debug is_displayed(): %s"
%keyboards.is_displayed())
keybframe = self.marionette.find_element(*self._keyboard_frame_locator)
return self.marionette.switch_to_frame(keybframe, focus=False)
@property
def current_keyboard(self):
self.marionette.switch_to_frame()
keyboard = self.marionette.find_element(*self._keyboard_frame_locator).get_attribute('data-frame-name')
return keyboard
# this is to get the locator of desired key on keyboard
def _key_locator(self, val):
if len(val) == 1:
val = ord(val)
return (self._button_locator[0], self._button_locator[1] % (val, val))
# this is to tap on desired key on keyboard
def _tap(self, val):
is_upper_case = self._is_upper_case
is_upper_case_locked = self._is_upper_case_locked
self.wait_for_element_displayed(*self._key_locator(val))
key = self.marionette.find_element(*self._key_locator(val))
Actions(self.marionette).press(key).wait(0.1).release().perform()
# These two tap cases are most important because they cause the keyboard to change state which affects next step
if val.isspace():
# Space switches back to Default layout
self.wait_for_condition(lambda m: self._layout_page == 'Default')
if val.isupper() and is_upper_case and not is_upper_case_locked:
# Tapping key with shift enabled causes the keyboard to switch back to lower
self.wait_for_condition(lambda m: not self._is_upper_case)
# This is for selecting special characters after long pressing
# "selection" is the nth special element you want to select (n>=1)
def choose_extended_character(self, long_press_key, selection, movement=True):
self.switch_to_keyboard()
action = Actions(self.marionette)
# after switching to correct keyboard, set long press if the key is there
self._switch_to_correct_layout(long_press_key)
self.wait_for_element_displayed(*self._key_locator(long_press_key))
key = self.marionette.find_element(*self._key_locator(long_press_key))
action.press(key).wait(1).perform()
# find the extended key and perform the action chain
extend_keys = self.marionette.find_elements(*self._highlight_key_locator)
if movement is True:
action.move(extend_keys[selection - 1]).perform()
action.release().perform()
self.apps.switch_to_displayed_app()
def enable_caps_lock(self):
self.switch_to_keyboard()
if self.is_element_present(*self._key_locator(self._alpha_key)):
self._tap(self._alpha_key)
key_obj = self.marionette.find_element(*self._key_locator(self._upper_case_key))
self.marionette.double_tap(key_obj)
self.apps.switch_to_displayed_app()
# this would go through fastest way to tap/click through a string
def send(self, string):
self.switch_to_keyboard()
for val in string:
if ord(val) > 127:
# this would get the right key to long press and switch to the right keyboard
middle_key_val = self._find_key_for_longpress(val.encode('UTF-8'))
self._switch_to_correct_layout(middle_key_val)
# find the key to long press and press it to get the extended characters list
middle_key = self.marionette.find_element(*self._key_locator(middle_key_val))
action = Actions(self.marionette)
action.press(middle_key).wait(1).perform()
# find the targeted extended key to send
self.wait_for_element_displayed(*self._key_locator(val))
target_key = self.marionette.find_element(*self._key_locator(val))
action.move(target_key).release().perform()
else:
# after switching to correct keyboard, tap/click if the key is there
self._switch_to_correct_layout(val)
self._tap(val)
self.apps.switch_to_displayed_app()
# Switch keyboard language
# Mapping of language code => {
# "ar":"ﺎﻠﻋﺮﺒﻳﺓ",
# "cz":"Česká",
# "de":"Deutsch",
# "el":"Greek"
# "en":"English",
# "en-Dvorak":"Dvorak",
# "es":"Español",
# "fr":"français",
# "he":"עִבְרִית",
# "nb":"Norsk",
# "pt_BR":"Português",
# "pl":"polski",
# "ru":"русский",
# "sk":"Slovenčina",
# "sr-Cyrl":"српска ћирилица",
# "sr-Latn":"srpski",
# "tr":"Türkçe"}
def switch_keyboard_language(self, lang_code):
# TODO At the moment this doesn't work because the UI has changed
# An attempted repair ran into https://bugzilla.mozilla.org/show_bug.cgi?id=779284 (Modal dialog)
keyboard_language_locator = (By.CSS_SELECTOR, ".keyboard-row button[data-keyboard='%s']" % lang_code)
self.switch_to_keyboard()
language_key = self.marionette.find_element(*self._language_key_locator)
action = Actions(self.marionette)
action.press(language_key).wait(1).perform()
target_kb_layout = self.marionette.find_element(*keyboard_language_locator)
action.move(target_kb_layout).release().perform()
self.apps.switch_to_displayed_app()
def tap_keyboard_language_key(self):
self.switch_to_keyboard()
self.wait_for_element_displayed(*self._language_key_locator)
self.marionette.find_element(*self._language_key_locator).tap()
self.apps.switch_to_displayed_app()
# switch to keyboard with numbers and special characters
def switch_to_number_keyboard(self):
self.switch_to_keyboard()
self._tap(self._numeric_sign_key)
self.apps.switch_to_displayed_app()
# switch to keyboard with alphabetic keys
def switch_to_alpha_keyboard(self):
self.switch_to_keyboard()
self._tap(self._alpha_key)
self.apps.switch_to_displayed_app()
# following are "5 functions" to substitute finish switch_to_frame()s and tap() for you
def tap_shift(self):
self.switch_to_keyboard()
if self.is_element_present(*self._key_locator(self._alpha_key)):
self._tap(self._alpha_key)
self._tap(self._upper_case_key)
self.apps.switch_to_displayed_app()
def tap_backspace(self):
self.switch_to_keyboard()
backspace = self.marionette.find_element(self._button_locator[0], self._button_locator[1] % (self._backspace_key, self._backspace_key))
backspace.tap()
self.apps.switch_to_displayed_app()
def tap_space(self):
self.switch_to_keyboard()
self._tap(self._space_key)
self.apps.switch_to_displayed_app()
def tap_enter(self):
self.switch_to_keyboard()
self._tap(self._enter_key)
self.apps.switch_to_displayed_app()
def tap_alt(self):
self.switch_to_keyboard()
if self.is_element_present(*self._key_locator(self._numeric_sign_key)):
self._tap(self._numeric_sign_key)
self._tap(self._alt_key)
self.apps.switch_to_displayed_app()
def tap_dotcom(self):
    """Tap the '.com' convenience key on the on-screen keyboard."""
    self.switch_to_keyboard()
    self.marionette.find_element(*self._dotcom_key_locator).tap()
    self.apps.switch_to_displayed_app()
def dismiss(self):
    """Dismiss the keyboard by removing input focus, then wait until the
    keyboard frame both carries the 'hide' class and is no longer displayed.
    """
    self.marionette.switch_to_frame()
    self.marionette.execute_script('navigator.mozInputMethod.removeFocus();')
    keyboards = self.marionette.find_element(*self._keyboards_locator)
    # Both conditions are required: the class flips before the transition
    # finishes, and visibility alone can lag the class change.
    Wait(self.marionette).until(
        lambda m: 'hide' in keyboards.get_attribute('class') and
        not keyboards.is_displayed(),
        message="Keyboard was not dismissed. Debug is_displayed(): %s, class: %s."
        %(keyboards.is_displayed(), keyboards.get_attribute('class')))
    self.apps.switch_to_displayed_app()
def tap_first_predictive_word(self):
    """Tap the first word suggested by the predictive-text bar."""
    self.switch_to_keyboard()
    self.wait_for_element_displayed(*self._predicted_word_locator)
    self.marionette.find_element(*self._predicted_word_locator).tap()
    self.apps.switch_to_displayed_app()
|
# Copyright 2018 Spotify AB. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test comet_handler"""
from datetime import datetime, timedelta
from unittest import mock
import json
from freezegun import freeze_time
from marshmallow import fields, Schema
from comet_core import Comet
from comet_core.app import EventContainer
from comet_core.model import EventRecord
# Helpers for test_process_unsent_events_recipient_override:
# RECORD_OWNER is the owner stamped on the record itself; OVERRIDE_OWNER is
# the recipient that should win when an override is configured.
RECORD_OWNER = 'not-this-one@test.com'
OVERRIDE_OWNER = 'override@test.com'
class TheTestSchema(Schema):
    """Testing schema: a single required string field named `test`."""
    test = fields.Str(required=True)
class TheTestType(): # pylint: disable=abstract-method
    """Testing type."""
    schema = TheTestSchema
    # Remind owners weekly for events of this type.
    config = {
        'owner_reminder_cadence': timedelta(days=7)
    }

    def set_record(self, record):
        """Attach `record`, forcing a fixed owner/fingerprint so tests are
        deterministic."""
        self.record = record
        self.record.owner_email = RECORD_OWNER
        self.record.fingerprint = 'test'
# Shared fixture record for the recipient-override tests.
EVENT_RECORD_WITH_OVERRIDE = EventRecord(received_at=datetime(2018, 2, 19, 0, 0, 11),
                                         source_type='test',
                                         data={'test': 'test'})
@freeze_time('2018-05-09 09:00:00')
# pylint: disable=missing-docstring
def test_process_unprocessed_events():
    # Routing rules exercised here:
    # - record 1 is already processed and must be skipped
    # - record 2 shares fingerprint f1 but is unprocessed
    # - record 3 matches the source-specific router (datastoretest2)
    # - record 4 only matches the global router
    app = Comet()
    app.register_parser('datastoretest', json)
    app.register_parser('datastoretest2', json)
    app.register_parser('datastoretest3', json)
    specific_router = mock.Mock()
    router = mock.Mock()
    escalator = mock.Mock()
    app.register_router('datastoretest2', func=specific_router)
    app.register_router(func=router)
    app.register_escalator(func=escalator)
    check_user = 'an_owner'
    already_processed_user = 'already_processed_owner'
    app.data_store.add_record(
        EventRecord(id=1,
                    received_at=datetime.utcnow() - timedelta(days=5),
                    source_type='datastoretest',
                    owner=already_processed_user,
                    data={},
                    processed_at=datetime.utcnow() - timedelta(days=5),
                    fingerprint='f1'))
    app.data_store.add_record(
        EventRecord(id=2,
                    received_at=datetime.utcnow() - timedelta(days=4),
                    source_type='datastoretest',
                    owner=already_processed_user,
                    data={},
                    fingerprint='f1'))
    app.data_store.add_record(
        EventRecord(id=3,
                    received_at=datetime.utcnow() - timedelta(days=3),
                    source_type='datastoretest2',
                    owner=check_user,
                    data={},
                    fingerprint='f3'))
    app.data_store.add_record(
        EventRecord(id=4,
                    received_at=datetime.utcnow() - timedelta(days=3),
                    source_type='datastoretest3',
                    owner=check_user,
                    data={},
                    fingerprint='f4'))
    app.process_unprocessed_events()
    # Only datastoretest2 hits the specific router; the global router sees
    # two unprocessed source types; every unprocessed source escalates.
    assert specific_router.call_count == 1
    assert router.call_count == 2
    assert router.call_args[0][2][0].owner == check_user
    assert escalator.call_count == 3
def test_event_container():
    """Setters on EventContainer must surface on the generated record."""
    ev = EventContainer('test', {})
    ev.set_owner('testowner')
    ev.set_fingerprint('testfp')
    ev.set_metadata({'a': 'b'})
    rec = ev.get_record()
    assert rec.owner == 'testowner'
    assert rec.fingerprint == 'testfp'
    assert 'a' in rec.event_metadata
def test_message_callback(app):
    """Parser failures must abort the callback before the hydrator runs."""
    @app.register_parser('test')
    class TestParser:
        def loads(self, msg):
            # Returns (event, error): success only when key 'a' is present.
            ev = json.loads(msg)
            if 'a' in ev:
                return ev, None
            return None, 'fail'
    hydrator_mock = mock.Mock()
    app.register_hydrator('test', hydrator_mock)
    # Unknown source type -> no-op; parser error -> no-op.
    assert not app.message_callback('test1', '{}')
    assert not app.message_callback('test', '{ "c": "d" }')
    app.message_callback('test', '{ "a": "b" }')
    assert hydrator_mock.called
def test_register_input(app):
    """register_input works both as a decorator and as a plain call."""
    assert not app.inputs
    @app.register_input(a='b', c='d')
    class TestInput:
        pass
    app.register_input(TestInput)
    assert len(app.inputs) == 2
def test_register_parser(app):
    """Re-registering a source type replaces its parser, not duplicates it."""
    assert not app.parsers
    @app.register_parser('test1')
    class TestParser:
        pass
    # Override existing
    app.register_parser('test1', TestParser)
    assert len(app.parsers) == 1
    app.register_parser('test2', TestParser)
    assert len(app.parsers) == 2
def test_register_hydrator(app):
    """Re-registering a source type replaces its hydrator, not duplicates it."""
    assert not app.hydrators
    @app.register_hydrator('test1')
    def test_hydrator(*args):
        pass
    # Override existing
    app.register_hydrator('test1', test_hydrator)
    assert len(app.hydrators) == 1, app.hydrators
    # Add another
    app.register_hydrator('test2', test_hydrator)
    assert len(app.hydrators) == 2, app.hydrators
def test_set_config(app):
    """set_config stores one per-source configuration per source type."""
    assert not app.specific_configs
    for source_type in ('test1', 'test2'):
        app.set_config(source_type, {})
    assert len(app.specific_configs) == 2
def test_register_router(app):
    """Global routers apply to every source type; specific routers stack,
    and a list of source types registers the router once per type."""
    assert not app.routers.func_count()
    @app.register_router()
    def test_router(*args):
        pass
    app.register_router(func=test_router)
    assert app.routers.func_count() == 2
    @app.register_router('test1')
    def test_router2(*args):
        pass
    app.register_router('test1', test_router2)
    assert len(list(app.routers.for_source_type('test1'))) == 4  # 2 global, 2 specific
    assert len(list(app.routers.for_source_type('test2'))) == 2  # 2 global, 0 specific
    app.register_router('test2', test_router2)
    assert len(list(app.routers.for_source_type('test2'))) == 3  # 2 global, 1 specific
    app.register_router(['test1', 'test2'], test_router2)
    assert len(list(app.routers.for_source_type('test1'))) == 5  # 2 global, 3 specific
    assert len(list(app.routers.for_source_type('test2'))) == 4  # 2 global, 2 specific
def test_register_escalator(app):
    """register_escalator used as a decorator registers the function."""
    assert not app.escalators.func_count()
    @app.register_escalator()
    def test_escalator(*args):
        pass
    assert app.escalators.func_count()
def test_validate_config(app):
    """validate_config drops parsers that have no matching router, and
    keeps those that do."""
    @app.register_parser('test1')
    class TestParser:
        pass
    assert app.parsers
    # No router registered for 'test1' -> the parser is removed.
    app.validate_config()
    assert not app.parsers
    app = Comet()
    app.register_parser('test1', TestParser)
    @app.register_router('test1')
    def test_router(*args):
        pass
    # Parser now has a router -> it survives validation.
    app.validate_config()
    assert app.parsers
def test_start_stop_inputs(app):
    """Inputs are instantiated lazily on start_inputs() with the kwargs
    given at registration, and stopped on app.stop()."""
    class TestInput:
        __init__ = mock.Mock(return_value=None)
        stop = mock.Mock()
    app.register_input(TestInput, a='b')
    assert not TestInput.__init__.called
    app.start_inputs()
    assert TestInput.__init__.called
    assert 'a' in TestInput.__init__.call_args[1]
    app.stop()
    assert TestInput.stop.called
def test_run(app):
    """run() loops until `running` is cleared; the patched sleep flips the
    flag so exactly one iteration executes."""
    def f(*args):
        app.running = False
    app.process_unprocessed_events = mock.Mock()
    with mock.patch("time.sleep") as mocked_sleep:
        mocked_sleep.side_effect = f
        app.run()
        mocked_sleep.assert_called_once()
        app.process_unprocessed_events.assert_called_once()
|
from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
from libtbx import adopt_init_args
import mmtbx.refinement.real_space
from mmtbx.refinement.real_space import individual_sites
import math, sys
from cctbx import maptbx
import scitbx.math
import mmtbx.idealized_aa_residues.rotamer_manager
import collections
from libtbx import group_args
import boost_adaptbx.boost.python as bp
from six.moves import range
ext = bp.import_ext("mmtbx_rotamer_fit_ext")
def flatten(l):
  """Recursively flatten nested lists / flex.size_t arrays into one flat
  Python list, preserving element order. Returns None for None input."""
  if l is None: return None
  result = []
  for x in l:
    if isinstance(x, list) or isinstance(x, flex.size_t):
      result.extend(flatten(x))
    else:
      result.append(x)
  return result
###
# TODO: do not fit residues whose side chains are involved in bonds!
###
class monitor(object):
  """Track per-atom map values and rotamer status of a residue across the
  fitting stages ("start", "fitting", "tuneup") and pick the best one.
  """

  def __init__(self, id_str, selection, map_data, unit_cell, weights, pairs,
               cmv, rotamer_evaluator, log):
    adopt_init_args(self, locals())
    # Insertion-ordered so states can be compared/printed in stage order.
    self.states = collections.OrderedDict()

  def add(self, residue, state):
    """Record map values, score and rotamer state of `residue` under the
    label `state` (one of "start", "fitting", "tuneup", "revert")."""
    vals = collections.OrderedDict()
    target = 0
    target_neg = 0
    exceed_map_max_value = False
    for i in self.selection:
      atom = residue.atoms()[i]
      # Key convention must match the keys of self.cmv: model_resname_atom.
      key = "%s_%s_%s"%(
        atom.parent().parent().parent().id, atom.parent().resname,
        atom.name.strip())
      name = atom.name.strip().upper()
      element = atom.element.strip().upper()
      if(element in ["H","D"]): continue  # hydrogens carry no map signal
      mv = self.map_data.eight_point_interpolation(
        self.unit_cell.fractionalize(atom.xyz))
      vals[name] = mv
      # Atom sits in suspiciously strong density (3x expected); S/Se are
      # exempt because they legitimately produce large peaks.
      if(mv > self.cmv[key]*3 and not element in ["S","SE"]):
        exceed_map_max_value = True
      target += mv
      if(mv < 0): target_neg += mv
    #
    rot = self.rotamer_evaluator.evaluate_residue(residue)
    self.states[state] = group_args(
      vals                 = vals,
      sites_cart           = residue.atoms().extract_xyz(),
      target               = target,
      target_neg           = target_neg,
      exceed_map_max_value = exceed_map_max_value,
      rot                  = rot)

  def finalize(self, residue):
    """Compare recorded states and set `residue` coordinates to the best
    one; record a "revert" state if tuneup was not accepted."""
    if(len(self.states.keys())==1 or self.selection.size()==0): return
    state = "start"
    S = self.states["start"]
    F = self.states["fitting"]
    T = self.states["tuneup"]
    # Accept "fitting" if it removed a rotamer outlier, improved the score
    # without entering excessive density, or reduced negative density.
    if((S.rot=="OUTLIER" and F.rot!="OUTLIER" and not F.exceed_map_max_value) or
       (F.target>S.target and F.target_neg>=S.target_neg and not F.exceed_map_max_value) or
       (F.target_neg>S.target_neg) or
       (S.exceed_map_max_value and not F.exceed_map_max_value)):
      state = "fitting"
    N = self.states[state]
    # NOTE(review): `T.target_neg>=S.target_neg` compares against the start
    # state even when N is "fitting" — possibly intended to be N.target_neg;
    # confirm before changing.
    if((N.rot=="OUTLIER" and T.rot!="OUTLIER" and not T.exceed_map_max_value) or
       (T.target>N.target and T.target_neg>=S.target_neg and not T.exceed_map_max_value) or
       (T.target_neg>N.target_neg) or
       (N.exceed_map_max_value and not T.exceed_map_max_value)):
      state = "tuneup"
    #
    residue.atoms().set_xyz(self.states[state].sites_cart)
    # If tuneup lost, record the values after reverting the coordinates.
    if(state != "tuneup"):
      self.add(residue = residue, state = "revert")
    #

  def show(self):
    """Print per-state per-atom map values and rotamer status to self.log."""
    if(len(self.states.keys())==1 or self.selection.size()==0): return
    print(self.id_str, file=self.log)
    for k,v in zip(self.states.keys(), self.states.values()):
      vals = " ".join(["%s: %5.2f"%(k_, v_)
        for k_,v_ in zip(v.vals.keys(), v.vals.values())])
      print("  %7s: score: %7.3f %s %s"%(k, v.target, vals, v.rot), file=self.log)
class run(object):
  """Fit one residue into a real-space map: optional C-beta/backbone grid
  sampling, rotamer-based side-chain fitting, and a final torsion tune-up.

  The result is applied in place to `residue` coordinates; a `monitor`
  instance decides which intermediate state to keep.
  """

  def __init__(self,
               residue,
               mon_lib_srv,
               rotamer_manager,
               sin_cos_table,
               cmv,
               unit_cell,
               rotatable_hd=None,
               vdw_radii=None,
               xyzrad_bumpers=None,
               target_map=None,
               target_map_for_cb=None,
               backbone_sample=False,
               accept_only_if_max_shift_is_smaller_than=None,
               log=None):
    adopt_init_args(self, locals())
    if(self.log is None): self.log = sys.stdout
    self.co = mmtbx.refinement.real_space.aa_residue_axes_and_clusters(
      residue         = self.residue,
      mon_lib_srv     = self.mon_lib_srv,
      backbone_sample = True,
      log             = self.log)
    self.m = None
    if(self.target_map is not None and len(self.co.clusters)>0):
      # Set weights: approximate atomic numbers used as map weights.
      AN = {"S":16, "O":8, "N":7, "C":6, "SE":34, "H":1, "D":5}
      #AN = {"S":1, "O":1, "N":1, "C":1, "SE":1, "H":1}
      self.weights = flex.double()
      for atom in self.residue.atoms():
        self.weights.append(AN[atom.element.strip().upper()])
      # Bonded pairs: collect side-chain atom pairs within bonding distance.
      exclude = ["C","N","O","CA"]
      reference = exclude + ["CB"]
      atoms = self.residue.atoms()
      self.pairs = []
      for i, ai in enumerate(atoms):
        if(ai.name.strip() in reference):
          # NOTE(review): `mv` computed here is never used — confirm intent.
          mv = self.target_map.eight_point_interpolation(
            self.unit_cell.fractionalize(ai.xyz))
        if(ai.name.strip() in exclude): continue
        if(ai.element.strip().upper() in ["H","S","SE"]): continue
        for j, aj in enumerate(atoms):
          if i==j: continue
          if(aj.name.strip() in exclude): continue
          if(aj.element.strip().upper() in ["H","S","SE"]): continue
          d = ai.distance(aj)
          # 1.6 A: generous covalent-bond distance cutoff.
          if d < 1.6:
            pair = [i,j]
            pair.sort()
            if(not pair in self.pairs):
              self.pairs.append(pair)
      # Set monitor
      id_str=""
      if(self.residue.parent() is not None and
         self.residue.parent().parent() is not None):
        id_str+="chain: %s"%(self.residue.parent().parent().id)
        id_str+=" residue: %s %s"%(self.residue.resname, self.residue.resseq.strip())
      if(len(self.co.clusters)>1):
        # First side-chain cluster (clusters[0] is the backbone/C-beta one).
        msel = flex.size_t(flatten(self.co.clusters[1:][0].vector))
      else:
        msel = flex.size_t()
      self.m = monitor(
        id_str            = id_str,
        selection         = msel,
        map_data          = self.target_map,
        unit_cell         = self.unit_cell,
        weights           = self.weights,
        pairs             = self.pairs,
        cmv               = self.cmv,
        rotamer_evaluator = self.rotamer_manager.rotamer_evaluator,
        log               = self.log)
      self.m.add(residue = self.residue, state = "start")
    if(self.target_map is None):
      assert not backbone_sample
    # Actual calculations
    self.chi_angles = self.rotamer_manager.get_chi_angles(
      resname = self.residue.resname)
    if(len(self.co.clusters)>0):
      if(backbone_sample):
        self.fit_c_beta(c_beta_rotation_cluster = self.co.clusters[0])
      self.fit_side_chain(clusters = self.co.clusters[1:])
    if(self.m is not None):
      self.m.finalize(residue = self.residue)
    # Too bulky, but very useful. Use for debugging only.
    #self.m.show()

  def get_target_value(self, sites_cart, selection=None, target_map=None):
    """Return the simple real-space map target for `sites_cart`,
    optionally restricted to `selection`. Defaults to self.target_map."""
    if(target_map is None): target_map = self.target_map
    if(selection is None):
      return maptbx.real_space_target_simple(
        unit_cell   = self.unit_cell,
        density_map = target_map,
        sites_cart  = sites_cart)
    else:
      return maptbx.real_space_target_simple(
        unit_cell   = self.unit_cell,
        density_map = target_map,
        sites_cart  = sites_cart,
        selection   = selection)

  def fit_side_chain(self, clusters):
    """Exhaustively sample chi angles (via the C++ ext.fit engine) and set
    the best side-chain conformation on self.residue."""
    rotamer_iterator = \
      mmtbx.refinement.real_space.fit_residue.get_rotamer_iterator(
        mon_lib_srv = self.mon_lib_srv,
        residue     = self.residue)
    if(rotamer_iterator is None): return
    selection_clash = self.co.clash_eval_selection
    selection_rsr = self.co.rsr_eval_selection
    if(self.target_map is not None):
      start_target_value = self.get_target_value(
        sites_cart = self.residue.atoms().extract_xyz(),
        selection  = selection_rsr)
    sites_cart_start = self.residue.atoms().extract_xyz()
    sites_cart_first_rotamer = list(rotamer_iterator)[0][1]
    # From this point on the coordinates in residue are to initial rotamer!
    self.residue.atoms().set_xyz(sites_cart_first_rotamer)
    axes = []
    atr = []
    # One rotation axis + rotated-atom set per chi angle; `angle` itself is
    # not used, only the cluster index.
    for i, angle in enumerate(self.chi_angles[0]):
      cl = clusters[i]
      axes.append(flex.size_t(cl.axis))
      atr.append(flex.size_t(cl.atoms_to_rotate))
    #
    if(self.target_map is not None and self.xyzrad_bumpers is not None):
      # Get reference map values
      ref_map_vals = flex.double()
      for a in self.residue.atoms():
        key = "%s_%s_%s"%(
          a.parent().parent().parent().id, a.parent().resname,
          a.name.strip())
        ref_map_vals.append(self.cmv[key])
      # Get radii
      radii = mmtbx.refinement.real_space.get_radii(
        residue = self.residue, vdw_radii = self.vdw_radii)
      # Exclude rotatable H from clash calculation
      tmp = flex.size_t()
      for i in selection_clash:
        if(self.rotatable_hd[self.residue.atoms()[i].i_seq]): continue
        tmp.append(i)
      selection_clash = tmp[:]
      # Ad hoc: S or SE have larger peaks!
      if(self.residue.resname in ["MET","MSE"]): scale=100
      else: scale=3
      moving = ext.moving(
        sites_cart       = self.residue.atoms().extract_xyz(),
        sites_cart_start = sites_cart_start,
        radii            = radii,
        weights          = self.weights,
        bonded_pairs     = self.pairs,
        ref_map_max      = ref_map_vals * scale,
        ref_map_min      = ref_map_vals / 10)
      #
      # Map-guided fit with clash avoidance against fixed "bumper" atoms.
      ro = ext.fit(
        fixed                    = self.xyzrad_bumpers,
        axes                     = axes,
        rotatable_points_indices = atr,
        angles_array             = self.chi_angles,
        density_map              = self.target_map,
        moving                   = moving,
        unit_cell                = self.unit_cell,
        selection_clash          = selection_clash,
        selection_rsr            = selection_rsr, # select atoms to compute map target
        sin_table                = self.sin_cos_table.sin_table,
        cos_table                = self.sin_cos_table.cos_table,
        step                     = self.sin_cos_table.step,
        n                        = self.sin_cos_table.n)
    elif(self.target_map is not None and self.xyzrad_bumpers is None):
      # Map-guided fit without clash information.
      ro = ext.fit(
        target_value             = start_target_value,
        axes                     = axes,
        rotatable_points_indices = atr,
        angles_array             = self.chi_angles,
        density_map              = self.target_map,
        all_points               = self.residue.atoms().extract_xyz(),
        unit_cell                = self.unit_cell,
        selection                = selection_rsr,
        sin_table                = self.sin_cos_table.sin_table,
        cos_table                = self.sin_cos_table.cos_table,
        step                     = self.sin_cos_table.step,
        n                        = self.sin_cos_table.n)
    else:
      # No map: geometry-only fit back towards the start coordinates.
      ro = ext.fit(
        sites_cart_start         = sites_cart_start.deep_copy(),
        axes                     = axes,
        rotatable_points_indices = atr,
        angles_array             = self.chi_angles,
        all_points               = self.residue.atoms().extract_xyz(),
        sin_table                = self.sin_cos_table.sin_table,
        cos_table                = self.sin_cos_table.cos_table,
        step                     = self.sin_cos_table.step,
        n                        = self.sin_cos_table.n)
    sites_cart_result = ro.result()
    if(sites_cart_result.size()>0):
      dist = None
      if(self.accept_only_if_max_shift_is_smaller_than is not None):
        dist = flex.max(flex.sqrt((sites_cart_start - sites_cart_result).dot()))
      if(dist is None):
        self.residue.atoms().set_xyz(sites_cart_result)
      else:
        # Accept only if the largest atomic shift is below the threshold.
        if(dist is not None and
           dist < self.accept_only_if_max_shift_is_smaller_than):
          self.residue.atoms().set_xyz(sites_cart_result)
        else:
          self.residue.atoms().set_xyz(sites_cart_start)
    else:
      self.residue.atoms().set_xyz(sites_cart_start)
    if(self.m): self.m.add(residue = self.residue, state = "fitting")
    # # tune up: fine torsion scan around the fitted conformation.
    if(self.target_map is not None):
      tune_up(
        target_map           = self.target_map,
        residue              = self.residue,
        mon_lib_srv          = self.mon_lib_srv,
        rotamer_manager      = self.rotamer_manager.rotamer_evaluator,
        unit_cell            = self.unit_cell,
        monitor              = self.m,
        torsion_search_start = -30,
        torsion_search_stop  = 30,
        torsion_search_step  = 1)

  def fit_c_beta(self, c_beta_rotation_cluster):
    """Grid-sample small rotations (+/-20 deg) of the C-beta cluster against
    target_map_for_cb and keep the best-scoring coordinates."""
    selection = flex.size_t(c_beta_rotation_cluster.selection)
    sites_cart = self.residue.atoms().extract_xyz()
    sites_cart_start = sites_cart.deep_copy() # XXX
    start_target_value = self.get_target_value(
      sites_cart = sites_cart,
      selection  = selection,
      target_map = self.target_map_for_cb)
    ro = ext.fit(
      target_value             = start_target_value+1.e-6,
      axes                     = [c_beta_rotation_cluster.axis],
      rotatable_points_indices = [c_beta_rotation_cluster.atoms_to_rotate],
      angles_array             = [[i*math.pi/180] for i in range(-20,21,1)],
      density_map              = self.target_map_for_cb,
      all_points               = sites_cart,
      unit_cell                = self.unit_cell,
      selection                = selection,
      sin_table                = self.sin_cos_table.sin_table,
      cos_table                = self.sin_cos_table.cos_table,
      step                     = self.sin_cos_table.step,
      n                        = self.sin_cos_table.n)
    sites_cart_result = ro.result()
    if(sites_cart_result.size()>0):
      self.residue.atoms().set_xyz(sites_cart_result)
    else:
      self.residue.atoms().set_xyz(sites_cart_start)
class run_with_minimization(object):
  """Fit one residue using backbone grid sampling plus restrained real-space
  minimization, then rotamer fitting against a locally negated map.
  """

  def __init__(self,
               target_map,
               residue,
               vdw_radii,
               xray_structure,
               mon_lib_srv,
               rotamer_manager,
               # This is cctbx.geometry_restraints.manager.manager
               geometry_restraints_manager,
               real_space_gradients_delta,
               selection_radius = 5,
               rms_bonds_limit = 0.03, # XXX probably needs to be much lower
               rms_angles_limit = 3.0, # XXX
               backbone_sample_angle=None,
               cmv = None,
               allow_modified_residues=False):
    adopt_init_args(self, locals())
    # load rotamer manager (note: replaces the one passed in as argument)
    self.rotamer_manager = mmtbx.idealized_aa_residues.rotamer_manager.load(
      rotamers="favored")
    # pre-compute sin and cos tables
    self.sin_cos_table = scitbx.math.sin_cos_table(n=10000)
    self.backbone_atom_names = ["N", "CA", "O", "CB", "C"]
    self.residue_iselection = self.residue.atoms().extract_i_seq()
    assert (not self.residue_iselection.all_eq(0))
    self.residue_selection = flex.bool(
      xray_structure.scatterers().size(), self.residue_iselection)
    self.residue_backbone_selection = flex.size_t()
    for atom in self.residue.atoms():
      if(atom.name.strip() in self.backbone_atom_names):
        self.residue_backbone_selection.append(atom.i_seq)
    self.residue_backbone_selection = flex.bool(
      xray_structure.scatterers().size(), self.residue_backbone_selection)
    self.target_map_work = target_map
    self.target_map_orig = target_map.deep_copy()
    self.fit_backbone()
    # Negate map density around neighbouring atoms so the rotamer search
    # is not attracted into neighbours' density.
    negate_selection = mmtbx.refinement.real_space.selection_around_to_negate(
      xray_structure          = self.xray_structure,
      selection_within_radius = self.selection_radius,
      iselection              = self.residue.atoms().extract_i_seq())
    self.target_map_work = mmtbx.refinement.real_space.\
      negate_map_around_selected_atoms_except_selected_atoms(
        xray_structure   = self.xray_structure,
        map_data         = target_map,
        negate_selection = negate_selection,
        atom_radius      = 1.5)
    self.fit_rotamers()

  def fit_backbone(self):
    """Regularize, grid-sample the backbone about the C-N axis, then refine."""
    # move in place (pure geometry regularizaition of residue in question)
    self.real_space_refine(optimize_weight=False, start_trial_weight_value=0)
    # fit n-c-o-ca-cb only (ignore side chain!). XXX BAD: amino-acid specific!
    self.grid_sample_around_c_n_axis()
    # fine-tune
    self.real_space_refine(optimize_weight=True, start_trial_weight_value=50)

  def fit_rotamers(self):
    """Run rotamer fitting on the negated work map and push the resulting
    residue coordinates back into self.xray_structure."""
    sps = self.xray_structure.special_position_settings()
    mmtbx.refinement.real_space.fit_residue.run(
      vdw_radii         = self.vdw_radii,
      target_map        = self.target_map_work,
      target_map_for_cb = self.target_map_orig,
      mon_lib_srv       = self.mon_lib_srv,
      unit_cell         = self.xray_structure.unit_cell(),
      residue           = self.residue,
      sin_cos_table     = self.sin_cos_table,
      cmv               = self.cmv,
      rotamer_manager   = self.rotamer_manager)
    sites_cart_poor = self.xray_structure.sites_cart()
    sites_cart_poor.set_selected(self.residue_iselection,
      self.residue.atoms().extract_xyz())
    self.xray_structure= self.xray_structure.replace_sites_cart(sites_cart_poor)

  def grid_sample_around_c_n_axis(self):
    """Rotate backbone atoms about the N-C axis on a 1-degree grid and keep
    the best-scoring orientation."""
    sps = self.xray_structure.special_position_settings()
    scorer = mmtbx.refinement.real_space.score(
      target_map = self.target_map_work,
      residue    = self.residue,
      unit_cell  = self.xray_structure.unit_cell())
    def get_cluster(self):
      """Build the rotation cluster: N and C define the axis; everything
      else rotates; only backbone atoms are scored."""
      axis=[]
      atoms_to_rotate=[]
      use_in_target_selection = flex.size_t()
      counter = 0
      for atom in self.residue.atoms():
        if(atom.name.strip() in ["N", "C"]):
          axis.append(counter)
        else:
          atoms_to_rotate.append(counter)
        if(atom.name.strip() in self.backbone_atom_names):
          use_in_target_selection.append(counter)
        counter += 1
      return mmtbx.refinement.real_space.cluster(
        axis            = axis,
        atoms_to_rotate = atoms_to_rotate,
        selection       = use_in_target_selection)
    # Local helper takes self explicitly; called directly, not as a method.
    cl = get_cluster(self)
    residue_sites_cart = self.residue.atoms().extract_xyz()
    scorer.reset(
      sites_cart = residue_sites_cart,
      selection  = cl.selection)
    angle_start = 0
    angle_end = 360
    # NOTE(review): angle_start/angle_end computed from backbone_sample_angle
    # are never passed to torsion_search below (it always scans 0-360) —
    # confirm whether start/stop should use them.
    if (self.backbone_sample_angle is not None):
      assert (self.backbone_sample_angle > 0)
      angle_start = - self.backbone_sample_angle
      angle_end = self.backbone_sample_angle
    mmtbx.refinement.real_space.torsion_search(
      clusters   = [cl],
      sites_cart = residue_sites_cart,
      scorer     = scorer,
      start      = 0,
      stop       = 360,
      step       = 1)
    self.residue.atoms().set_xyz(new_xyz=scorer.sites_cart)
    selection = self.residue.atoms().extract_i_seq()
    sites_cart_poor = self.xray_structure.sites_cart()
    sites_cart_poor.set_selected(selection, scorer.sites_cart)
    self.xray_structure= self.xray_structure.replace_sites_cart(sites_cart_poor)

  def real_space_refine(self, optimize_weight, start_trial_weight_value):
    """Restrained real-space refinement of the residue within a local box;
    updates both self.xray_structure and the residue coordinates."""
    brm = individual_sites.box_refinement_manager(
      xray_structure              = self.xray_structure,
      target_map                  = self.target_map_work,
      geometry_restraints_manager = self.geometry_restraints_manager,
      real_space_gradients_delta  = 1./4,
      max_iterations              = 500)
    brm.refine(
      selection                = self.residue_selection,
      optimize_weight          = optimize_weight,
      start_trial_weight_value = start_trial_weight_value,
      selection_buffer_radius  = self.selection_radius,
      box_cushion              = 2,
      rms_bonds_limit          = self.rms_bonds_limit,
      rms_angles_limit         = self.rms_angles_limit)
    self.xray_structure = brm.xray_structure
    self.residue.atoms().set_xyz(brm.sites_cart.select(self.residue_iselection))
def get_rotamer_iterator(mon_lib_srv, residue):
  """Return a fine-sampling rotamer iterator for `residue`, or None when
  the residue has no usable rotamer information."""
  it = mon_lib_srv.rotamer_iterator(
    fine_sampling = True,
    comp_id       = residue.resname,
    atom_names    = residue.atoms().extract_name(),
    sites_cart    = residue.atoms().extract_xyz())
  if (it is None or
      it.problem_message is not None or
      it.rotamer_info is None):
    return None
  return it
class tune_up(object):
  """Fine torsion scan of all side-chain clusters around the current
  conformation; applies the best-scoring coordinates to `residue` and
  records a "tuneup" state on `monitor` if one is provided.
  """

  def __init__(self,
               target_map,
               residue,
               mon_lib_srv,
               rotamer_manager,
               unit_cell,
               monitor = None,
               torsion_search_start = -20,
               torsion_search_stop  = 20,
               torsion_search_step  = 2):
    adopt_init_args(self, locals())
    self.clusters = mmtbx.refinement.real_space.aa_residue_axes_and_clusters(
      residue         = self.residue,
      mon_lib_srv     = self.mon_lib_srv,
      backbone_sample = False).clusters
    # score3 combines map fit with rotamer evaluation.
    score_residue = mmtbx.refinement.real_space.score3(
      unit_cell    = self.unit_cell,
      target_map   = self.target_map,
      residue      = self.residue,
      rotamer_eval = self.rotamer_manager)
    mmtbx.refinement.real_space.torsion_search(
      clusters   = self.clusters,
      sites_cart = self.residue.atoms().extract_xyz(),
      scorer     = score_residue,
      start      = self.torsion_search_start,
      stop       = self.torsion_search_stop,
      step       = self.torsion_search_step)
    self.residue.atoms().set_xyz(new_xyz=score_residue.sites_cart)
    if(monitor is not None):
      monitor.add(residue = self.residue, state = "tuneup")
#
# These functions are not used anywhere. And not tested anymore.
# They are here as an example of correct backrub move, according to
# original paper https://doi.org/10.1016/j.str.2005.10.007
# Unfortunately, for proper backrub move we need previous and next residues,
# but the current code is built under the assumption that one residue is enough for
# rotamer fitting. One will have to reconsider this idea and do some changes
# to make it possible to do proper backrub move.
#
def _find_theta(ap1, ap2, cur_xyz, needed_xyz):
  """Return the rotation angle (degrees) about the axis ap1->ap2 that best
  moves cur_xyz towards needed_xyz (CCD-style closure math)."""
  from mmtbx.building.loop_closure.ccd import ccd_python
  f, s_home, r_norm, r_home = ccd_python._get_f_r_s(
    axis_point_1=ap1,
    axis_point_2=ap2,
    moving_coor=cur_xyz,
    fixed_coor=needed_xyz)
  b = list(2*r_norm*(f.dot(r_home)))[0]
  c = list(2*r_norm*(f.dot(s_home)))[0]
  znam = math.sqrt(b*b+c*c)
  sin_alpha = c/znam
  cos_alpha = b/znam
  # atan2 picks the correct quadrant for the optimal rotation.
  alpha = math.atan2(sin_alpha, cos_alpha)
  return math.degrees(alpha)
def backrub_move(
    prev_res,
    cur_res,
    next_res,
    angle,
    move_oxygens=False,
    accept_worse_rama=False,
    rotamer_manager=None,
    rama_manager=None):
  """Perform a backrub move of `cur_res` about the CA(prev)-CA(next) axis.

  Optionally counter-rotates the flanking peptide groups (move_oxygens) and
  reverts those counter-rotations if the Ramachandran score got worse
  (unless accept_worse_rama). Modifies atom coordinates in place; returns
  None. No-op when the angle is ~0 or a flanking residue/CA is missing.
  """
  import boost_adaptbx.boost.python as bp
  ext = bp.import_ext("mmtbx_validation_ramachandran_ext")
  from mmtbx_validation_ramachandran_ext import rama_eval
  from scitbx.matrix import rotate_point_around_axis
  from mmtbx.conformation_dependent_library.multi_residue_class import ThreeProteinResidues, \
      RestraintsRegistry
  if abs(angle) < 1e-4:
    return
  if prev_res is None or next_res is None:
    return
  # Save original coordinates of all three residues (by atom name).
  saved_res = [{},{},{}]
  for i, r in enumerate([prev_res, cur_res, next_res]):
    for a in r.atoms():
      saved_res[i][a.name.strip()] = a.xyz
  if rotamer_manager is None:
    # NOTE(review): RotamerEval is not imported in this module's visible
    # imports — presumably imported elsewhere in the file; verify.
    rotamer_manager = RotamerEval()
  prev_ca = prev_res.find_atom_by(name=" CA ")
  cur_ca = cur_res.find_atom_by(name=" CA ")
  next_ca = next_res.find_atom_by(name=" CA ")
  if prev_ca is None or next_ca is None or cur_ca is None:
    return
  # Atoms rotated by the primary backrub: prev C/O, all of cur, next N.
  atoms_to_move = []
  atoms_to_move.append(prev_res.find_atom_by(name=" C "))
  atoms_to_move.append(prev_res.find_atom_by(name=" O "))
  for atom in cur_res.atoms():
    atoms_to_move.append(atom)
  atoms_to_move.append(next_res.find_atom_by(name=" N "))
  for atom in atoms_to_move:
    assert atom is not None
    new_xyz = rotate_point_around_axis(
      axis_point_1 = prev_ca.xyz,
      axis_point_2 = next_ca.xyz,
      point        = atom.xyz,
      angle        = angle,
      deg          = True)
    atom.xyz = new_xyz
  if move_oxygens:
    registry = RestraintsRegistry()
    if rama_manager is None:
      rama_manager = rama_eval()
    tpr = ThreeProteinResidues(geometry=None, registry=registry)
    tpr.append(prev_res)
    tpr.append(cur_res)
    tpr.append(next_res)
    phi_psi_angles = tpr.get_phi_psi_angles()
    rama_key = tpr.get_ramalyze_key()
    ev_before = rama_manager.evaluate_angles(rama_key, phi_psi_angles[0], phi_psi_angles[1])
    # Counter-rotations that bring the carbonyl oxygens back towards their
    # original positions (per the original backrub paper).
    theta1 = _find_theta(
      ap1 = prev_ca.xyz,
      ap2 = cur_ca.xyz,
      cur_xyz = prev_res.find_atom_by(name=" O ").xyz,
      needed_xyz = saved_res[0]["O"])
    theta2 = _find_theta(
      ap1 = cur_ca.xyz,
      ap2 = next_ca.xyz,
      cur_xyz = cur_res.find_atom_by(name=" O ").xyz,
      needed_xyz = saved_res[1]["O"])
    for a in [prev_res.find_atom_by(name=" C "),
        prev_res.find_atom_by(name=" O "),
        cur_res.find_atom_by(name=" C ")]:
      new_xyz = rotate_point_around_axis(
        axis_point_1 = prev_ca.xyz,
        axis_point_2 = cur_ca.xyz,
        point        = a.xyz,
        angle        = theta1,
        deg          = True)
      a.xyz = new_xyz
    for a in [cur_res.find_atom_by(name=" C "),
        cur_res.find_atom_by(name=" O "),
        next_res.find_atom_by(name=" N ")]:
      new_xyz = rotate_point_around_axis(
        axis_point_1 = cur_ca.xyz,
        axis_point_2 = next_ca.xyz,
        point        = a.xyz,
        angle        = theta2,
        deg          = True)
      a.xyz = new_xyz
    phi_psi_angles = tpr.get_phi_psi_angles()
    rama_key = tpr.get_ramalyze_key()
    ev_after = rama_manager.evaluate_angles(rama_key, phi_psi_angles[0], phi_psi_angles[1])
    if ev_before > ev_after and not accept_worse_rama:
      # Ramachandran got worse: undo both counter-rotations.
      for a in [prev_res.find_atom_by(name=" C "),
          prev_res.find_atom_by(name=" O "),
          cur_res.find_atom_by(name=" C ")]:
        new_xyz = rotate_point_around_axis(
          axis_point_1 = prev_ca.xyz,
          axis_point_2 = cur_ca.xyz,
          point        = a.xyz,
          angle        = -theta1,
          deg          = True)
        a.xyz = new_xyz
      for a in [cur_res.find_atom_by(name=" C "),
          cur_res.find_atom_by(name=" O "),
          next_res.find_atom_by(name=" N ")]:
        new_xyz = rotate_point_around_axis(
          axis_point_1 = cur_ca.xyz,
          axis_point_2 = next_ca.xyz,
          point        = a.xyz,
          angle        = -theta2,
          deg          = True)
        a.xyz = new_xyz
|
"""Default model package."""
import base64
import os
from arduinozore.settings import path
from yaml import SafeLoader
from yaml import YAMLObject
from yaml import dump
from yaml import safe_load
class Model(YAMLObject):
"""Model default class."""
yaml_tag = u'!Model'
yaml_loader = SafeLoader
def __init__(self, name):
"""Init model."""
self.name = name
def __repr__(self):
"""Represent model in order to save it."""
return "%s(name=%r)" % (self.__class__.__name__, self.name)
@classmethod
def load_yaml(cls, folder, filename):
"""Load yaml from file."""
try:
with open(path(folder, filename), 'r') as f:
model = safe_load(f)
except (FileExistsError, FileNotFoundError):
model = None
return model
def save_yaml(self, folder):
"""Save model to file."""
config_file = self.get_filename()
config_file = path(folder, config_file)
with open(config_file, 'w') as f:
d = dump(self, default_flow_style=False,
allow_unicode=True, encoding=None)
f.write(d)
def get_filename(self):
"""Get filename to save."""
return __class__.filenamify(self.name) + ".yaml"
def _delete(self, folder):
"""Delete model file."""
os.remove(path(folder, self.get_filename()))
@classmethod
def filenamify(cls, name):
"""Return filename base64 encoded from filename."""
return base64.urlsafe_b64encode(name.encode('UTF-8')).decode()
@classmethod
def unfilenamify(cls, filename):
"""Return filename base64 decoded from filename."""
return base64.urlsafe_b64decode(filename.encode()).decode('UTF-8')
@classmethod
def _get_all(cls, folder):
"""Get all models configurations."""
models = list()
if not os.path.exists(folder):
os.makedirs(folder)
config_files = os.listdir(folder)
if config_files is not None and len(config_files) > 0:
for config_file in config_files:
model = cls.load_yaml(folder, config_file)
models.append(model)
else:
models = None
return models
@classmethod
def _get(cls, name, folder):
"""Get model by name."""
try:
model = cls.load_yaml(folder, cls.filenamify(name) + ".yaml")
except Exception:
model = None
return model
|
#!/usr/bin/env python
"""
This program takes a command line argument n (positive integer) and prints the nth fibonacci number.
"""
#
### INSTRUCTOR COMMENT:
# See the sequences file for my main comments, which are repeated here.
# Also, imports should be done at the top of the file, ideally, except for
# specific imports needed only for the __main__ block, which should be at the
# top of the __main__ block.
#
def main(argv):
    """
    This function will execute if fib.py is run from the command line. It prints the nth fibonacci number.
    """
    # importing sequences.py as a module (local import, per instructor note)
    import sequences as seq
    # ensuring that there are enough command line arguments
    if len(argv)<2:
        print "Not enough command line arguments."
        from sys import exit
        exit(1)
    # printing the nth fibonacci number: fibonacci() returns the whole
    # sequence, so the last element is the nth number
    print seq.fibonacci(int(argv[1]))[-1]
def test_main():
    """
    Tests the functioning of main(argv) by executing the program and checking the output string.
    """
    # Bug fix: `subprocess` has no `get_output`; the helper actually used
    # below is `check_output`. Also drop the bogus 'r' positional argument
    # (check_output takes only the argv list here) and strip the trailing
    # newline from the captured output before comparing.
    from subprocess import check_output
    assert check_output(['./fib.py', '10']).strip() == '55'
if __name__ == "__main__":
    # Only runs when executed as a script, not when imported as a module.
    from sys import argv
    main(argv)
|
import hashlib
import hmac
import time
class BitfinexAuth:
    """Builds authentication payloads and signed headers for the Bitfinex API."""

    def __init__(self, api_key: str, secret_key: str):
        self.api_key = api_key
        self.secret_key = secret_key

    def generate_auth_payload(self):
        """Return the payload dict for the websocket 'auth' event."""
        nonce = self._make_nonce()
        auth_payload = 'AUTH' + str(nonce)
        return {
            "apiKey": self.api_key,
            "authSig": self._auth_sig(auth_payload),
            "authNonce": nonce,
            "authPayload": auth_payload,
            "event": 'auth',
        }

    def generate_api_headers(self, path, body):
        """
        Generate headers for a signed payload
        """
        nonce = str(self._make_nonce())
        message = "/api/" + path + nonce + body
        return {
            "bfx-nonce": nonce,
            "bfx-apikey": self.api_key,
            "bfx-signature": self._auth_sig(message),
            "content-type": "application/json"
        }

    # private methods

    def _make_nonce(self) -> int:
        """Microsecond timestamp used as a monotonically increasing nonce."""
        return int(round(time.time() * 1000000))

    def _auth_sig(self, auth_payload) -> str:
        """HMAC-SHA384 hex digest of *auth_payload*, keyed by the API secret."""
        key = self.secret_key.encode('utf8')
        message = auth_payload.encode('utf8')
        return hmac.new(key, message, hashlib.sha384).hexdigest()
|
import json
from decimal import Decimal
from os import PathLike
from typing import Any, Tuple, Iterable, Union
import requests
# Public API of this module (names exported by ``from <module> import *``).
__all__ = [
    'tuplify',
    'json_loadf',
    'json_loadr',
    'json_loads',
]
def tuplify(value: Any) -> Tuple:
    """Coerce *value* into a tuple.

    - None becomes the empty tuple.
    - An existing tuple is returned unchanged.
    - Any other non-string iterable is converted to a tuple of its items.
    - Everything else (including strings) is wrapped in a 1-tuple.
    """
    if value is None:
        return ()
    # use the builtin ``tuple`` here: isinstance() against typing.Tuple is a
    # deprecated alias check (since Python 3.9) with identical behaviour
    if isinstance(value, tuple):
        return value
    if isinstance(value, Iterable) and not isinstance(value, str):
        return tuple(value)
    return (value,)
def json_loadf(path: Union[str, PathLike]) -> Any:
    """Open and deserialize a JSON file, returning a JSON-compatible Python object."""
    with open(path) as handle:
        text = handle.read()
    # floats become Decimal; Infinity/-Infinity/NaN are rejected
    return json.loads(text, parse_float=Decimal, parse_constant=_parse_invalid_const)
def json_loadr(url: str) -> Any:
    """Fetch and deserialize a remote JSON resource, returning a JSON-compatible Python object."""
    response = requests.get(url)
    # fail fast on any HTTP error status
    response.raise_for_status()
    # floats become Decimal; Infinity/-Infinity/NaN are rejected
    return response.json(parse_float=Decimal, parse_constant=_parse_invalid_const)
def json_loads(value: str) -> Any:
    """Deserialize a JSON string, returning a JSON-compatible Python object."""
    # floats become Decimal; Infinity/-Infinity/NaN are rejected
    return json.loads(
        value,
        parse_float=Decimal,
        parse_constant=_parse_invalid_const,
    )
def _parse_invalid_const(c):
"""Called when '-Infinity', 'Infinity' or 'NaN' is encountered in the
JSON-encoded input. These JavaScript constants are not strictly allowed
by the JSON data model."""
raise ValueError(f"{c} is not a valid JSON value")
|
import logging
log = logging.getLogger(__name__)
def get_answers_tree(project, snapshot=None):
    """Collect a project's (snapshot's) answers into a nested tree.

    Walks all values stored for *project*/*snapshot*, indexes them by
    attribute/set/collection, and then traverses the project's catalog to
    produce ``{'sections': [...]}`` where each section holds questionsets,
    each questionset holds questions, and each question carries its answers.
    Sections, questionsets and questions without any answers are omitted.
    """
    values = {}
    valuesets = {}

    # first we loop over all values of this snapshot
    # the values are gathered in one nested dict {attribute_id: set_index: collection_index: value}
    # additionally all values with an attribute labeled 'id' are collected
    # in a dict {attribute.parent.id: value.text}
    for value in project.values.filter(snapshot=snapshot):
        if value.attribute:
            # put values in a dict labeled by the value's attribute id,
            # the set_index and the collection_index
            if value.attribute.id not in values:
                values[value.attribute.id] = {}
            if value.set_index not in values[value.attribute.id]:
                values[value.attribute.id][value.set_index] = {}
            # NOTE(review): this check initialises a placeholder dict that the
            # assignment on the next line immediately overwrites — it is
            # effectively dead, but kept byte-identical here
            if value.collection_index not in values[value.attribute.id][value.set_index]:
                values[value.attribute.id][value.set_index][value.collection_index] = {}
            values[value.attribute.id][value.set_index][value.collection_index] = value

            # put all values with an attribute keyed 'id' in a valuesets dict
            # labeled by the parent attribute id
            if value.attribute.key == 'id':
                if value.attribute.parent.id not in valuesets:
                    valuesets[value.attribute.parent.id] = {}
                valuesets[value.attribute.parent.id][value.set_index] = value.text

    # then we loop over sections and questionsets to collect questions and answers
    sections = []
    try:
        # a project may have no catalog (or no sections attribute on it);
        # in that case there is nothing to traverse
        project.catalog.sections
    except AttributeError:
        pass
    else:
        for catalog_section in project.catalog.sections.order_by('order'):
            questionsets = []
            for catalog_questionset in catalog_section.questionsets.order_by('order'):
                if catalog_questionset.attribute and catalog_questionset.is_collection:
                    questions = []
                    for catalog_question in catalog_questionset.questions.order_by('order'):
                        sets = []
                        # for a questionset collection loop over valuesets
                        if catalog_questionset.attribute.id in valuesets:
                            for set_index in valuesets[catalog_questionset.attribute.id]:
                                valueset = valuesets[catalog_questionset.attribute.id][set_index]
                                # try to get the values for this question's attribute and set_index
                                answers = get_answers(values, catalog_question.attribute.id, set_index)
                                if answers:
                                    sets.append({
                                        'id': valueset,
                                        'answers': answers
                                    })
                        else:
                            # no explicit valuesets: probe set_index 0, 1, 2, ...
                            # NOTE(review): stops at the first set_index without
                            # answers, so set indices are assumed to be
                            # contiguous — confirm with the data model
                            set_index = 0
                            while True:
                                # try to get the values for this question's attribute and set_index
                                answers = get_answers(values, catalog_question.attribute.id, set_index)
                                if answers:
                                    sets.append({
                                        'id': '#%i' % set_index,
                                        'answers': answers
                                    })
                                    set_index += 1
                                else:
                                    break
                        if sets:
                            questions.append({
                                'sets': sets,
                                'text': catalog_question.text,
                                'attribute': catalog_question.attribute,
                                'is_collection': catalog_question.is_collection or catalog_question.widget_type == 'checkbox'
                            })
                    if questions:
                        questionsets.append({
                            'questions': questions,
                            'attribute': catalog_questionset.attribute,
                            'is_collection': True,
                        })
                else:
                    # for a plain (non-collection) questionset loop over questions
                    questions = []
                    for catalog_question in catalog_questionset.questions.order_by('order'):
                        # try to get the values for this question's attribute
                        answers = get_answers(values, catalog_question.attribute.id)
                        if answers:
                            questions.append({
                                'text': catalog_question.text,
                                'attribute': catalog_question.attribute,
                                'answers': answers,
                                'is_collection': catalog_question.is_collection or catalog_question.widget_type == 'checkbox'
                            })
                    if questions:
                        questionsets.append({
                            'title': catalog_questionset.title,
                            'questions': questions,
                            'attribute': catalog_questionset.attribute,
                            'is_collection': False
                        })
            if questionsets:
                sections.append({
                    'title': catalog_section.title,
                    'questionsets': questionsets
                })

    return {'sections': sections}
def get_answers(values, attribute_id, set_index=0):
    """Return the answers stored for *attribute_id* in set *set_index*.

    Answers are ordered by collection_index. A missing attribute or set
    yields an empty list.
    """
    answers = []
    try:
        # the try encloses the whole loop on purpose: a KeyError raised
        # mid-iteration leaves the answers gathered so far intact
        for _collection_index, value in sorted(values[attribute_id][set_index].items()):
            answers.append(value.value_and_unit)
    except KeyError:
        pass
    return answers
def is_last_owner(project, user):
    """Return True when *user* is the only remaining owner of *project*."""
    # not an owner at all: cannot be the last one
    if user not in project.owners:
        return False
    # user is an owner; they are the last one iff no other owner exists
    return project.owners.count() <= 1
def save_import_values(project, values, checked):
    """Persist the imported values the user confirmed via *checked*.

    Each entry of *values* is a dict with key 'value' (the imported value
    object) and optionally 'current' (an already-existing value to update
    in place). *checked* contains keys of the form
    ``"<attribute_uri>[<set_index>][<collection_index>]"``; only entries
    whose key appears there are saved.
    """
    for value in values:
        if value['value'].attribute:
            value_key = '{value.attribute.uri}[{value.set_index}][{value.collection_index}]'.format(
                value=value['value']
            )
            if value_key in checked:
                current_value = value.get('current')
                if current_value is None:
                    # no matching value exists yet: attach the imported one
                    # to the project and save it as a new row
                    value['value'].project = project
                    value['value'].save()
                else:
                    # make sure we have the correct value
                    # NOTE(review): asserts are stripped under `python -O`;
                    # raise explicitly if these invariants must always hold
                    assert current_value.snapshot is None
                    assert current_value.attribute == value['value'].attribute
                    assert current_value.set_index == value['value'].set_index
                    assert current_value.collection_index == value['value'].collection_index

                    # update the existing value in place with the imported fields
                    current_value.text = value['value'].text
                    current_value.option = value['value'].option
                    current_value.value_type = value['value'].value_type
                    current_value.unit = value['value'].unit
                    current_value.save()
def save_import_snapshot_values(project, snapshots, checked):
    """Persist imported snapshots and their confirmed values for *project*.

    Each entry of *snapshots* is a dict with keys 'snapshot' (the snapshot
    object), 'index' (its position, used in the checked key) and 'values'
    (a list of dicts holding a 'value' object each). *checked* contains keys
    of the form ``"<attribute_uri>[<snapshot_index>][<set_index>][<collection_index>]"``.
    """
    for snapshot in snapshots:
        # attach and save the snapshot itself first, without copying values
        # (copy_values=False — the values are saved explicitly below)
        snapshot['snapshot'].project = project
        snapshot['snapshot'].save(copy_values=False)

        for value in snapshot['values']:
            if value['value'].attribute:
                value_key = '{value.attribute.uri}[{snapshot_index}][{value.set_index}][{value.collection_index}]'.format(
                    value=value['value'],
                    snapshot_index=snapshot['index']
                )
                if value_key in checked:
                    # link the confirmed value to the project and its snapshot
                    value['value'].project = project
                    value['value'].snapshot = snapshot['snapshot']
                    value['value'].save()
def save_import_tasks(project, tasks):
    """Attach every imported task to *project*."""
    for imported_task in tasks:
        project.tasks.add(imported_task)
def save_import_views(project, views):
    """Attach every imported view to *project*."""
    for imported_view in views:
        project.views.add(imported_view)
|
import json
from .reader import Reader
from .register import add_reader
@add_reader
class JSON(Reader):
    """Reader that deserializes JSON content; registered via @add_reader."""

    # file extensions this reader is registered for
    extensions = ["json"]

    def loads(self, string, **options):
        """Parse *string* as JSON; *options* are forwarded to json.loads."""
        return json.loads(string, **options)
|
from model.contact import Contact
from model.group import Group
import random
def test_del_contact_to_group(app, orm, db):
    """Remove a contact from a group and verify it no longer appears there.

    Fixtures *app* (UI driver), *orm* and *db* (database access) are
    supplied by the test framework.
    """
    # precondition: at least one contact exists
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(first_name="Test_name", middle_name="Test_middle_name", last_name="Test_last_name",
                                   address="asdasdasd", home_number="123", mobile_number="131231231",
                                   birth_day="3", birth_month="March", birth_year="1996"))
    # precondition: at least one group exists
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="test3", header="dsffsd", footer="sggsdgs"))
    contacts = db.get_contact_list()
    contact = random.choice(contacts)
    groups = db.get_group_list()
    group = random.choice(groups)
    contacts_in_group = orm.get_contacts_in_group(group)
    # if the group is empty, put the chosen contact into it first
    # NOTE(review): when the group already has members, the randomly chosen
    # contact may not be one of them, making the deletion below a no-op —
    # confirm this is the intended coverage
    if contacts_in_group == []:
        app.contact.add_contact_to_group(contact.id, group.name)
    app.contact.del_contact_to_group(contact.id, group.name)
    contacts_in_group = orm.get_contacts_in_group(group)
    assert contact not in contacts_in_group
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.0)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x3d\xdf\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\xea\x00\x00\x00\x50\x08\x06\x00\x00\x00\xd6\x4b\x36\x9d\
\x00\x00\x20\x00\x49\x44\x41\x54\x78\x9c\xed\x9d\x07\x78\x54\x65\
\xda\x86\x9f\xe9\x33\xe9\x95\x34\x12\x20\xa1\xf7\xae\x62\x43\x8a\
\x05\x54\x6c\xd8\xb0\xef\xda\xd6\xee\xda\xfe\xb5\x77\x2c\xbb\x76\
\x5d\xd7\x75\xed\x65\x41\x54\x44\x91\xa2\x22\x88\x22\xbd\xf7\x1e\
\xd2\x0b\x81\xb4\xe9\x33\xff\xf5\xbc\x53\x08\xc9\xa4\x27\x10\xd6\
\xef\xf6\x9a\x0b\x33\x73\xe6\xcc\x39\x67\x92\xf3\x7c\x6f\xd7\xc4\
\xfc\x72\x83\x4b\xab\xd1\x68\xe1\xc7\xed\xf5\xc0\xe5\x75\xc3\xe3\
\xf5\xc2\x03\x2f\x8e\x11\x5c\x00\xec\xc7\xca\xc1\x2a\x14\x0a\x85\
\xe2\x7f\x12\x13\x00\x7d\x4b\x4f\x4c\x0b\x0d\xb4\x1a\x2d\xf4\x1a\
\x2d\x74\x87\x64\x19\x7a\xab\xc7\xa1\xfb\x1f\xb8\x5a\x06\xff\x43\
\xa1\x50\x28\x14\x8a\x63\x12\x1a\xc7\x1e\xaf\x5b\x8c\xe5\x9a\x68\
\xd5\xd7\xa9\x50\x28\x14\x0a\x45\xc7\x45\x09\xb5\x42\xa1\x50\x28\
\x14\x1d\x18\x25\xd4\x0a\x85\x42\xa1\x50\x74\x60\x94\x50\x2b\x14\
\x0a\x85\x42\xd1\x81\x51\x42\xad\x50\x28\x14\x0a\x45\x07\x46\x09\
\xb5\x42\xa1\x50\x28\x14\x1d\x18\x25\xd4\x0a\x85\x42\xa1\x50\x74\
\x60\x94\x50\x2b\x14\x0a\x85\x42\xd1\x81\x51\x42\xad\x50\x28\x14\
\x0a\x45\x07\x46\x09\xb5\x42\xa1\x50\x28\x14\x1d\x18\x25\xd4\x0a\
\x85\x42\xa1\x50\x74\x60\x94\x50\x2b\x14\x0a\x85\x42\xd1\x81\x69\
\xf1\x94\x0f\x85\x42\xf1\xbf\x83\xf7\xd8\x99\x94\xa7\x50\x74\x58\
\x34\xd0\xb4\xcb\xa1\x51\xa8\x4b\x00\x24\xa8\xaf\x5e\xa1\xf8\x63\
\xa1\xd1\xea\x61\xb3\x1f\x00\x9c\x07\x95\x73\x4d\xa1\x68\x03\x74\
\xfa\x30\x68\xb4\x21\x06\x39\xea\xcc\xd0\x69\x74\xb2\x24\x6e\x09\
\x14\xea\xdd\x4a\xa8\x15\x8a\x3f\x16\x1a\x8d\x0e\x36\x5b\x31\xe2\
\x4c\x09\xb8\xa7\xc7\x95\xc8\x30\xa9\x5b\x80\x42\xd1\x1e\xac\xa8\
\xd8\x85\x97\xb3\x67\x52\xad\xa1\x37\x84\xc1\xeb\x6d\xbe\x58\x2b\
\xd7\xb7\x42\xf1\x07\xc4\xe1\xaa\x86\x45\x67\xc1\xef\xc3\x9f\x42\
\x0f\x4b\xb2\xfa\x15\x50\x28\xda\x89\x29\x49\xa3\xd0\x37\x3c\x0d\
\x37\xac\x7b\x01\xd0\x99\xa0\xd3\x34\xdf\x7b\xa5\xfc\x5d\x0a\xc5\
\x1f\x10\x8f\xbd\x14\x63\x3b\x1d\xa7\x44\x5a\xa1\x38\x02\x5c\x9f\
\x72\x1a\x12\x22\xbb\xc1\xe5\xaa\x6a\xd1\x87\x1d\x15\x8b\xda\xe9\
\x75\xc3\xe3\x76\xf0\x6e\x01\x78\x9c\x35\x5e\xd1\x00\x5a\xbd\x6f\
\xd5\xa1\x35\x42\x2f\x3e\x7d\x85\x42\xd1\xf6\x78\x11\x11\x2a\x96\
\xa6\x50\x28\xda\x85\x48\xad\x09\x25\x5e\x4f\x8b\x76\x7d\x84\x84\
\x5a\x03\xbb\xdb\x06\x38\xca\x7c\xc2\x6c\x88\x44\x98\x29\x0e\x9d\
\x4d\xe9\x88\xd3\x87\x07\xb7\xf2\xc0\x8b\x52\x67\x25\x72\xec\xfb\
\x61\xb7\xef\x87\x9b\xab\x0f\x9d\x19\x30\xc6\xc0\xa4\x35\xb6\x38\
\x10\xaf\x50\x28\xea\xfe\x4d\x3a\x3c\x2e\x75\x55\x14\x8a\x23\x80\
\xcb\xeb\x96\x07\x5a\x98\x15\xde\x8e\x42\xad\x61\xc6\x0a\xec\xcc\
\x2a\x75\x1c\x00\x2c\x9d\x30\x3e\xe9\x64\x8c\x8b\xeb\x8f\x91\x51\
\x3d\xd0\x3b\x2c\x05\xc9\xc6\x98\x3a\xef\xa2\x14\xe7\xda\xf7\x63\
\x53\x55\x2e\x7e\x3b\xb8\x15\xf3\xcb\xd6\xe3\xb7\xb2\x8d\xb0\x57\
\xe7\x03\xe6\x04\x98\x0d\x11\xf0\xb6\x70\x55\xa2\x50\x28\x14\x0a\
\xc5\xb1\x46\xbb\x08\xb5\x64\x94\xba\x2a\x01\x6b\x11\xe2\x23\xb3\
\x70\x63\xe6\x64\x5c\x95\x7c\x0a\x7a\x85\xa5\x36\xfe\x5e\x00\x9d\
\xc5\xda\x8e\xc3\xe9\x71\x03\xf0\x58\xb7\x8b\xb0\xaa\x62\x17\xde\
\xcd\x5b\x80\xb7\xf3\x7e\x80\xad\x32\x1b\x86\xf0\xce\xd0\x69\x34\
\x2d\xca\x9e\x53\x28\x14\x0a\x85\xe2\x58\xa2\x4d\x85\x9a\xc5\xde\
\x5e\x8d\x06\xb6\xea\x5c\x71\x59\xff\xb5\xe7\xb5\x78\xa8\xeb\x85\
\x88\xd1\x87\xb5\x6a\xbf\x43\x23\x33\x31\xb4\x57\x26\xee\xce\x38\
\x1b\x77\x6d\xff\x00\xb3\xf6\x7d\x07\x67\x58\x2a\xcc\xfa\x70\x78\
\xc5\x9d\x70\xf4\xf1\x78\x3d\xd0\x68\x34\x21\x0b\xde\xf9\x9c\x2d\
\x10\x8f\xd7\x1a\x61\xd6\x19\x8f\xd8\x22\xc3\xe5\xf5\x40\xab\xd1\
\x40\xdb\x44\x97\x8b\xd7\xef\xa6\x61\x7e\x40\xfb\x94\xee\x2b\x14\
\x0a\x85\xa2\x39\xb4\x99\x50\x53\x8c\x18\x63\x76\x54\xec\x41\x56\
\x6c\x3f\x7c\xda\xef\x4e\x8c\x8c\xca\x6a\xd3\x2f\x23\xcb\x92\x84\
\x6f\x06\xde\x87\x87\xc2\x33\xf0\xf4\xf6\xf7\x60\x73\xdb\x61\x36\
\xc5\x1d\x55\xb1\xd6\x68\xb4\x70\x79\x5c\x70\xd9\x8a\xe5\x2a\x68\
\xcd\x09\x30\x6a\xf5\x87\xb9\xe7\x79\x5d\x8c\x5a\x03\xc2\xf4\x11\
\xa8\xf6\xd8\xe1\xf1\x7a\xeb\x88\xa0\xef\xfa\x79\xe0\x70\x54\xc8\
\x3b\x34\x86\x08\x89\xcb\xb7\xd4\xcd\x1f\x58\x34\xb9\x1d\xe5\xbe\
\x58\xbf\x21\x1c\x66\x86\x1a\xbc\x9e\x90\x91\x7e\x9e\x87\xd3\xe3\
\x82\xdb\x56\x02\x68\x75\x80\x29\x9e\x1d\x31\x64\xfb\xd6\x20\xde\
\x15\xb7\x15\x70\x94\x03\x1a\x1d\x0c\xc6\x18\xe8\xb4\xda\x46\x17\
\x2a\xf2\x3e\x67\x39\xe0\xac\x00\xb4\x26\xf9\x9e\xf1\x3f\xd0\x41\
\xcb\x77\x5e\x95\x00\x3d\x4e\xfa\x70\x98\x0d\x91\x8d\xfe\xfe\xf2\
\xbb\x71\x7b\xdd\x70\xda\x4a\x01\x6e\x6b\x8c\x81\x59\x1f\xd6\x61\
\x16\xa9\x0a\x85\xa2\x7d\x69\x93\xf2\xac\xa0\x48\x57\xee\xc1\xb8\
\x94\xd1\xd8\x74\xfc\x2b\x6d\x2e\xd2\x35\x79\x2a\xeb\x52\x7c\x3a\
\xe4\x31\xb1\x50\x6d\xf6\x52\xb9\xf9\x1d\x2d\x9c\x1e\x27\x5c\xce\
\x4a\xbc\xda\xff\x4e\x5c\xd3\x75\x12\xbc\x6e\x9b\xdc\x54\x03\xd0\
\x3a\x75\x38\xca\xf0\x5a\xf7\xab\xb1\xfd\xb8\x17\xf1\x48\x97\xf3\
\xe1\xb0\x15\x89\xe0\xd4\x94\x1c\x37\x2d\x5f\x68\xd1\x35\x22\x03\
\x3d\xa2\xb2\x10\xa9\x33\xc3\xdd\x8a\x64\x1f\x17\xbc\x92\x90\xf7\
\x78\x8f\x2b\xf0\x42\xbf\xdb\x91\x15\x9e\x01\xa7\xdb\x81\xd0\x32\
\x0d\x39\x66\x1e\xfb\x94\x8c\xb3\xf1\xc9\xc0\x07\xe0\x70\xdb\xe1\
\x60\x66\x7e\x2b\x71\xbb\xed\x48\x34\x46\x61\x64\xc2\x50\x0c\x8c\
\xed\x0b\x83\x56\x07\xb7\xa7\x61\x81\xe1\x31\xba\xdc\x56\x64\x86\
\xa7\x63\x42\xea\x58\x0c\x8b\x1b\x00\x37\x3c\xf2\x38\xd6\xe1\xf5\
\xe8\x6c\x49\xc2\x89\x89\x23\x91\x11\x96\x22\x3f\x37\x04\x7f\x4b\
\xf8\xbd\x19\x35\x7a\x9c\x98\x38\x02\xa7\xa7\x8c\x46\x8a\x39\x5e\
\xae\x8f\x42\xa1\xf8\x63\xd0\x26\x42\xed\xd1\x68\xe0\xa8\xdc\x8b\
\x71\x29\x63\x31\x7f\xc8\x23\x30\xb6\xa0\xa0\xbb\xb9\x5c\x96\x74\
\x02\xbe\x19\xfe\x14\xe0\xb6\x8a\xe5\xa5\x39\x02\x9f\x59\x07\x8d\
\x0e\xee\xaa\x7d\x18\x9b\x38\x1c\xb7\xa5\x9d\x8e\xf7\x7a\xdd\x80\
\x7b\xba\x9c\x07\xa7\xb5\x08\x76\x5b\x09\xec\x95\xd9\x70\xd3\x2a\
\x84\x06\x7d\xc2\x52\x90\x60\x88\x44\xa6\x25\x49\xac\x54\x87\xdb\
\x0a\x47\x55\x8e\x24\xc9\xd1\xba\x76\xba\xad\xb0\xe8\x8c\x58\x39\
\xfc\x29\x6c\x1b\xf9\xa2\x2c\x74\x9c\xb6\xa2\x16\x1f\x9a\x8b\x59\
\xf6\x5a\x23\x1e\x48\x3f\x07\xf7\xa4\x4f\xc0\xa7\xfd\x6e\x85\xdb\
\x59\x01\x67\x3d\x22\xe9\xac\xce\xc7\xcd\xe9\x67\xe1\xe3\x3e\x37\
\xe3\xf2\xa4\x51\x38\x21\xa6\x17\xbc\xd6\x02\x9f\x55\xdd\x0a\x78\
\x0e\x67\xc6\x0d\xc2\xd2\xa1\x8f\x63\xf5\xf0\xa7\x11\xab\x0f\x83\
\xb3\x4e\x2d\xa1\x06\x6e\x2e\x2c\x5c\xd5\xb0\x57\xe7\xc1\x59\x5d\
\x00\x57\x55\x2e\xee\xec\x7c\x26\xbe\x1b\x70\x0f\xa6\xf7\xbb\x1d\
\x4e\xb7\x5d\x1e\xde\x10\xff\x1d\x2b\x88\xe8\xda\x0a\x71\x57\xfa\
\x99\x58\x3c\xe4\x11\x3c\xd8\x65\x92\x5c\x9f\x50\x8b\x27\xbb\xc7\
\x25\x0b\x2d\x47\x75\x3e\xdc\xd5\x79\x88\xd1\x87\xcb\x7b\xe6\x0e\
\xbc\x0f\x67\xc4\x0d\x80\xcb\x5a\x18\xf2\x5a\xa8\x9e\xdd\x0a\xc5\
\xff\x1e\xad\x76\x7d\xd3\x9a\xb5\x57\xe7\x20\x2b\xb6\x3f\xe6\x0f\
\x79\xe8\x88\x5e\xa0\x73\xe2\x06\xe1\xe5\xfe\x7f\xc5\x9d\x6b\x9f\
\x81\x4b\x67\x69\x51\xc7\x97\x96\xc2\x85\x01\xad\x79\x98\x12\xf0\
\x71\x9f\xbf\x04\xf7\xb2\xa0\x6c\x23\xe0\x71\xe1\xfc\xf4\xd3\x31\
\x30\x22\x03\xdf\xef\x5f\x8b\x65\x05\xbf\x60\xbf\x5f\x9c\x0e\xba\
\xaa\x01\xc7\x41\x0c\x4b\x18\x8e\x8b\xb3\x8e\xc3\x76\x6b\x01\x3e\
\x2c\x5c\x0c\xb8\xed\x30\x68\x74\xc1\x72\x35\x5a\x50\x74\x73\xd6\
\xbe\x89\x6b\x9a\xd0\xf8\x5d\x5e\x77\x1c\xc4\xc8\xc4\xe3\xc4\x0d\
\x4f\x16\x1e\xd8\x0c\xb8\x2a\xa0\x33\x46\x87\xb6\xaa\x75\x16\x2c\
\x28\xdb\x24\x37\x7a\xbe\xff\xf5\xee\x57\x63\x58\xd1\x52\x29\xab\
\xa3\xdb\xbe\xbe\xdb\x7f\x53\x8e\x47\xeb\x5f\x0f\x6a\xfd\x5b\xc3\
\xff\x19\xac\x0a\xb0\xd1\x9d\x4b\x37\xb0\x46\x8f\x88\xb0\x14\x4c\
\x4c\x3e\x05\x63\x63\xfb\xe1\x86\x8d\xaf\x4a\xf6\x3f\x24\xfe\xef\
\x05\x68\xdd\xdb\x4b\xe0\xd0\x59\xea\x7e\x80\x29\xc6\xe7\x42\x3e\
\x46\xca\x8d\x98\x33\x40\x74\x9a\x43\xd7\x2d\x18\x22\x10\xf7\x36\
\x4b\x18\xa3\xd1\x2b\xaa\x3b\xae\x4b\x3d\x0d\xab\xca\x77\xe1\xbf\
\x45\xbf\x1d\xda\xd6\xff\xfd\x3a\x34\x21\xfe\x7c\xb5\x3a\x98\xcd\
\x9d\x64\xab\x23\x29\xda\x55\x55\xd5\x28\x28\x28\xc4\x81\x03\x07\
\xe0\x74\x3a\x61\x30\x18\x10\x15\x15\x85\xe4\xe4\x4e\x88\x8c\x8c\
\x3c\x62\xc7\xa1\x50\xfc\x2f\xd2\x2a\xa1\xe6\x0d\xc3\xce\x78\x9b\
\xd6\x84\xd9\x83\x1b\x17\xe9\x72\xb7\x5d\x6e\xfa\xe6\x26\x0a\x6a\
\x85\xc7\x21\x49\x4d\x96\x06\x5c\xdb\x77\xa4\x8d\xc7\xac\xd2\x55\
\xf8\x31\x77\x3e\x74\x11\x5d\x5a\x1d\x53\x6d\x0a\x3c\x6f\x17\x13\
\xc3\x6c\xa5\x78\x77\xf8\x33\x48\x36\x46\xcb\xbb\x6e\xdf\xf9\x09\
\x56\x14\x2c\x94\x3a\xf1\xbf\x75\x39\x17\xc3\x23\x33\x11\xa1\x33\
\x63\xd9\x9e\xaf\x0f\xdf\xab\xbd\x4c\x5e\xbb\x2f\xe3\x6c\xf9\xf1\
\x3f\x05\xbf\x00\x1e\x1b\x2c\x52\x2b\xee\x63\x73\x75\x1e\x50\x9d\
\x0f\x67\x6d\xa7\x07\xaf\x9f\xa5\x13\xe0\xad\xdf\x7a\xf2\xf0\xfe\
\xef\xb6\xe2\xbc\x84\xa1\xc1\xe7\x3e\x28\x58\x0c\xd8\x0f\xc0\xa5\
\xa9\xa7\x1e\x5d\xa3\xc5\xa6\xbd\x5f\xe3\xc2\xf0\x34\x7c\xd9\xef\
\x0e\x0c\x8d\xec\x8a\xb3\xd2\xc6\xe1\xfb\x6d\xef\xc1\x11\x96\x56\
\xff\x55\xd1\x99\x61\x36\x27\xc8\xa2\x22\xf4\xd1\x68\xfc\xf5\x83\
\x80\xd5\xe3\xf4\x1f\xb3\x46\xdc\xd8\x5e\xb7\x0b\x7d\xa3\x7b\xe1\
\x94\x98\x5e\x18\x1d\xd3\x47\x04\x3a\xc1\x10\x25\xdb\xfe\x56\xb9\
\x0f\xbb\xac\x3e\x8f\xc2\x5e\x5b\x89\x84\x04\x6e\xe9\x75\x9d\x2c\
\xc6\x3c\x35\xe2\xdb\xbc\x66\xef\x17\x2e\xc2\xca\xfd\x1b\x61\x30\
\x46\x35\xf1\x1b\x3c\x9a\x1c\xaa\x5f\xb6\xcb\xbf\x3e\xb1\x76\xbb\
\x6d\xb2\x48\x3b\x31\x75\x08\x4e\x8c\xee\x81\xd3\x62\xfb\x61\x64\
\x54\xf7\xe0\x71\xae\xaa\xdc\x8b\x6c\x7b\x29\x32\x4c\xf1\x28\x74\
\x1c\xc4\x98\xf4\xb3\x31\x39\xf9\x24\x54\xd5\x70\x9d\x9b\xb4\x06\
\xe4\xd9\xcb\xf0\x62\xce\x77\xf0\x78\x3c\xd0\x6b\xdb\xbf\x4d\x42\
\x4e\x4e\x2e\x96\x2d\x5b\x89\x3d\x7b\xf6\xa2\xa4\xa4\x14\xd5\xd5\
\xd5\x70\xb9\x5c\xd0\xe9\xf4\x08\x0b\xb3\x20\x2e\x2e\x16\x5d\xba\
\x64\x60\xd8\xb0\x21\xc8\xca\xea\xd6\x91\xbe\x88\x56\xb3\x6a\xd5\
\x2a\xf4\xed\xdb\x17\x66\xb3\xf9\x98\x3a\x6e\x7e\x3f\x7a\xbd\xea\
\x1e\x7d\x2c\xd1\xaa\x6f\x8b\x82\xe0\xb5\x15\xe2\xee\xde\x37\xa1\
\x27\xc5\xa3\x01\xde\x2b\x58\x84\xbb\x37\xbd\x09\x93\xce\x8c\xf7\
\x06\xdc\x85\xb3\xe2\x06\x35\xb8\xfd\x07\x05\xbf\xe0\xae\xcd\x6f\
\xc2\xa0\x35\xe0\xed\x7e\x77\xe0\xbc\x84\x61\xf5\x6e\xfb\x61\xef\
\x1b\x91\x56\xbc\x42\xfa\x17\x1b\x75\xed\xff\x47\x43\x57\xbf\xab\
\x7c\x0f\x26\x75\xbb\x10\xd7\x25\x9f\x24\xcf\x2d\xa9\xd8\x85\xd7\
\xb6\x7f\x04\x4d\x58\x2a\xbc\xae\x6a\xec\xb3\xef\x17\x31\x2e\x91\
\x64\xa8\x5a\x1d\xa0\xb4\x7a\x94\xfb\x63\x8c\x7b\x6c\x25\xbe\x5b\
\xb5\xd7\x0b\x4b\x8d\xed\x1e\x48\x3f\x1b\x7b\x13\x46\x20\xd9\x1c\
\x1f\x14\x27\x8a\xd2\x76\x6b\x3e\x5e\xc8\x9e\x25\x16\x58\x7d\x9d\
\xdb\x1c\x2e\x1b\x60\xee\x84\xcb\x3b\x9d\x20\x3f\xff\x72\x70\x2b\
\xf6\xda\x4b\xf0\xea\x88\x67\xe5\x3d\xee\x10\x8b\x19\x2e\x3e\x1c\
\xee\x6a\xe8\xb4\x66\xb1\xb8\x69\xfd\xbe\xd7\xf3\x4f\xf8\x24\x32\
\x13\x06\x4e\x84\x09\x61\x35\x87\xe9\x4c\xf8\xbd\x7c\x07\xde\xcd\
\x99\x03\x9d\xce\x02\x6d\x33\x3c\x1a\x4e\x7f\xfc\x7b\xce\xc0\xfb\
\x90\x6e\x8e\x3f\xec\xb5\x5f\x0f\x6e\x45\x7e\xc5\x0e\xf4\x8e\xea\
\x21\x3f\xf3\x5a\x0e\x8d\xea\x86\x7b\xd2\x27\x86\xdc\xd7\xa6\xea\
\x5c\x2c\xcf\x5f\x0c\x1c\x13\x42\x1d\x1a\xba\xc0\x2f\xce\xbc\x14\
\x6f\xf5\xbc\xee\xb0\xd7\x8b\x1d\xe5\xf8\xa6\x78\x19\x2c\x3a\x53\
\xf0\xfb\x2e\x74\x1e\xc4\x63\x99\x97\xe1\xec\x98\xbe\x75\xf6\xc5\
\x85\xd0\xd4\x7d\xb3\xe0\xf5\x38\xdb\x5d\xa8\x67\xcf\x9e\x87\xdf\
\x7f\x5f\x86\xd2\xd2\xfd\xb0\x58\x2c\x48\x4a\x4a\x14\x31\x36\x99\
\x4c\x70\x38\x1c\x28\x2b\x3b\x80\xa2\xa2\x62\xec\xd9\x93\x8d\xb5\
\x6b\xd7\x8b\x58\x4f\x9a\x34\x11\x3a\xdd\x91\xcb\x29\xf9\xfe\xfb\
\xef\xf1\xfa\xeb\xaf\xe3\xae\xbb\xee\xc2\xb8\x71\xe3\xda\x6c\xbf\
\xff\xf7\x7f\xff\x87\xa9\x53\xa7\xe2\xe3\x8f\x3f\xc6\x94\x29\x53\
\xda\x6c\xbf\xed\xcd\x25\x97\x5c\x82\xbd\x7b\xf7\xe2\xf7\xdf\x7f\
\xaf\xf3\x49\xd7\x5e\x7b\x2d\xb6\x6c\xd9\x82\x6f\xbf\xfd\x16\xf1\
\xf1\xf1\x47\xfa\xd0\x14\x0d\xd0\xaa\xbf\x64\x87\xb3\x12\x96\xf0\
\x0c\x3c\xde\x65\x52\x83\xdb\xed\xb2\x16\xe3\xba\xf5\x7f\x07\x18\
\x37\x75\xdb\x71\xfe\x86\x97\x51\x7a\xe2\x3f\x11\xae\x33\x85\xdc\
\x9e\x9d\xc9\xae\xd9\xf0\x12\x40\x6b\xdd\xeb\xc2\x05\x1b\x5e\x46\
\xd9\x49\x6f\x23\xba\x9e\x32\xaf\x54\x63\x0c\xae\xee\x7c\x06\x3e\
\xd8\xf9\x19\x40\xab\xba\x1d\x5d\x7e\xe2\xea\xaf\xda\x87\xb4\xd8\
\x7e\xf8\xa2\xef\x6d\xf2\x1c\xad\xc3\xf3\xd7\x3e\x27\x31\x6b\x8e\
\x39\x6b\x4e\x3f\xd7\xa0\x55\xec\x75\x23\xde\x70\xc8\x45\x78\x43\
\xea\x98\x7a\xdf\xf3\xdc\xee\x2f\x44\xfc\xf5\x21\x6e\x78\x22\xa8\
\xb6\x62\x8c\x49\x1b\x87\x2e\x66\xdf\x44\xa4\x7f\xe4\xcc\x11\x57\
\x3a\xe3\xe8\xcd\x21\xc9\x18\x8d\xbb\xbb\x9c\xd7\xe0\x3b\x4e\x8e\
\xee\x85\x77\x76\x4f\x97\xb6\xaf\x74\x71\x1f\xca\x82\x2f\xf1\x7d\
\x0f\xd5\x79\x62\x01\x06\xce\x35\x8f\xd9\xf1\x6c\x5e\xc3\xef\xde\
\xd2\x29\x18\xae\x58\x59\xb1\x07\x1f\xe4\xfd\x80\x05\xe5\xdb\xb1\
\xe1\xe0\x36\xa0\x32\x1b\x63\x86\x3d\x21\xaf\xa5\x9b\xe2\xb0\xba\
\x7c\x37\x5e\xdc\xf7\x9d\x6c\x6f\x77\x3b\x60\xd6\x99\x70\x63\xea\
\x18\x59\xbc\x94\xbb\xac\xbe\xd6\xb3\x1d\x10\x8d\x46\x0f\x1b\x7f\
\x1f\xd8\x95\x8f\x54\xe7\xa1\xac\x66\x18\x84\x31\x79\xae\x81\x1c\
\xe5\x22\xc6\xc4\xe6\x71\xe2\x3f\xf9\x0b\x31\xaf\x74\x35\x16\x1c\
\xdc\x82\xf2\xb2\x8d\x88\x88\x1d\x80\x64\x83\xcf\x73\x93\x64\x88\
\xc6\x4b\xbb\xa6\x63\x5f\xf2\x49\xa8\x76\xdb\x51\xed\xac\xc4\xf1\
\xb1\x7d\x31\x3e\xb6\x3f\x76\xdb\x8a\x83\x19\xff\xed\x85\xcb\xe5\
\xc6\xc7\x1f\x7f\x86\x55\xab\xd6\xc2\x64\x32\x62\xd8\xb0\xc1\x18\
\x34\x68\x00\x32\x33\xbb\x21\x36\xf6\x50\x13\xa3\xf2\xf2\xf2\xa0\
\x48\x6f\xdc\xb8\x19\x3f\xff\xbc\x08\x85\x85\x85\xb8\xfa\xea\x29\
\x08\x0b\x6b\x5d\xc9\x66\x53\x59\xb6\x6c\x19\x66\xcf\x9e\x8d\x33\
\xcf\x3c\xb3\x4d\x85\x7a\xf8\xf0\xe1\xf2\xef\xab\xaf\xbe\x8a\x73\
\xce\x39\x47\xdc\xfc\x4d\x65\xfa\xf4\xe9\xd8\xb6\x6d\x1b\xa2\xa3\
\xa3\xa5\xfa\x41\xab\xd5\x4a\x69\x27\xbd\x20\xfc\x59\xd3\x84\xef\
\x6e\xff\xfe\xfd\x38\xfe\xf8\xe3\x71\xfa\xe9\xbe\xbf\xe9\xaf\xbe\
\xfa\x0a\x6f\xbe\xf9\x26\xfa\xf7\xef\x8f\x83\x07\x0f\x4a\xf8\x81\
\x04\x8e\x6b\xf7\xee\xdd\x78\xff\xfd\xf7\x31\x71\xe2\x44\x5c\x7d\
\xf5\xd5\xf8\xe7\x3f\xff\x89\x9b\x6e\xba\x29\xb8\x3f\x7a\x07\xf8\
\xfa\x29\xa7\x9c\x22\x8b\xae\xda\x3c\xfb\xec\xb3\x58\xb2\x64\x09\
\x8c\x46\xa3\x1c\x67\x73\xe0\xf9\xb8\xdd\x6e\x0c\x1a\x34\x08\x8f\
\x3f\xfe\x78\x9b\x7d\x07\x7f\x24\x5a\x7c\x77\x13\x41\xb0\xef\xc7\
\x25\x99\x97\x8a\x7b\xb7\x21\xec\x5e\xf6\xf5\x76\xf8\xda\x81\x32\
\x69\xa8\x81\xec\x63\x22\xae\x41\x6e\xaf\x37\x4b\x66\xb7\xd7\xe3\
\x38\xcc\xdd\x19\x8a\x5b\x52\x4f\xc3\x07\xd9\xb3\xe0\xf4\xba\x24\
\xd6\xdb\x1e\x04\x46\x03\x32\x9e\xfb\xe3\xd0\xc7\xa0\xf7\x8b\xcc\
\x25\x9b\xde\x42\x61\xd9\x7a\x20\x32\xb3\x8e\xdd\xb9\x9f\x8b\x8d\
\xaa\x5c\xb9\xa1\x12\x11\x95\xaa\x1c\x14\xb1\x5c\xa9\x66\x43\x39\
\x8f\x13\xa9\x26\xdf\x4d\x8e\xbd\xd0\xd7\x57\xe5\xf8\x4b\xb3\x34\
\xe8\x1b\x9e\x2a\x82\xb4\xcf\x5e\x8a\x1f\x19\x03\xd7\x1a\xea\xcd\
\x74\x77\xfa\x33\xa3\xef\xf4\x8b\x32\xaf\xf3\xac\x92\x55\x88\xd7\
\x87\x8b\xeb\x94\x37\x82\xa6\x64\x4f\x7b\x9b\xd0\xec\xce\xa4\x31\
\xe0\x87\xb2\x0d\xf2\xbd\x06\x2c\x6e\xb7\xc7\x01\x8b\xd6\x8c\xe3\
\x92\x4f\x96\xef\xe1\x80\xad\x58\xdc\xda\x90\x98\xac\x16\x17\x75\
\x1a\x85\xe2\xe8\x32\x59\xd4\xac\xa8\xdc\x8d\x52\x57\x25\x52\x4d\
\xb1\x78\x29\xe7\x7b\x7c\xb2\xf1\x15\x20\xba\xa7\xc4\x67\x61\x4e\
\x0c\xba\xcc\x53\x8c\x31\xd8\x5d\x99\x8d\x7b\x65\xfa\x8c\x59\x62\
\xed\x30\xc6\x62\x72\xe2\x71\x48\x33\x19\x1b\x3c\xc6\xa3\x8d\xcb\
\x5d\x8d\x34\x73\x02\x06\x27\x0c\x93\xdf\xcd\x72\x5b\x29\x06\x85\
\x67\xc8\x51\xf5\x0d\x4b\xc3\x09\x9d\xcf\x44\x42\x58\x12\x56\xee\
\x5f\x2f\xed\x73\xe1\xf7\x20\xdc\xb2\xee\x79\xc0\x63\x05\x2c\x29\
\x40\x78\x67\xa9\x02\xe0\xef\x85\x49\xa3\x47\xa2\x21\x12\x1f\xec\
\x9a\x86\x9f\x8a\x96\xf8\xce\xae\x6a\x0f\x2e\xec\xfd\x17\x11\xea\
\xf6\x1a\x5c\x5f\x93\xcf\x3e\x9b\x86\x95\x2b\xd7\x20\x36\x36\x1a\
\xe3\xc6\x9d\x86\x53\x4e\x39\x29\xe4\x76\x14\x89\x81\x03\xfb\xcb\
\x63\xd9\xb2\x15\x98\x33\xe7\x07\x6c\xda\xb4\x15\x1f\x7e\xf8\x19\
\xae\xbf\xfe\x9a\x23\x62\x59\x07\x44\xa7\xad\x2d\xc4\x0b\x2f\xbc\
\x10\xb7\xdd\x76\x1b\x5e\x7b\xed\x35\x11\xb9\xd1\xa3\x47\x37\xf9\
\xbd\x8f\x3d\xf6\x18\x36\x6d\xda\x14\xfc\x99\xe2\xc7\x38\x7e\x69\
\x69\x69\xb3\x8e\x61\xd2\xa4\x49\x41\xa1\xde\xb7\x6f\x1f\x7e\xf8\
\xe1\x07\xac\x5c\xb9\x12\xe1\xe1\xe1\x92\x23\x40\x2a\x2b\x2b\x45\
\xb4\xab\xaa\xaa\x64\x9b\xab\xae\xba\x0a\x9f\x7e\xfa\x29\x6e\xbe\
\xf9\x66\x11\xce\x13\x4e\xf0\x79\xdd\xfe\xf2\x97\xbf\xc8\xb5\xa2\
\xe0\x87\x5a\x44\x2d\x5c\xb8\x10\x73\xe7\xce\x45\x8f\x1e\x3d\x64\
\x61\x51\xb3\xbc\x32\xb0\xb0\x08\x55\x72\xa9\xf1\x37\xa6\xe2\xc2\
\xa4\xb8\xb8\x58\x09\x75\x0b\x69\xb1\x50\x3b\x29\x22\x5a\x23\xce\
\x8f\x1f\xda\xe8\xb6\x7d\xc2\xd2\xf0\x64\x9f\x9b\xf0\xf0\xe6\xb7\
\xa1\xd5\x87\xe1\xbd\xbe\x37\xc9\x8d\xa7\x3e\x32\x2d\x9d\xf0\x5c\
\xef\x9b\x70\xff\xe6\xb7\xe4\x33\xfe\xd9\xfb\x46\xc4\x1a\xc2\x1b\
\xfc\x8c\x11\x91\x59\x18\x1a\x3f\x18\xab\x4a\x56\xca\x4d\xbe\x3d\
\x60\x66\x36\xc5\x67\xc6\xd0\xc7\xd0\x8b\xd9\xdb\x00\x5e\xcb\xfb\
\x09\x33\x76\x7d\x0e\x6d\x64\x96\x88\xa2\x53\xac\x45\x63\xf0\x86\
\x39\x38\xb2\x0b\x86\xa6\x9f\x89\x34\xb3\xaf\x0e\xb8\x47\x58\x12\
\x86\xa4\x4f\xc0\xa8\x68\x9f\x5b\xd7\x53\xc3\xa2\x66\xfc\x91\x2c\
\x2b\xdf\x89\x93\x7e\xbf\xcb\x97\x71\xad\xd1\x63\xfd\xa8\xd7\xd0\
\x3f\xbc\x33\xfe\xb4\xf5\x1d\xcc\xcf\x99\x07\x53\x44\xd7\x90\x5e\
\x03\x7e\xa6\xdb\x56\x84\x2e\x71\x03\x71\x4e\xfc\x10\x79\x6e\x46\
\xf1\x72\xb8\xad\x85\x28\x0b\x4b\x46\xf7\xa5\x7f\x95\xf7\x79\xda\
\xc8\xe1\xc0\x6b\x41\x31\x35\xd5\x68\x05\xeb\x74\x54\x22\x2d\x2a\
\x01\xf3\x07\x3d\x50\x67\x7b\x8a\x0c\x33\xb8\x03\x0c\x5b\xf9\x10\
\xf2\xed\x07\x30\x20\x3c\xdd\x17\xaf\xa5\x30\x73\x21\x43\x4b\xd3\
\x55\x0d\x73\xcd\x90\x01\x6f\x06\xd2\x30\x46\x2f\xc9\x7a\xf4\xb4\
\x1c\x0b\xbd\xdf\x5d\xd6\x22\x8c\x4f\x3e\x15\xef\xf5\xbe\xa1\xce\
\x6b\x93\x12\x86\xc9\x83\xbc\x90\x33\x07\xdf\x96\xac\x90\xff\x97\
\xd0\x04\xcf\x95\x19\xfa\x4c\xa8\x73\x55\xc3\x63\x49\x0e\x7a\x1f\
\x7c\xc9\x68\x5e\xdf\x36\x72\x5d\x5c\xc1\x70\x46\x7b\xcb\xf4\xa2\
\x45\x8b\xb1\x62\xc5\x2a\x44\x47\x47\x61\xf2\xe4\xf3\x31\x60\x40\
\xff\x26\xbd\x6f\xe4\xc8\xe1\x22\x96\x1f\x7f\xfc\xb9\x58\xd7\x73\
\xe6\xcc\xc7\xc4\x89\x67\xb6\xf3\xd1\x42\x62\xe6\x84\xee\xf8\xe6\
\x40\x01\x5e\xbb\x76\xad\x1c\x33\xc5\x86\xc2\x47\x4b\x92\xf1\x5d\
\xc2\xb8\x34\x85\xa7\x57\xaf\x5e\xf8\xec\xb3\xcf\xc4\xc5\x4e\xb8\
\x1d\xb7\xa7\x38\x52\xa0\x28\xbe\x14\x44\x8a\x7a\x80\x7f\xff\xfb\
\xdf\xc8\xcb\xcb\x13\x41\x64\xac\x98\xef\xa7\x95\xfd\xdc\x73\xcf\
\x61\xc8\x90\x21\xc1\xcf\x68\x08\x0a\x70\xcf\x9e\x3d\x83\x5b\xdc\
\x72\xcb\x2d\x48\x4d\x4d\xc5\xa5\x97\x5e\x8a\x99\x33\x67\xe2\xb8\
\xe3\x8e\x93\xe7\xe9\xea\xde\xb9\x73\x27\x96\x2e\x5d\x1a\x5c\xb4\
\x7c\xf4\xd1\x47\x18\x31\x62\x84\x58\xde\xe4\x95\x57\x5e\x91\xd7\
\xe7\xcf\x9f\x8f\xb8\xb8\xb8\x90\x9f\xca\x63\x4d\x4f\x4f\xc7\xa2\
\x45\x8b\x90\x9c\xec\x9b\xb8\x16\x38\xce\x9a\xf1\xee\x9a\xc7\x1e\
\x78\x9e\xcf\x0d\x18\x30\xe0\x98\x8b\xe5\x77\x24\x5a\x2c\xd4\x6e\
\x8f\x0d\x06\x4b\x27\x49\x3a\x6a\x0a\x0f\x65\x4c\xc2\xa5\xcc\x42\
\xd6\x99\x90\x61\x8c\x6d\xf4\x1d\x4c\xb4\x3a\x3f\x71\x84\xc4\xd9\
\xba\x99\xea\xae\x86\x99\x6c\xb5\xbc\x7c\x17\xb6\x59\xf3\x51\xe0\
\x38\x28\x16\xf7\xe6\xca\x7d\xd2\x57\xdc\x4e\x6b\x53\x6f\x81\x41\
\x6b\x96\x9b\x5b\x5b\x64\xbf\x72\x1f\x6e\x6b\x11\x1e\xee\x77\x07\
\xce\x89\x1b\x28\xcf\xfd\x5a\xbe\x03\xb7\xaf\x7f\x51\xa6\x80\xdd\
\xdf\x65\x12\x2e\x49\x1c\x89\xff\xdb\xf1\x31\xbe\xcf\xfb\x11\x61\
\xfe\xc4\xb0\xbf\xa4\x8e\x93\x47\x80\xf3\x13\x86\xe3\xfc\xd1\x9f\
\x04\x7f\x8e\xd1\x47\x48\xa3\x13\x8a\x72\xa0\xc5\xea\x4e\x96\x65\
\x31\x86\xcd\x24\x35\xad\x31\xb8\xa8\x11\x0b\x53\x6e\xd6\xa1\xcf\
\x87\xb5\xd3\x70\x56\xe3\x89\xae\xe7\x07\x9f\xdb\xc6\x85\x83\x3f\
\xd1\x4b\x4a\xb6\x34\x06\x5f\xef\x75\x7f\xd9\x58\x8b\x31\xc7\xc3\
\xed\x3f\x2e\xbd\xc6\x54\xe3\x1a\x6b\x1a\xf4\x96\xd4\xa4\x66\xac\
\xfc\xda\x94\x53\xd0\xdb\x18\x0d\x83\x21\x52\x32\xd5\xff\x93\xf3\
\xbd\x58\x96\xa4\xc8\x59\x8e\x24\x73\x27\xdc\x98\x79\xa9\xfc\x3e\
\xb0\x4c\xcb\xa4\x37\x4b\xc9\x52\xc7\xe7\x50\x32\x5d\x43\x58\x3d\
\x0e\xe8\xfc\x89\x83\x4c\x4e\x7c\xb4\xdf\x6d\xf0\x78\x5d\x30\x6b\
\x4d\xd8\x6b\x2d\xc4\xac\xb2\xf5\xe2\x95\x89\xd2\x5b\x90\xef\x38\
\x88\x51\xa9\x63\x70\x6e\xd2\xc9\xb0\x7b\x9c\x70\x38\xcb\x71\xbc\
\xff\x77\xb2\x31\xcf\x53\x6b\x28\x2b\x2b\xc3\x2f\xbf\xfc\x26\x89\
\x62\xe3\xc6\x8d\x6e\xb2\x48\x07\x60\xfc\xfa\x9c\x73\xce\xc2\xe7\
\x9f\x7f\x81\x25\x4b\x96\x61\xf0\xe0\x81\x48\x4b\x6b\xbc\xad\x70\
\x6b\xb0\xd9\x6c\xf2\xee\x8a\x8a\x8a\x66\xed\x85\x62\xb6\x7c\xf9\
\xf2\xe0\xcf\xf4\x0e\x50\x68\xe8\x72\x0e\x88\x51\x62\x62\xa2\x08\
\x1b\xb7\xb5\x5a\xad\xb2\x18\x88\x89\x89\x91\xc5\x41\xcd\xcf\x1b\
\x39\x72\xe4\x61\x42\x1d\xb0\x62\x03\x7c\xfe\xf9\xe7\x62\xf1\xd2\
\x15\xdd\x1c\x17\x7a\x4d\xe8\x9d\xe0\xb1\xd0\xc5\x9c\x91\x91\x21\
\x56\x3a\xfc\x0b\x0a\x2e\x30\x28\xd2\xdf\x7d\xf7\x1d\xbe\xfe\xfa\
\x6b\x11\xf4\x33\xce\x38\x03\x6b\xd6\xac\x11\x77\x36\x2d\xec\xee\
\xdd\xbb\xe3\xd7\x5f\x7f\xc5\x4f\x3f\xfd\x24\xc7\x7f\xf7\xdd\x77\
\xcb\x7e\x02\x70\xd1\xc1\x47\x40\xa4\x51\x43\x88\xf3\xf3\xf3\xf1\
\xd6\x5b\x6f\xe1\xd1\x47\x1f\x3d\x4c\xb4\x79\x4e\x0c\x3d\x70\x51\
\xc0\xf3\x52\x2d\x9f\x5b\x4e\xcb\x03\x7b\x6c\xdc\x10\x96\x86\x34\
\x53\xe3\xa2\x1b\xa0\x7b\x33\x67\xdf\xf6\xf0\x5b\xad\x01\x76\xd9\
\x8a\xf1\xfc\xde\xaf\xf0\x45\xd1\x32\x94\x7b\x9c\x88\xd4\x9b\x11\
\xaf\x8f\x44\x8c\x21\x5c\x84\xf1\xdc\x84\xa1\xa8\x8a\x1f\x84\x7d\
\xd6\x42\x6c\xa9\xce\x87\xdd\x5a\x24\x0d\x49\x60\x8a\x85\xc9\x10\
\x0e\x8d\xb7\xe5\x9d\xad\x68\xad\x6a\x4d\xb1\xf8\x3c\x77\x2e\xae\
\xe9\x74\x3c\xe2\x0d\x11\x18\xb7\xf2\x51\x89\xa3\xeb\xa3\x7b\xe1\
\x9e\xf4\xb3\x24\x6b\xf7\x85\x9e\xd7\xe2\xfb\xec\x59\xd8\x6e\x2b\
\xc4\x30\x67\x37\x69\xe1\xc9\x26\x1f\x0e\x8f\x13\x6e\xaf\x57\x16\
\x0e\x26\x8a\x8d\xff\xe6\xbd\xad\xba\x00\x5e\xc6\x7c\x0c\x51\x18\
\x1a\xe1\x5b\xf4\x6c\xa5\xb8\x52\x90\xb5\x06\x44\x1a\xa3\x10\xeb\
\x17\x24\x27\xb7\xab\x27\xae\x2f\xb1\xe1\xea\x7c\xa4\xc7\x0f\xc6\
\x55\x49\x87\x5c\x91\x92\x07\xa0\xf1\x15\x46\x99\x74\x16\x69\x94\
\x31\x20\x6e\x20\x86\x47\x76\x43\xb5\xa7\xf9\x0d\x4d\x78\x1d\x28\
\x28\x3f\x1c\xd8\x80\x42\x5b\x29\xf4\x3a\xd3\x61\xd7\xd4\x60\x8c\
\x40\xa9\xa3\x12\x23\x57\x3d\x22\xc9\x4f\x65\xd6\x22\x5c\x90\x32\
\x1a\x4f\x77\x9b\x2c\xa2\x72\xce\x86\x7f\xa0\x88\x25\x48\x5a\x3d\
\xb6\x56\x17\x88\x5b\x9b\x4c\x88\x1b\x24\x8f\x00\xd9\xce\x4a\xec\
\xe4\x75\xf0\xe7\x2c\xf4\x0e\xef\x8c\xc7\xbb\x5d\xd4\xec\xe3\x3d\
\xda\xe8\x2d\x9d\x30\xaf\x6c\x03\x4e\x5a\xfd\xa4\xdf\xf5\x5d\x8c\
\xfb\x33\x2f\xc5\x35\xc9\xa7\x60\x5a\xd1\x52\x3c\xba\xe3\x43\x24\
\x84\x25\x63\x63\xd9\x66\x4c\x4a\xf1\xb9\x4f\xd9\x72\xf7\xb1\xae\
\x17\x1c\x76\xe4\xb3\x97\xdc\x81\x0a\x8f\x0d\x51\xb0\x20\xdb\x56\
\x82\xfb\xba\x5e\x84\xab\x13\x47\xd4\x39\xbb\xf6\x6c\x0a\xb3\x63\
\xc7\x2e\xe4\xe7\x17\xa2\x6f\xdf\xde\x18\x3d\xfa\x94\x16\xed\x63\
\xe8\xd0\xc1\x58\xbf\x7e\x23\x96\x2e\x5d\x81\xe5\xcb\x57\xb6\xbb\
\x50\x07\xdc\xc9\x8c\xd1\x36\x07\x5a\xb9\x8c\xb1\xd3\x42\xde\xb3\
\x67\x8f\xc4\x75\x69\x15\x32\x31\xad\x3e\x8b\x97\x02\xf7\xa7\x3f\
\xfd\x49\xfe\x65\x42\x16\x85\x93\x49\x75\x8c\x45\xd7\x87\xdd\x6e\
\xc7\x6f\xbf\xfd\x26\xf1\xe6\x96\x8a\x74\x00\x0a\x23\x05\x7b\xeb\
\xd6\xad\xc1\x92\x38\x5a\xde\x01\x4b\x96\x22\x4d\x6b\x9e\x62\xca\
\x05\x05\x5f\xe3\x76\x3c\x3e\x3e\xf7\xcc\x33\xcf\xc8\xf1\x92\x8b\
\x2f\xbe\xf8\x30\xa1\xa6\x97\x80\x2e\x75\x5e\x93\x8b\x2e\xba\x48\
\x2c\x6c\xee\x8f\xbc\xfd\xf6\xdb\x78\xf2\xc9\x27\x65\xfb\xb3\xcf\
\x3e\x3b\x28\xe6\xef\xbe\xfb\x2e\xee\xb8\xe3\x0e\xcc\x9b\x37\x4f\
\x9e\xe3\xe7\x29\x5a\x46\x2b\x32\x70\x3c\x30\x6b\xf5\x47\x24\x26\
\xb6\xcb\x56\x82\x9b\x36\xbd\x86\xf9\x25\x2b\xd1\x25\x22\x03\x77\
\xa5\x4f\xc4\x39\x89\xc3\x31\x30\x3c\xbd\xde\xf7\xd0\x1a\xfb\xbd\
\x7c\x3b\x66\x96\xac\xc0\x97\x45\xcb\x61\xad\xd8\xe3\x9f\xbe\x15\
\xd5\xe2\xd6\x8b\x46\x43\x34\xb6\x54\xee\xc5\xf0\x95\x8f\x20\x5a\
\x6f\xf1\x0d\x1e\xd1\xea\xf1\x58\xb7\x8b\x83\xf5\xcf\x77\x31\xa1\
\xcd\x10\x81\x5b\xb7\xbc\x83\x5b\xb7\xbd\xef\x77\xd1\xd6\x87\x16\
\x7a\xba\x77\x3d\x0e\x24\x44\x74\xc5\x90\xc8\x2e\xb2\xdd\xe2\x83\
\x5b\xa5\xe4\x8d\x2e\x4d\xee\x97\x56\x14\xa9\x14\x8b\xb8\x9e\xd8\
\x34\xdd\xc1\x6e\x1b\x5e\xef\x71\x55\x83\xe7\xe0\xb6\x16\xe0\x8e\
\x3e\x7f\x11\xa1\x68\x0d\xe7\xac\xff\x07\xbe\x3d\xb8\x0d\xfa\x5a\
\x83\x56\x74\x5a\x83\x08\xf2\xf2\xd2\x35\x92\xc9\xce\xf8\xfc\xca\
\x08\xdf\x79\xd1\xae\x9f\xbf\x7f\x9d\xc4\xe8\xe5\xfc\xcc\xf1\x92\
\x39\x4e\xbe\x61\x89\x5d\xc9\x2a\x44\x18\xa3\x60\xd6\x1a\xf1\x5d\
\xc1\x42\x5c\xe4\x3f\x46\x26\x93\x6d\xaa\xda\x87\xfb\x77\x7e\x0e\
\x83\x56\x0f\xbb\xdb\x0e\x8b\xce\x8c\xfb\x32\x26\x36\x9a\x1f\x71\
\xb4\xd1\xeb\xcc\x28\x72\x1c\x40\x51\x65\xb6\xcf\x13\x52\x95\x8b\
\x1d\x29\xa7\xc9\x51\xed\xb6\x17\x63\x4b\xe1\x12\x20\x3c\x45\x4a\
\xe7\x02\xc9\x95\x4c\x36\x7b\x3e\xfb\x3b\xb8\xbc\x2e\x84\xeb\xcc\
\xd8\x65\x2d\x10\xaf\x4b\xa4\xd6\x77\xae\x4c\x12\x7c\x79\xf7\x34\
\xec\xab\xce\x83\xd5\x63\x47\xa5\xa3\x1c\xa7\xc5\x0f\x91\xca\x88\
\xf6\xec\x25\x50\x58\x58\x24\x37\xf2\x21\x43\x06\xb6\x6a\x3f\x43\
\x86\x0c\xc6\xba\x75\x1b\x24\xd1\x8c\x42\xd5\x5c\xb7\x74\x73\x28\
\x28\x28\x10\x11\xda\xbc\x79\x73\xb3\xde\x97\x95\x75\xa8\xb3\x22\
\x13\xb4\x06\x0f\x1e\x2c\xd6\x61\x5a\x5a\x5a\xbd\x75\xe1\xfc\x8c\
\x0d\x1b\x36\x60\xf2\xe4\xc9\x92\x94\xd5\x14\x98\x81\x4d\x81\xa5\
\x58\x32\x33\x3d\xd4\x22\x20\x90\x60\xc6\xd7\x28\xe6\x8f\x3c\xf2\
\x88\x88\x66\x6d\xb8\xa8\xe8\xd4\xa9\x93\x2c\x2a\x02\x82\x4b\xe1\
\x3e\xe9\x24\xdf\xc2\x9d\xb1\xf1\x1b\x6e\xb8\x41\xae\x37\xe3\xcc\
\x0b\x16\x2c\x90\xec\xf5\x7b\xee\xb9\x07\x13\x26\x4c\x08\x26\xb3\
\xd1\x2a\xef\xdd\xbb\x77\x9d\xfd\x73\x9f\x11\x11\x11\x72\x6e\x0f\
\x3f\xfc\xb0\x9c\x6f\x9f\x3e\x7d\x30\x6b\xd6\x2c\x9c\x7f\xfe\xf9\
\xf2\xef\x3f\xfe\xf1\x8f\x60\xfc\x9d\x19\xf1\x5c\x80\x9c\x7c\xf2\
\xc9\xb2\x7d\xa8\x63\x56\x34\x8d\x0e\x5f\x4c\xf7\xe0\x8e\x8f\xf0\
\xcc\xb6\xf7\xd0\x37\xa6\x2f\x66\x0f\x7b\x02\x67\x35\x21\x26\x0e\
\xff\xcd\x3d\x3d\xf1\x38\x49\x38\xca\xcd\xdc\x8f\xd7\x73\xe6\xe0\
\xc5\xec\x6f\x61\xab\xdc\xdb\xe2\xe9\x5b\x5e\xba\x22\xcd\x9d\x50\
\x62\x2b\x46\x09\x6f\xba\x5e\xb7\x64\x7f\x3f\x98\x71\x8e\xbc\xbe\
\xf0\xe0\x16\xcc\xcf\x9d\x0b\x63\x78\x67\x18\x35\x3a\x71\x67\x6a\
\x34\xf5\x58\xc0\xb4\x4c\xd9\xf0\x83\x16\xbf\x7d\x3f\x4e\x4b\x39\
\x25\x38\x38\x63\xaf\xf4\xdb\x36\x48\x0c\xb2\x66\x26\x78\x29\xb3\
\x85\x43\x35\xb9\x60\xc9\x55\xe5\x5e\x9c\x98\x3a\x16\xe7\xfa\x63\
\xd3\xf5\xa1\xd1\x85\x49\xa2\x5a\x8e\xbd\x4c\x2c\xbc\xe6\x12\x18\
\x3b\x52\xc2\x6e\x70\x21\xac\x7b\x26\xc0\x51\x2c\x74\xfe\x70\x05\
\x45\x3b\xc1\x7f\x0e\x7c\x67\x9a\x29\x0e\xb9\x5c\x70\x40\xe7\x3b\
\x6e\xbf\x05\xf8\x49\xe1\x6f\x98\xb6\xe9\x35\x20\xdc\x9f\xb5\xef\
\xaa\x42\x4a\x97\x73\xe5\x35\xbe\xbf\xd8\x5a\x84\xe7\x37\xbf\xe1\
\x4f\x26\xab\x14\x2f\xc9\x8d\xa9\xa7\xd5\x2b\xd4\xc1\x06\x22\xae\
\xea\x66\x9f\x63\x8b\x61\xef\x6e\x7a\x18\x6a\xb8\xf4\xb9\x28\x94\
\x59\xea\xe6\x04\xf1\x3c\xd0\xb3\x12\x58\x78\x49\x48\x83\x5e\x23\
\x7a\xa5\xdc\xf6\x60\x28\x80\x89\x86\x53\xb7\xbc\x2d\xb5\xf5\xd0\
\x85\xc9\x39\x24\xc6\x0d\x82\x45\xeb\xbb\xde\x89\x86\x28\x7c\xbf\
\x6b\x1a\xd6\x70\x31\xc4\x6f\xa3\x6a\x0f\xf6\xf5\xb9\x45\x84\xba\
\x3d\x17\xce\x79\x79\xf9\xfe\xba\xe8\x2e\xad\xda\x4f\x97\x2e\x9d\
\x91\x94\xd4\x49\xac\x5d\x5a\xe8\x5d\xbb\x66\x34\xe1\x5d\xcd\x87\
\x96\x2d\xe3\xb0\x09\x09\x09\x58\xb7\x6e\x9d\xb8\x68\x53\x52\x52\
\x5a\xb4\x2f\x5a\x98\x74\x0b\xd3\x4a\xbc\xf3\xce\x3b\x43\x6e\xf3\
\xc1\x07\x1f\xc8\xbf\xb4\x2a\x9b\x0a\xad\x75\x0a\x30\xe3\xdd\x6f\
\xbc\xf1\x46\xbd\xef\xa2\xe0\xd2\x8d\x4f\x17\xf5\x13\x4f\x3c\x11\
\x72\x1b\x0a\x2c\x45\x9f\xe5\x62\x3c\x4f\xde\xdf\x68\xf5\xd2\x5d\
\x4f\xb8\xc8\xe0\x23\x00\x3f\x93\xdb\x53\x94\xb9\x18\x69\x8c\x40\
\x26\xf9\x43\x0f\x3d\x24\x82\xcc\xe3\x78\xf9\xe5\x97\xb1\x7e\xfd\
\x7a\xc9\x7e\xa7\xbb\x9d\x6e\x7e\xc6\xb1\x07\x0e\x1c\x28\xa1\x03\
\x5e\x2f\x5a\xf4\xb4\xa6\x95\x50\xb7\x9c\x56\x08\x35\x6b\x81\x6d\
\xfe\x2c\xeb\xf6\xd1\xfb\x11\xbf\xdf\x89\x15\xa5\xab\xf1\x9f\x21\
\x8f\xe0\xda\xd4\xb1\x2d\xde\x0f\xc5\xe1\xd9\xac\xcb\xf1\xe7\xd4\
\xb1\xb8\x61\xcb\xdb\xf8\x29\xef\x27\xb8\x29\xa6\xb5\x6e\xaa\x4d\
\x81\x37\x5e\x76\xc1\x72\x70\xb8\x86\xe3\x00\x3e\xe9\x7b\x8b\xbc\
\x8b\xae\xec\x5b\x68\x41\xdb\x4a\x30\x32\xf9\x64\xcc\x1b\x78\x9f\
\xc4\x0b\x1b\x72\xb5\xd3\x35\x7c\xf3\xb6\xf7\xf0\xfe\xc6\x57\x30\
\x29\xfe\x50\x9d\x38\x13\xe3\xf6\x96\x6d\x92\x2e\x53\x81\xd0\x02\
\x33\x82\x0b\x19\x57\xae\x55\x93\xcd\x95\xb6\x8d\x31\x67\x63\x14\
\x3e\xe9\x7d\x63\xa3\x67\x60\xb4\x24\xe3\x95\x9c\xb9\x78\x35\x67\
\x6e\x93\x62\xa7\x75\xd1\x48\xfe\x12\x8f\xdd\x18\x22\x77\xa0\x21\
\x28\xe0\x85\xcc\x76\xa7\x4b\xdb\x92\x0c\x8d\xc6\x1c\x94\x15\xa9\
\x11\x66\xfc\x3c\x50\x66\xe5\x2c\x0f\x96\x75\x55\x32\x63\x9e\xe5\
\x4d\x3c\x77\x3e\x34\xbe\x47\x43\xa2\xc4\xec\xf3\x78\x43\x14\x12\
\xc3\xd2\x42\xd6\x8d\xb7\x29\x1a\x86\x03\x34\x92\x2b\x51\xed\xb6\
\x36\xb9\xf7\xbc\x5c\x7f\x4e\x9a\xd3\x9b\x64\xd1\x12\xa8\xca\x91\
\x2c\x5a\x9e\x27\x5f\xf7\x7f\xdf\xc5\xf6\x52\xb8\xe1\x86\x01\x5a\
\x5f\xc7\x36\x26\xdd\x05\x92\x26\x35\x06\xe8\xdb\xa6\x1b\x70\xbd\
\x50\x24\x0e\x1e\x2c\x47\x6c\x6c\x2c\xa2\xa3\x5b\xd7\x69\x2c\xe0\
\x6e\x65\x8d\x75\x73\x63\xc7\xcd\x81\xc2\xcc\xa4\x2d\x5a\x90\x74\
\x7d\x53\x68\x5b\x5a\xf3\x4c\x77\x2f\xad\xc2\x77\xde\x79\x47\xe2\
\xcd\xb5\x33\xd6\xb9\x20\xf8\xe4\x93\x4f\xc4\xfd\x7b\xee\xb9\xe7\
\x36\x79\xbf\xbb\x76\xed\x92\xe3\x7c\xe1\x85\x17\xa4\xcc\xab\x3e\
\xb8\x7f\x26\x86\x65\x66\x66\xd6\xeb\x81\xa0\x50\xd3\x35\xfd\xc0\
\x03\x0f\x04\xdd\xd6\xac\x8b\xae\x19\x6b\x0f\x45\x53\xca\xc1\xe0\
\xdf\x8e\x9d\xe7\xe8\xd2\xe7\xf1\xde\x78\xe3\x8d\x92\xcd\xcd\x90\
\xc0\x89\x27\x9e\x28\xdb\x8c\x1d\x3b\x16\x7f\xfe\xf3\x9f\xd1\xaf\
\x5f\x3f\x59\x20\x71\x81\xa3\x68\x3d\x2d\x57\x58\xbd\x19\xb9\xd6\
\x62\xa9\x91\xee\x15\xd6\xb2\x55\x6a\x7d\x50\xda\x52\x16\x5e\x85\
\x83\xce\x0a\xe4\x8c\x9f\x85\xb4\x36\x6a\x64\xc1\xe9\x5b\x3f\x0e\
\x79\x04\x77\x46\x74\xc1\x2b\xdb\xdf\x87\x23\x2c\xd5\x2f\xd6\xcd\
\xb3\xac\xd9\xf0\xc4\x53\x99\x8d\x3f\xf7\xfa\x13\x4e\x8d\xf6\xb9\
\x88\x58\x76\xc5\x78\x33\xf4\x61\x88\xd0\x99\x0e\xeb\x32\xd6\x10\
\xec\xd5\x85\xb0\x54\x5c\x90\x38\x3c\xb8\xd5\xe9\x71\xfd\xf1\x45\
\xf6\x37\xe2\xfa\xce\x34\xfb\x1a\xc9\xb0\x3e\xd6\x21\x5d\xe0\x0e\
\x17\x6a\x17\x8f\xdd\x56\x82\x97\x06\x3f\x18\xac\x9b\x9e\x55\xba\
\x06\x29\xc6\x68\x89\x43\xd7\x46\xab\x35\xc0\x6d\x2b\x93\xf7\xd4\
\xe7\x46\x6f\x18\xdf\xd2\x83\x63\x46\x2d\x86\x48\x78\x3c\xce\x26\
\xbe\xc3\x77\x8d\xba\x9a\x13\x70\x75\x97\xf3\x10\x6f\x8c\xc2\x2d\
\x9b\xdf\x0c\x76\xe9\xba\x3d\xed\x74\x9c\x1c\x95\x85\x30\x7f\xad\
\xbc\xd5\x55\x8d\x13\xfc\xd7\xb6\xbb\x25\x09\xef\x0e\x7d\xd4\xef\
\x85\xf0\x4d\xf9\xa2\x7b\x3c\xce\x10\x51\xef\x67\x32\xfb\xfe\xd2\
\xee\x57\xe0\xf5\x1e\x57\xb7\xe0\x1c\x5b\xc6\x25\x9b\x5e\xc7\xb4\
\xec\x6f\x61\x6a\xf0\xef\xc1\x1b\x4c\xb6\x63\xa6\xfb\x90\x94\xd1\
\x98\xda\xfb\x7a\xdc\xb1\xfd\x43\xec\xb1\x96\xc8\xf3\x69\xc6\x58\
\x7c\x30\xf8\x41\xc9\x6f\xd0\x6b\x75\x92\x67\x11\xa1\xb3\x04\x17\
\xc4\x1c\xec\x32\x39\x61\x18\x2c\xfe\x6b\x55\xed\xaa\xc6\xe0\xc8\
\x4c\xf9\xff\xf6\x4a\x26\xa3\xd5\xe7\x74\xd2\xa3\x64\x6a\x75\x59\
\x15\xdd\xae\xdc\x47\xcd\x2c\xea\xf6\xe0\xc7\x1f\x7f\x14\x8b\x95\
\x25\x41\xb4\x76\x99\xf4\xd5\x52\xa1\x66\xf6\xf7\xad\xb7\xde\x2a\
\x09\x53\xff\xf9\xcf\x7f\x70\xfd\xf5\xd7\x1f\xf6\x3a\x93\xa9\x72\
\x72\x72\x24\x56\xcb\xf8\x6f\x53\x61\x46\x36\x61\x8d\x37\x33\xc8\
\xeb\xc3\xb7\x50\x3a\xd8\xa4\x76\xac\xbc\xbe\x01\x02\xb1\x65\x5a\
\xce\x8c\x5d\x07\xdc\xe8\x0c\x07\x30\xab\x9d\xaf\x6d\xdc\xb8\x51\
\xbc\x24\x81\xef\x82\x9f\xc5\x84\xb3\xae\x5d\xeb\x26\x0a\x07\xee\
\x95\x8c\xc5\x7f\xf1\xc5\x17\x98\x33\x67\x4e\xd0\x93\x00\x7f\xb6\
\x3c\x5d\xed\xb4\xe4\xe9\x6a\xa7\xab\x9c\xfb\x6d\xea\x62\x40\x11\
\x9a\x16\x0b\xb5\x41\x6b\x84\xb3\x2a\x17\x4b\xca\x77\xb4\xb9\x50\
\xf7\x5f\x72\xbb\x74\x64\xb2\x8e\xff\x1a\xed\x51\x25\xfb\x72\x8f\
\xab\xc5\x1d\xf9\xfc\xd6\x7f\xc1\x1d\xd1\x45\x92\xa3\x9a\x9c\x64\
\xa6\xd1\xc9\x00\x92\xcc\xf8\xa1\x78\xa7\xc7\x35\xc1\xa7\x59\x32\
\xc3\x86\x2c\x25\x6c\xce\x51\xb1\x07\x17\x6c\x7c\xc5\x97\x29\xd9\
\x40\x97\x6c\x8a\xce\xb7\x05\xbf\xe0\x86\x1e\x57\x1d\x26\xec\x13\
\xe3\x06\xfb\xac\xa5\xea\x5c\xf4\xf5\xc7\x80\x77\x58\x0b\x7d\x89\
\x6b\x35\x4a\xcf\x24\x93\xbb\x3a\x1f\xd7\x76\xbf\x12\x77\xa6\x8d\
\x97\xe7\xb2\xd9\x2c\x66\xf3\x1b\x58\x34\xf4\xd1\x90\x9f\xca\x49\
\x5e\x27\xc4\x0f\x96\x46\x25\xd5\x6e\x47\xb3\x93\xeb\x78\xcc\xfc\
\xef\xcb\x92\xe5\xc8\xb5\x16\x40\x1f\xaa\xf7\x76\xc0\xd2\x77\xd9\
\xe5\x1c\x02\x35\xe3\x61\x5a\x03\xd6\x8e\x78\x36\x98\x11\x7f\xcb\
\x96\xb7\x83\xed\x57\x8f\x8b\xca\x92\x47\x28\xe2\x0c\xe1\xb8\x2e\
\xf9\xd4\x7a\x8f\x49\x5c\xc9\xb5\x17\x0c\x1e\x27\xe2\x0c\x47\xb6\
\xc7\xb4\x34\xe4\x09\xb9\x70\xf1\xdd\xa4\x1c\x2e\x5f\x4d\x7d\x85\
\xcb\x97\x89\xfc\xe7\x94\xd1\xb8\xd7\xdf\x6d\xad\x77\x78\x3a\x2a\
\xfd\x6e\x7a\xc6\xaa\xaf\x4a\x3e\xb9\xde\xcf\x39\x25\xa6\xb7\x3c\
\x42\x91\x69\x49\xf4\x7d\xa7\x6d\x2c\xd8\x46\xa3\x09\x16\x8b\x59\
\x7a\x7a\xdb\xed\xce\x56\x95\xda\xd0\x85\xea\x70\xd8\x45\x2c\x4c\
\xed\x58\x07\x4f\xd1\x60\x09\xd3\x83\x0f\x3e\x28\x96\x35\x1b\x82\
\xfc\xf2\xcb\x2f\x12\x33\x6d\x09\xb7\xdf\x7e\xbb\x24\x4e\xdd\x77\
\xdf\x7d\x38\xef\xbc\xf3\x24\xeb\x1b\x7e\xf7\x35\x05\x9a\x6e\x65\
\x96\x49\x35\x07\x5a\xbc\x5c\x04\xb0\xf4\xa9\x21\x28\xe8\xcc\xba\
\x6f\xa8\x1e\x9c\xd7\x93\x31\xec\xa7\x9f\x7e\x5a\xac\x59\xfe\x0d\
\x32\x8e\x4c\x77\x34\x33\xbc\x03\xb1\xea\x00\x14\x63\x36\x6e\xa1\
\xfb\x9a\xb1\xea\x9a\x5c\x76\xd9\x65\x92\x0d\x5e\x9b\x9a\x82\xcb\
\x1a\x6e\x0a\x75\xa0\xf9\x0b\x61\xcc\x9a\xd7\x86\x8b\x99\x40\x08\
\x20\x10\x2f\x57\xb4\x9c\x16\x0b\x35\x5d\x7d\x4e\x8d\x06\x9f\x15\
\x2f\xc5\x35\x0d\xdc\x54\x9a\xcb\x6d\x3b\x3e\xc2\xa6\xa2\xa5\x28\
\x98\xb8\xa0\x5d\x44\x3a\xc0\x73\x59\x97\x61\x43\x75\x0e\x66\xe7\
\xcc\x85\x26\xa2\x0b\x34\x4d\xb8\xb1\xf9\x1a\x9e\x14\x49\xb2\xd8\
\xfc\x21\x0f\x1f\xf6\x5a\xf0\xdd\x3a\x0b\x0a\x5d\x55\xf8\x8a\xfd\
\xbd\xed\x8d\x34\x30\x70\x55\xc3\x94\x38\x02\x6f\x77\xbf\x52\x7e\
\xfc\x77\xc1\x22\xf4\x0b\x4b\xc1\x09\x51\x3d\x30\x3c\xba\x27\x56\
\x54\xee\x09\x8e\x0b\xdd\xc8\x04\x2c\x96\xf0\xd4\x4e\x16\xd2\xea\
\x51\xe9\x77\x11\x93\x0b\xd6\xff\x1d\xfb\xab\xf2\x90\x61\x4a\x08\
\xf9\x91\xac\xa9\xbe\xb9\xdf\xed\xb8\x32\x29\x74\x93\x8a\xa6\xb2\
\xcb\x56\x88\xec\x03\x9b\xa0\xaf\xd5\x07\x5c\x3a\x71\xf1\x78\xec\
\x25\x32\xd3\xfa\x9c\xcc\xcb\x70\x4f\x57\x5f\x77\xb3\xb0\x1a\x31\
\xed\x0f\x0b\x16\x41\xa3\x35\xe0\x99\xec\x6f\xd0\xc3\x92\x2c\xd6\
\x63\x4d\x2a\x5d\x95\x18\x13\x37\x08\x23\x23\x33\x51\xec\xac\xc0\
\xc7\x85\xbf\x8a\xdc\xd5\x6e\x9b\xca\x5a\xee\x95\x15\xbb\x25\x23\
\xff\xb0\xe3\x30\x44\x60\x43\xd5\x3e\xfc\x7a\x70\x5b\x9d\x7d\xb7\
\x35\x1a\x59\xa8\x69\xb1\xcb\x5a\x08\x4d\x8d\x7a\x7f\xdf\xe0\x11\
\xad\xaf\x49\x8e\x74\xf1\xeb\x8c\xcb\xfa\xde\x12\xfc\x7b\x09\x64\
\xf3\x33\xab\x3d\xdf\x56\x8c\x42\x67\x25\xde\xce\xfb\x29\x58\x11\
\x10\xc0\xe6\x71\xc8\x22\x80\x8b\x15\x7e\xff\x73\xf6\xaf\xc3\x9a\
\x8a\x9d\x88\xd0\x1f\xee\x51\x60\x03\x1e\xba\xdf\xb9\xf0\xf4\xb4\
\x71\xa7\x36\xa3\xd1\x80\xf8\xf8\x38\xec\xdd\xbb\x0f\xa5\xa5\x25\
\xad\x72\x7f\xb3\xe5\x28\xfb\x82\xd3\xda\xaa\xaf\x6e\xb7\xb5\x50\
\x64\x98\xd8\x45\xb1\x26\xb4\xa4\x05\xd4\x64\xea\x00\x00\x1d\x9a\
\x49\x44\x41\x54\x29\xd4\x4c\xc4\x62\x12\x55\x4b\xa0\xa5\xcc\xfd\
\xb1\xd9\x09\xf7\xc7\x6c\xe6\xc0\xbe\x19\x0f\x7f\xef\xbd\xf7\x24\
\x34\xd0\x54\x68\xe5\x52\xa8\x19\x1f\x6e\xac\x21\x0b\x45\x9a\x34\
\xb4\x7f\x5a\xd2\xb4\x90\x69\xcd\xd2\x8a\xa5\xa1\x40\xcf\xc5\xd0\
\xa1\x43\x65\x21\x40\x97\x38\x9f\xa3\xa0\x53\x70\xb7\x6f\xdf\x2e\
\xf5\xd3\x74\xd5\x27\x25\x25\x05\xad\xe5\xa2\xa2\x22\x8c\x1f\x3f\
\x3e\xe4\x67\x04\xba\x92\xd1\x05\x4e\x31\x86\x3f\xeb\x9b\xb5\xd8\
\xf0\x2f\x5a\x58\x8f\x0d\x7f\xb7\xb4\x61\xc3\x86\x05\x4b\xc5\x14\
\x2d\xa7\xc5\x7f\xcd\x32\x65\xc9\x9c\x80\x79\x85\xbf\x61\x67\xf7\
\x22\x64\x35\xd2\xeb\xbb\x29\xd0\xbd\xfb\xfa\xa6\xd7\xf1\xd6\xc8\
\xe7\x91\x54\x4f\xbb\xd0\x35\x95\x7b\xf1\xe2\x8e\x8f\x31\xa9\xf3\
\x19\x98\x9c\x30\xbc\xde\xbd\xce\x29\xdb\x80\x8f\xb3\xbf\xc1\xd9\
\xa9\xe3\x71\x69\x88\x32\x16\x32\xad\xef\x1d\x48\xda\xbf\x1e\x55\
\xf6\x03\x30\x1b\xa3\x1b\xb5\x2e\x9d\xae\x6a\x44\xeb\xc3\xf1\xe9\
\xe0\x07\x91\xe9\x77\x33\xb3\x0c\xac\x77\x58\x8a\x2c\x5c\x7c\x17\
\xc6\x0d\x9d\xb3\x1a\xbd\x3a\x8d\x94\x9a\x69\xba\x39\x43\xad\x01\
\x98\x48\x56\xe1\xaa\xc2\x3b\xb9\xf3\x31\x35\xfb\x5b\xdc\xd1\xf9\
\x74\x5c\xbf\xe9\x75\x3c\x92\x79\x89\x08\xf5\x59\x71\x03\xb0\xa2\
\x74\x0d\x06\xfb\x33\xa6\x57\x55\xee\xa9\x93\x48\x26\x25\x57\x96\
\x64\x4c\xcf\xf9\x1e\x93\xb5\x06\xb1\x48\x57\x16\xfe\x8a\xd8\x98\
\xde\x87\xcd\xc4\x3e\x0c\x43\xb8\xb4\xeb\x3c\x29\xba\x97\x24\x7a\
\x35\x17\xfe\x81\x6b\x25\x1e\x5b\x2e\x8b\x92\xda\xb0\x7d\x6a\x17\
\x4b\x12\x26\x77\x39\x17\x57\x26\x9f\x22\x13\xc4\x02\xd0\x2d\xfb\
\x7e\xc1\x22\xbc\xb6\xef\x5b\xac\x39\xb8\x1d\x7a\x43\x04\xfe\xb1\
\xe3\x63\x5f\x03\x93\xda\xd7\xbe\x72\x2f\xee\x18\xf2\xa8\x08\xf5\
\x4e\x6b\x21\xee\x5e\xfd\x94\xef\x79\x7d\x2d\x4b\x8e\xc2\x6d\x8a\
\x83\xd9\x1c\x7f\xd8\xf4\x2c\xa3\x25\x09\x5f\x15\x2f\xc3\x57\x1c\
\x78\xd2\xee\x83\x5a\x7c\x93\xc0\x98\xe8\x66\xa2\xc7\xc3\xff\x79\
\x4c\x94\xf3\xb8\xec\x18\x14\xd3\x1b\x57\x24\x9d\x84\x29\xc9\x27\
\x21\xa5\x46\x0f\x01\xfe\xbe\x3f\xbd\x7b\x3a\x3e\x2e\x5c\x02\xbb\
\xc7\x2e\xfb\xb9\x69\xed\x33\x75\x77\xcf\x85\x4f\x58\x67\x59\x5c\
\x51\xa8\x9f\xce\x9e\x89\xc5\xdb\x3f\xf4\xb7\xcb\xad\x75\x1c\x3a\
\x23\xcc\xe6\x24\xb9\x2e\x6d\x3d\x3d\x2b\x35\x35\x45\xea\xa8\xb7\
\x6d\xdb\x21\xed\x42\x5b\xca\x8e\x1d\x3b\x51\x5c\x5c\x82\xfe\xfd\
\xfb\x49\x96\x72\x5b\x43\x51\xfb\xeb\x5f\xff\x1a\xcc\x80\x26\xa3\
\x46\x8d\x92\xff\xa7\x9b\x96\xa2\xc2\xb2\xa1\x96\x70\xc1\x05\x17\
\xc8\x7b\xb9\x0f\xba\xc1\x03\xe5\x55\xac\x81\x6e\x6e\x3c\x96\xae\
\x68\x66\xa5\x33\x5b\x9b\xad\x3d\x19\x63\xae\x2f\x0c\xc7\xcf\xa1\
\xb5\xdc\x90\x50\xd3\x53\x41\x91\x65\xcb\x54\xd6\x2e\x73\x5f\x3c\
\x67\xf6\xf5\x66\xcc\x9a\x6d\x40\x6b\x42\x2f\xc0\x7f\xff\xfb\x5f\
\x11\xf2\x9a\xed\x44\x1b\x22\x20\xba\xb4\xda\x29\xf4\x0c\x01\xf0\
\x9a\xf2\xfd\xb4\xa6\xef\xbf\xff\x7e\x39\x56\xc6\xd3\xb9\x0d\xb3\
\xcc\xf9\xd9\x3c\x37\x45\xcb\x69\xd5\xb2\x9b\x03\x30\x58\xab\x7c\
\xdb\xce\x8f\x31\xbb\xff\xdd\xad\xfe\x1a\xae\xde\xf2\x0e\x22\xa2\
\x7b\xe3\xa6\x94\xd0\x6e\x4e\x96\x27\x9d\xb1\xee\x79\x14\xe5\x2f\
\xc0\x27\x05\x8b\xb0\xf8\x84\xd7\x70\x62\x74\xcf\x3a\xdb\xad\xad\
\xcc\xc6\x59\xcb\xff\x26\x6e\xd7\x4f\x72\x7f\x44\xd2\xa8\xd7\x71\
\x9a\xbf\x8d\x65\x4d\xc2\x75\x46\xfc\xad\xdb\xc5\x78\x70\xc3\x4b\
\x70\x1b\xa3\x1b\x4d\xc7\x71\xdb\x4a\x30\xb9\xfb\x14\x4c\x88\xf5\
\x65\x48\xbe\x90\x3b\x1f\xdf\x15\x2f\xc3\xcf\x83\x1f\x0c\x66\x6c\
\xcb\x76\xf6\x32\x8c\x88\x1c\x8b\xa7\xba\x4d\x6e\xf4\x9c\xdf\x2d\
\xf8\x05\xff\xb7\xe9\x55\xfc\x3b\x7f\x01\x60\x2d\xc6\xea\xaa\x7d\
\xf2\xfc\x85\x09\x23\x50\xec\x77\x85\xd2\xc2\xfa\xad\x7c\xa7\x58\
\xf2\xb5\xd1\x78\x3d\x30\x45\x64\xe0\x8b\xbc\x1f\xf1\x45\xee\x5c\
\x19\xbb\xc9\x86\x21\xf5\xdd\xa2\x8d\xe6\x64\xbc\x9e\x3b\x1f\x6f\
\xe4\xce\x6f\xa3\x64\xb2\xc3\x3f\xc9\x65\x2b\xc2\xdd\x3d\xaf\xc6\
\xed\x69\x67\x04\x9f\xa3\x95\x47\x17\x77\xb5\xc7\x8e\x3f\x6d\x7e\
\x53\x92\xc9\xb4\x51\xfe\x76\xab\xc1\xa6\x25\xb5\x8e\xd8\x14\x17\
\x2c\x57\x92\x96\xb0\x81\xee\x67\xfa\x1a\x89\x34\xfe\xce\x6d\x46\
\x7d\x58\xdd\x11\x97\x5e\xaf\xc4\x74\x9d\x3a\x2d\x8e\x48\x78\xcc\
\x0b\x89\x27\xd7\x5c\x95\x49\xc9\x9c\xab\x0a\xef\xf7\xb9\x05\x83\
\x6b\x2c\x58\xf2\x1c\x65\x48\x35\xc6\xe2\xbf\x45\x4b\xf1\xee\xa6\
\x37\x81\xd8\xbe\x30\x19\xa2\x60\x67\x56\xbf\x08\x79\xed\x85\x85\
\x06\x7a\xc9\xd3\xf0\x9d\x48\x0c\x33\xc1\x4d\x71\xbe\x86\x38\x35\
\xb6\xe1\x41\xe8\xf4\xe1\x32\x44\xb4\xee\x3e\x5a\x4f\x4a\x4a\xb2\
\xc4\x48\x57\xad\x5a\x83\x13\x4f\x3c\xbe\x45\xe3\x2b\x69\xe9\x2d\
\x5f\xbe\x4a\x16\x7c\xbd\x7a\xb1\x1d\x65\xdb\x7f\x39\x4c\x66\xa2\
\x00\x32\x2b\xb9\xe6\x31\x52\x38\xd8\xf4\x83\x59\xdb\x2c\x1b\x0a\
\x74\xef\x6a\x2e\x74\x15\x33\x5e\x1c\xc8\xbe\xbe\xf2\xca\x2b\x25\
\x46\xdd\x5c\x98\x44\x46\xf7\x39\x13\xeb\x98\x94\x85\x7a\x12\xbb\
\x02\x31\x7d\x0a\x6f\x43\x35\xd9\xf0\x27\x94\x51\x18\x03\x82\x4e\
\xaf\x45\xa8\x7d\xd2\x1d\x4d\xcb\x9b\xfb\xe5\xf9\x5c\x73\xcd\x35\
\x8d\x86\x33\x02\xe5\x61\x25\x25\x25\x78\xf1\xc5\x17\xc5\xcd\xcf\
\xba\x72\x7a\x2f\xb8\x68\x61\x99\xd7\xb4\x69\xd3\x24\xd1\x8c\x0d\
\x53\xd8\x21\xed\xde\x7b\xef\x95\xc5\x80\xca\xf8\x6e\x1d\xad\xf3\
\x8f\xb1\x14\x27\x2c\x05\xdf\xef\x9b\x8d\xe9\xc9\xa7\x4a\x82\x4b\
\x4b\xe1\x34\xa9\x5f\x0a\x7f\xc1\x8b\xfd\x42\x97\x3e\x10\xc6\x54\
\x8b\x38\xfe\x31\xbc\xb3\x94\x34\xed\xb2\x15\x85\x14\x6a\xba\x12\
\x41\x17\x35\x2d\x8e\x8a\x3d\x62\x91\x85\x12\x6a\x72\x6b\xda\x58\
\x3c\xb5\x67\x06\xac\xae\x2a\x98\xea\xb1\xe2\x03\xe8\x2d\x89\x98\
\x96\x3b\x0f\xe7\xc5\x0d\x92\x8e\x6c\xf7\xad\x79\x1a\x63\x3b\xfb\
\xda\x20\x1e\x26\x33\x5a\x3d\x2a\xdc\x36\xff\xf3\x5e\xec\x64\xe3\
\x15\x5a\xda\xfe\x44\x28\x26\x41\xb1\xd1\x07\xeb\x65\xd9\x2f\xbb\
\xca\x10\x89\x9d\x15\xbb\x65\x48\xc5\xf7\xc5\x2b\x50\xec\x2c\xc7\
\xa0\x88\x0c\xbc\xe5\x77\x89\x2f\x38\xb0\x09\x07\x2a\xb3\xa1\x35\
\xd7\x75\x67\x07\xe2\x91\x46\x53\x8c\x68\x84\xd3\x55\x05\x4f\x03\
\x16\x24\xeb\x9c\x1d\xd6\xfd\x3e\xb7\x7c\x0b\x6b\x6e\x79\xc3\x70\
\x86\xa7\xc1\x62\x88\x80\x27\x84\x40\xda\xfd\xcf\xcd\x29\x5d\x83\
\x27\xb6\x7f\x80\xa4\x88\x2e\xf8\xaa\xff\x9d\xd0\x69\x74\x48\xb6\
\x24\xa2\x80\x9d\xd2\x9c\x15\xc8\x8c\xcc\xc4\x87\x83\x1f\xf6\xb5\
\x5e\xf5\x2f\x1a\x7c\xfd\xd3\x35\x52\x1f\xdc\xdb\x1f\x9f\x67\xfb\
\xd4\x5f\x8e\xff\x87\xc8\x0f\xaf\x21\xb3\xb8\xf9\x1e\x36\x8e\x61\
\x59\xd7\x6b\xbb\xa6\xc1\x60\x4e\xa8\x95\x03\xee\x15\x77\xb4\xe9\
\x48\xcd\x28\x0f\x7e\xf8\xa1\xdf\x04\x39\x56\x8f\x3d\x78\x6e\xff\
\xc9\xfb\x09\xcf\x6e\xff\x10\x93\x33\x26\xe0\x99\x6e\x17\x23\x82\
\x8b\x8e\xf0\x34\x99\x3c\xe6\xb0\x95\xe0\xb1\x1e\x57\xe3\xec\x84\
\xc1\xb0\xba\x7d\xe3\x40\xb5\xfe\x3c\x06\x87\xd7\x25\x39\x0c\x66\
\xbf\x3b\xfb\xa5\xee\x57\xe0\xae\xce\x67\x4a\x28\x81\xdb\xd1\xb5\
\x1f\x58\x28\x5e\xb5\xe5\x6d\xec\xae\xdc\x0b\x63\x3b\xc4\xe7\x7b\
\xf4\xc8\x42\xb7\x6e\x5d\xb0\x7d\xfb\x0e\x7c\xf5\xd5\x2c\x5c\x75\
\xd5\xe5\xcd\xde\xc7\xcc\x99\xdf\x21\x3b\x7b\x1f\xd2\xd3\x3b\x63\
\xc4\x88\x96\xdf\x2f\xea\x83\x96\xf4\x97\x5f\x7e\x29\xb5\xbe\xb5\
\xe3\xae\x8c\x21\x33\x6b\x9b\x35\xbf\x7c\x2c\x5e\xbc\x58\xb2\xa8\
\x9b\x0b\xfb\x5e\x53\x64\xe9\x42\xa6\x2b\x98\x56\x2c\xad\x6a\x5a\
\xed\xcd\xe1\xb4\xd3\x4e\xc3\x8a\x15\x2b\xc4\xa2\x6e\x28\xa1\x95\
\x9f\x43\x41\xa4\xb5\xda\x50\x53\x94\xce\x9d\x3b\xcb\xbf\x81\x3e\
\xdf\x74\xad\x07\x3a\xb3\xb1\x7c\x8a\xc9\x61\x81\x8c\x74\x5e\x07\
\xf6\x28\x67\xef\xef\x19\x33\x66\xe0\xa9\xa7\x9e\x92\x47\x7d\xf0\
\xf8\xe8\x9e\xa7\xd5\x4e\x2b\x99\xd0\x95\x4e\xf8\x3e\x96\x7a\x51\
\xec\x19\x1b\xe7\x42\x89\x8b\x0b\x2e\x8c\xf8\x7d\x70\x20\x0a\xcb\
\xc5\x02\xed\x5c\x15\xcd\xa7\xd5\x81\x2c\x8e\xa1\x74\x1b\x22\x71\
\xd9\xda\xa9\x18\x7c\xe2\x9b\xe8\xd1\xc2\x3e\xdb\x5f\x15\xaf\x10\
\x77\xdd\xd5\x49\xf5\xff\xb2\x77\x32\x46\xe1\xe9\x1e\xd7\xe2\xd9\
\xcd\x6f\x60\x7c\xd6\x14\x5c\x94\x38\x32\xe4\x76\xa7\xc7\x0d\xc0\
\x55\x3d\xaf\xc3\x8c\xec\x99\x18\xdb\xe3\x4a\x5c\xde\xc0\x3e\xa3\
\x74\x16\x8c\x8d\x1b\x88\x6f\x73\xe6\x4a\xc6\x76\x43\x30\x71\xaa\
\xdc\x59\x8e\xb3\xd7\xbf\xe8\xcb\x38\x76\xdb\x90\x65\x6e\xd8\x75\
\xc7\x79\xc1\x43\x56\x3c\x88\x4a\xc6\x29\x79\xf3\xac\xce\xc3\x05\
\x5d\x26\x61\x46\xbf\x3b\x7c\xf1\x66\x3e\xa4\x87\xb5\x11\x26\x43\
\x24\xec\x15\x3b\x25\x4e\xf9\x50\x8d\xa9\x55\xd3\x8b\x97\xcb\x67\
\xf9\x86\x8d\xd4\x37\xf5\x59\xd3\x24\xcb\x91\xc9\x64\xa3\x12\x86\
\x4a\x32\x19\x63\x9f\xcd\x75\x8e\x32\x91\x8c\x6e\xfb\xe9\xc5\xcb\
\x42\x26\x93\x71\x31\xc1\x8e\x5b\x0b\x0f\x6c\xc6\x77\xf9\x3f\x03\
\xe5\xbb\x71\x61\x9f\x1b\x6b\xbc\xdf\xdf\xc0\xdf\x55\x2d\xf5\xd1\
\x23\xeb\x49\x20\xab\x09\xe7\x2c\x9f\x14\x62\x41\x46\xd8\x05\xed\
\x55\x17\x63\x65\xa1\x63\xf2\x47\x13\x7e\xbf\x1a\x63\x0c\x1e\xde\
\x3d\x5d\x26\x65\xfd\x5e\xb8\x58\xbc\x26\x51\xdd\x2e\xa8\x71\x2d\
\x7c\x2d\x57\xbd\xae\x2a\x9c\x9d\x30\x14\xc3\xea\xb8\xb3\xeb\xc2\
\x2c\xf8\xee\xb5\xba\xf6\x05\x48\x30\x46\x63\x17\x3d\x31\xed\x20\
\xd4\x74\x7b\x4e\x98\x70\x3a\xfe\xf5\xaf\x5c\x69\x58\xf2\xe5\x97\
\x33\x71\xc1\x05\x0d\x4f\xce\xab\x09\xfb\x7b\x73\x38\x07\xad\x42\
\xbe\x2f\x3c\xbc\x6d\x27\x68\x31\xc9\x8b\xf5\xbd\x14\x2c\x36\xdb\
\x08\x35\x73\x99\x89\x4e\x74\xf9\xb2\xd4\x8a\x4d\x3e\xd8\xa3\xbb\
\x5b\xb7\xa6\xb9\xf1\x19\xf3\x66\xad\x33\x27\x4f\xc1\x6f\x59\xd3\
\x0a\xa5\xdb\x97\x03\x2b\xf8\x2f\x1f\xec\xed\xdd\x14\xd8\xdd\xab\
\xa9\x53\xc4\x02\xae\xe3\x40\x02\x5b\x6d\xe8\xde\xe6\x02\x85\x09\
\x62\xb4\x6e\x59\xf6\xc6\x6c\x6e\x5a\xb2\xb4\x9a\xe9\xae\xe7\xe2\
\x84\x42\xcd\x52\x35\xfe\xcc\x26\x2e\x81\x3a\x67\x8a\x2a\xdb\x8a\
\xd6\x97\x68\x47\xf1\x67\x8c\x9e\x22\xcc\xd8\x33\xcf\x3d\xb0\x30\
\xa0\x87\x82\xb5\xd5\x4c\x5c\xa3\xf5\x1c\xc8\x7a\xa7\xc5\x4d\xeb\
\x9a\x65\x6b\x81\x4c\x73\x45\xcb\x68\xf5\x95\x63\x1d\xb2\xc9\x14\
\x0f\x7b\x75\x1e\x4e\x58\x7a\x2f\x96\x1d\xf7\x77\x64\x9a\x9b\x3f\
\xa9\x66\xc1\x81\xcd\x08\xb3\x24\x07\x9b\x63\xd4\xc7\xdf\xba\x9c\
\x2b\x8d\x2e\xe2\x1b\xd8\x8e\x82\xf6\x41\xef\x1b\xf0\xf7\xcc\x8b\
\x91\x60\x6c\xbc\x54\x62\x78\x44\x57\x7c\xdb\x04\x37\x70\xa0\x86\
\xda\xe6\xb2\x62\x3f\xad\x76\x53\x4c\x9d\xc4\x9f\x3a\xef\x09\x0c\
\xde\xe0\x8a\x99\x96\x2e\xad\x41\xbf\xc5\xcb\xe3\x94\xfd\x68\x8d\
\x48\x0b\x4b\x41\x6e\xe5\x5e\xc9\xf6\x9e\x9a\x3d\x0b\x37\xa5\x8e\
\x95\x6b\xc1\x79\xd5\xef\xe7\xfd\x24\x0d\x33\xda\x62\x08\x05\xdd\
\xf7\x53\x92\x46\x1d\xd6\x7f\xbc\x25\x30\xb9\x2d\xfb\xe0\x56\xe8\
\x6b\x8d\xc4\x33\x1a\xa3\xb0\xe2\xc0\x66\x49\x94\xa3\x68\x7b\x22\
\xdd\x87\x0f\xd7\xf0\xa3\x33\xc5\x61\x4b\x75\x1e\xce\x5a\xf5\x18\
\xee\xcd\xbc\x14\x63\x62\x7a\xcb\xd9\x3d\xb1\xe7\x4b\xcc\x2c\xfc\
\x0d\x59\x91\xdd\x44\xc2\x02\x30\x91\xaf\xd4\x5a\x04\x8d\xde\x8c\
\xf7\x7b\xdd\x18\xac\x2f\x9f\x51\xba\xc2\x3f\xbd\xab\xe3\xc1\xc5\
\x93\x5e\x1f\x81\xb9\x85\xbf\xca\x77\xa7\x0f\xef\x0c\x97\x46\x5f\
\x67\x6e\x37\xf3\x1b\xbc\xe6\x44\x5c\xbf\xe9\x4d\x8c\x8c\xed\x8b\
\x7f\xfa\x4b\xca\x78\x7d\xae\xdc\xfc\x4f\x74\xd2\x9b\x11\xc1\x1c\
\x8a\xe0\xe0\x0d\x8d\x58\xd1\x5b\x2b\x76\xe1\xe6\xf4\x89\xb8\xc5\
\x9f\xf1\x9f\xeb\x38\x88\xed\xe5\x3b\xa0\x69\xc7\x6c\xf7\xac\xac\
\x4c\x99\x29\x3d\x63\xc6\x4c\xfc\xfa\xeb\xef\x28\x2f\xaf\xc0\x98\
\x31\xa7\x22\x23\xa3\xfe\xac\xe5\x82\x82\x42\x2c\x58\xb0\x48\x5c\
\xe6\x11\x11\xe1\xd0\xeb\x0d\xc1\xe6\x19\x6d\x01\xad\x46\x4e\x7f\
\x62\x32\x17\x85\x8c\x1d\xb2\x1a\xca\xa2\xa6\xa8\xd0\x35\x4e\xd1\
\x65\x59\x14\x7b\x6d\x33\xa6\x5b\x1f\xec\x87\x4d\x97\x2e\x63\xb1\
\x8c\x7f\x53\x88\x29\x86\x14\x3e\xf8\xbb\x98\xf1\x67\x0a\xf8\x87\
\x1f\x7e\x28\x96\x25\x5f\x6b\xcb\xb1\x9a\x9c\x7c\x45\xea\x73\x7d\
\x53\x78\xd9\x39\x8d\x56\x33\x33\xb1\x69\xd9\xb2\xbe\x99\xe7\x49\
\x97\x35\xdd\xd3\xac\xd3\x66\xb2\xd7\xa9\xa7\x9e\x2a\xc2\x4f\x77\
\x3d\xc5\x93\xd7\x83\xaf\x8f\x19\x33\x46\xb2\xe2\x19\x16\x08\x05\
\xdd\xfd\x4c\xfe\x63\xfc\xb9\x66\x8c\x9f\xde\x05\xba\xc2\x69\x39\
\xd7\x8c\xd3\xb3\xde\x9b\xcd\x50\xf8\x19\xac\xaf\x6e\x6d\x8b\xd4\
\x3f\x32\x6d\xb3\xc4\x61\xf7\xa5\xb0\x54\x94\x56\xed\xc3\xf0\xa5\
\x77\x63\xf6\xd0\xc7\x71\x7c\x13\x87\x75\x04\xd8\x65\x2d\x42\xe7\
\x26\xf6\x0d\x6f\x48\xa4\x6b\xd2\x14\x91\x26\x1c\xb3\x48\x77\x35\
\x93\x9d\xb4\x8d\x98\xa5\xb2\x30\x91\xd8\xa9\x09\x76\x36\x1a\xa9\
\x6f\x3b\xbf\xa8\x6a\xa4\x2c\xc9\x88\x6a\x9d\x49\xde\x67\x67\xc2\
\x91\x5f\xb8\x0a\xf8\x7e\x8f\x03\x5f\x0e\x7e\x08\xe3\x63\xfb\xe2\
\xa2\xf5\x2f\x61\x6e\xfe\x0f\xb0\xea\xcc\xd2\x17\x1c\xfe\x61\x0d\
\x32\xdf\x5a\xeb\x9b\xc8\xd5\xea\x04\x21\xad\xde\x37\x7a\xd3\x4f\
\x3e\x87\x98\x78\x9c\x75\xb2\xa9\x6b\x43\xf7\x6b\x9c\x3e\x42\xfa\
\x50\xfb\x8e\xcb\x19\xb2\x0e\x9b\xb1\x62\x13\xbd\x0d\xfe\x78\xba\
\xbd\x1e\x97\x9e\x41\x67\x41\xa9\xb3\x0a\x73\x4a\xbe\x45\x81\xab\
\x1a\xab\x47\x3e\x2f\xd7\x8a\xfd\xda\x9f\xdd\x3d\x03\xab\x59\x47\
\x2e\xd6\xb6\xd6\x97\xa8\x55\x95\x2b\xd7\xe0\xb5\xe1\x4f\x05\x45\
\xfa\x9f\xf9\x3f\xe3\xdd\x5d\xd3\xa0\x6f\x66\x0f\xf9\x23\x09\xbb\
\xd9\x99\x4c\xbe\xcc\x66\xdf\x77\x17\x7a\x14\xa0\x5e\x1f\x86\xd5\
\x65\xeb\xb1\xba\xe8\x37\x9c\x1c\xdd\x1b\x53\x3a\x1d\x87\x9e\x96\
\x64\x99\xb2\xf6\xea\xda\x67\xfd\xa3\x3f\xe3\x7d\x0b\x3e\x26\x9e\
\x55\xee\x45\xf7\xce\x67\xe1\x4c\x7f\x7f\x74\xee\xf5\xb4\x95\x8f\
\xa0\xcc\x56\x0a\x53\x58\x72\xbb\x26\xd0\x8d\x1a\xe5\xbb\x91\xcf\
\x9e\x3d\x4f\xc6\x5d\xee\xdb\x97\x8b\xde\xbd\x7b\xa2\x5b\xb7\xae\
\xe8\xd4\x29\x41\x2c\x6f\x0a\x31\xb3\xbb\x77\xef\xde\x83\x2d\x5b\
\xb6\x21\x3f\xbf\x40\xac\x3d\xce\xab\xce\xc9\xc9\xc5\x27\x9f\xfc\
\x17\x17\x5e\x38\x49\x06\x73\xb4\x14\x0a\x10\x85\x85\x75\xd2\x74\
\x47\x53\xa4\x68\x55\xd2\x52\x6c\x0c\xba\x92\x29\x56\x14\x57\x26\
\x72\xfd\xfd\xef\x7f\x97\xa6\x26\x1c\x5e\xc1\xf8\x2d\xdd\xd9\x6c\
\x90\x42\x8b\x9b\xb1\x56\x0e\xdd\xa0\xf8\x33\x0e\x4b\x0b\xb2\xa6\
\x25\x4c\x41\xe6\x83\xc2\xcf\xcc\xf2\xc0\x83\xc2\xc5\xec\x69\xba\
\xc4\x99\xd9\xcd\x78\x31\xdd\xc5\xac\x3f\xae\xd9\x47\xbb\x29\x50\
\x0c\x69\xc1\x32\xf6\x1b\x48\xe8\x62\xc9\x16\xe3\xe4\x4c\x18\xa3\
\x5b\x9c\x75\xdc\x5c\x44\xb0\x26\x9a\xbd\xb5\x69\x29\xb3\x9d\x29\
\x2d\x68\x96\x5b\x31\x79\x8d\x25\x5a\x8c\x31\x53\x98\x03\x82\xcc\
\xf3\x62\x02\x1a\xc3\x05\x3c\x56\x5a\xd9\x97\x5f\x7e\xf9\x61\x8d\
\x55\xf8\x7d\xb2\x99\xca\x4b\x2f\xbd\x54\xe7\x68\xe9\xd6\x66\xf9\
\x57\xc0\x92\xe6\xc2\x89\x0f\xbe\x9f\x0b\x06\x2e\x20\x98\xdd\xde\
\xd4\xb6\xaa\x8a\xba\xb4\x9d\x2f\x82\xd6\x66\x78\x3a\xca\xac\xf9\
\x38\x61\xc9\x6d\x98\x31\xf4\x51\x5c\xd0\x40\x56\x76\x6d\xec\x5e\
\xa7\xb4\xdd\x3c\x1a\xf8\xe2\xa2\x6d\x1b\xcb\x0c\x8c\x92\xa4\xf0\
\x07\xc7\x12\x06\x2e\x95\xff\xff\xe9\x46\x5e\x30\xfc\x19\x8c\xf6\
\xd7\xc4\x8e\x4d\x18\x8a\xb9\xdb\xdf\xc3\x4b\x03\xee\x43\x92\xbf\
\xc9\x4b\x9f\xb0\x54\x3c\xdc\xfd\x4a\x3c\xb9\xe1\x65\xb8\xa3\xba\
\xb7\xc9\x51\xd6\x94\x8a\xd3\xd7\x3e\x87\x0d\x07\xb7\xc0\xd2\xc8\
\x44\x33\xab\x35\x0f\x77\x64\x4d\xc1\xcb\xfe\xb8\x79\xab\x8f\x41\
\x5a\x6b\xea\xe1\x8d\x1b\x84\x35\x25\x2b\x70\xc6\x9a\xa7\x31\x77\
\xf0\x83\x18\x12\xd1\x15\x07\x46\x7f\x88\xb3\x56\x3f\x81\x9f\xe9\
\x49\x60\x8d\xbe\xad\x18\x89\xf1\x83\xf1\xcd\x90\x47\x70\xbc\xbf\
\x89\xcb\x8c\x92\x95\xb8\x79\xed\x54\x19\x66\xc2\x89\x5a\xcd\xed\
\x30\xd7\xd1\x10\x6f\x4d\x58\x0a\x6c\x8e\x30\x5c\xb1\xea\x51\xc4\
\x8c\x7c\x01\x13\xe3\x06\xe0\x95\xee\x57\x62\x78\x64\x16\xae\x5a\
\x37\x15\x70\x3b\x64\x2a\x1c\x13\xd4\x6e\x18\x70\x2f\xde\xae\x31\
\x3a\xf3\xd4\x95\x8f\x60\x7b\xd9\x7a\x18\x79\x7d\x8e\xc0\xb5\xa0\
\x58\x53\x0c\x7e\xfc\xf1\x67\x19\xd6\x41\x8b\x99\x13\xb1\x68\x31\
\x07\x84\x9a\x31\xd2\xea\x6a\xab\xb8\xb8\x7b\xf7\xee\x85\xb3\xce\
\x1a\x2f\x62\xfd\xc5\x17\x5f\xc9\x40\x8e\x69\xd3\xbe\x94\xd8\xe7\
\x90\x21\x4d\x73\x15\xd7\x86\x62\xc0\xfe\xd8\x6c\xdc\xc1\xce\x58\
\x6c\x68\xd2\x54\x37\x36\xfc\x4d\x4a\x28\x2e\x53\xa7\x4e\x95\x58\
\x2d\xc5\x97\xa5\x57\x14\x45\x5a\x97\xec\x1a\x46\xd8\x65\x8b\xe3\
\x23\xb9\x4d\x43\x02\x4b\x57\xef\x15\x57\x5c\x21\x16\x3a\x1f\xb4\
\x6c\xf9\xa0\x60\xd2\xb2\x64\xfb\x4f\x5a\xe4\x14\xd4\xe6\x96\x2c\
\xd1\x92\xa5\xa0\x72\x01\x11\xa8\x4b\xa6\x95\x4d\xf1\x84\x3f\x1b\
\x3d\x14\xdc\x96\xae\x6f\xc6\xeb\x79\x9e\x84\xde\x01\x0a\x78\x4d\
\xe8\xf2\xe6\x3c\x6b\x7a\x02\xae\xbb\xee\x3a\x99\xa4\xc5\x38\x76\
\x20\x11\x8d\xc7\x9d\x9b\x9b\x2b\xff\x86\xca\x3c\xaf\xd9\x1b\x9c\
\x0b\x02\xba\xd1\xb9\xe8\xe1\x67\xf3\x7c\x99\x09\xde\x94\x05\x94\
\x22\x34\x6d\x1a\x34\xe0\xcd\xc6\x12\x96\x0a\x6b\xd9\x06\x7c\x55\
\xbc\xbc\x59\x42\xcd\xac\xe0\xdf\x2b\x76\x1d\x95\xaf\xe9\x00\x2d\
\x56\xaf\xab\xed\xb2\x83\xbd\xce\xc3\x26\x5e\x39\xe4\xc6\x19\xd8\
\xb9\x37\x68\xbd\xb2\x93\x58\xa0\x9b\xd8\x83\xbb\xa7\xe3\x99\x75\
\xcf\x21\xbd\xf3\x99\xb8\xbd\xb3\x2f\x63\xfa\x80\xc7\x89\x18\xad\
\x01\x4f\x74\xbd\x00\x1f\xe4\x2f\x14\x57\xb3\x39\xbc\x73\x8b\x87\
\x8a\x84\xa2\x82\xfd\xb0\x9d\x55\xb0\x6a\x1b\x19\x8c\xe0\xac\xf4\
\x59\xd1\x6d\x8c\x96\xd9\xd9\x11\x5d\x30\x2f\x77\x3e\xce\x84\x17\
\x73\x06\x3f\x04\xb3\x46\x87\x05\x43\x1f\xc7\xbd\x51\xdd\xf1\xe2\
\x8e\x4f\x30\x21\x75\x3c\x3e\xea\x7f\x17\xe2\xfc\x3d\xb2\x3f\x2c\
\xfc\x05\x57\xaf\x99\x2a\x39\x05\x52\x56\xd7\x86\xd7\xe3\x68\xc2\
\xc5\x86\xd9\x18\x03\x9b\xc7\x85\xb3\x57\xfc\x0d\xd3\x86\x3c\x82\
\xc9\x89\x23\x70\x65\xd2\x28\xf4\x3e\xe1\x55\x9c\xbf\xe6\x19\x14\
\xda\xf7\xe3\x9d\x61\x8f\xe3\x1a\x7f\x03\x18\x9b\xd7\x8d\xf1\xab\
\x1e\xc3\xe2\x82\x5f\x60\x8c\xca\x94\xeb\x79\xa4\x86\x09\x66\x66\
\x76\x45\x66\xe6\x35\x32\x5f\x9a\x09\x66\x1c\xda\x41\x57\x38\xc5\
\x81\xa2\xc7\x7e\xde\x49\x49\x89\xe2\x2e\x1f\x34\x68\x60\xf0\xef\
\xeb\xb2\xcb\x26\x8b\x35\x4b\x6b\x3c\x20\xd6\x9c\xaa\xd5\x5c\x68\
\xa1\xd2\xc2\xa3\xbb\x99\x96\x71\x4b\x86\x7b\xb0\x64\x89\xf1\xd7\
\xd5\xab\x57\x8b\x38\xc3\x6f\x61\xb2\x64\x89\x6e\x66\x8a\xef\xa4\
\x49\x93\xc4\x32\x6c\x0a\x7c\x0f\x5b\x6b\x32\xe1\x8a\x56\x2a\xc5\
\x8e\xcf\x51\xa4\xe1\x9f\xfd\x4c\xeb\xbd\x2d\x3a\x75\x31\x91\xad\
\xb1\x4c\x6d\xba\x9d\xf9\xf9\x14\x57\x5a\xf5\x0c\x0f\xd4\x6c\x50\
\x52\x13\xba\xa7\x99\x60\x46\xa1\x66\x28\xa0\xe6\x31\xf6\xed\xdb\
\x57\xbe\xb3\xa6\x1c\x37\x3d\x1b\x1c\xe5\x49\xcb\x9d\x59\xf7\x14\
\x68\xba\xe1\xeb\xeb\x91\xae\x68\x9c\x36\x8f\xee\x3b\xdc\x4e\xc0\
\x18\x27\x35\xc4\xb5\x71\x4b\xe6\x73\x68\xab\x79\x62\xfc\x10\x7c\
\x53\xbc\x4c\x4a\x86\x1a\x73\xc3\xb6\x35\x9b\x65\x66\xb3\xa7\x6d\
\x06\x1a\xf0\x17\xd9\xeb\xc1\x80\x08\x5f\xa2\x05\x33\xb8\x0f\x38\
\x2b\x0f\xf5\xb0\xf6\xb8\x90\x58\xcb\x7a\xbd\x6e\xf3\x5b\x78\x6f\
\xfb\x07\x52\x97\xfc\x51\x5f\xdf\xdc\xda\xfd\xae\x4a\x8c\x5c\x7a\
\x2f\xe6\x0d\x7d\x1c\x99\x96\x4e\x98\xde\xff\x4e\x1c\xf7\xeb\x4d\
\xb0\xbb\x2a\x61\xd2\x85\xb5\x59\x8d\xac\x0c\xb5\xd0\x87\xc1\xa4\
\x0f\xdd\x61\x2c\x80\x9d\x03\x27\x42\xc4\x9a\x5b\x0b\xcf\x43\x6a\
\xd0\x23\xbb\x61\x6e\xee\x7c\x64\x54\xe5\xe0\xdd\xbe\xb7\x62\x7c\
\x6c\x7f\xbc\xd0\xfd\x4a\xdc\x94\x3a\x0e\x59\xfe\xce\x77\xd9\xf6\
\x52\x3c\xb5\x7b\x06\xde\xd9\x3d\x5d\x4a\x93\x28\x6a\xc7\xa2\x48\
\x37\xf4\xdd\x89\x65\x6d\x49\x84\xcd\x56\x82\x8b\x97\xdf\x87\xbf\
\x64\x5d\x81\xc7\x33\x2f\xc1\x88\x88\xae\xd8\x3a\xea\x0d\x94\xb9\
\x2a\xd1\xd9\xff\xfb\x33\xb3\x64\x25\x6e\xd9\xfa\x2f\xe4\x1e\xdc\
\x01\x53\x54\x96\x34\xed\x39\x72\x32\x7d\x88\x7e\xfd\xfa\xc8\x83\
\xc2\x41\xa1\xa6\x35\x4d\x81\x88\x8c\x8c\x08\x99\x40\xc4\x8c\x60\
\x66\x8c\xf3\xa6\x4f\xb1\x9e\x3e\xfd\x2b\x79\xef\xf0\xe1\x4d\x1b\
\xb8\x53\x13\x66\x4e\xf3\xd1\x1a\x6a\x0f\xb9\xe0\xf1\xbd\xff\xfe\
\xfb\xad\x2a\x29\xe2\xb9\xd1\xa5\xce\x47\xcd\x8c\xee\xd6\x74\x74\
\x6b\x09\x5c\xc0\xf0\xf3\x59\x53\xce\x64\xbb\xc6\xe0\x82\x82\x43\
\x35\x6a\x43\x57\x7e\x53\xe1\xc2\x80\x09\x67\x8a\xb6\xa3\xcd\x6b\
\x57\xdc\x6e\x2b\x2c\x61\x29\x18\x1b\xdb\x2f\xf8\x5c\x91\xb3\x02\
\x37\x6e\x79\x1b\x19\xbf\xde\x8c\x71\x6b\x9f\xc5\xe3\x7b\xbe\xc4\
\x8c\xe2\xe5\x58\x53\x99\x2d\x93\xa2\x38\x7c\x81\x3d\xa0\x69\xb5\
\xfd\x7a\x70\xfb\x11\xff\x7a\x7f\x60\xf2\x93\xbe\xf5\x75\x7e\x22\
\xf4\xce\x2a\xe9\xc8\xc5\x3a\x68\xb2\xae\x2a\x5b\x4a\xa1\xf4\xfe\
\xb6\x99\x1a\x53\x1c\xbe\xce\x5f\x88\x22\xa7\xcf\x65\x35\x61\xed\
\xb3\x78\x6f\xd7\xe7\x92\xfd\x7d\x55\xcf\x6b\x71\x6a\x74\x0f\x79\
\xfe\x89\xbd\xb3\xb0\x73\xdf\x77\xb8\x9f\xa2\xc4\xe1\xf3\x91\xdd\
\x70\x7f\xef\xeb\xe1\xad\xca\x41\xc7\x96\x26\x5f\x1c\xdd\xee\x71\
\xf8\x1a\xaa\xb8\xaa\x50\xe2\xac\xf0\xbf\x72\x08\xaf\xc4\xbd\xdd\
\x52\x3b\x6c\xb3\x16\xc2\xc9\xfa\x71\x43\x14\xf6\x55\xe5\xe2\xbb\
\xd2\x35\x41\xc1\xc9\xaa\xd1\x9e\x76\xde\xfe\xf5\x78\x67\xcf\x97\
\x3e\xd7\xae\xb3\x1c\x36\x6b\x81\xbc\x9f\xfd\xc2\x8f\x86\x40\x35\
\x0d\x8d\x94\x93\xd9\xdd\x0e\x5f\x5f\x73\x47\x79\xb0\x85\xa8\x16\
\xda\xe0\x55\x61\xc2\x21\xb7\x61\xde\x83\xad\x62\xaf\x6f\xe8\x86\
\x3e\x02\xdf\x94\x2c\xc3\x3e\xbb\xaf\x07\x78\xb8\xd6\x10\x14\x69\
\x0e\x69\x79\x66\xef\xd7\xc8\x95\xe1\x2d\x7a\xd8\xab\x72\x60\x73\
\x94\xc9\x08\xd0\x76\x1f\x42\x52\x0f\x14\xb8\x98\x98\x68\x24\x26\
\x26\x48\x2c\xba\xb1\x2c\xdf\x2b\xaf\xbc\x0c\xc3\x86\x0d\x91\xf8\
\xef\x57\x5f\x7d\x83\xc5\x8b\x97\xb4\x6b\xef\xef\xe6\xd0\x96\x75\
\xbf\x47\xb3\xcf\x35\x93\xeb\xda\xa3\xa9\x8c\xe2\xc8\xd2\xf6\xf9\
\xf2\xf6\xfd\x38\x2f\x65\xb4\xaf\xef\x31\x80\x17\xb3\x67\xe1\xc9\
\xdd\x5f\xa0\x9c\x82\x65\x8c\x41\x5e\x55\x0e\x7e\x64\xec\x91\x09\
\x59\xfa\x08\x18\xf5\xbe\xe4\x2a\x29\x3d\xf2\x38\xf1\x51\xd1\x12\
\x9c\x5a\x4f\x1f\xe3\xf6\xe0\x87\xb2\x8d\xd8\x5d\xb6\x01\xda\x46\
\xca\xac\x1a\x23\x30\x68\x01\xd6\x7c\x5c\xdf\xf7\xb6\xa0\x4b\xfb\
\xd3\xa2\x65\x80\xc7\xe7\x25\xa0\x98\x18\x8d\x31\xd8\x5b\x9d\x8b\
\x13\x56\x3d\x8e\x54\x63\x0c\x16\x17\xfd\x26\x96\x74\xa7\x98\x3e\
\x78\xd7\xdf\x3b\x7c\xbb\xb5\x10\xaf\xec\xfc\x0c\x9a\xc4\x91\xf8\
\x62\xef\x4c\xfc\x98\x3a\x06\x63\x63\xfa\x60\x6a\xd7\x8b\x30\xa7\
\x64\x15\xd6\x16\x2d\x85\x49\x62\x91\xad\x97\x6c\xd6\x78\xa3\x6a\
\x1f\xec\x74\x81\x37\x44\x65\xb6\x34\x2e\x69\x1c\xaf\xb4\xb0\xf4\
\xe5\x4d\x39\xa4\x4c\xe8\xb4\x98\xbe\xf2\x2e\x19\x7f\x49\xab\x9c\
\x03\x46\xd8\xd9\xcc\x10\x8e\x18\x53\x02\x06\xc6\xf6\xc7\xf1\x51\
\x59\x38\x2d\xa6\x1f\xc6\xc4\x0d\x84\xb1\x46\x56\x74\xae\xad\x04\
\x69\xfe\x6b\xc9\xde\xd8\x97\x74\x3a\x1e\x0b\xca\x36\x48\xf9\xd7\
\xd2\xf2\x9d\xd8\x50\x95\x8b\x83\xf6\x52\x38\x18\xbe\x30\x27\xc2\
\xac\x33\x77\x38\x2b\x9b\x39\x10\x5c\x4a\xb0\xc4\xce\x14\x91\x7e\
\xe8\x7a\xf0\x3a\xb9\xad\x70\xca\x7c\x6e\xa3\xfc\x7d\x64\x46\x76\
\xc3\x88\xc8\x4c\x29\x47\x1b\x1b\xdb\x5f\x72\x14\x02\x94\x33\x93\
\x5e\xa3\x93\xa1\x2f\xf1\x86\x08\x2c\x1d\xf6\x24\x36\x56\xe7\xe2\
\x87\xfd\xeb\xb0\xf8\xe0\x36\xac\xaa\xdc\x8b\x5d\xd5\x05\x70\xd9\
\x4b\x25\x2b\xdc\xec\x4f\xb2\xeb\xb8\x8b\x18\x8a\xf5\xa5\xd2\xf4\
\x64\xe1\xc2\xc5\x98\x3d\x7b\xae\x58\xe6\x14\x79\x85\x42\x71\x88\
\x36\x17\x6a\x8d\x3e\x0c\x55\x6e\xab\xd4\xd2\x3e\x93\x3d\x13\x6b\
\x8b\x96\x48\xb7\x25\x73\x64\xe6\x61\x09\x3f\x5c\xf5\x73\x30\xbe\
\xc3\x6d\x87\x83\xd6\x83\xd4\x9c\xc6\xe2\xfd\x9c\x39\x78\xa6\xdb\
\x85\xe8\x64\x38\x32\xa9\xfc\x7f\xa3\xc5\xca\x86\x21\x5a\x5d\xb3\
\xa7\x68\xd5\x84\x6e\xe1\x62\x47\x19\xfa\xa5\x8e\xc3\xbf\x7a\x5e\
\x2b\xaf\x50\x70\x67\xe4\xfe\x20\xad\x56\x83\x37\x4b\xce\xb4\xb6\
\x24\x61\x57\xe5\x1e\xec\xf2\xb8\xa0\x33\xc6\xc0\xed\xac\xc0\xec\
\x41\x7f\xf3\x27\xb5\x01\x93\x37\xbe\x2a\x37\x75\x63\x58\xb2\x08\
\xe8\x94\x8d\xaf\xa0\xe0\x44\x5f\xed\xe6\x9c\x81\xf7\xa3\xff\xef\
\x77\x60\xbf\xe3\x20\x8c\x0d\x4c\x8f\x6a\x2a\xf7\xa4\x4f\xc0\xde\
\x84\xa1\xc1\xa9\x55\xf5\x51\xee\x38\x80\xb3\xe3\x1b\x6f\x50\xe1\
\x70\x56\x20\x23\x3c\x1d\xb3\x06\xdc\x2d\xa1\x8e\x08\x7d\x38\xba\
\xfb\x6b\xeb\x79\x7d\x8b\xed\xfb\x71\x56\xe7\xb3\x70\x46\xfc\x10\
\x0c\x8a\x48\xc7\x80\xf0\xf4\x3a\x59\xfc\xb4\xc4\xe9\xd6\x7d\x37\
\xef\x27\x2c\xd8\xbf\x06\x13\x12\x47\xe2\xd6\xb4\x33\x30\x2e\x6e\
\x80\xcc\x70\x3e\x37\x61\xb8\x3c\x20\x73\xb1\x2b\xa4\x8c\x69\x73\
\x75\x1e\x1e\xdd\xf3\x25\x0a\xad\x45\x30\x34\x72\x2e\x47\x12\x87\
\xad\x08\x67\xa5\x8c\xc6\x8b\x59\x97\xc3\xea\xb6\x21\xd5\xdc\x09\
\xc9\xfe\xef\x6d\x9b\xad\x00\x61\xe6\x38\xdc\x93\x79\x39\x7a\x85\
\xa5\x4a\xbb\xd8\xbe\xe1\x69\x75\x8e\x8e\x3d\xcb\xa7\x15\x2d\xc1\
\x9b\x39\xf3\x64\xb1\xf3\xd7\xf4\x89\xb8\x22\xf9\x14\xa9\x56\xe8\
\x17\x96\x26\x8f\x3b\x3a\x9f\x25\xdb\x6e\xaa\xce\xc5\x96\xaa\x3c\
\xac\xae\xdc\x83\x67\xb2\xbf\x95\xc5\x9c\xa1\x1d\x42\x16\x6d\xc9\
\x94\x29\x97\x48\xfc\x73\xe0\xc0\xfe\x4a\xa4\x15\x8a\x10\xb4\xb9\
\x50\x1b\xcd\x9d\xf0\x4d\xf1\x0a\x7c\x43\xab\x99\x6d\x26\x23\xba\
\x89\x83\xaf\x76\x56\xae\x58\x57\x9a\x5a\x99\x8f\x1a\x2d\xec\x95\
\xd9\x52\x3b\x3a\x77\x60\xe3\xf1\x94\xd6\xf2\xaf\x82\x45\x58\x5e\
\xb0\x08\xfa\x88\x8c\x56\x89\x34\xfc\x42\xcd\x78\xaf\x87\x96\x9d\
\x1f\x0e\xc8\xa0\xc5\x64\x34\xc7\x1d\x96\x89\x2b\x19\xcf\x86\x28\
\x59\xa8\xb8\xed\x07\xf0\xf5\xf0\x67\x30\xcc\xdf\x62\xf2\xd9\x7d\
\xb3\xb1\xb6\x70\x31\x0c\x1c\x5b\xe8\x71\xc2\x60\x49\x46\x61\xd9\
\x26\xdc\xb2\xe3\x63\xbc\xd1\xfd\x0a\x49\xba\xfb\x70\xc0\x7d\x98\
\xb8\xf4\x2e\xb8\xf4\x61\x41\x71\x6f\x32\x5e\x4f\xb0\x35\x27\x79\
\xb2\xdb\x45\x2d\x3a\x5f\xf1\x80\x84\x70\xb1\x7a\x35\x3a\x19\x30\
\x41\x01\xae\x0d\x1b\xb9\xd8\x4a\xd7\xe2\xd6\xfe\x77\x63\x42\xdc\
\xe1\x65\x39\x0c\x05\x2c\x2c\xdb\x8c\xd9\xfb\x57\x63\x4e\xe9\x5a\
\x14\xb0\xb7\x39\xbf\x13\x53\x0c\x66\xe6\xce\xc7\xcc\xfc\x85\x18\
\x10\xdb\x0f\x93\x12\x86\x4a\x49\xd2\xa8\xe8\x9e\x12\x6a\x60\xad\
\x39\xfb\x96\xf3\xf1\xe0\xce\xcf\xe1\xf1\x76\x0c\xd7\x69\x00\xfe\
\xde\xd3\x4d\x5d\xd3\x32\x0e\xf0\x7e\xde\x8f\xd0\x98\x12\xf1\x78\
\x88\xef\x60\x55\xc5\x6e\xcc\xdb\xbf\x0e\xdf\xef\x5f\x8b\x45\x65\
\x1b\x7d\x63\x49\x4d\x31\x72\x4d\xee\xdf\xf8\x2a\x1e\xdb\xfb\x25\
\x26\xc4\x0d\x96\x05\xcb\xe8\x98\x3e\xc8\xf0\x7b\x1d\xfa\x86\xa5\
\xc9\x63\x44\x54\x16\x9e\xda\xfe\x81\x34\x5c\x39\x16\x68\x49\x97\
\x33\x85\xe2\x8f\x42\xdb\xbb\xbe\xbd\x2e\x18\x75\x46\x78\xb4\xf1\
\xfe\x49\x4f\x4d\x77\xbc\xb1\x6f\xb5\x3e\x3c\x0d\xf3\x72\xbe\xc7\
\xd3\x31\xbd\xf1\x60\x46\xd3\x07\xb0\x37\x97\x55\x95\xfb\x70\x23\
\x85\xd4\x14\xe7\x73\x4b\xb7\x24\xae\xe7\x75\x89\x85\x17\x20\xc3\
\xd2\x09\x9b\xf2\x7f\xc6\xc8\x55\x8f\xa0\xa7\x25\x05\x1b\xca\x36\
\xc2\x48\x0b\xa9\xde\x7d\xfb\x86\x28\xd0\x15\x8e\xb8\x81\xd8\x5a\
\x9d\x8f\xbf\x6d\x7a\x1d\x1a\x4b\x92\xaf\x09\x06\xdd\xc8\x5e\x2f\
\xdc\xe1\x9d\xf1\xe6\x8e\x0f\x71\x57\xda\x78\xe9\x48\xf5\x26\x6b\
\x8c\xf5\xe1\xa1\x6b\xbe\xd9\x4e\x52\xa3\x09\x66\x9d\xd3\x45\x8a\
\x9a\xe2\xa5\x35\x48\x4e\x00\xdb\x97\xb2\xd5\xa7\xb6\x19\x09\x74\
\x81\xb6\x96\x8c\xb9\x55\x7b\x1c\x87\x12\xe4\x6a\xa0\xd7\x5b\xc4\
\x43\x32\x65\xf3\x9b\x18\x18\x9e\x01\xa7\x57\x9c\xbe\x58\x5f\xb5\
\x0f\xd3\x0b\x7e\x05\x4c\xb1\x78\x2b\x77\x3e\x8e\x8f\xcc\x94\x41\
\x23\x8b\x0e\x6c\x91\x09\x57\x4b\x2b\x76\xa0\x8a\x49\x7d\x8c\xe3\
\x1a\xa3\xa0\xb7\x24\x41\xef\x1f\x3f\xaa\x09\x0f\x93\x63\x5d\x5f\
\xb6\x09\xeb\x4b\x56\xe0\x29\x43\x24\xba\x45\x74\xc1\x71\x91\x59\
\x38\x3e\xba\x3b\xc6\xc4\xf6\x97\x71\xab\xc5\xd5\xb9\xd0\x87\x68\
\xb3\x7a\x34\xd1\x99\xe2\x65\x90\xcc\x03\xbb\x3e\x47\x14\x7b\x92\
\x7b\xbd\xb0\x79\x9c\xd2\x16\xb6\x84\x61\x07\x8d\x56\x62\xef\x3d\
\xc3\x92\xf1\x73\xd9\x46\x71\x61\xff\x76\x70\x1b\x36\x73\xa1\xc2\
\x3a\x7b\xba\xc4\x4d\xb1\x30\x49\xc7\x32\xaf\xaf\x96\xde\x18\x23\
\x2d\x6f\x67\xe4\x7c\x8f\x19\x39\x73\xe4\x5a\x0d\x8f\xcc\xc4\xa8\
\xa8\x1e\xfe\x71\xa1\xdd\xf1\x6c\xf6\x2c\xa9\xd1\x37\x4a\x6e\x44\
\xc7\x75\x7d\x2b\x14\x8a\xc6\xd1\x98\x16\x5e\xb3\x0c\x40\xfd\x6d\
\x79\x8e\x30\x1a\x5a\xd5\xae\x6a\x78\x6d\xc5\xf8\x7b\xff\xbb\x71\
\x77\xfa\x84\x36\x3f\x80\x95\x55\xd9\x38\x69\xd9\x7d\xb0\x39\x2b\
\x25\x8e\xd7\xd2\x98\xa6\xc3\x56\x8c\x89\x29\x63\xf0\x48\xd7\xf3\
\xe0\xf4\xb8\x71\xc9\xa6\xd7\x91\x6b\x2b\x82\x97\x7d\xbe\xdd\x76\
\x98\xc2\x52\x1a\xcc\xc4\xd5\x48\xa2\x91\x13\x4e\x5b\x09\xae\xef\
\x7a\x11\x96\x55\xec\xc4\xda\xfd\xeb\xea\x1c\x53\x60\xbc\xe6\x90\
\xd8\x81\xd2\xec\xe3\xdb\xdc\x79\x30\x86\x77\x86\xd6\xeb\xa9\xb3\
\x67\x8a\x64\xac\x29\x06\x9f\xf5\xbd\x45\xc4\xfa\xd3\xc2\xdf\xf0\
\xca\xae\xff\xc2\x60\x8e\x0f\x36\x4c\x61\xe6\xbd\x45\x63\x90\x26\
\x26\xcd\x25\x20\xd4\x14\x4e\x76\xdc\xae\x9d\x29\xaf\xf1\x4f\x92\
\x62\xa2\x17\xaf\xc1\xa1\x17\x0c\xd2\xb0\xc3\x48\x6b\x5e\xac\x7a\
\x23\xca\xaa\x0b\x01\xce\x68\xa6\x98\x18\x22\x61\xd0\x5b\x9a\x30\
\x1b\x5c\x23\x35\xf7\x92\xb4\xc7\xf7\xd2\xea\x36\x27\xc0\xa0\x33\
\xf9\xc6\x4d\x36\x67\xb6\xf8\x11\x80\xbf\xcf\x2e\x8f\x4b\xe6\x86\
\x1f\x1a\x94\xe1\x9f\xb4\x65\xe9\x04\xaf\xc7\x29\x62\xca\xeb\xe9\
\xb4\xe6\xf9\xce\x87\x8b\x2c\x43\x84\xcc\x4c\x6f\x48\x64\x35\xfe\
\xf6\xa3\x0e\xfe\xbe\x31\x61\x8f\xff\x6a\x8d\x30\xf8\x27\xd9\x69\
\x02\xdf\x47\x08\xd8\x49\xf0\x82\xf4\x09\xd2\xca\x56\xa1\x50\xb4\
\x2f\xac\x66\xca\xfc\xfd\x6e\xec\xab\xca\x81\xc9\xd8\xfc\xb0\x6e\
\x87\x13\x6a\xf8\x6f\x6e\x14\x51\xce\x34\xbe\x35\x6b\x0a\x5e\xeb\
\x79\x5d\x9b\xed\xfb\xf3\xa2\x25\xb8\xcc\xef\x92\x36\x5b\x52\xa5\
\x73\x54\xcb\x0f\x54\x2b\x59\xb6\x72\x83\x84\x17\x06\x7d\xa4\xbf\
\x3d\x64\xd3\x85\x42\x6e\xe4\x5e\x37\x5c\xb4\xae\x58\x17\x6c\x8a\
\xad\x3b\x09\x4a\x3e\x4a\x0f\x9b\xad\x54\xdc\xe1\xbc\xc1\xd7\xbb\
\x00\xd0\x68\xc4\x3b\xe0\x08\x74\x1f\x63\x0f\x71\x96\x5e\x05\xda\
\x4f\xf2\xda\xf2\x98\x69\x11\xb7\xb8\x1c\xcd\x2b\x83\x24\x0c\x2d\
\x88\xeb\xcb\x42\xcc\xed\x80\xd7\x63\x87\x56\x67\xf6\xf7\x2f\x6f\
\x39\x92\x3d\xce\x73\xa1\x5e\xfb\x87\x54\x1c\x4b\x70\xd1\x63\x73\
\x56\xcb\xf7\x66\x6c\x83\x56\xa8\x32\xe0\x84\xbf\x8f\x1a\xbd\xef\
\x7a\xd4\xe3\xcd\x51\x42\xad\x50\x1c\x39\x5a\x2b\xd4\x1d\xb2\x4b\
\xba\x34\x7e\x30\x44\xc0\xae\x35\xe0\xf5\xed\x1f\x89\x4b\x70\x6a\
\xf7\x29\x98\x18\xdf\xfc\x3a\xcb\x9a\x58\xdd\x0e\x5c\xb6\xf1\x35\
\xb1\x3e\xd8\x45\xad\x55\x22\x8d\x9a\xa3\x14\x2d\x3e\xeb\xa5\x56\
\x07\xb2\xa6\xed\xc2\x23\x6e\x6e\xad\x25\xc9\x37\xa8\x30\x84\x48\
\x23\xd0\x9a\x33\x30\xd6\x30\x84\x25\x5d\xf3\x98\x64\x40\x87\xce\
\x22\xdb\x48\x0c\xbb\x86\x98\x4a\x0b\x54\x26\x17\xb5\x41\x82\x51\
\x4b\xe2\xfa\xfc\x7c\x8e\xe1\x0c\xe5\x36\x6f\x09\x32\x93\xdb\x5f\
\xfa\x76\xac\x89\x34\xfc\xd7\xb0\xb1\x1a\xf6\xe6\x40\x8f\x87\xc9\
\x3f\x28\xe5\x58\xef\xd6\xa6\x50\x28\x7c\x1c\xa1\x19\x80\xcd\xc7\
\x27\x28\x46\x18\xa3\xba\x61\xc3\x81\xcd\x38\x7b\xc5\x43\x18\xb3\
\xea\x71\xcc\x28\x5a\xda\xe0\x18\xc7\x9a\xb8\x6b\xb9\xb4\x4d\x3a\
\x03\x8e\xe3\x14\x26\x8a\x44\x9b\x94\xf0\xf8\x7a\x83\xb3\x15\x26\
\xc5\xa7\x35\xd6\x50\x5b\x57\x5a\xf2\x78\x78\x5c\xba\x66\x5a\xf8\
\x0a\x85\x42\xa1\xe8\x58\x74\xe8\xb9\x63\x92\x48\x44\x97\x66\x58\
\x8a\x94\xec\x2c\x28\x5a\x82\x05\xc5\x4b\xd1\x35\xba\x27\x4e\x89\
\xe9\x8d\x41\x11\x5d\xd0\xcd\xdc\x09\xf1\xfa\x70\xc9\x64\x66\x82\
\x13\x07\x4e\x70\xe2\xd4\xea\x8a\xdd\x58\x58\xb6\x1e\x89\x86\x28\
\x4c\xed\x7e\x05\xc6\xc4\xf6\x13\x6b\x83\x1d\xd3\x96\x16\xfe\x0a\
\x4f\x3b\x88\xa3\x42\xa1\x50\x28\x14\x6d\xcd\x31\x31\x20\x54\xdc\
\xa5\x1a\x1d\x34\xe1\xa9\x92\xb4\xb5\xa7\x32\x1b\x7b\xd8\x91\x49\
\xe3\x8b\xc1\x82\x7d\xaa\xe9\xca\x65\x92\x91\xc4\x5f\x9d\x3e\x2b\
\x52\x1f\x81\x3d\xae\x1d\x18\xbb\x7f\x1d\xa6\x74\x3e\x1d\x6f\xf6\
\xbe\x01\x93\x13\x47\xe2\x01\x7d\x98\x8c\xa7\x3c\x5a\x43\x40\x14\
\x0a\x85\x42\xa1\x68\x2a\xc7\xd4\x24\x6f\xc6\xf3\x18\x73\xd5\x33\
\x56\xcb\x61\x0c\xf0\xca\x68\x4a\x97\xb8\xc2\xf9\x30\xc8\xfc\x5f\
\xba\xa3\x03\xd9\xae\x1a\x53\xac\x24\x4f\x7d\xb2\x77\x26\x16\x94\
\xef\x40\x96\x39\x09\x06\x63\xcc\x51\x6d\xeb\xa7\x50\x1c\x7d\xbc\
\xfe\xb0\x88\x42\xa1\x68\x6f\xf8\xb7\xd6\x9c\x52\xd8\xda\x1c\x53\
\x42\x5d\x1b\x8a\xb1\x4e\xa3\x69\xf0\x86\x13\x48\x9e\xd2\x44\x76\
\x43\x5e\x75\x3e\xf2\x2a\xb3\x61\x66\xad\x6d\x43\x09\x59\x0a\xc5\
\xff\x3a\x5e\x6f\xbd\x03\x72\x14\x0a\x45\xdb\x22\xa5\xb8\xad\xc8\
\x17\x3a\xa6\x85\xba\x39\x88\x60\xfb\x5b\x55\xaa\x6c\x58\xc5\x1f\
\x1e\x43\x14\x7e\x65\xc7\x33\x85\x42\xd1\xee\xac\xab\xca\x41\x6e\
\xe5\x6e\x68\x5b\x38\xfc\xe9\x0f\x23\xd4\x0a\x85\xe2\x10\x46\x53\
\x0c\xf6\x55\xee\x91\xe9\x6d\x4f\x65\x5e\x86\x34\x53\x9c\xba\x3a\
\x0a\x45\x3b\xb0\xa6\x72\x0f\x6e\xdc\xfa\x2f\x5f\x29\x66\x03\xbd\
\x0d\x1a\xa2\x43\x36\x3c\x51\x28\x14\xed\x8b\x74\xa9\x63\x87\xb9\
\xea\x02\xe9\x82\x66\x69\x83\x01\x2f\x0a\x85\xa2\x2e\x56\x7b\xa9\
\xaf\x21\x93\x25\xb1\xde\x3e\x19\x8d\xa1\x2c\x6a\x85\xe2\x0f\x88\
\x34\x87\xe1\x0a\xdf\x92\x24\xb3\xc3\xad\xce\x2a\xf5\x6b\xa0\x50\
\xb4\x03\x5a\x43\x94\x34\xc6\x6a\xa9\x48\x43\x09\xb5\x42\xf1\x47\
\xc7\xdb\x66\x9d\xea\x14\x0a\x45\x7d\xb4\x2e\x75\x59\xd5\x67\x28\
\x14\x0a\x85\x42\xd1\x81\x51\x42\xad\x50\x28\x14\x0a\x45\x07\x46\
\x09\xb5\x42\xa1\x50\x28\x14\x1d\x18\x25\xd4\x0a\x85\x42\xa1\x50\
\x74\x60\xb4\x1e\xaf\xa7\x9b\x1a\x4f\xa1\x50\x28\x14\x0a\x45\xc7\
\x44\xab\xd5\x68\x13\xd4\x18\x44\x85\x42\xa1\x50\x28\x3a\x26\xca\
\xf5\xad\x50\x28\x14\x0a\x45\x07\x46\x09\xb5\x42\xa1\x50\x28\x14\
\x1d\x18\x25\xd4\x0a\x85\x42\xa1\x50\x74\x60\x94\x50\x2b\x14\x0a\
\x85\x42\xd1\x81\x51\x42\xad\x50\x28\x14\x0a\x45\x07\x46\x09\xb5\
\x42\xa1\x50\x28\x14\x1d\x15\x00\xff\x0f\x34\x40\x01\x04\xfd\x27\
\xf6\x00\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
# Auto-generated Qt resource tables (pyrcc-style). The byte strings encode
# the resource tree: UTF-16 name data for the ":/qrcode" prefix and the
# embedded "wechat_search.png" file.
qt_resource_name = b"\
\x00\x06\
\x07\x88\xa5\xa5\
\x00\x71\
\x00\x72\x00\x63\x00\x6f\x00\x64\x00\x65\
\x00\x11\
\x09\xa0\x87\xe7\
\x00\x77\
\x00\x65\x00\x63\x00\x68\x00\x61\x00\x74\x00\x5f\x00\x73\x00\x65\x00\x61\x00\x72\x00\x63\x00\x68\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
"
# Resource-tree metadata in the rcc v1 format (Qt < 5.8).
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
# Resource-tree metadata in the rcc v2 format (Qt >= 5.8; adds extra
# per-entry data words compared with v1).
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x73\x9f\xc0\x48\x4f\
"
# Select the struct format matching the Qt version available at runtime.
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2

def qInitResources():
    """Register the embedded resource data with Qt's resource system."""
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)

def qCleanupResources():
    """Unregister the embedded resource data from Qt's resource system."""
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)

# Resources register themselves on import, matching pyrcc-generated modules.
qInitResources()
|
# Basic OOP
class Student:
    """Represent a student by full name, sex and GPA.

    Attributes:
        first_name: Given name.
        last_name: Family name.
        sex: Sex of the student.
        gpa: Grade point average; any value with a str() form is accepted
            (the original required a string because it used `+` concatenation).
    """

    def __init__(self, first_name, last_name, sex, gpa):
        self.first_name = first_name
        self.last_name = last_name
        self.sex = sex
        self.gpa = gpa

    def get_name(self):
        """Return the full name as a labelled string."""
        return f'Full Name: {self.first_name} {self.last_name}'

    def get_sex(self):
        """Return the sex as a labelled string."""
        return f'Sex: {self.sex}'

    def get_gpa(self):
        """Return the GPA as a labelled string (str-formats numeric GPAs too)."""
        return f'GPA: {self.gpa}'

    def get_all(self):
        """Return name, sex and GPA, one per line."""
        # Call through self (not Student.method(self)) so subclasses that
        # override a getter are honoured.
        return '\n'.join((self.get_name(), self.get_sex(), self.get_gpa()))
def main():
    """Demo: build one Student and print a type check plus its details."""
    pupil = Student('David', 'Diaz', 'Male', '3')
    print(isinstance(pupil, Student))
    print(pupil.get_all())
    print(pupil.sex)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
rsc.py
======
Context decorator for producing figures that are ready to publish
in Royal Society of Chemistry journals.
Reference:
RSC Author Guidelines
The EPS image format is used because of its high quality and because it
works in actual physical size rather than pixel units.
"""
from .general import MPLdecorator
from .colors import default_color_cycler
from .layout import GOLDEN_RATIO
from .styles import latex_preamble
__all__ = ['rsc_decorator', ]
# Constants from RSC Author Guidelines.
width_single_column = 3.26  # 8.3 cm
width_double_column = 6.73  # 17.1 cm

# Default aspect ratio for a single-plot figure.
# A little higher than the golden ratio: from 0.618 to about 0.68.
height_width_ratio = GOLDEN_RATIO * 1.1  # = height / width

# Figure dimensions in inches (single-column width by default).
_width = width_single_column
_height = width_single_column * height_width_ratio

# Matplotlib rcParams implementing the RSC figure style.
_params = {'font.family': 'sans-serif',
           'font.serif': ['Times', 'Computer Modern Roman'],
           'font.sans-serif': ['Helvetica', 'Arial',
                               'Computer Modern Sans serif'],
           'font.size': 7,
           'text.usetex': True,
           # To force LaTeX to use Helvetica fonts.
           'text.latex.preamble': latex_preamble,
           'axes.prop_cycle': default_color_cycler,
           'axes.labelsize': 7,
           'axes.linewidth': 1,
           'figure.figsize': (_width, _height),
           'figure.subplot.left': 0.125,
           'figure.subplot.right': 0.95,
           'figure.subplot.bottom': 0.1,
           'figure.subplot.top': 0.95,
           'savefig.dpi': 300,
           'savefig.format': 'eps',
           # 'savefig.bbox': 'tight',
           # this will crop white spaces around images, which would make
           # width/height no longer match the specified figure size.
           'legend.fontsize': 7,
           'legend.frameon': False,
           'legend.numpoints': 1,
           'legend.handlelength': 2,
           'legend.scatterpoints': 1,
           'legend.labelspacing': 0.5,
           'legend.markerscale': 0.9,
           'legend.handletextpad': 0.5,  # pad between handle and text
           'legend.borderaxespad': 0.5,  # pad between legend and axes
           'legend.borderpad': 0.5,  # pad between legend and legend content
           'legend.columnspacing': 1,  # pad between each legend column
           # 'text.fontsize' : 7,  # use font.size for Matplotlib 1.4.2+
           'xtick.labelsize': 7,
           'ytick.labelsize': 7,
           'lines.linewidth': 1,
           'lines.markersize': 4,
           # 'lines.markeredgewidth' : 0,
           # 0 will make line-type markers, such as '+', 'x', invisible
           # Revert some properties to mpl v1 which is more suitable for publishing
           'axes.autolimit_mode': 'round_numbers',
           'axes.xmargin': 0,
           'axes.ymargin': 0,
           'xtick.direction': 'in',
           'xtick.top': True,
           'ytick.direction' : 'in',
           'ytick.right': True,
           }

# Decorator that applies the RSC style to a plotting function.
rsc_decorator = MPLdecorator(_params)
|
'''
Author: Mahnoor Anjum
Description:
Intersection Test
Reference:
www.geeksforgeeks.org
For a given point triplet (A, B, C),
we can determine the orientation by
inspecting the sign of the cross product AB × BC.
If AB × BC > 0, counterclockwise.
If AB × BC < 0, clockwise.
Otherwise, if AB × BC = 0, collinear.
'''
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
def orientation(a, b, c):
    """Classify the turn formed by the ordered point triplet (a, b, c).

    Returns 1 for an anticlockwise turn, 2 for clockwise, and 0 when the
    three points are collinear, based on the sign of the cross product of
    AB and BC.
    """
    cross = (float(b[1] - a[1]) * (c[0] - b[0])) - \
            (float(b[0] - a[0]) * (c[1] - b[1]))
    if cross > 0:
        return 1  # anticlockwise
    if cross < 0:
        return 2  # clockwise
    return 0  # collinear
def onSegment(a, b, p):
    """Return True if point b lies within the axis-aligned bounding box of a-p.

    Callers invoke this only for triplets already known to be collinear,
    where the bounding-box test is equivalent to b lying on segment a-p.
    """
    inside_x = min(a[0], p[0]) <= b[0] <= max(a[0], p[0])
    inside_y = min(a[1], p[1]) <= b[1] <= max(a[1], p[1])
    return inside_x and inside_y
def check_intersect(a, b, c, d):
    """Return 1 if segment ab intersects segment cd, else 0.

    Standard orientation test: the segments properly cross when (a, b, c)
    and (a, b, d) have different orientations AND (c, d, a) and (c, d, b)
    do. Collinear special cases fall back to bounding-box checks.
    """
    o1 = orientation(a, b, c)
    o2 = orientation(a, b, d)
    o3 = orientation(c, d, a)
    o4 = orientation(c, d, b)

    # General case: endpoints of each segment straddle the other segment.
    if o1 != o2 and o3 != o4:
        return 1

    # Collinear special cases: an endpoint of one segment lies on the other.
    if o1 == 0 and onSegment(a, c, b):
        return 1  # c lies on segment ab
    if o2 == 0 and onSegment(a, d, b):
        return 1  # d lies on segment ab
    if o3 == 0 and onSegment(c, a, d):
        return 1  # a lies on segment cd
    if o4 == 0 and onSegment(c, b, d):
        return 1  # b lies on segment cd

    # No intersection.
    return 0
# Interactive driver: show the image, let the user click four points, then
# test whether segment (p0, p1) intersects segment (p2, p3).
mat = mpimg.imread('data/img.png')
fig, ax = plt.subplots(1,1,figsize=(5,5))
ax.imshow(mat)
# Block until four mouse clicks have been collected on the figure.
x = fig.ginput(4)
plt.close('all')
points = []
for i in x:
    points.append(i)
# x, y = [], []
# for i in points:
#     x.append(i[0])
#     y.append(i[1])
# 1 if the two clicked segments intersect, 0 otherwise.
intersect = check_intersect(points[0], points[1],\
points[2], points[3])
|
import os
import torch
def get_step_from_checkpoint(fname):
    """Extract the training step from a checkpoint file name.

    Expects names whose first "_"-separated field is a one-character
    prefix followed by the step number, e.g. "s1200_model.pth" -> 1200.
    """
    first_field = fname.split("_")[0]
    return int(first_field[1:])
def load_model_from_cpd(Model, chekpoint_dir):
    """Load the newest checkpoint found in `chekpoint_dir` into `Model`.

    Scans the directory for checkpoint files, picks the one with the
    highest step (as parsed by get_step_from_checkpoint), loads its state
    dict into `Model` in place, and returns that step. Returns 1 when the
    directory does not exist or no checkpoint beyond step 1 is found.
    """
    if not os.path.exists(chekpoint_dir):
        return 1

    newest_step, newest_model_fname = 1, ""
    for fname in os.listdir(chekpoint_dir):
        step = get_step_from_checkpoint(fname)
        if step > newest_step:
            newest_step, newest_model_fname = step, fname

    if not newest_model_fname:
        return 1

    print("resuming at step", newest_step)
    checkpoint_path = os.path.join(chekpoint_dir, newest_model_fname)
    # map_location='cpu' so GPU-trained checkpoints load on CPU-only hosts.
    Model.load_state_dict(torch.load(checkpoint_path, map_location='cpu'))
    return newest_step
if __name__ == '__main__':
    # Smoke test against a local checkpoint directory. NOTE(review): passing
    # [] as the model only exercises the "no checkpoint found" path — a list
    # has no load_state_dict, so finding a checkpoint would raise.
    cpd="/home/yufei/HUW4/models/test_A/newest_model_saved/densenet169_nonlocal_dot_amsoftmax_dropout"
    load_model_from_cpd([],cpd)
|
"""
Exercise: using strides to create fake dimensions
==================================================
Use strides to create fake dimensions.
"""
import numpy as np
from numpy.lib.stride_tricks import as_strided
# Base array of 1-byte elements; the exercise builds strided views onto it.
x = np.array([1, 2, 3, 4], dtype=np.int8)
#
# Mini-exercise:
#
# 1. How to create a new array that shares the data, but looks like
#
# array([[1, 2, 3, 4],
# [1, 2, 3, 4],
# [1, 2, 3, 4]], dtype=int8)
#
|
import unittest
from aoc2020.day_2 import *
class ParseRowTestCase(unittest.TestCase):
    """Exercise parse_row on sample policy rows."""

    def setUp(self):
        self.rows = [
            "1-3 a: abcde",
            "1-3 b: cdefg",
            "2-9 c: ccccccccc",
        ]

    def test_parse_row(self):
        expected = [
            (1, 3, "a", "abcde"),
            (1, 3, "b", "cdefg"),
            (2, 9, "c", "ccccccccc"),
        ]
        self.assertEqual([parse_row(row) for row in self.rows], expected)
class ValidatePasswordTestCase(unittest.TestCase):
    """Exercise validate_password on pre-parsed sample rows."""

    def setUp(self):
        self.parsed_rows = [
            (1, 3, "a", "abcde"),
            (1, 3, "b", "cdefg"),
            (2, 9, "c", "ccccccccc"),
        ]

    def test_validate_password(self):
        outcomes = [validate_password(*row) for row in self.parsed_rows]
        self.assertEqual(outcomes, [True, False, True])
class NewValidatePasswwordTestCase(unittest.TestCase):
    """Exercise new_validate_password on pre-parsed sample rows.

    NOTE(review): the class name carries a "Passwword" typo; it is kept
    unchanged so test selection by name keeps working.
    """

    def setUp(self):
        self.parsed_rows = [
            (1, 3, "a", "abcde"),
            (1, 3, "b", "cdefg"),
            (2, 9, "c", "ccccccccc"),
        ]

    def test_new_validate_password(self):
        outcomes = [new_validate_password(*row) for row in self.parsed_rows]
        self.assertEqual(outcomes, [True, False, False])
class MainTestCase(unittest.TestCase):
    """Exercise main() end to end on the sample "-" input stream."""

    def setUp(self):
        self.args = {
            "-": [
                "1-3 a: abcde",
                "1-3 b: cdefg",
                "2-9 c: ccccccccc",
            ]
        }

    def test_main_old(self):
        cli_args = {**self.args, "--new": False}
        self.assertEqual(main(cli_args), 2)
|
"""Provide common Renault fixtures."""
import contextlib
from types import MappingProxyType
from typing import Any
from unittest.mock import patch
import pytest
from renault_api.kamereon import exceptions, schemas
from renault_api.renault_account import RenaultAccount
from homeassistant.components.renault.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER, ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from .const import MOCK_ACCOUNT_ID, MOCK_CONFIG, MOCK_VEHICLES
from tests.common import MockConfigEntry, load_fixture
@pytest.fixture(name="vehicle_type", params=MOCK_VEHICLES.keys())
def get_vehicle_type(request: pytest.FixtureRequest) -> str:
    """Parametrize tests over every mocked vehicle type."""
    vehicle_type: str = request.param
    return vehicle_type
@pytest.fixture(name="config_entry")
def get_config_entry(hass: HomeAssistant) -> ConfigEntry:
    """Create a mock Renault config entry and register it on hass."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        source=SOURCE_USER,
        data=MOCK_CONFIG,
        unique_id=MOCK_ACCOUNT_ID,
        options={},
        entry_id="123456",
    )
    entry.add_to_hass(hass)
    return entry
@pytest.fixture(name="patch_renault_account")
async def patch_renault_account(hass: HomeAssistant) -> RenaultAccount:
    """Create a Renault account and patch the client to return it.

    Login is patched to a no-op and get_api_account always yields this
    local account, so no network authentication happens in tests.
    """
    renault_account = RenaultAccount(
        MOCK_ACCOUNT_ID,
        websession=aiohttp_client.async_get_clientsession(hass),
    )
    with patch("renault_api.renault_session.RenaultSession.login"), patch(
        "renault_api.renault_client.RenaultClient.get_api_account",
        return_value=renault_account,
    ):
        yield renault_account
@pytest.fixture(name="patch_get_vehicles")
def patch_get_vehicles(vehicle_type: str):
    """Serve the canned vehicle list for `vehicle_type`.

    get_vehicles returns the parsed per-type fixture file; endpoint
    support comes from the mock's "endpoints_available" sequence, and
    every endpoint is treated as covered by a contract.
    """
    with patch(
        "renault_api.renault_account.RenaultAccount.get_vehicles",
        return_value=(
            schemas.KamereonVehiclesResponseSchema.loads(
                load_fixture(f"renault/vehicle_{vehicle_type}.json")
            )
        ),
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.supports_endpoint",
        side_effect=MOCK_VEHICLES[vehicle_type]["endpoints_available"],
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.has_contract_for_endpoint",
        return_value=True,
    ):
        yield
def _get_fixtures(vehicle_type: str) -> dict:
    """Create a map of endpoint name to mocked endpoint data for a vehicle.

    Each endpoint listed for the vehicle in MOCK_VEHICLES is loaded from
    its fixture file; any other endpoint falls back to the empty
    "renault/no_data.json" fixture.

    Note: the original annotated the return as MappingProxyType, but a
    plain dict is what is built and returned, so the annotation is fixed.
    """
    mock_vehicle = MOCK_VEHICLES.get(vehicle_type, {"endpoints": {}})
    endpoints = mock_vehicle["endpoints"]

    def _load(endpoint: str, schema):
        """Parse the endpoint's fixture (or no_data.json) into `schema` attributes."""
        fixture = (
            f"renault/{endpoints[endpoint]}"
            if endpoint in endpoints
            else "renault/no_data.json"
        )
        return schemas.KamereonVehicleDataResponseSchema.loads(
            load_fixture(fixture)
        ).get_attributes(schema)

    return {
        "battery_status": _load(
            "battery_status", schemas.KamereonVehicleBatteryStatusDataSchema
        ),
        "charge_mode": _load(
            "charge_mode", schemas.KamereonVehicleChargeModeDataSchema
        ),
        "cockpit": _load("cockpit", schemas.KamereonVehicleCockpitDataSchema),
        "hvac_status": _load(
            "hvac_status", schemas.KamereonVehicleHvacStatusDataSchema
        ),
        "location": _load("location", schemas.KamereonVehicleLocationDataSchema),
    }
@pytest.fixture(name="fixtures_with_data")
def patch_fixtures_with_data(vehicle_type: str):
    """Patch all five vehicle-data getters to return this type's fixture data."""
    mock_fixtures = _get_fixtures(vehicle_type)
    with patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_battery_status",
        return_value=mock_fixtures["battery_status"],
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_charge_mode",
        return_value=mock_fixtures["charge_mode"],
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_cockpit",
        return_value=mock_fixtures["cockpit"],
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_hvac_status",
        return_value=mock_fixtures["hvac_status"],
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_location",
        return_value=mock_fixtures["location"],
    ):
        yield
@pytest.fixture(name="fixtures_with_no_data")
def patch_fixtures_with_no_data():
    """Patch all five vehicle-data getters to return empty (no-data) fixtures.

    Uses the "" vehicle type, which _get_fixtures resolves to the
    no_data.json fixture for every endpoint.
    """
    mock_fixtures = _get_fixtures("")
    with patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_battery_status",
        return_value=mock_fixtures["battery_status"],
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_charge_mode",
        return_value=mock_fixtures["charge_mode"],
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_cockpit",
        return_value=mock_fixtures["cockpit"],
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_hvac_status",
        return_value=mock_fixtures["hvac_status"],
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_location",
        return_value=mock_fixtures["location"],
    ):
        yield
@contextlib.contextmanager
def _patch_fixtures_with_side_effect(side_effect: Any):
    """Patch all five vehicle-data getters with the same side effect.

    Passing an exception instance makes every endpoint call raise it —
    used by the error-scenario fixtures below.
    """
    with patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_battery_status",
        side_effect=side_effect,
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_charge_mode",
        side_effect=side_effect,
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_cockpit",
        side_effect=side_effect,
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_hvac_status",
        side_effect=side_effect,
    ), patch(
        "renault_api.renault_vehicle.RenaultVehicle.get_location",
        side_effect=side_effect,
    ):
        yield
@pytest.fixture(name="fixtures_with_access_denied_exception")
def patch_fixtures_with_access_denied_exception():
    """Make every vehicle-data endpoint raise AccessDeniedException."""
    with _patch_fixtures_with_side_effect(
        exceptions.AccessDeniedException(
            "err.func.403",
            "Access is denied for this resource",
        )
    ):
        yield
@pytest.fixture(name="fixtures_with_invalid_upstream_exception")
def patch_fixtures_with_invalid_upstream_exception():
    """Make every vehicle-data endpoint raise InvalidUpstreamException."""
    with _patch_fixtures_with_side_effect(
        exceptions.InvalidUpstreamException(
            "err.tech.500",
            "Invalid response from the upstream server (The request sent to the GDC is erroneous) ; 502 Bad Gateway",
        )
    ):
        yield
@pytest.fixture(name="fixtures_with_not_supported_exception")
def patch_fixtures_with_not_supported_exception():
    """Make every vehicle-data endpoint raise NotSupportedException."""
    with _patch_fixtures_with_side_effect(
        exceptions.NotSupportedException(
            "err.tech.501",
            "This feature is not technically supported by this gateway",
        )
    ):
        yield
|
# coding=utf-8
from __future__ import print_function
import sys
import traceback
import os
def decode(examples, model, args, verbose=False, **kwargs):
    """Parse each example with the model and return per-example hypothesis lists.

    Args:
        examples: iterable of examples exposing `src_sent` and `idx`
            (and `table`/`meta` for wikisql).
        model: parser exposing `parse()`, `training`, `eval()`, `train()`
            and a `transition_system`.
        args: configuration namespace; `lang` and `beam_size` are read here
            (`answer_prune` too, for wikisql).
        verbose: print progress and conversion errors when True.
        **kwargs: for wikisql with answer pruning, must contain
            'execution_engine'.

    Returns:
        A list parallel to `examples`; each element holds the hypotheses
        whose trees could be converted to surface code (and, for wikisql
        with pruning, whose query executes to a non-empty answer).
    """
    if verbose:
        print('evaluating %d examples' % len(examples))

    # Restore training mode afterwards if the model was mid-training.
    was_training = model.training
    model.eval()

    if args.lang == 'wikisql':
        from asdl.lang.sql.lib.dbengine import DBEngine
        from asdl.lang.sql.utils import detokenize_query

    decode_results = []
    count = 0
    for example in examples:
        if args.lang == 'wikisql':
            hyps = model.parse(example.src_sent, context=example.table, beam_size=args.beam_size)
        else:
            hyps = model.parse(example.src_sent, context=None, beam_size=args.beam_size)

        decoded_hyps = []
        for hyp_id, hyp in enumerate(hyps):
            try:
                hyp.code = model.transition_system.ast_to_surface_code(hyp.tree)

                if args.lang == 'wikisql' and args.answer_prune:
                    # try execute the code, if fails, skip this hypothesis!
                    # if the execution returns null, also skip it!
                    detokenized_hyp_query = detokenize_query(hyp.code, example.meta, example.table)
                    hyp_answer = kwargs['execution_engine'].execute_query(example.meta['table_id'],
                                                                          detokenized_hyp_query,
                                                                          lower=True)
                    if len(hyp_answer) == 0:
                        continue

                decoded_hyps.append(hyp)
            except Exception:
                # Was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit; only genuine
                # conversion/execution errors should drop a hypothesis.
                if verbose:
                    print("Exception in converting tree to code:", file=sys.stdout)
                    print('-' * 60, file=sys.stdout)
                    print('example id: %d, hypothesis id: %d' % (example.idx, hyp_id), file=sys.stdout)
                    traceback.print_exc(file=sys.stdout)
                    print('-' * 60, file=sys.stdout)

        count += 1
        if verbose and count % 50 == 0:
            print('decoded %d examples...' % count, file=sys.stdout)

        decode_results.append(decoded_hyps)

    if was_training: model.train()

    return decode_results
def evaluate(examples, parser, args, verbose=False, return_decode_result=False, eval_top_pred_only=False):
    """Decode `examples` with `parser` and compute top-1 and oracle accuracy.

    Args:
        examples: examples to evaluate; each needs `idx` plus whatever the
            parser and transition system read.
        parser: model forwarded to `decode()`; its
            `transition_system.hyp_correct` judges each hypothesis.
        args: configuration namespace (`lang`, `mode`, `dev_file`,
            `test_file`, `beam_size`, ...).
        verbose: forwarded to `decode()`.
        return_decode_result: also return the raw decode results when True.
        eval_top_pred_only: score only the first hypothesis per example.

    Returns:
        dict with 'accuracy' (top hypothesis correct) and 'oracle_accuracy'
        (any hypothesis correct); optionally paired with the decode results.
    """
    cum_oracle_acc = cum_acc = 0.0
    kwargs = dict()
    if args.lang == 'wikisql':
        from asdl.lang.sql.lib.dbengine import DBEngine
        from asdl.lang.sql.utils import detokenize_query

        # wikisql correctness is judged by executing queries against the DB
        # that sits next to the dev/test file.
        if args.mode == 'train':
            table_file = os.path.splitext(args.dev_file)[0] + '.db'
        else:
            table_file = os.path.splitext(args.test_file)[0] + '.db'
        execution_engine = DBEngine(table_file)
        kwargs['execution_engine'] = execution_engine

    decode_results = decode(examples, parser, args, verbose=verbose, **kwargs)
    for example, hyps in zip(examples, decode_results):
        if hyps:
            cur_oracle = 0.
            hyp_code_set = set()
            for hyp_id, hyp in enumerate(hyps):
                try:
                    if args.lang == 'wikisql':
                        result = parser.transition_system.hyp_correct(hyp, example, execution_engine)
                    else:
                        result = parser.transition_system.hyp_correct(hyp, example)

                    if hyp_id == 0 and result:
                        cum_acc += 1
                    if cur_oracle == 0. and result:
                        cur_oracle = 1.

                    hyp.correct = result
                except Exception:
                    # Was a bare `except:` (which would also swallow
                    # KeyboardInterrupt); a failing hypothesis is simply
                    # reported and marked wrong.
                    print('-' * 60, file=sys.stdout)
                    print('Error in evaluating Example %s, hyp %d {{ %s }}' % (example.idx, hyp_id, hyp.code), file=sys.stdout)
                    hyp.correct = False
                    print('example id: %d, hypothesis id: %d' % (example.idx, hyp_id), file=sys.stdout)
                    traceback.print_exc(file=sys.stdout)
                    print('-' * 60, file=sys.stdout)
                    continue

                if args.lang in ['lambda_dcs', 'python', 'prolog']:
                    # Flag duplicated hypotheses within one beam.
                    if hyp.code in hyp_code_set:
                        print('Duplicate Hyp Example [%d], Code %s' % (example.idx, hyp.code), file=sys.stdout)
                    hyp_code_set.add(hyp.code)

                if eval_top_pred_only: break

            cum_oracle_acc += cur_oracle

    num_examples = len(examples)
    if num_examples:
        eval_result = {'accuracy': cum_acc / num_examples,
                       'oracle_accuracy': cum_oracle_acc / num_examples}
    else:
        # Empty input: report zero accuracy instead of a ZeroDivisionError
        # (the original divided by len(examples) unconditionally).
        eval_result = {'accuracy': 0.0, 'oracle_accuracy': 0.0}

    if return_decode_result:
        return eval_result, decode_results
    else:
        return eval_result
|
from kernel_tuner import tune_kernel
import numpy
import argparse
import json
def generate_code(tuning_parameters):
    """Generate CUDA source for the fct_ale_b1_vertical kernel.

    Args:
        tuning_parameters: dict with keys
            "block_size_x" (threads per block),
            "tiling_x" (elements processed per thread),
            "real_type" ("float" or "double"),
            "int_type" (underscores become spaces, e.g. "unsigned_int").

    Returns:
        The CUDA kernel source as a string, with all template
        placeholders substituted.

    Raises:
        ValueError: if "real_type" is neither "float" nor "double".
    """
    code = \
        "__global__ void fct_ale_b1_vertical(const int maxLevels, const int * __restrict__ nLevels, const <%REAL_TYPE%> * __restrict__ fct_adf_v, <%REAL_TYPE%> * __restrict__ fct_plus, <%REAL_TYPE%> * __restrict__ fct_minus)\n" \
        "{\n" \
        "const <%INT_TYPE%> node = (blockIdx.x * maxLevels);\n" \
        "const <%INT_TYPE%> maxNodeLevel = nLevels[blockIdx.x] - 1;\n" \
        "\n" \
        "for ( <%INT_TYPE%> level = threadIdx.x; level < maxNodeLevel; level += <%BLOCK_SIZE%> )\n" \
        "{\n" \
        "<%REAL_TYPE%> fct_adf_v_level = 0.0;\n" \
        "<%REAL_TYPE%> fct_adf_v_nlevel = 0.0;\n" \
        "<%COMPUTE_BLOCK%>" \
        "}\n" \
        "}\n"
    compute_block = \
        "fct_adf_v_level = fct_adf_v[node + level + <%OFFSET%>];\n" \
        "fct_adf_v_nlevel = fct_adf_v[node + (level + 1) + <%OFFSET%>];\n" \
        "fct_plus[node + level + <%OFFSET%>] = <%FMAX%>(0.0, fct_adf_v_level) + <%FMAX%>(0.0, -fct_adf_v_nlevel);\n" \
        "fct_minus[node + level + <%OFFSET%>] = <%FMIN%>(0.0, fct_adf_v_level) + <%FMIN%>(0.0, -fct_adf_v_nlevel);\n"
    # Effective loop stride: each thread handles tiling_x levels per pass.
    # (block_size_x * 1 == block_size_x, so the original special case for
    # tiling_x == 1 was redundant and is folded into one expression.)
    code = code.replace("<%BLOCK_SIZE%>", str(tuning_parameters["block_size_x"] * tuning_parameters["tiling_x"]))
    compute = str()
    for tile in range(0, tuning_parameters["tiling_x"]):
        if tile == 0:
            # First tile has offset 0: drop the " + <%OFFSET%>" term entirely.
            compute = compute + compute_block.replace(" + <%OFFSET%>", "")
        else:
            # Later tiles need a bounds check: the final tile may be partial.
            offset = tuning_parameters["block_size_x"] * tile
            compute = compute + "if ( level + {} < maxNodeLevel )\n{{\n{}}}\n".format(str(offset), compute_block.replace("<%OFFSET%>", str(offset)))
    code = code.replace("<%COMPUTE_BLOCK%>", compute)
    if tuning_parameters["real_type"] == "float":
        code = code.replace("<%FMAX%>", "fmaxf")
        code = code.replace("<%FMIN%>", "fminf")
    elif tuning_parameters["real_type"] == "double":
        code = code.replace("<%FMAX%>", "fmax")
        code = code.replace("<%FMIN%>", "fmin")
    else:
        # Was a bare `raise ValueError` with no message.
        raise ValueError("real_type must be 'float' or 'double', got {!r}".format(tuning_parameters["real_type"]))
    code = code.replace("<%INT_TYPE%>", tuning_parameters["int_type"].replace("_", " "))
    code = code.replace("<%REAL_TYPE%>", tuning_parameters["real_type"])
    return code
def generate_code_shared(tuning_parameters):
    """Generate CUDA source for fct_ale_b1_vertical with shared-memory staging.

    The vertical flux column of one node is first copied into dynamic shared
    memory, then fct_plus/fct_minus are computed from the staged values.
    Template placeholders are filled in from tuning_parameters
    (block_size_x, tiling_x, int_type, real_type).

    :raises ValueError: when real_type is neither "float" nor "double"
    """
    kernel_template = (
        "__global__ void fct_ale_b1_vertical(const int maxLevels, const int * __restrict__ nLevels, const <%REAL_TYPE%> * __restrict__ fct_adf_v, <%REAL_TYPE%> * __restrict__ fct_plus, <%REAL_TYPE%> * __restrict__ fct_minus)\n"
        "{\n"
        "const <%INT_TYPE%> node = (blockIdx.x * maxLevels);\n"
        "const <%INT_TYPE%> maxNodeLevel = nLevels[blockIdx.x];\n"
        "extern __shared__ <%REAL_TYPE%> fct_adf_v_local[];\n"
        "\n"
        "for ( <%INT_TYPE%> level = threadIdx.x; level < maxNodeLevel; level += <%BLOCK_SIZE%> )\n"
        "{\n"
        "<%LOAD_BLOCK%>"
        "}\n"
        "__syncthreads();\n"
        "for ( <%INT_TYPE%> level = threadIdx.x; level < maxNodeLevel - 1; level += <%BLOCK_SIZE%> )\n"
        "{\n"
        "<%COMPUTE_BLOCK%>"
        "}\n"
        "}\n"
    )
    load_template = "fct_adf_v_local[level + <%OFFSET%>] = fct_adf_v[node + level + <%OFFSET%>];\n"
    compute_template = (
        "fct_plus[node + level + <%OFFSET%>] = <%FMAX%>(0.0, fct_adf_v_local[level + <%OFFSET%>]) + <%FMAX%>(0.0, -fct_adf_v_local[level + <%OFFSET%> + 1]);\n"
        "fct_minus[node + level + <%OFFSET%>] = <%FMIN%>(0.0, fct_adf_v_local[level + <%OFFSET%>]) + <%FMIN%>(0.0, -fct_adf_v_local[level + <%OFFSET%> + 1]);\n"
    )
    # Effective stride of the level loops: threads per block times tiling factor.
    stride = tuning_parameters["block_size_x"]
    if tuning_parameters["tiling_x"] > 1:
        stride = stride * tuning_parameters["tiling_x"]
    code = kernel_template.replace("<%BLOCK_SIZE%>", str(stride))
    # Unroll the load and compute bodies once per tile; tiles beyond the first
    # are guarded so they never run past the node's active levels.
    load_parts = []
    compute_parts = []
    for tile in range(tuning_parameters["tiling_x"]):
        if tile == 0:
            load_parts.append(load_template.replace(" + <%OFFSET%>", ""))
            compute_parts.append(compute_template.replace(" + <%OFFSET%>", ""))
        else:
            offset = str(tuning_parameters["block_size_x"] * tile)
            load_parts.append("if ( level + {} < maxNodeLevel )\n{{\n{}}}\n".format(offset, load_template.replace("<%OFFSET%>", offset)))
            compute_parts.append("if ( level + {} < maxNodeLevel - 1 )\n{{\n{}}}\n".format(offset, compute_template.replace("<%OFFSET%>", offset)))
    code = code.replace("<%LOAD_BLOCK%>", "".join(load_parts))
    code = code.replace("<%COMPUTE_BLOCK%>", "".join(compute_parts))
    # Select single or double precision intrinsics.
    real_type = tuning_parameters["real_type"]
    if real_type == "float":
        code = code.replace("<%FMAX%>", "fmaxf").replace("<%FMIN%>", "fminf")
    elif real_type == "double":
        code = code.replace("<%FMAX%>", "fmax").replace("<%FMIN%>", "fmin")
    else:
        raise ValueError
    code = code.replace("<%INT_TYPE%>", tuning_parameters["int_type"].replace("_", " "))
    return code.replace("<%REAL_TYPE%>", real_type)
def reference(nodes, levels, max_levels, fct_adf_v, fct_plus, fct_minus):
    """CPU reference implementation of fct_ale_b1_vertical.

    For every active level of every node, zero the output slots and then
    accumulate the positive/negative parts of the vertical flux at the level
    and the level below into fct_plus/fct_minus (written in place).
    """
    # First pass: reset every output slot that will be written.
    for node in range(nodes):
        base = node * max_levels
        for item in range(base, base + levels[node] - 1):
            fct_plus[item] = 0.0
            fct_minus[item] = 0.0
    # Second pass: accumulate flux contributions per (node, level).
    for node in range(nodes):
        base = node * max_levels
        for item in range(base, base + levels[node] - 1):
            flux_here = fct_adf_v[item]
            flux_below = fct_adf_v[item + 1]
            fct_plus[item] = fct_plus[item] + (max(0.0, flux_here) + max(0.0, -flux_below))
            fct_minus[item] = fct_minus[item] + (min(0.0, flux_here) + min(0.0, -flux_below))
def tune(nodes, max_levels, max_tile, real_type, quiet=True):
    """Tune fct_ale_b1_vertical for a given problem size.

    Runs the Kernel Tuner search over both the plain and the shared-memory
    code generators, verifies every configuration against the CPU reference,
    annotates each result with its memory bandwidth, and returns the combined
    result list.

    :param nodes: number of mesh nodes (one CUDA block per node)
    :param max_levels: maximum number of vertical levels per node
    :param max_tile: upper bound of the tiling factor search
    :param real_type: "float" or "double"; anything else raises ValueError
    :param quiet: forwarded to tune_kernel to suppress per-config output
    :raises ValueError: for an unsupported real_type
    """
    numpy_real_type = None
    if real_type == "float":
        numpy_real_type = numpy.float32
    elif real_type == "double":
        numpy_real_type = numpy.float64
    else:
        raise ValueError
    # Tuning and code generation parameters
    tuning_parameters = dict()
    tuning_parameters["shared_memory"] = [False]
    tuning_parameters["int_type"] = ["unsigned_int", "int"]
    tuning_parameters["real_type"] = [real_type]
    tuning_parameters["max_levels"] = [str(max_levels)]
    # Block sizes: multiples of the warp size, 32..1024.
    tuning_parameters["block_size_x"] = [32 * i for i in range(1, 33)]
    # NOTE(review): range(1, max_tile) excludes max_tile itself — confirm the
    # upper bound is intentionally exclusive.
    tuning_parameters["tiling_x"] = [i for i in range(1, max_tile)]
    constraints = list()
    constraints.append("block_size_x * tiling_x <= max_levels")
    # Memory allocation and initialization
    fct_adf_v = numpy.random.randn(nodes * max_levels).astype(numpy_real_type)
    fct_plus = numpy.zeros(nodes * max_levels).astype(numpy_real_type)
    fct_minus = numpy.zeros_like(fct_plus).astype(numpy_real_type)
    fct_plus_control = numpy.zeros_like(fct_plus).astype(numpy_real_type)
    fct_minus_control = numpy.zeros_like(fct_minus).astype(numpy_real_type)
    levels = numpy.zeros(nodes).astype(numpy.int32)
    used_levels = 0
    for node in range(0, nodes):
        # Each node gets at least 3 levels; used_levels totals the active
        # (levels - 1) entries actually processed per node.
        levels[node] = numpy.random.randint(3, max_levels)
        used_levels = used_levels + (levels[node] - 1)
    arguments = [numpy.int32(max_levels), levels, fct_adf_v, fct_plus, fct_minus]
    # Reference
    reference(nodes, levels, max_levels, fct_adf_v, fct_plus_control, fct_minus_control)
    # None entries: only the fct_plus/fct_minus outputs are verified.
    arguments_control = [None, None, None, fct_plus_control, fct_minus_control]
    # Tuning
    results, _ = tune_kernel("fct_ale_b1_vertical", generate_code, "{} * block_size_x".format(nodes), arguments, tuning_parameters, lang="CUDA", answer=arguments_control, restrictions=constraints, quiet=quiet)
    # Memory bandwidth: one int read per node plus 4 real-sized accesses per
    # active level; time is reported in milliseconds, hence the / 10**3.
    memory_bytes = ((nodes * 4) + (used_levels * 4 * numpy.dtype(numpy_real_type).itemsize))
    for result in results:
        result["memory_bandwidth"] = memory_bytes / (result["time"] / 10**3)
    # Shared memory version
    shared_memory_args = dict()
    tuning_parameters["shared_memory"] = [True]
    shared_memory_args["size"] = max_levels * numpy.dtype(numpy_real_type).itemsize
    results_shared, _ = tune_kernel("fct_ale_b1_vertical", generate_code_shared, "{} * block_size_x".format(nodes), arguments, tuning_parameters, smem_args=shared_memory_args, lang="CUDA", answer=arguments_control, restrictions=constraints, quiet=quiet)
    # Memory bandwidth shared memory version: 3 global real accesses per level
    # (the staged loads remove one global read).
    memory_bytes = ((nodes * 4) + (used_levels * 3 * numpy.dtype(numpy_real_type).itemsize))
    for result in results_shared:
        result["memory_bandwidth"] = memory_bytes / (result["time"] / 10**3)
    return results + results_shared
def parse_command_line():
    """Parse and return the tuner's command line arguments.

    Note: --verbose is a store_false flag, so the resulting value is True by
    default and becomes False when the flag is given; it is passed as the
    `quiet` parameter of tune().
    """
    parser = argparse.ArgumentParser(description="FESOM2 FCT ALE B1 VERTICAL")
    parser.add_argument("--nodes", type=int, required=True,
                        help="The number of nodes.")
    parser.add_argument("--max_levels", type=int, required=True,
                        help="The maximum number of vertical levels per node.")
    parser.add_argument("--max_tile", type=int, default=2,
                        help="The maximum tiling factor.")
    parser.add_argument("--real_type", type=str, required=True,
                        choices=["float", "double"],
                        help="The floating point type to use.")
    parser.add_argument("--verbose", action="store_false", default=True,
                        help="Print all kernel configurations.")
    parser.add_argument("--store", action="store_true", default=False,
                        help="Store performance results in a JSON file.")
    return parser.parse_args()
if __name__ == "__main__":
    command_line = parse_command_line()
    # NOTE(review): command_line.verbose comes from a store_false flag, so it
    # is True by default and is passed here as the `quiet` parameter of tune().
    results = tune(command_line.nodes, command_line.max_levels, command_line.max_tile, command_line.real_type, command_line.verbose)
    # Best configuration = lowest measured kernel time.
    best_configuration = min(results, key=lambda x : x["time"])
    print("/* Memory bandwidth: {:.2f} GB/s */".format(best_configuration["memory_bandwidth"] / 10**9))
    print("/* Block size X: {} */".format(best_configuration["block_size_x"]))
    # Regenerate and print the winning kernel source.
    if best_configuration["shared_memory"]:
        print(generate_code_shared(best_configuration))
    else:
        print(generate_code(best_configuration))
    if command_line.store:
        # "x" mode never overwrites an existing results file.
        try:
            with open("fct_ale_b1_vertical_{}_{}_{}.json".format(command_line.nodes, command_line.max_levels, command_line.real_type), "x") as fp:
                json.dump(results, fp)
        except FileExistsError:
            print("Impossible to save the results, a results file already exists for a similar experiment.")
# Generated by Django 2.2.6 on 2019-10-20 09:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter Project.tag and Project.technologies
    to ManyToManyField.

    NOTE(review): related_name='tags'/'technologies' name the reverse
    accessors (from Tag/Technology back to Project) and read backwards, but
    editing an already-applied migration is unsafe, so they are left as-is.
    """

    dependencies = [
        ('award', '0003_project_collaborators'),
    ]

    operations = [
        migrations.AlterField(
            model_name='project',
            name='tag',
            field=models.ManyToManyField(related_name='tags', to='award.Tag'),
        ),
        migrations.AlterField(
            model_name='project',
            name='technologies',
            field=models.ManyToManyField(related_name='technologies', to='award.Technology'),
        ),
    ]
|
# Su Doku
import os
import numpy as np
from itertools import chain
def solve():
    """Project Euler 96 (Su Doku): solve all 50 grids and return the sum of
    the 3-digit numbers formed by the first three cells of each solved grid."""
    # Drop every 10th line (the "Grid NN" header rows), then parse the
    # remaining 9-character digit rows into fifty 9x9 uint8 boards.
    with open(os.path.join("..", "data", "p096.txt"), encoding="utf-8") as f:
        rows = np.delete(np.array(f.readlines()), slice(None, None, 10))
    sudokus = np.split(np.genfromtxt(rows, np.uint8, delimiter=[1] * 9),
                       50)
    digits = set(range(1, 10))       # candidate universe 1..9
    coords = list(np.ndindex(9, 9))  # all (row, col) cell coordinates

    def solution(board):
        # Recursive backtracking solver; mutates `board` in place.
        # Solved (no empty 0 cells): return the top-left 3-digit number.
        if 0 not in chain.from_iterable(board):
            return int("".join([str(i) for i in board[0, 0:3]]))
        # For each empty cell: candidates = digits not already present in its
        # row, its column or its 3x3 box.
        candidates = {(y, x): digits - set([
            *board[y, :],
            *board[:, x],
            *chain.from_iterable(board[y // 3 * 3:y // 3 * 3 + 3,
                                       x // 3 * 3:x // 3 * 3 + 3]),
        ])
            for (y, x) in coords if board[y, x] == 0}
        # Branch on the most constrained cell (fewest candidates) first.
        coord = min(candidates, key=lambda x: len(candidates[x]))
        for i in candidates[coord]:
            board[coord] = i
            number = solution(board)
            if number:
                return number
            board[coord] = 0  # backtrack: this digit leads to a dead end
    return sum(solution(sudokus[n]) for n in range(50))
if __name__ == "__main__":
    # Print the Project Euler 96 answer when run as a script.
    print(solve())
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from utils import normalize
from utils import compute_bleu_rouge
import json
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
# /DATA/disk1/wangyongbo/lic2019/DuReader/official_data/extracted/results_dev
BASE_PATH_PRED = "/DATA/disk1/wangyongbo/lic2019/DuReader/official_data/extracted/"
BASE_PATH_REF = "/DATA/disk1/wangyongbo/lic2019/DuReader/data/extracted/"
def prepare_data():
    """
    On DuReader 2.0 `devset`.

    Load prediction and reference answers for evaluation:
    1. overall eval.
    2. eval `search` and `zhidao`, respectively.
    """
    def read_json_lines(path):
        # Each line of the file is one JSON-encoded sample (a dict).
        with open(path, "r") as handle:
            return [json.loads(line) for line in handle.readlines()]

    # -------------------------- pred answers --------------------------
    pred_answers = read_json_lines(BASE_PATH_PRED + "results_dev/test_result_merge_best_rm.json")
    pred_answers_search = read_json_lines(BASE_PATH_PRED + "results_dev/test_result_search_best.json")
    pred_answers_zhidao = read_json_lines(BASE_PATH_PRED + "results_dev/test_result_zhidao_best.json")
    # -------------------------- ref answers ---------------------------
    ref_answers_search = read_json_lines(BASE_PATH_REF + "devset/search.dev.json")
    ref_answers_zhidao = read_json_lines(BASE_PATH_REF + "devset/zhidao.dev.json")
    # Overall references share the same sample objects, search set first.
    ref_answers = ref_answers_search + ref_answers_zhidao
    return pred_answers, ref_answers, pred_answers_search, pred_answers_zhidao, ref_answers_search, ref_answers_zhidao
# acquire eval data (runs at import time: reads all prediction/reference files)
pred_answers, ref_answers, pred_answers_search, pred_answers_zhidao, ref_answers_search, ref_answers_zhidao = prepare_data()
def run_eval(pred_answers_list, ref_answers_list):
    """Compute Bleu/Rouge scores of predictions against references.

    Predictions and references are zipped positionally and keyed by the
    reference's question_id; samples without reference answers are skipped.
    Returns None when no reference answers are provided.
    """
    if not ref_answers_list:
        return None
    pred_dict = {}
    ref_dict = {}
    for pred, ref in zip(pred_answers_list, ref_answers_list):
        question_id = ref['question_id']
        if ref['answers']:
            pred_dict[question_id] = normalize(pred['answers'])
            ref_dict[question_id] = normalize(ref['answers'])
    return compute_bleu_rouge(pred_dict, ref_dict)
# run overall
bleu_rouge = run_eval(pred_answers, ref_answers)
# run respectively
bleu_rouge_search = run_eval(pred_answers_search, ref_answers_search)
bleu_rouge_zhidao = run_eval(pred_answers_zhidao, ref_answers_zhidao)
# Recorded scores of a previous run, kept for reference.
"""On dureader2.0 `dev`
{
'Bleu-1': 0.5837681460393821,
'Bleu-2': 0.5226142125975742,
'Bleu-3': 0.48347404434526725,
'Bleu-4': 0.45561738017705655,
'Rouge-L': 0.5022681316295654
}
"""
logger.info('(Overall) Dev eval result: \n {}'.format(bleu_rouge))
logger.info('(search) Dev eval result: \n {}'.format(bleu_rouge_search))
logger.info('(zhidao) Dev eval result: \n {}'.format(bleu_rouge_zhidao))
|
# import the logging library
import logging
# django modules
from django.contrib.auth.decorators import login_required, permission_required
from django.db.transaction import atomic
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render, redirect
# my models
from places.models import Place
# Get an instance of a logger
logger: logging.Logger = logging.getLogger(__name__)
def set_deleted(deleted: bool, pk: int) -> HttpResponse:
    """Set the `deleted` flag of the Place with the given pk (soft delete).

    Redirects to the place detail view on success; answers 404 when no such
    place exists.
    """
    try:
        place = Place.objects.get(id=pk)
    except Place.DoesNotExist:
        return HttpResponse(status=404, content=f'place with id {pk} does not exist')
    place.deleted = deleted
    place.save()
    return redirect('places:detail', pk)
@atomic
@permission_required('places.delete_place')
@login_required(login_url='/traveller/login/')
def delete_place(request: HttpRequest, pk: int) -> HttpResponse:
    """Set a place to deleted = true (soft delete).

    Requires login and places.delete_place; decorators apply bottom-up, so
    @atomic is outermost — NOTE(review): the transaction therefore also wraps
    the auth checks; confirm this ordering is intended.
    """
    return set_deleted(True, pk)
@atomic
@permission_required('places.change_place')
@login_required(login_url='/traveller/login/')
def undelete_place(request: HttpRequest, pk: int) -> HttpResponse:
    """Set a place to deleted = false (undo the soft delete)."""
    return set_deleted(False, pk)
def show_intro(request: HttpRequest) -> HttpResponse:
    """Just show the introduction page; currently needs no database access."""
    return render(request, 'places/intro.html')
|
from datetime import datetime
from scrapy import Request
from scrapy.http import TextResponse
from scrapy.utils.test import get_crawler
from kingfisher_scrapy.base_spider import BaseSpider
class ExpectedError(Exception):
    """Distinct exception type raised deliberately by tests."""
    pass
def response_fixture(meta=None, url_path='', **kwargs):
    """Build a UTF-8 TextResponse for http://example.com plus url_path.

    `meta` defaults to {'file_name': 'test'}; extra keyword arguments are
    forwarded to TextResponse (status defaults to 200).
    """
    request_meta = {'file_name': 'test'} if meta is None else meta
    kwargs.setdefault('status', 200)
    request = Request(f'http://example.com{url_path}', meta=request_meta)
    return TextResponse(request.url, encoding='utf-8', request=request, **kwargs)
def spider_with_crawler(spider_class=BaseSpider, *, settings=None, **kwargs):
    """Create a spider named 'test' attached to a crawler whose start_time
    stat is pinned to 2001-02-03 04:05:06 for deterministic tests."""
    crawler = get_crawler(spider_class, settings)
    crawler.stats.set_value('start_time', datetime(2001, 2, 3, 4, 5, 6))
    return crawler.spidercls.from_crawler(crawler, name='test', **kwargs)
def spider_with_files_store(files_store, settings=None, **kwargs):
    """Create a test spider configured with a files store and dummy
    Kingfisher API settings; `settings` entries override the defaults."""
    crawler_settings = {
        'FILES_STORE': files_store,
        'KINGFISHER_API_URI': 'http://httpbin.org/anything/',
        'KINGFISHER_API_KEY': 'xxx',
        'KINGFISHER_API2_URL': 'http://httpbin.org/anything/',
    }
    crawler_settings.update(settings or {})
    return spider_with_crawler(settings=crawler_settings, **kwargs)
|
"""
Copyright (c) 2020 Chris Ohk
I am making my contributions/submissions to this project solely in our
personal capacity and am not conveying any rights to any intellectual
property of any third parties.
"""
import pyYahtzee
def test_player_roll_dices():
    """Rolling all dice must yield values within the valid 1..6 range."""
    player = pyYahtzee.Player()
    player.roll_dices(True)
    for value in player.get_dice_values():
        assert 1 <= value <= 6
def test_player_calculate_scores():
    """Check score calculation against three representative dice sets:
    a small straight, a full house and a yahtzee."""
    Category = pyYahtzee.Category
    cases = [
        ([6, 1, 2, 3, 4], {
            Category.ACES: 1, Category.TWOS: 2, Category.THREES: 3,
            Category.FOURS: 4, Category.FIVES: 0, Category.SIXES: 6,
            Category.THREE_OF_A_KIND: 0, Category.FOUR_OF_A_KIND: 0,
            Category.FULL_HOUSE: 0, Category.SMALL_STRAIGHT: 30,
            Category.LARGE_STRAIGHT: 0, Category.YAHTZEE: 0,
            Category.CHANCE: 16,
        }),
        ([2, 5, 5, 2, 5], {
            Category.ACES: 0, Category.TWOS: 4, Category.THREES: 0,
            Category.FOURS: 0, Category.FIVES: 15, Category.SIXES: 0,
            Category.THREE_OF_A_KIND: 19, Category.FOUR_OF_A_KIND: 0,
            Category.FULL_HOUSE: 25, Category.SMALL_STRAIGHT: 0,
            Category.LARGE_STRAIGHT: 0, Category.YAHTZEE: 0,
            Category.CHANCE: 19,
        }),
        ([6, 6, 6, 6, 6], {
            Category.ACES: 0, Category.TWOS: 0, Category.THREES: 0,
            Category.FOURS: 0, Category.FIVES: 0, Category.SIXES: 30,
            Category.THREE_OF_A_KIND: 30, Category.FOUR_OF_A_KIND: 30,
            Category.FULL_HOUSE: 0, Category.SMALL_STRAIGHT: 0,
            Category.LARGE_STRAIGHT: 0, Category.YAHTZEE: 50,
            Category.CHANCE: 30,
        }),
    ]
    player = pyYahtzee.Player()
    for dice_values, expected in cases:
        player.set_dice_values(dice_values)
        player.calculate_scores()
        scores = player.get_scores()
        for category, expected_score in expected.items():
            assert scores[category] == expected_score
|
# ---------------------------------------------------------------------
# inv.interface application
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
from mongoengine import Q
# NOC modules
from noc.lib.app.extapplication import ExtApplication, view
from noc.sa.models.managedobject import ManagedObject
from noc.inv.models.interface import Interface
from noc.inv.models.subinterface import SubInterface
from noc.inv.models.interfaceprofile import InterfaceProfile
from noc.sa.interfaces.base import (
StringParameter,
ListOfParameter,
DocumentParameter,
ModelParameter,
)
from noc.main.models.resourcestate import ResourceState
from noc.project.models.project import Project
from noc.vc.models.vcdomain import VCDomain
from noc.core.text import alnum_key
from noc.core.translation import ugettext as _
from noc.config import config
from noc.core.comp import smart_text
class InterfaceAppplication(ExtApplication):
    """
    inv.interface application

    ExtJS application exposing the interfaces of a managed object (L1/LAG/
    L2/L3 views) plus interface-level mutations: link/unlink and profile,
    state, project and VC-domain changes.
    NOTE(review): the class name carries a triple-p typo ("Appplication");
    renaming it could break external references, so it is left unchanged.
    """

    title = _("Interfaces")
    menu = _("Interfaces")
    # Map/reduce task configuration: run get_mac_address_table on objects.
    mrt_config = {
        "get_mac": {
            "map_script": "get_mac_address_table",
            "timeout": config.script.timeout,
            "access": "get_mac",
        }
    }
    # Extra permissions implied by holding "get_mac".
    implied_permissions = {
        "get_mac": [
            "inv:inv:read",
            "inv:interface:view",
            "sa:managedobject:lookup",
            "sa:managedobject:read",
        ]
    }

    @view(url=r"^(?P<managed_object>\d+)/$", method=["GET"], access="view", api=True)
    def api_get_interfaces(self, request, managed_object):
        """
        GET interfaces
        :param managed_object: managed object id taken from the URL
        :return: dict with "l1", "lag", "l2" and "l3" interface lists, each
                 sorted in natural (alphanumeric-aware) name order
        """

        def sorted_iname(s):
            # Sort row dicts by the natural order of their "name" field.
            return list(sorted(s, key=lambda x: alnum_key(x["name"])))

        def get_style(i):
            # Resolve the row CSS class from the interface profile's style,
            # caching the result per profile id in style_cache (closure).
            profile = i.profile
            if profile:
                try:
                    return style_cache[profile.id]
                except KeyError:
                    pass
                if profile.style:
                    s = profile.style.css_class_name
                else:
                    s = ""
                style_cache[profile.id] = s
                return s
            else:
                return ""

        def get_link(i):
            # Describe the link of interface i as {"id", "label"}, or None
            # when the interface is not linked.
            link = i.link
            if not link:
                return None
            if link.is_ptp:
                # ptp: label is the single remote end
                o = link.other_ptp(i)
                label = "%s:%s" % (o.managed_object.name, o.name)
            elif link.is_lag:
                # unresolved LAG: list remote member interfaces
                o = [ii for ii in link.other(i) if ii.managed_object.id != i.managed_object.id]
                label = "LAG %s: %s" % (o[0].managed_object.name, ", ".join(ii.name for ii in o))
            else:
                # Broadcast: list all other ends
                label = ", ".join(
                    "%s:%s" % (ii.managed_object.name, ii.name) for ii in link.other(i)
                )
            return {"id": str(link.id), "label": label}

        # Get object
        o = self.get_object_or_404(ManagedObject, id=int(managed_object))
        if not o.has_access(request.user):
            return self.response_forbidden("Permission denied")
        # Physical interfaces
        # @todo: proper ordering
        default_state = ResourceState.get_default()
        style_cache = {}  # profile_id -> css_style
        l1 = [
            {
                "id": str(i.id),
                "name": i.name,
                "description": i.description,
                "mac": i.mac,
                "ifindex": i.ifindex,
                "lag": (i.aggregated_interface.name if i.aggregated_interface else ""),
                "link": get_link(i),
                "profile": str(i.profile.id) if i.profile else None,
                "profile__label": smart_text(i.profile) if i.profile else None,
                "enabled_protocols": i.enabled_protocols,
                "project": i.project.id if i.project else None,
                "project__label": smart_text(i.project) if i.project else None,
                "state": i.state.id if i.state else default_state.id,
                "state__label": smart_text(i.state if i.state else default_state),
                "vc_domain": i.vc_domain.id if i.vc_domain else None,
                "vc_domain__label": smart_text(i.vc_domain) if i.vc_domain else None,
                "row_class": get_style(i),
            }
            for i in Interface.objects.filter(managed_object=o.id, type="physical")
        ]
        # LAG (aggregated interfaces with their member names)
        lag = [
            {
                "id": str(i.id),
                "name": i.name,
                "description": i.description,
                "members": [
                    j.name
                    for j in Interface.objects.filter(
                        managed_object=o.id, aggregated_interface=i.id
                    )
                ],
                "profile": str(i.profile.id) if i.profile else None,
                "profile__label": smart_text(i.profile) if i.profile else None,
                "enabled_protocols": i.enabled_protocols,
                "project": i.project.id if i.project else None,
                "project__label": smart_text(i.project) if i.project else None,
                "state": i.state.id if i.state else default_state.id,
                "state__label": smart_text(i.state if i.state else default_state),
                "vc_domain": i.vc_domain.id if i.vc_domain else None,
                "vc_domain__label": smart_text(i.vc_domain) if i.vc_domain else None,
                "row_class": get_style(i),
            }
            for i in Interface.objects.filter(managed_object=o.id, type="aggregated")
        ]
        # L2 interfaces (bridged subinterfaces)
        l2 = [
            {
                "name": i.name,
                "description": i.description,
                "untagged_vlan": i.untagged_vlan,
                "tagged_vlans": i.tagged_vlans,
            }
            for i in SubInterface.objects.filter(managed_object=o.id, enabled_afi="BRIDGE")
        ]
        # L3 interfaces (subinterfaces with IPv4 and/or IPv6 enabled)
        q = Q(enabled_afi="IPv4") | Q(enabled_afi="IPv6")
        l3 = [
            {
                "name": i.name,
                "description": i.description,
                "ipv4_addresses": i.ipv4_addresses,
                "ipv6_addresses": i.ipv6_addresses,
                "enabled_protocols": i.enabled_protocols,
                "vlan": i.vlan_ids,
                "vrf": i.forwarding_instance.name if i.forwarding_instance else "",
            }
            for i in SubInterface.objects.filter(managed_object=o.id).filter(q)
        ]
        return {
            "l1": sorted_iname(l1),
            "lag": sorted_iname(lag),
            "l2": sorted_iname(l2),
            "l3": sorted_iname(l3),
        }

    @view(
        url=r"^link/$",
        method=["POST"],
        validate={
            "type": StringParameter(choices=["ptp"]),
            "interfaces": ListOfParameter(element=DocumentParameter(Interface)),
        },
        access="link",
        api=True,
    )
    def api_link(self, request, type, interfaces):
        """Link two interfaces point-to-point.

        :raises ValueError: when the interface list does not hold exactly two
        """
        if type == "ptp":
            if len(interfaces) == 2:
                interfaces[0].link_ptp(interfaces[1])
                return {"status": True}
            else:
                raise ValueError("Invalid interfaces length")
        return {"status": False}

    @view(url=r"^unlink/(?P<iface_id>[0-9a-f]{24})/$", method=["POST"], access="link", api=True)
    def api_unlink(self, request, iface_id):
        """Remove the link of the given interface; errors are returned as
        {"status": False, "msg": ...} rather than raised."""
        i = Interface.objects.filter(id=iface_id).first()
        if not i:
            return self.response_not_found()
        try:
            i.unlink()
            return {"status": True, "msg": "Unlinked"}
        except ValueError as why:
            return {"status": False, "msg": str(why)}

    @view(url=r"^unlinked/(?P<object_id>\d+)/$", method=["GET"], access="link", api=True)
    def api_unlinked(self, request, object_id):
        """List the unlinked physical interfaces of a managed object as
        {"id", "label"} rows sorted in natural label order."""

        def get_label(i):
            # Append the description, when set, to the interface name.
            if i.description:
                return "%s (%s)" % (i.name, i.description)
            else:
                return i.name

        o = self.get_object_or_404(ManagedObject, id=int(object_id))
        r = [
            {"id": str(i.id), "label": get_label(i)}
            for i in Interface.objects.filter(managed_object=o.id, type="physical").order_by("name")
            if not i.link
        ]
        return list(sorted(r, key=lambda x: alnum_key(x["label"])))

    @view(
        url=r"^l1/(?P<iface_id>[0-9a-f]{24})/change_profile/$",
        validate={"profile": DocumentParameter(InterfaceProfile)},
        method=["POST"],
        access="profile",
        api=True,
    )
    def api_change_profile(self, request, iface_id, profile):
        """Assign an interface profile and lock it against auto-classification."""
        i = Interface.objects.filter(id=iface_id).first()
        if not i:
            return self.response_not_found()
        if i.profile != profile:
            i.profile = profile
            i.profile_locked = True
            i.save()
        return True

    @view(
        url=r"^l1/(?P<iface_id>[0-9a-f]{24})/change_state/$",
        validate={"state": ModelParameter(ResourceState)},
        method=["POST"],
        access="profile",
        api=True,
    )
    def api_change_state(self, request, iface_id, state):
        """Set the resource state of an interface (no-op when unchanged)."""
        i = Interface.objects.filter(id=iface_id).first()
        if not i:
            return self.response_not_found()
        if i.state != state:
            i.state = state
            i.save()
        return True

    @view(
        url=r"^l1/(?P<iface_id>[0-9a-f]{24})/change_project/$",
        validate={"project": ModelParameter(Project, required=False)},
        method=["POST"],
        access="profile",
        api=True,
    )
    def api_change_project(self, request, iface_id, project):
        """Attach an interface to a project (or detach when project is None)."""
        i = Interface.objects.filter(id=iface_id).first()
        if not i:
            return self.response_not_found()
        if i.project != project:
            i.project = project
            i.save()
        return True

    @view(
        url=r"^l1/(?P<iface_id>[0-9a-f]{24})/change_vc_domain/$",
        validate={"vc_domain": ModelParameter(VCDomain, required=False)},
        method=["POST"],
        access="profile",
        api=True,
    )
    def api_change_vc_domain(self, request, iface_id, vc_domain):
        """Set the VC domain of an interface (or clear it when None)."""
        i = Interface.objects.filter(id=iface_id).first()
        if not i:
            return self.response_not_found()
        if i.vc_domain != vc_domain:
            i.vc_domain = vc_domain
            i.save()
        return True
|
#!/usr/bin/env python3
# encoding=utf8
import traits.observation.api as ob_api
import traits.observation.events as ob_evt
import traits.observation.expression as ob_exp
from traits.api import *
from .i1_util import *
def __refer_sth():
    # Touch the imported names — presumably so import-checking tools keep the
    # (star-)imports above; TODO confirm intent.
    return HasTraits, TraitType, ob_api, ob_evt, ob_exp
|
import glob
import os
import pathlib
import plistlib
import sqlite3
import json
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows
def get_healthFlights(files_found, report_folder, seeker):
    """Extract flights-climbed samples from the Health sqlite database and
    emit an HTML artifact report, a TSV export and a timeline entry.

    :param files_found: list of located database paths; only the first is read
    :param report_folder: destination folder for the generated reports
    :param seeker: unused here, kept for the common artifact-function signature
    """
    file_found = str(files_found[0])
    db = sqlite3.connect(file_found)
    try:
        cursor = db.cursor()
        cursor.execute(
            """
            SELECT
            DATETIME(SAMPLES.START_DATE + 978307200, 'UNIXEPOCH') AS "START DATE",
            DATETIME(SAMPLES.END_DATE + 978307200, 'UNIXEPOCH') AS "END DATE",
            QUANTITY AS "FLIGHTS CLIMBED",
            (SAMPLES.END_DATE-SAMPLES.START_DATE) AS "TIME IN SECONDS",
            SAMPLES.DATA_ID AS "SAMPLES TABLE ID"
            FROM
            SAMPLES
            LEFT OUTER JOIN
            QUANTITY_SAMPLES
            ON SAMPLES.DATA_ID = QUANTITY_SAMPLES.DATA_ID
            WHERE
            SAMPLES.DATA_TYPE = 12
            """
        )
        all_rows = cursor.fetchall()
    finally:
        # Always release the connection (it was previously leaked).
        db.close()

    if not all_rows:
        logfunc('No data available in table')
        return

    # sqlite3 rows are already tuples in SELECT column order.
    data_list = [tuple(row) for row in all_rows]
    description = ''
    report = ArtifactHtmlReport('Health Flights Climbed')
    report.start_artifact_report(report_folder, 'Flights Climbed', description)
    report.add_script()
    data_headers = ('Start Date', 'End Date', 'Flights Climbed', 'Time in Seconds', 'Samples Table ID')
    report.write_artifact_data_table(data_headers, data_list, file_found)
    report.end_artifact_report()
    tsvname = 'Health Flights'
    tsv(report_folder, data_headers, data_list, tsvname)
    tlactivity = 'Health Flights'
    timeline(report_folder, tlactivity, data_list, data_headers)
|
#imports section
import json
import sys
import requests
#function return Ticker Data for EUR market for passed currency
#inputs are
#currency - name of currency for getting ticker data
#return JSON string with Ticker Data
def getTickerData(self, currency):
    """Fetch the daily EUR market ticker time series for `currency` from
    Alpha Vantage.

    `self` is unused; it is kept so existing call sites that pass a dummy
    first argument (e.g. getTickerData(0, 'BCH')) keep working.

    Returns the 'Time Series (Digital Currency Daily)' dict from the JSON
    answer, or an empty string when the request fails or the key is absent
    (same contract as before, but without relying on an unbound local and a
    bare except to produce the failure result).
    """
    try:
        answer = requests.get('https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_DAILY&symbol=' +
                              currency + '&market=EUR&apikey=VEWJ1K6W3ENPI4NB').json()
    except Exception:
        # Network or JSON-decoding failure: report and fall back to ''.
        print("Something went wrong")
        return ''
    # Missing time-series key (e.g. API error payload) also yields ''.
    return answer.get('Time Series (Digital Currency Daily)', '')
#function return targeted tickers from input list
#inputs are
#tickers - source list of data
#target - behavior to match, could throw to 'pos' or 'neg'
def getSomeTickers(self, tickers, target):
    """Filter tickers by daily gain direction.

    `self` is unused (kept for the existing call convention). `tickers` maps
    timestamps to daily data dicts; `target` is 'pos' or 'neg'. Each matching
    data dict gets its timestamp stored under 'timeStamp' and is appended to
    the returned list. Empty input or an unknown target yields [].
    """
    matched = []
    # Guard against empty input and unsupported targets.
    if tickers == [] or target not in ('pos', 'neg'):
        return matched
    for stamp in tickers:
        data = tickers[stamp]
        # Daily gain: closing price minus opening price (EUR).
        gain = float(data['4a. close (EUR)']) - float(data['1a. open (EUR)'])
        if (target == 'pos' and gain > 0) or (target == 'neg' and gain < 0):
            # Tag the row with its own timestamp before collecting it.
            data['timeStamp'] = stamp
            matched.append(data)
    return matched
# getting BCH tickers (first argument is a dummy for the unused self param)
BCHtkrs = getTickerData(0, 'BCH')
# getting ETH tickers
ETHtkrs = getTickerData(0, 'ETH')
# getting LTC tickers
LTCtkrs = getTickerData(0, 'LTC')
# getting VOX tickers
VOXtkrs = getTickerData(0, 'VOX')
# abort when any download failed (getTickerData returns '' on failure)
if BCHtkrs == '' or ETHtkrs == '' or LTCtkrs == '' or VOXtkrs == '':
    sys.exit("Don't have data to proccess")
# maybe for better performance filtering has to be done with storing what already sorted
# finding positive ends (days that closed higher than they opened)
BCHtkrsPos = getSomeTickers(0, BCHtkrs, 'pos')
ETHtkrsPos = getSomeTickers(0, ETHtkrs, 'pos')
LTCtkrsPos = getSomeTickers(0, LTCtkrs, 'pos')
VOXtkrsPos = getSomeTickers(0, VOXtkrs, 'pos')
# adding currency mark to each row and put all in one by JSON way
# NOTE(review): serializing dicts with str() and swapping ' for " breaks as
# soon as a value contains an apostrophe; json.dumps would be safer.
positiveTickerData = '['
for row in BCHtkrsPos:
    row['currency'] = 'BCH'
    positiveTickerData = positiveTickerData + str(row) + ','
for row in ETHtkrsPos:
    row['currency'] = 'ETH'
    positiveTickerData = positiveTickerData + str(row) + ','
for row in LTCtkrsPos:
    row['currency'] = 'LTC'
    positiveTickerData = positiveTickerData + str(row) + ','
for row in VOXtkrsPos:
    row['currency'] = 'VOX'
    positiveTickerData = positiveTickerData + str(row) + ','
# drop the trailing comma and close the JSON array
positiveTickerData = positiveTickerData[:-1] + ']'
# replace char ' by "
positiveTickerData = positiveTickerData.replace("'", '"', -1)
# writing positive ends
f = open("positiveTickerData.txt", "w")
f.write(positiveTickerData)
f.close()
# finding negative ends (days that closed lower than they opened)
BCHtkrsNeg = getSomeTickers(0, BCHtkrs, 'neg')
ETHtkrsNeg = getSomeTickers(0, ETHtkrs, 'neg')
LTCtkrsNeg = getSomeTickers(0, LTCtkrs, 'neg')
VOXtkrsNeg = getSomeTickers(0, VOXtkrs, 'neg')
# adding currency mark to each row and put all in one by JSON way
negativeTickerData = '['
for row in BCHtkrsNeg:
    row['currency'] = 'BCH'
    negativeTickerData = negativeTickerData + str(row) + ','
for row in ETHtkrsNeg:
    row['currency'] = 'ETH'
    negativeTickerData = negativeTickerData + str(row) + ','
for row in LTCtkrsNeg:
    row['currency'] = 'LTC'
    negativeTickerData = negativeTickerData + str(row) + ','
for row in VOXtkrsNeg:
    row['currency'] = 'VOX'
    negativeTickerData = negativeTickerData + str(row) + ','
# drop the trailing comma and close the JSON array
negativeTickerData = negativeTickerData[:-1] + ']'
# replace char ' by "
negativeTickerData = negativeTickerData.replace("'", '"', -1)
# writing negative ends
f = open("negativeTickerData.txt", "w")
f.write(negativeTickerData)
f.close()
#!/usr/bin/env python
# -*- coding: utf8 -*-
import re
import argparse
from random import shuffle
from .amr_utils import write_to_file
from .var_free_amrs import delete_wiki, delete_amr_variables, single_line_convert
'''Script that augments the data to get the best AMR permutation based on word order
INPUT SHOULD INCLUDE ALIGNMENTS
It outputs the normal variable-free AMR as well as the best AMR permutation. Each AMR on a single line.
Sample input:
# ::id PROXY_AFP_ENG_20071228_0377.18 ::amr-annotator SDL-AMR-09 ::preferred
# ::tok Opium is the raw material used to make heroin .
# ::alignments 0-1.2 1-1.2.r 3-1.1 4-1 5-1.3 7-1.3.1 8-1.3.1.1
(m / material~e.4
:mod (r / raw~e.3)
:domain~e.1 (o / opium~e.0)
:ARG1-of (u / use-01~e.5
:ARG2 (m2 / make-01~e.7
:ARG1 (h / heroin~e.8)
:ARG2 o)))
Sample output best order (note that some nodes are swapped!):
(material :domain (opium) :mod (raw) :ARG1-of (use-01 :ARG2 (make-01 :ARG2 (opium) :ARG1 (heroin))))
Sample output sent:
Opium is the raw material used to make heroin .'''
def create_arg_parser():
    """Build and parse the command-line arguments for this script."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", required=True, type=str,
                        help="folder that contains to be processed files")
    parser.add_argument("-amr_ext", default='.txt', type=str,
                        help="AMR extension (default .txt) - should have alignments")
    parser.add_argument("-cut_off", default=15, type=int,
                        help="When to cut-off number of permutations")
    parser.add_argument("-double", action='store_true',
                        help="Add best permutation AMR AND normal AMR?")
    return parser.parse_args()
def process_var_line(line, var_dict):
    """Function that processes line with a variable in it. Returns the string without
    variables and the dictionary with var-name + var - value"""
    # Character-level state machine: a '(' starts collecting a variable
    # name, a '/' switches to collecting the variable's value.
    curr_var_name = False
    curr_var_value = False
    var_value = ''
    var_name = ''
    for idx, ch in enumerate(line):
        if ch == '/':  # we start adding the variable value
            curr_var_value = True
            curr_var_name = False
            var_value = ''
            continue
        if ch == '(':  # we start adding the variable name
            curr_var_name = True
            curr_var_value = False
            if var_value and var_name:  # we already found a name-value pair, add it now
                # strip closing parens and role markers before storing the value
                var_dict[var_name.strip()] = var_value.strip().replace(')', '').replace(' :name', '').replace(
                    ' :dayperiod', '').replace(' :mod', '')
                var_name = ''
            continue
        if curr_var_name:  # add to variable name
            var_name += ch
        if curr_var_value:  # add to variable value
            var_value += ch
    # store the last pair collected when the loop ran out of characters
    var_dict[var_name.strip()] = var_value.strip().replace(')', '')
    # delete '(var / ' prefixes from the line, keeping only '('
    deleted_var_string = re.sub(r'\((.*?/)', '(', line).replace('( ', '(')  # delete variables from line
    return deleted_var_string, var_dict
def get_tokenized_sentences(f):
    """Return the sentences of an AMR file, in file order.

    Picks up every '# ::snt' or '# ::tok' comment line and strips the
    marker, returning the bare sentence strings.

    Uses a context manager so the file handle is closed deterministically
    (the original opened the file inside a comprehension and leaked it).
    """
    with open(f, 'r') as fh:
        return [l.replace('# ::snt', '').replace('# ::tok', '').strip()
                for l in fh
                if l.startswith('# ::snt') or l.startswith('# ::tok')]
def remove_alignment(string):
    """Strip every '~e.<positions>' alignment marker from an AMR string."""
    return re.sub(r'~e\.[\d,]+', '', string)
def get_word_and_sense(line):
    """Character based extraction because I couldn't figure it out using regex.

    Extracts [word, sense] pairs from a ':name'-style AMR fragment, where
    words appear between double quotes and senses follow a '~' marker.
    Returns a list of [word(, sense)] lists, or [['', '']] when nothing
    usable is found.
    """
    quotes = 0
    adding = False
    comb = []
    word = ''
    if '"' in line:
        for idx, ch in enumerate(line):
            if ch == '"':
                quotes += 1
                # odd quote count means we are inside a quoted word
                if quotes % 2 != 0:
                    adding = True
                else:  # finished quotations
                    comb.append([word])
                    word = ''
                    adding = False
            elif ch == '~':
                if adding:
                    word += ch
                elif ':op' in "".join(
                        line[idx - 4:idx - 1]):  # bugfix for strange constructions, e.g. name :op1~e.4 "Algeria"~e.2
                    continue
                else:
                    # read up to 4 characters after '~' and keep the digits
                    # as the sense (token position) of the last word
                    if idx + 4 < len(line):
                        sense_line = line[idx + 1] + line[idx + 2] + line[idx + 3] + line[idx + 4]
                    else:
                        sense_line = line[idx + 1] + line[idx + 2] + line[idx + 3]
                    sense = int("".join([s for s in sense_line if s.isdigit()]))
                    try:
                        comb[-1].append(sense)
                    except:
                        # no word collected yet - best-effort, drop the sense
                        pass
            else:
                if adding:
                    word += ch
                else:
                    continue
    elif ':op' not in line:
        return [['', '']]
    else:
        try:
            # unquoted single ':op' value, e.g. ':op1 word~e.3'
            tmp = line.split()[2]
            sense, word = get_sense(tmp)
            comb = [[word, sense]]
        except:
            print('Strange occurrence in AMR, ignore')
            return [['', '']]
    return comb
def get_sense(word):
    """Split an aligned AMR token into (sense, word).

    'house~e.16' -> (16, 'house'). A multi-token alignment such as
    'house~e.4,12' yields the rounded average position (so the sense no
    longer points at one real token). Unaligned words get '' as sense.
    """
    if '~' not in word:
        return '', word
    pieces = word.split('~')
    raw = pieces[-1].split('.')[-1]  # e.g. '16' or '4,12'
    if ',' in raw:
        # average over all referenced token positions
        positions = [int(p) for p in raw.split(',')]
        sense = round(float(sum(positions)) / float(len(positions)), 0)
    else:
        sense = int(raw)
    return sense, pieces[0]
def find_words(line):
    """Finds all words in the AMR structure.

    Returns a list of [word, sense] pairs, one per node of the AMR chunk;
    nodes without alignment get '' as sense, and a chunk without any
    parenthesised node falls back to a single pair (or ['none-found', 0]).
    """
    comb = []
    spl_line = line.split('(')
    if '(' not in line:
        # flat chunk: take the second token if it carries an alignment
        if line.count('~') > 0 and len(line.split()) > 1:
            sense, word = get_sense(line.split()[1])
            return [[word, sense]]
        else:
            return [['none-found', 0]]
    else:
        for idx in range(1, len(spl_line)):
            if spl_line[idx]:
                # first token after '(' is the node's concept word
                word = spl_line[idx].strip().split()[0].replace(')', '')
                if word == 'name':  # name gets special treatment by AMRs
                    cut_word = spl_line[idx].split(')')[0]
                    comb += get_word_and_sense(cut_word)
                else:
                    sense, word = get_sense(word)
                    num_digits = sum(c.isdigit() for c in word)
                    if word.count(
                            '-') == 1 and num_digits < 3 and num_digits > 0:  # tricky: we want to change break-01 to break, but do not want to screw up dates (08-09-2016 or 28-10)
                        word = word.split('-')[0]
                    comb.append([word, sense])
        for idx in range(len(comb)):
            if len(comb[idx]) < 2:
                comb[idx].append('')  # add empty sense
        return comb
def matching_words(permutations):
    """Return, for each permutation, its list of [word, sense] pairs.

    Permutations for which find_words returns nothing are skipped.
    """
    all_found = []
    for per in permutations:
        found_words = find_words(per)
        if found_words:
            # reuse the already-computed result instead of calling
            # find_words a second time (the original recomputed it)
            all_found.append(found_words)
    return all_found
def calc_distance(l):
    """Word-order distance of a [word, sense] list.

    Senses are shifted so the smallest becomes zero, then the absolute
    difference between each item's sense and its list position is summed.
    Lower means the order matches the sentence better.
    """
    offset = min((pair[1] for pair in l if pair[1] != ''), default=0)
    shifted = [[pair[0], pair[1] - offset] for pair in l if pair[1] != '']
    total = 0
    for position, (_, sense) in enumerate(shifted):
        total += abs(sense - position)
    return total
def calc_distance_full_amr(l):
    """Distance between the current order of sensed items and their ideal
    (sense-sorted) order: sum over items of |current index - sorted index|."""
    sensed = [item for item in l if (item[1] != '' and len(item) > 1)]
    ideal_order = sorted(sensed, key=lambda pair: pair[1])
    total = 0
    for current_pos, item in enumerate(sensed):
        # how far each item sits from where the sorted order would put it
        total += abs(current_pos - ideal_order.index(item))
    return total
def do_swap(w_list1, w_list2):
    """Return True when putting w_list2 before w_list1 gives a strictly
    smaller word-order distance, i.e. the two chunks should be swapped."""
    return calc_distance(w_list2 + w_list1) < calc_distance(w_list1 + w_list2)
def filter_colons(part):
    """Split an AMR chunk on ':' while keeping non-argument colons intact.

    Timestamps (08:30) and URLs (http://site.com) contain colons that do
    not start a new AMR argument; a segment only starts a new part when
    its first character is alphabetic. Other segments - including empty
    ones produced by '::' - are glued back onto the previous part.
    """
    new_parts = []
    for idx, segment in enumerate(part.split(':')):
        if idx == 0:
            new_parts.append(segment)
        elif segment and segment[0].isalpha():
            new_parts.append(segment)
        else:
            # not a real argument boundary - reattach to the previous part.
            # The `segment and` guard also fixes an IndexError the original
            # raised on empty segments such as those produced by '::'.
            new_parts[-1] += ':' + segment
    return new_parts
def get_add_string(search_part):
    """Get the initial permutations and add_string.

    Scans the search part character by character, splitting it into
    top-level ':arg (...)' chunks ("permutations") by tracking the
    parenthesis depth. Returns (left-over add_string, permutations).
    """
    paren_count = 0
    start_adding = False
    permutations = []
    add_string = ''
    for idx, ch in enumerate(search_part):
        if ch == '(':  # parenthesis found
            if start_adding:
                add_string += ch
            paren_count += 1
        elif ch == ':':
            # a colon starts (or continues) a chunk we want to collect
            start_adding = True
            add_string += ch
        elif ch == ')':
            paren_count -= 1
            if start_adding:
                add_string += ch
            if paren_count == 0:  # we closed one of the permutations now
                permutations.append(add_string.strip())
                add_string = ''
        elif start_adding:
            add_string += ch
    # trailing chunk without closing parenthesis still counts
    if add_string and ':' in add_string:
        permutations.append(add_string.replace(')', '').strip())
    # re-balance each chunk's parentheses
    for idx, p in enumerate(permutations):
        while permutations[idx].count(')') < permutations[idx].count('('):
            permutations[idx] += ')'
    # permutate without brackets (e.g. :op1 "hoi" :op2 "hai" :op3 "ok"
    for p in permutations:
        if ')' not in p or '(' not in p:
            if p.count(':') > 2:
                # bracket-less multi-argument chunk: split on the colons instead
                p_split = p.split(':')[1:]
                new_perms = [':' + x.strip() for x in p_split]
                return add_string, new_perms
    return add_string, permutations
def get_keep_string(new_parts, level):
    """Split the filtered parts into the piece we keep as-is and the piece
    we will search for permutations. Only level 1 keeps no leading colon."""
    head = ":".join(new_parts[:1])
    if level > 1:
        head = ':' + head
    tail = ':' + ":".join(new_parts[1:])
    return head, tail
def combine_permutations(permutations, cut_off):
    """Cap the number of permutations at cut_off.

    When the list is too long it is shuffled and everything from position
    cut_off-1 onwards is folded into one final space-joined permutation.
    """
    if len(permutations) <= cut_off:
        return permutations
    shuffle(permutations)
    head, tail = permutations[:cut_off - 1], permutations[cut_off - 1:]
    return head + [" ".join(tail)]
def change_possible(part):
    """A part can only be permuted when it contains ':' arguments that are
    not merely the colon of a URL scheme."""
    has_colon = ':' in part
    only_url_colon = part.count(':') == 1 and ('http:' in part or 'https:' in part)
    return has_colon and not only_url_colon
def get_permutations(part, level, sent_amr, all_perms, type_script, cut_off):
    """Function that returns the permutations in the best order.

    Returns (permutations, keep_string, all_perms). In 'order' mode the
    sibling chunks are bubble-sorted towards the sentence's word order;
    in 'prune' mode duplicate chunks are filtered out instead.
    """
    part = part[1:]  # make life easier by skipping first '(' or ':'
    sent_words = [w.lower() for w in sent_amr.split()]  # get all words in sentence in lower case
    if not change_possible(part):  # if there is nothing to change then we return
        if level == 1:
            return [part], '', all_perms
        else:
            return [':' + part], '', all_perms
    new_parts = filter_colons(part)  # remove non-arguments that have a colon such as timestamps and websites
    keep_string, search_part = get_keep_string(new_parts, level)
    add_string, permutations = get_add_string(search_part)
    permutations = combine_permutations(permutations, cut_off)
    word_list = matching_words(permutations)  # find the list of lists that contain word-sense pairs
    # Two possibilities here, ordering or pruning. This script only does ordering, delete_double_args.py does pruning and uses this function.
    if type_script == 'prune':
        permutations_set = []
        for p in permutations:
            if p in permutations_set:  # remove all nodes with same parent
                continue
            elif p not in all_perms:
                permutations_set.append(p)
            elif all_perms.count(p) < 2:  # if we saw the node twice, stop adding
                permutations_set.append(p)
            # record every processed node so the count above reflects how
            # often it has been seen across calls
            all_perms.append(p)
        return permutations_set, keep_string, all_perms
    else:
        if len(word_list) != len(
                permutations):  # something strange is going on here, just ignore it and do nothing to avoid errors
            print('Strange AMR part')
            all_perms += permutations
            return permutations, keep_string, all_perms
        else:
            # bubble sort: repeatedly swap adjacent chunks when the swap
            # lowers the word-order distance
            for p in range(len(permutations)):
                for idx in range(len(permutations) - 1):
                    if do_swap(word_list[idx], word_list[
                            idx + 1]):  # permuting takes place here, check if swapping results in better order
                        permutations[idx], permutations[idx + 1] = permutations[idx + 1], permutations[idx]
                        word_list[idx], word_list[idx + 1] = word_list[idx + 1], word_list[idx]
            all_perms += permutations
            return permutations, keep_string, all_perms
def do_string_adjustments(permutations_new, keep_string2):
    """Glue the kept string and the reordered permutations back together,
    then balance the parentheses by appending ')' as needed."""
    joined = '{0} {1} '.format(keep_string2, " ".join(permutations_new))
    missing = joined.count('(') - joined.count(')')
    return joined + ')' * max(missing, 0)
def create_final_line(final_string):
    """Do final adjustments for line: normalise spacing, balance
    parentheses, strip alignments and repair layout glitches."""
    # NOTE(review): replace(' ', ' ') is a no-op as written - it looks like
    # it was meant to collapse double spaces; confirm against the original.
    add_to = final_string.replace(' ', ' ').strip()
    while ' )' in add_to:
        add_to = add_to.replace(' )', ')')
    add_to = fix_paren(add_to)
    add_to = remove_alignment(add_to)
    add_to = add_to.replace('):', ') :').replace(' :)', ')').replace(': :', ':')  # fix some layout stuff
    return add_to
def fix_paren(string):
    """Append ')' until the parentheses in the string are balanced."""
    deficit = string.count('(') - string.count(')')
    return string + ')' * max(deficit, 0)
def get_best_perm(permutations, keep_str, sent, final_string, all_perms, type_script, cut_off):
    """This must also be possible recursive - I tried...

    Descends up to 11 nesting levels (2..12), reordering the chunks at
    each level with get_permutations, then reassembles the AMR string
    bottom-up, balancing parentheses at every step.
    """
    for indx2, p2 in enumerate(permutations):
        permutations_2, keep_string2, all_perms = get_permutations(p2, 2, sent, all_perms, type_script, cut_off)
        for indx3, p3 in enumerate(permutations_2):
            permutations_3, keep_string3, all_perms = get_permutations(p3, 3, sent, all_perms, type_script, cut_off)
            for indx4, p4 in enumerate(permutations_3):
                permutations_4, keep_string4, all_perms = get_permutations(p4, 4, sent, all_perms, type_script, cut_off)
                for indx5, p5 in enumerate(permutations_4):
                    permutations_5, keep_string5, all_perms = get_permutations(p5, 5, sent, all_perms, type_script,
                                                                               cut_off)
                    for indx6, p6 in enumerate(permutations_5):
                        permutations_6, keep_string6, all_perms = get_permutations(p6, 6, sent, all_perms, type_script,
                                                                                   cut_off)
                        for indx7, p7 in enumerate(permutations_6):
                            permutations_7, keep_string7, all_perms = get_permutations(p7, 7, sent, all_perms,
                                                                                       type_script, cut_off)
                            for indx8, p8 in enumerate(permutations_7):
                                permutations_8, keep_string8, all_perms = get_permutations(p8, 8, sent, all_perms,
                                                                                           type_script, cut_off)
                                for indx9, p9 in enumerate(permutations_8):
                                    permutations_9, keep_string9, all_perms = get_permutations(p9, 9, sent, all_perms,
                                                                                               type_script, cut_off)
                                    for indx10, p10 in enumerate(permutations_9):
                                        permutations_10, keep_string10, all_perms = get_permutations(p10, 10, sent,
                                                                                                     all_perms,
                                                                                                     type_script,
                                                                                                     cut_off)
                                        for indx11, p11 in enumerate(permutations_10):
                                            permutations_11, keep_string11, all_perms = get_permutations(p11, 11, sent,
                                                                                                         all_perms,
                                                                                                         type_script,
                                                                                                         cut_off)
                                            for indx12, p12 in enumerate(permutations_11):
                                                permutations_12, keep_string12, all_perms = get_permutations(p12, 12,
                                                                                                             sent,
                                                                                                             all_perms,
                                                                                                             type_script,
                                                                                                             cut_off)
                                                # deepest level reached: reassemble and bubble the
                                                # string back up one level at a time
                                                add_string = do_string_adjustments(permutations_12, keep_string12)
                                                keep_string11 += add_string.replace(' ', ' ')
                                            keep_string10 += fix_paren(keep_string11)
                                        keep_string9 += fix_paren(keep_string10)
                                    keep_string8 += fix_paren(keep_string9)
                                keep_string7 += fix_paren(keep_string8)
                            keep_string6 += fix_paren(keep_string7)
                        keep_string5 += fix_paren(keep_string6)
                    keep_string4 += fix_paren(keep_string5)
                keep_string3 += fix_paren(keep_string4)
            keep_string2 += fix_paren(keep_string3)
        final_string += fix_paren(keep_string2)
    final_string = fix_paren(final_string)
    return final_string
def process_file_best(amrs, sent_amrs, cut_off):
    """Permute each AMR so it best matches its sentence's word order.

    Returns (permuted_amrs, cleaned_original_amrs) and prints how many
    AMRs actually changed.
    """
    permuted = []
    assert len(amrs) == len(sent_amrs)
    for idx, amr in enumerate(amrs):
        if amr.count(':') > 1:
            # only attempt a permutation when there is something to permute
            perms, keep, _ = get_permutations(amr, 1, sent_amrs[idx], [], 'order', cut_off)
            best = get_best_perm(perms, '(' + keep, sent_amrs[idx], '(' + keep, [], 'order', cut_off)
            permuted.append(create_final_line(best))
        else:
            # nothing to reorder: keep the AMR, minus alignment information
            permuted.append(remove_alignment(amr))
    # clean the originals the same way so the comparison below is fair
    for idx in range(len(amrs)):
        amrs[idx] = remove_alignment(amrs[idx].replace(' )', ')'))
    changed = sum(1 for old, new in zip(amrs, permuted) if old != new)
    print('Changed {0} out of {1} amrs'.format(changed, len(amrs)))
    return permuted, amrs
def preprocess(f_path):
    """Delete wiki links and variables from the AMR file and put each AMR
    on a single line. Returns (sentences, variable-free AMRs)."""
    without_wiki = delete_wiki(f_path)
    without_vars = delete_amr_variables(without_wiki)
    old_amrs, sent_amrs = single_line_convert(without_vars)
    return sent_amrs, old_amrs
def create_output(f, old_amrs, new_amrs, sent_amrs, cli_args=None):
    """Print output to the correct files - also keep no-var AMR.

    Parameters
    ----------
    f: input file path, used to derive the output file names.
    old_amrs / new_amrs / sent_amrs: parallel lists of lines to write.
    cli_args: parsed command-line arguments (needs .amr_ext and .double).
        Defaults to the module-level ``args`` so existing callers keep
        working; passing it explicitly removes the hidden dependency on
        the global set in the __main__ block.
    """
    if cli_args is None:
        cli_args = args  # backwards compatible fallback to the global
    permuted_amr, no_var_amr, sent_file, double_sent_file, double_amr_file = \
        get_filenames(f, cli_args.amr_ext)
    write_to_file(old_amrs, no_var_amr)
    write_to_file(new_amrs, permuted_amr)
    write_to_file(sent_amrs, sent_file)
    if cli_args.double:
        # also write doubled training data: normal + permuted AMRs
        write_to_file(old_amrs + new_amrs, double_amr_file)
        write_to_file(sent_amrs + sent_amrs, double_sent_file)
def get_filenames(f, amr_ext):
    """Derive all output file names from the input file name by swapping
    its AMR extension for the respective output extension."""
    def swap(ext):
        return f.replace(amr_ext, ext)

    return (swap('.tf.best'), swap('.tf'), swap('.sent'),
            swap('.sent.double'), swap('.tf.double'))
if __name__ == '__main__':
    args = create_arg_parser()
    print('Processing {0}'.format(args.f))
    # strip wiki links / variables and get sentences + single-line AMRs
    sent_amrs, old_amrs = preprocess(args.f)
    # reorder each AMR to best match its sentence's word order
    new_amrs, old_amrs = process_file_best(old_amrs, sent_amrs, args.cut_off)
    # write .tf, .tf.best, .sent (and the .double variants when -double is set)
    create_output(args.f, old_amrs, new_amrs, sent_amrs)
|
import rigidbody3d
import numpy
def restitutionCoefficient():
    """Fill the per-collision coefficient-of-restitution array in place.

    Collisions with body 0 get CoR 0.0, body 1 gets 0.5 and any other
    body (the asserts allow up to index 2) gets 1.0. The second collision
    index is expected to be -1.
    """
    cor = rigidbody3d.cor()
    ncollisions = rigidbody3d.numCollisions()
    assert cor.shape == (ncollisions,)
    # dispatch table replaces the if/elif chain; default covers index 2
    cor_by_body = {0: 0.0, 1: 0.5}
    for col_idx in range(ncollisions):
        indices = rigidbody3d.collisionIndices(col_idx)
        assert indices[0] <= 2
        assert indices[1] == -1
        cor[col_idx] = cor_by_body.get(indices[0], 1.0)
|
import os

import requests
from bs4 import BeautifulSoup

SITE_NAME = 'programmers'
SITE_URL = 'https://programmers.co.kr/'
# Template for the login form; the hidden CSRF fields are filled in after
# fetching the login page.
SITE_LOGIN_FORM = {
    'utf8': '',
    'authenticity_token': '',
    'user[email]': '',
    'user[password]': '',
    'button': '',
}

with requests.Session() as sess:
    # Fetch the login page to obtain the hidden CSRF token fields.
    req = sess.get(SITE_URL + 'users/login')
    # print(req.headers)
    print(req.ok, req.status_code)
    soup = BeautifulSoup(req.text, 'lxml')
    login_form = SITE_LOGIN_FORM.copy()
    login_form['utf8'] = soup.select_one('#new_user > input[type=hidden]:nth-child(1)').get('value')
    login_form['authenticity_token'] = soup.select_one('#new_user > input[type=hidden]:nth-child(2)').get('value')
    # SECURITY: credentials were previously hard-coded in source control;
    # read them from the environment instead.
    login_form['user[email]'] = os.environ['PROGRAMMERS_EMAIL']
    login_form['user[password]'] = os.environ['PROGRAMMERS_PASSWORD']
    req = sess.post(SITE_URL + 'users/login', data=login_form)
    print(req.headers)
    print(req.ok, req.status_code)
    print(req.text)
|
from itertools import product
from dataclasses import dataclass
from typing import Optional
from pathlib import Path
import numpy as np
import pandas as pd
from .meta import GRMMeta
@dataclass()
class GRMOutputs:
    """Container for Graded Response Model estimation outputs.

    Holds the discrimination (a), difficulty (b) and ability (t) arrays
    plus optional per-level prior parameters, and converts each of them
    into tidy DataFrames / CSV files.
    """
    # Metadata describing items, persons, grades and levels.
    meta: GRMMeta
    # Discrimination parameters, shape (n_items,).
    a_array: np.ndarray
    # Difficulty parameters, shape (n_items, n_grades - 1).
    b_array: np.ndarray
    # Person abilities, shape (n_persons,).
    t_array: np.ndarray
    # Optional per-level prior mean/std, shape (n_levels, n_grades - 1).
    level_mean_array: Optional[np.ndarray] = None
    level_std_array: Optional[np.ndarray] = None

    def __post_init__(self):
        # Validate array shapes against the metadata. When level_mean_array
        # is given, level_std_array must be given as well.
        assert self.a_array.shape == (self.meta.n_items,)
        assert self.b_array.shape == (self.meta.n_items, self.meta.n_grades - 1)
        assert self.t_array.shape == (self.meta.n_persons,)
        if self.level_mean_array is not None:
            assert self.level_mean_array.shape == (self.meta.n_levels, self.meta.n_grades - 1)
            assert self.level_std_array.shape == (self.meta.n_levels, self.meta.n_grades - 1)

    def make_a_df(self) -> pd.DataFrame:
        """
        :return: columns=(item, a)
        """
        return pd.DataFrame().assign(item=self.meta.item_category.categories, a=self.a_array)

    def make_b_df(self) -> pd.DataFrame:
        """
        One row per (item, grade) pair, grades running from 2 to n_grades:

        |item|grade| b |
        |foo | 2 |   |
        |foo | 3 |   |
        |foo | 4 |   |
        |bar | 2 |   |
        |bar | 3 |   |
        |bar | 4 |   |
        ...

        :return: columns=(item, grade, b)
        """
        return pd.DataFrame(
            product(self.meta.item_category.categories,
                    np.arange(2, self.meta.n_grades + 1)),
            columns=["item", "grade"])\
            .assign(b=self.b_array.flatten())

    def make_t_df(self) -> pd.DataFrame:
        """
        :return: columns=(person, t)
        """
        return pd.DataFrame().assign(person=self.meta.person_category.categories, t=self.t_array)

    def make_level_df(self) -> pd.DataFrame:
        """
        One row per (level, grade) pair, grades running from 2 to n_grades:

        |level|grade|mean|std|
        | foo |  2  |    |   |
        | foo |  3  |    |   |
        | foo |  4  |    |   |
        | bar |  2  |    |   |
        | bar |  3  |    |   |
        | bar |  4  |    |   |
        ...

        :return: columns=(level, grade, mean, std)
        """
        return pd.DataFrame(
            product(self.meta.level_category.categories,
                    np.arange(2, self.meta.n_grades + 1)),
            columns=["level", "grade"]) \
            .assign(mean=self.level_mean_array.flatten(),
                    std=self.level_std_array.flatten())

    def to_csvs(self, dir_path: str):
        # Write a.csv / b.csv / t.csv (and b_prior.csv when priors exist)
        # into dir_path, creating the directory if needed.
        dir_path = Path(dir_path)
        dir_path.mkdir(parents=True, exist_ok=True)
        self.make_a_df().to_csv(dir_path / "a.csv", index=False)
        self.make_b_df().to_csv(dir_path / "b.csv", index=False)
        self.make_t_df().to_csv(dir_path / "t.csv", index=False)
        if self.level_mean_array is not None:
            self.make_level_df().to_csv(dir_path / "b_prior.csv", index=False)
|
import os
import cgi
import json
import urllib.parse
import urllib.request
from typing import Any, List, Mapping, Iterable
from pathlib import Path
from functools import partial
from collections import defaultdict
import aiohttp
import parfive
import astropy.units as u
from astropy.table import TableAttribute
from astropy.time import Time
from sunpy import config
from sunpy.net import attr
from sunpy.net import attrs as sattrs
from sunpy.net.base_client import (BaseClient, QueryResponseRow,
QueryResponseTable, convert_row_to_table)
from . import attrs as dattrs
from .attr_walker import walker
__all__ = ['DKISTQueryReponse', 'DKISTDatasetClient']
class DefaultMap(defaultdict):
    """A ``defaultdict`` whose lookups fall back to the key itself.

    Used to map API field names to human friendly column names while
    passing any unmapped name through unchanged.
    """

    def __missing__(self, key):
        # Unknown keys map to themselves (nothing is inserted).
        return key
class DKISTQueryResponseTable(QueryResponseTable):
    """
    Results of a DKIST Dataset search.
    """
    # Define some class properties to better format the results table.
    # Columns hidden from the default table display (data is still present).
    hide_keys: List[str] = ["Storage Bucket", "Full Stokes", "asdf Filename", "Recipie Instance ID",
                            "Recipie Run ID", "Recipe ID", "Movie Filename", "Level 0 Frame count",
                            "Creation Date", "Last Updated", "Experiment IDs", "Proposal IDs",
                            "Preview URL"]

    # These keys are shown in the repr and str representations of this class.
    _core_keys = TableAttribute(default=["Start Time", "End Time", "Instrument", "Wavelength"])

    # Map the keys in the response to human friendly ones.
    # NOTE(review): "Recipie" is misspelled but matches hide_keys above;
    # renaming it would change the public column names.
    key_map: Mapping[str, str] = DefaultMap(None, {
        "asdfObjectKey": "asdf Filename",
        "boundingBox": "Bounding Box",
        "browseMovieObjectKey": "Movie Filename",
        "browseMovieUrl": "Preview URL",
        "bucket": "Storage Bucket",
        "contributingExperimentIds": "Experiment IDs",
        "contributingProposalIds": "Proposal IDs",
        "createDate": "Creation Date",
        "datasetId": "Dataset ID",
        "datasetSize": "Dataset Size",
        "embargoEndDate": "Embargo End Date",
        "endTime": "End Time",
        "experimentDescription": "Experiment Description",
        "exposureTime": "Exposure Time",
        "filterWavelengths": "Filter Wavelengths",
        "frameCount": "Number of Frames",
        "hasAllStokes": "Full Stokes",
        "instrumentName": "Instrument",
        "isDownloadable": "Downloadable",
        "isEmbargoed": "Embargoed",
        "observables": "Observables",
        "originalFrameCount": "Level 0 Frame count",
        "primaryExperimentId": "Primary Experiment ID",
        "primaryProposalId": "Primary Proposal ID",
        "qualityAverageFriedParameter": "Average Fried Parameter",
        "qualityAveragePolarimetricAccuracy": "Average Polarimetric Accuracy",
        "recipeId": "Recipe ID",
        "recipeInstanceId": "Recipie Instance ID",
        "recipeRunId": "Recipie Run ID",
        "startTime": "Start Time",
        "stokesParameters": "Stokes Parameters",
        "targetTypes": "Target Types",
        "updateDate": "Last Updated",
        "wavelengthMax": "Wavelength Max",
        "wavelengthMin": "Wavelength Min",
    })

    @staticmethod
    def _process_table(results: "DKISTQueryResponseTable") -> "DKISTQueryResponseTable":
        # Convert raw API strings into rich types: Time for date columns and
        # Quantity for columns with known physical units.
        times = ["Creation Date", "End Time", "Start Time", "Last Updated", "Embargo End Date"]
        units = {"Exposure Time": u.s, "Wavelength Min": u.nm,
                 "Wavelength Max": u.nm, "Dataset Size": u.Gibyte,
                 "Filter Wavelengths": u.nm}

        for colname in times:
            if colname not in results.colnames:
                continue  # pragma: no cover
            # only convert fully-populated columns; Time cannot hold None
            if not any([v is None for v in results[colname]]):
                results[colname] = Time(results[colname])

        for colname, unit in units.items():
            if colname not in results.colnames:
                continue  # pragma: no cover
            results[colname] = u.Quantity(results[colname], unit=unit)

        if results:
            # collapse the min/max columns into a single (n, 2) Wavelength column
            results["Wavelength"] = u.Quantity([results["Wavelength Min"], results["Wavelength Max"]]).T
            results.remove_columns(("Wavelength Min", "Wavelength Max"))

        return results

    @classmethod
    def from_results(cls, results: Iterable[Mapping[str, Any]], *, client: "DKISTDatasetClient") -> "DKISTQueryResponseTable":
        """
        Construct the results table from the API results.
        """
        new_results = defaultdict(list)
        for result in results:
            for key, value in result.items():
                # key_map falls back to the raw key for unmapped fields
                new_results[cls.key_map[key]].append(value)

        data = cls._process_table(cls(new_results, client=client))
        data = data._reorder_columns(cls._core_keys.default, remove_empty=True)
        return data
class DKISTDatasetClient(BaseClient):
    """
    Search DKIST datasets and retrieve metadata files describing them.

    .. note::

        This class is not intended to be used directly. You should use `~sunpy.net.Fido` to search and download data, see :ref:`sunpy:fido_guide`.
    """
    # Endpoints can be overridden via environment variables, e.g. to point
    # at a non-production data centre.
    _BASE_SEARCH_URL = os.environ.get("DKIST_DATASET_ENDPOINT", "https://api.dkistdc.nso.edu/datasets/v1")
    _BASE_DOWNLOAD_URL = os.environ.get("DKIST_DOWNLOAD_ENDPOINT", "https://api.dkistdc.nso.edu/download")

    def search(self, *args) -> DKISTQueryResponseTable:
        """
        Search for datasets provided by the DKIST data centre.
        """
        # AND the attrs together, then let the walker expand them into one
        # set of URL parameters per request to issue.
        query = attr.and_(*args)
        queries = walker.create(query)

        results = []
        for url_parameters in queries:
            query_string = urllib.parse.urlencode(url_parameters)
            full_url = f"{self._BASE_SEARCH_URL}?{query_string}"
            data = urllib.request.urlopen(full_url)
            data = json.loads(data.read())
            results += data["searchResults"]

        return DKISTQueryResponseTable.from_results(results, client=self)

    @staticmethod
    def _make_filename(path: os.PathLike, row: QueryResponseRow, resp: aiohttp.ClientResponse, url: str):
        """
        Generate a filename for a file based on the Content Disposition header.
        """
        # The fallback name is just the dataset id.
        name = f"{row['Dataset ID']}.asdf"
        if resp:
            cdheader = resp.headers.get("Content-Disposition", None)
            if cdheader:
                _, params = cgi.parse_header(cdheader)
                name = params.get('filename', "")
        return str(path).format(file=name, **row.response_block_map)

    @convert_row_to_table
    def fetch(self, query_results: QueryResponseTable, *, path: os.PathLike = None, downloader: parfive.Downloader, **kwargs):
        """
        Fetch asdf files describing the datasets.

        Parameters
        ----------
        query_results:
            Results to download.
        path : `str` or `pathlib.Path`, optional
            Path to the download directory
        downloader : `parfive.Downloader`
            The download manager to use.
        """
        # This logic is being upstreamed into Fido hopefully in 2.1rc4
        # Normalise path into a template containing the '{file}' placeholder.
        if path is None:
            path = Path(config.get('downloads', 'download_dir')) / '{file}'  # pragma: no cover
        elif isinstance(path, (str, os.PathLike)) and '{file}' not in str(path):
            path = Path(path) / '{file}'  # pragma: no cover
        else:
            path = Path(path)  # pragma: no cover
        path = path.expanduser()

        if not len(query_results):
            return

        for row in query_results:
            url = f"{self._BASE_DOWNLOAD_URL}/asdf?datasetId={row['Dataset ID']}"
            # Set max_splits here as the metadata streamer doesn't like accept-range at the moment.
            downloader.enqueue_file(url, filename=partial(self._make_filename, path, row), max_splits=1)

    @classmethod
    def _can_handle_query(cls, *query) -> bool:
        # This enables the client to register what kind of searches it can
        # handle, to prevent Fido using the incorrect client.
        from sunpy.net import attrs as a

        supported = set(walker.applymm.registry)
        # This function is only called with arguments of the query where they are assumed to be ANDed.
        supported.remove(attr.AttrAnd)
        query_attrs = set(type(x) for x in query)

        # The DKIST client only requires that one or more of the support attrs be present.
        if not query_attrs.issubset(supported) or len(query_attrs.intersection(supported)) < 1:
            return False

        # Reject queries whose attr *values* are outside what DKIST serves.
        for x in query:
            if isinstance(x, a.Instrument):
                # TODO: Obviously "inst" shouldn't be here, but it's in the test data.
                if x.value.lower() not in ("inst", "vbi", "vtf", "visp", "cryo-nirsp", "dl-nirsp"):
                    return False

            if isinstance(x, a.Physobs):
                if x.value.lower() not in ("stokes_parameters", "intensity", "spectral_axis", "temporal_axis"):
                    return False

            if isinstance(x, a.Level):
                if x.value not in (1, "1", "one"):
                    return False

        return True

    @classmethod
    def _attrs_module(cls):
        # Tells sunpy where to find this client's custom attrs.
        return 'dkist', 'dkist.net.attrs'

    @classmethod
    def register_values(cls):
        """
        Known search values for DKIST data, currently manually specified.
        """
        return {
            sattrs.Provider: [("DKIST", "Data provided by the DKIST Data Center")],
            # instrumentNames
            sattrs.Instrument: [("VBI", "Visible Broadband Imager"),
                                ("VISP", "Visible Spectro-Polarimeter"),
                                ("VTF", "Visible Tunable Filter"),
                                ("Cryo-NIRSP", "Cryogenic Near Infrared SpectroPolarimiter"),
                                ("DL-NIRSP", "Diffraction-Limited Near-InfraRed Spectro-Polarimeter")],
            # hasAllStokes
            sattrs.Physobs: [("stokes_parameters", "Stokes I, Q, U and V are provided in the dataset"),
                             ("intensity", "Only Stokes I is provided in the dataset.")],
            # isEmbargoed
            dattrs.Embargoed: [("True", "Data is subject to access restrictions."),
                               ("False", "Data is not subject to access restrictions.")],
            # targetTypes
            #dattrs.TargetType: [], # This should be a controlled list.
            # Completeness
            sattrs.Level: [("1", "DKIST data calibrated to level 1.")],
        }
|
from dymos.transcriptions.common.timeseries_output_comp import TimeseriesOutputCompBase
class SolveIVPTimeseriesOutputComp(TimeseriesOutputCompBase):
    """
    Class definition for SolveIVPTimeseriesOutputComp.

    Timeseries output component for the SolveIVP (simulation) transcription;
    it passes the collected timeseries values straight through to outputs.

    Parameters
    ----------
    **kwargs : dict
        Dictionary of optional arguments.
    """

    def initialize(self):
        """
        Declare component options.
        """
        super(SolveIVPTimeseriesOutputComp, self).initialize()

        self.options.declare('output_nodes_per_seg', default=None, types=(int,), allow_none=True,
                             desc='If None, results are provided at the all nodes within each'
                                  'segment. If an int (n) then results are provided at n '
                                  'equally distributed points in time within each segment.')

    def setup(self):
        """
        Define the independent variables as output variables.
        """
        grid_data = self.options['input_grid_data']

        if self.options['output_nodes_per_seg'] is None:
            self.num_nodes = grid_data.num_nodes
        else:
            self.num_nodes = grid_data.num_segments * self.options['output_nodes_per_seg']

        for (name, kwargs) in self._timeseries_outputs:
            units = kwargs['units']
            # Bug fix: desc was previously copied from kwargs['units'], so
            # every output's description showed its units instead.
            desc = kwargs['desc']
            shape = kwargs['shape']
            self._add_output_configure(name, units, shape, desc)

    def _add_output_configure(self, name, units, shape, desc):
        """
        Add a single timeseries output.

        Can be called by parent groups in configure.

        Parameters
        ----------
        name : str
            name of the variable in this component's namespace.
        shape : int or tuple or list or None
            Shape of this variable, only required if val is not an array.
            Default is None.
        units : str or None
            Units in which the output variables will be provided to the component during execution.
            Default is None, which means it has no units.
        desc : str
            description of the timeseries output variable.
        """
        num_nodes = self.num_nodes

        input_name = f'all_values:{name}'
        self.add_input(input_name,
                       shape=(num_nodes,) + shape,
                       units=units, desc=desc)

        output_name = name
        self.add_output(output_name,
                        shape=(num_nodes,) + shape,
                        units=units, desc=desc)

        self._vars[name] = (input_name, output_name, shape)

    def compute(self, inputs, outputs):
        """
        Compute component outputs.

        Parameters
        ----------
        inputs : `Vector`
            `Vector` containing inputs.
        outputs : `Vector`
            `Vector` containing outputs.
        """
        # Straight pass-through from the collected inputs to the outputs.
        for (input_name, output_name, _) in self._vars.values():
            outputs[output_name] = inputs[input_name]
|
#!/usr/bin/python2
import time
def format_time(t):
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
if __name__ == "__main__":
    # Parenthesised form works under both Python 2 (parenthesised
    # expression statement) and Python 3 (function call); the bare
    # `print x` statement was Python-2 only.
    print(format_time(time.time()))
# Copyright 2016-2021 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Operational tuning costs that prevent erratic dispatch in case of degeneracy.
Tuning costs can be applied to hydro up and down ramps (gen_hydro
and gen_hydro_must_take operational types) and to storage up-ramps (
stor operational type) in order to force smoother dispatch.
"""
import os
from pyomo.environ import Var, NonNegativeReals, Constraint, Expression
def add_model_components(m, d, scenario_directory, subproblem, stage):
    """Add export tuning-cost variables, constraints, and expressions.

    A tuning cost can be applied on exports from a load zone to
    prioritize meeting local load first.
    """
    # Non-negative export variable per load zone and timepoint.
    m.LZ_Exports = Var(m.LOAD_ZONES, m.TMPS, within=NonNegativeReals)

    def positive_exports_rule(mod, lz, tmp):
        # LZ_Exports must cover the net flow leaving the zone.
        net_exports = (
            mod.Transmission_from_Zone_MW[lz, tmp]
            - mod.Transmission_to_Zone_MW[lz, tmp]
        )
        return mod.LZ_Exports[lz, tmp] >= net_exports

    m.Positive_Exports_Tuning_Cost_Constraint = Constraint(
        m.LOAD_ZONES, m.TMPS, rule=positive_exports_rule
    )

    def export_penalty_rule(mod, lz, tmp):
        # Penalty is proportional to exports at the zone's per-MWh cost.
        return mod.LZ_Exports[lz, tmp] * mod.export_penalty_cost_per_mwh[lz]

    m.Export_Penalty_Cost = Expression(
        m.LOAD_ZONES, m.TMPS, rule=export_penalty_rule
    )
|
from exception import InvalidPasswordException, InvalidUserNameException, NotLoggedInErrorException, \
NotPermittedErrorException, PermissionErrorException
from model import Authenticator, Authorizor
# Module-level singletons used by AuthMenu below.
authenticator = Authenticator()
authorizor = Authorizor(authenticator)
# Seed one demo user and two permissions; "tuan" is only granted
# "test program", so "change" should be refused for this account.
authenticator.add_user("tuan", "tuan123haha")
authorizor.add_permission("test program")
authorizor.add_permission("change program")
authorizor.permit_user("test program", "tuan")
class AuthMenu:
    """Interactive console menu driving the module-level authenticator
    and authorizor.

    Commands: login, test, change, quit.
    """

    def __init__(self):
        # Name of the currently logged-in user (None until login succeeds).
        self.username = None
        # Dispatch table mapping a typed command to its handler.
        self.menu_map = {
            "login": self.login,
            "test": self.test,
            "change": self.change,
            "quit": self.quit,
        }

    def login(self):
        """Prompt for credentials until authentication succeeds."""
        logged_in = False
        while not logged_in:
            username = input('username: ')
            password = input('password: ')
            try:
                logged_in = authenticator.login(username, password)
            except InvalidUserNameException:
                print('Sorry! that username does not exists')
            except InvalidPasswordException:
                print('Sorry! incorrect password')
            else:
                self.username = username

    def is_permitted(self, permission):
        """Return True when the logged-in user holds *permission*."""
        try:
            authorizor.check_permission(permission, self.username)
        except NotLoggedInErrorException as e:
            print('{} is not logged'.format(e.username))
            return False
        except NotPermittedErrorException as e:
            print('{} can not {}'.format(e.username, permission))
            return False
        except PermissionErrorException as e:
            print(e.username)
            return False
        else:
            return True

    def test(self):
        if self.is_permitted('test program'):
            print('Testing program now ...')

    def change(self):
        if self.is_permitted('change program'):
            print('Change program now ...')

    def quit(self):
        raise SystemExit()

    def menu(self):
        """Main loop: read a command and dispatch it until quit."""
        try:
            answer = ''
            while True:
                print("""
please enter command:
\tlogin\tLogin
\ttest\tTest
\tchange\tChange
\tquit\tQuit""")
                answer = input('enter command : ').lower()
                try:
                    func = self.menu_map[answer]
                except KeyError:
                    # BUG FIX: this used to be ``raise print(...)``, which
                    # raises "TypeError: exceptions must derive from
                    # BaseException" because print returns None.  Just
                    # report the invalid option and loop again.
                    print('{} is not valid option'.format(answer))
                else:
                    func()
        finally:
            print('Thank for testing auth')
AuthMenu().menu()
|
#by sandakelum priyamantha
from pytube import YouTube,Playlist
import os
from tkinter import *
import tkinter as tk
def make_folder():
    """Ensure ``~/Videos/y2v`` exists and return its path.

    Returns
    -------
    str
        Path of the download directory (created if missing).
    """
    home = os.path.expanduser("~")
    path = os.path.join(home, "Videos")
    path_to_y2v = os.path.join(path, "y2v")
    # makedirs(exist_ok=True) replaces the old bare ``except: pass`` around
    # os.mkdir: it still tolerates an already-existing folder, also creates
    # a missing "Videos" parent, and no longer hides unrelated failures
    # (e.g. permission errors).
    os.makedirs(path_to_y2v, exist_ok=True)
    return path_to_y2v
def get_list(link, path, text_obb):
    """Download every video of a YouTube playlist into a subfolder.

    Parameters
    ----------
    link : str
        Playlist URL.
    path : str
        Subfolder name created under the y2v download directory.
    text_obb : tkinter Text widget
        Progress log output.
    """
    text_obb.insert(END, "starting....\n")
    text_obb.see(END)
    path_ = make_folder()
    path_ = os.path.join(path_, path)
    try:
        os.mkdir(path_)
    except OSError:
        # Folder already present (or not creatable); report and continue.
        text_obb.insert(END, str(path_) + " is allready exists\n")
        text_obb.see(END)
    try:
        list_ = Playlist(link).video_urls
        text_obb.insert(END, str(len(list_)) + " videos in this list.\n")
        text_obb.see(END)
    except Exception:
        text_obb.insert(END, "[*]invalide link\n")
        text_obb.see(END)
        # BUG FIX: previously execution fell through and the loop below
        # raised NameError because ``list_`` was never assigned; bail out.
        return
    text_obb.insert(END, "\nDownloading path : %s\n" % (str(path_)))
    text_obb.see(END)
    for video in list_:
        try:
            opened_video = YouTube(video)
            load_video = opened_video.streams.first()
            text_obb.insert(END, str(opened_video.title) + " downloading...\n")
            text_obb.see(END)
            load_video.download(path_)
            text_obb.insert(END, "Done..\n")
            text_obb.see(END)
        except Exception:
            # Best-effort per video: report and move on to the next one.
            text_obb.insert(END, "con't open this link" + str(video) + "\n")
            text_obb.see(END)
def get_one(link, text_obb):
    """Download a single YouTube video into the y2v folder.

    Parameters
    ----------
    link : str
        Video URL.
    text_obb : tkinter Text widget
        Progress log output.
    """
    text_obb.insert(END, "starting...\n")
    text_obb.see(END)
    path = make_folder()
    text_obb.insert(END, "\nDownloading path : %s\n" % (str(path)))
    text_obb.see(END)
    try:
        opened_video = YouTube(link)
        load_video = opened_video.streams.first()
        text_obb.insert(END, str(opened_video.title) + " downloading...\n")
        text_obb.see(END)
        load_video.download(path)
        text_obb.insert(END, "Done....\n ")
        text_obb.see(END)
    except Exception:
        # Narrowed from a bare ``except:``; also dropped the unused
        # ``file_size`` computation.
        text_obb.insert(END, "[*]invalide link.\n")
# run
# Pre-create the download folder once at import time.
make_folder()
_base_ = [
"./_base_/models/faster_rcnn_r50_fpn.py",
"./_base_/datasets/visdrone_dataset.py",
"./_base_/default_runtime.py",
]
gpu_count = 1
batch_size = 2
epoch_count = 12
class_label_count = 12
model = {"roi_head": {"bbox_head": {"num_classes": class_label_count}}}
gpu_ids = range(gpu_count)
data = {"samples_per_gpu": batch_size, "workers_per_gpu": batch_size}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 26 11:55:32 2021
@author: Diloz
"""
import cv2
import numpy as np
import pandas as pd
from scipy import ndimage
from scipy import optimize
import matplotlib.pylab as plt
from joblib import Parallel, delayed
from Detect_colorChecker import paralell_search
import plotting
# colrLable = ['blu', 'grn', 'red']
# lbl_illum = ['blu_L', 'grn_L', 'red_L']
# Smallest representable float increment; used to avoid division by zero
# (see addXYZ below).
eps = np.finfo(float).eps
#%% Obtain Checker
def getChecker(potCrp, cardBW, search_scale, search_degree, num_cores):
    """Locate the colour-checker card template inside a cropped image.

    Template-matches the edge map of *potCrp* against the synthetic card
    grid *cardBW* at every candidate rotation (in parallel) and returns
    the best crop together with its bounding box and rotation.

    Returns
    -------
    (checker_crop, startY, endY, startX, endX, deg)
    """
    H_card, W_card = cardBW.shape[:2]
    gray = cv2.cvtColor(potCrp, cv2.COLOR_BGR2GRAY)
    edged = cv2.Canny(gray, 30, 30)
    # One template search per candidate rotation angle.
    results = Parallel(n_jobs=num_cores, backend="threading")(
        delayed(paralell_search)(search_scale, deg, edged, H_card, W_card, cardBW)
        for deg in search_degree
    )
    scores = [res[0] for res in results]
    best = int(np.argmax(scores))
    _, best_loc, ratio, _scale = results[best]
    deg = search_degree[best]
    # Map the match location back to the original image scale.
    startX = int(round(best_loc[0] * ratio))
    startY = int(round(best_loc[1] * ratio))
    endX = int(round((best_loc[0] + W_card) * ratio))
    endY = int(round((best_loc[1] + H_card) * ratio))
    checkerImg = ndimage.rotate(potCrp, deg) if deg != 0 else potCrp
    return checkerImg[startY:endY, startX:endX, :], startY, endY, startX, endX, deg
#%% 3.1 function:Samples the 70% of the area of every patch on the Macbeth colorChercker
# Calculate the mean RGB color of each patch area
# Calculate the color difference between the sample and ideal value in the LAB color space
# the mode of the sample for the three image channels (RGB)
def cardSampler(colorChecker, checkIMG, colrLable, posn, rows, cols):
    """Sample the central ~70% of every patch of the Macbeth checker crop.

    Returns
    -------
    cardImg : uint8 mosaic of the sampled patch crops.
    cardLabel : same-shape mosaic filled with the reference channel
        values per patch.
    colorTable : the reference table extended with per-patch channel
        means (``<chan>_<posn>``) and the LAB colour difference column
        (``delta_<posn>``).
    """
    colorTable = colorChecker.loc[:, ['position', 'label', 'red', 'grn', 'blu']]
    rowSiz = checkIMG.shape[0] / rows
    colSiz = checkIMG.shape[1] / cols
    # Half-width of the sampling window: 70% of a patch height, halved.
    winSiz = int((rowSiz * 0.70) / 2)
    # NOTE(review): mosaic dimensions hard-code a 4x6 grid rather than
    # using rows/cols — confirm callers always pass rows=4, cols=6.
    cardImg = np.zeros(((2*4*winSiz), (2*6*winSiz), 3), dtype=np.uint8)
    cardLabel = np.ones_like(cardImg)
    for cntRow in range(rows):
        crow = int((cntRow + 0.5) * rowSiz)  # patch centre row
        for cntCol in range(cols):
            ccol = int((cntCol + 0.5) * colSiz)  # patch centre column
            img_patch = checkIMG[crow-winSiz : crow+winSiz, ccol-winSiz : ccol+winSiz]
            cardImg[int(2*winSiz*cntRow): int(2*winSiz*(1+cntRow)),
                    int(2*winSiz*cntCol): int(2*winSiz*(1+cntCol))] = img_patch
            # Linear patch index, row-major over the 4x6 grid.
            indx = int((cntRow*6) + cntCol)
            labl_patch = np.ones_like(img_patch)
            # Mean LAB of the sample vs the reference LAB -> Euclidean delta.
            img_LAB = cv2.cvtColor(img_patch, cv2.COLOR_BGR2LAB)
            L = np.mean(img_LAB[:, :, 0])
            A = np.mean(img_LAB[:, :, 1])
            B = np.mean(img_LAB[:, :, 2])
            diff = colorChecker.loc[indx, ['L', 'A', 'B']].values - [L, A, B]
            colorTable.loc[indx, 'delta' + '_' + posn] = np.sqrt(np.sum(diff**2))
            for cnt2 in range(len(colrLable)):
                y0_patch = []
                # y0_patch = colorChecker[cnt2, indx]
                y0_patch = colorChecker.loc[indx, colrLable[cnt2]]
                # Reference value broadcast over the whole patch window.
                labl_patch[:, :, cnt2] = labl_patch[:, :, cnt2]*y0_patch
                colorTable.loc[indx, colrLable[cnt2] + '_' + posn] = np.mean(img_patch[:, :, cnt2])
            cardLabel[int(2*winSiz*cntRow): int(2*winSiz*(1+cntRow)),
                      int(2*winSiz*cntCol): int(2*winSiz*(1+cntCol))] = labl_patch
    return cardImg, cardLabel, colorTable
#%%
def illum_check(colorChecker, df_main, imgSRC, check_rot, colrLable, imgName):
    """Estimate illuminants for every colour checker annotated in an image.

    For each row of *df_main* named 'Checker': crop a window around the
    annotation, locate the card with getChecker, sample its patches with
    cardSampler, then estimate the illuminant two ways (grey-world mean
    and least-squares fit).

    Returns
    -------
    (checkers, checkers_fit, colorTableAll) : grey-world estimates,
        fitted estimates (with XYZ/LAB columns added), and the combined
        per-patch sample table.
    """
    tableLable = colorChecker.loc[:, ['position', 'label', 'red', 'grn', 'blu']].copy()
    colorTableAll = tableLable.copy()
    # lbl_illum = [sub + "_L" for sub in colrLable]
    height = imgSRC.shape[0]
    width = imgSRC.shape[1]
    padLeft = 20
    imgPad = np.zeros((height + 100, width + 100 + padLeft, 3), np.uint8)  # Image padding
    imgPad[0:height, padLeft:width+padLeft] = imgSRC.copy()
    # winCol, winRow = 150, 100
    # Extra margin added to each annotated crop window.
    winRow, winCol = 70, 200
    df = df_main.copy()
    checkers = df.loc[(df['name']=='Checker')].reset_index(drop=True)
    checkers_fit = checkers.copy()
    for cnt2 in range(len(checkers)):
        colorTable = pd.DataFrame([])
        posn, name, top, left, wd, ht = checkers.loc[cnt2, ['position', 'name',
                                                            'top', 'left', 'width', 'height']].values
        bottom = top + winRow + ht
        right = left+winCol + wd
        potCrp = imgPad[top : bottom, left : right]
        # Build a synthetic black/white 4x6 grid template for matching.
        sqrsiz = 48
        cardBW = np.zeros(((sqrsiz * 4) +1 , (sqrsiz *6) + 1), dtype=np.uint8)
        for row in range(4 + 1):
            lineRow = int(sqrsiz * row)
            cardBW[lineRow, :] = 255
        for col in range(6 + 1):
            lineCol = int(sqrsiz * col)
            cardBW[:, lineCol] = 255
        # Search grid: 5 scales in [0.8, 1.6], 11 angles in [-2.5, 2.5] deg.
        search_scale = np.linspace(0.8, 1.6, 5)
        search_degree = np.linspace(-2.5,2.5,11)
        num_cores = 1
        checkerImg, _, _, _, _, _ = getChecker(potCrp, cardBW,
                                               search_scale, search_degree, num_cores)
        # Apply the caller-supplied global rotation correction.
        checkerImg = ndimage.rotate(checkerImg, check_rot)
        cardImg, cardLabel, colorTable = cardSampler(colorChecker, checkerImg, colrLable, posn, rows=4, cols=6)
        colorTableAll = pd.concat([colorTableAll, colorTable], axis=1)
        colorTableAll = colorTableAll.loc[:, ~colorTableAll.columns.duplicated()]
        illum_img = illum_greyWorld(colorTable, colrLable, posn)
        # illum_fit, offset_fit = illum_fitting(cardImg, cardLabel, colrLable)
        illum_fit = illum_fitting(cardImg, cardLabel, colrLable)
        checkers.loc[checkers["position"] ==posn, colrLable] = illum_img
        checkers_fit.loc[checkers_fit["position"] ==posn, colrLable] = illum_fit
    # magClor = checkers_fit.loc[:, ['blu', 'grn', 'red']].sum(axis=1).values
    # checkers_fit.loc[:, "magClor"] = magClor
    # for cnt3 in colrLable:
    #     checkers_fit.loc[:, cnt3 + "_perc"] = checkers_fit.loc[:, cnt3] / checkers_fit.loc[:, "magClor"]
    checkers_fit = addXYZ(checkers_fit)
    return checkers, checkers_fit, colorTableAll
#%%
def illum_greyWorld(colorTable, colrLable, posn):
    """Grey-world illuminant estimate from the grey patches.

    Averages the sampled ``<chan>_<posn>`` columns over the rows whose
    ``position`` is >= 19 (the greyscale row of the checker) and returns
    one mean per channel as a numpy array.
    """
    sampled_cols = ["%s_%s" % (chan, posn) for chan in colrLable]
    grey_rows = colorTable[colorTable['position'] >= 19]
    return grey_rows[sampled_cols].mean().values
#%%
def illum_fitting(imgColr, lablColr, colrLable):
    """Per-channel least-squares illuminant gain fit.

    Fits a gain g per channel so that (g / 255) * reference ~= observed,
    returning one fitted gain per entry of *colrLable*.
    """
    observed = imgColr.copy()
    reference = lablColr.copy()

    def residuals(params, ref, obs):
        # Linear model: predicted pixel = (gain / 255) * reference value.
        return (params[0] / 255) * ref - obs

    gains = []
    for chan in range(len(colrLable)):
        ref = reference[:, :, chan].flatten()
        obs = observed[:, :, chan].flatten()
        # Sort by reference value (keeps the fit input deterministic).
        order = np.argsort(ref)
        ref = ref[order]
        obs = obs[order]
        fitted, _success = optimize.leastsq(
            residuals, (1.0, 0.0), args=(ref, obs), maxfev=20000
        )
        gains.append(fitted[0])
    return gains
#%%
def addXYZ(dfColor):
    """Append CIE XYZ, chromaticity xyz, and LAB columns to an RGB table.

    Works on a copy of *dfColor*; rows are converted one at a time from
    their ['red', 'grn', 'blu'] values.
    """
    df = dfColor.copy()
    # sRGB (D65) linear RGB -> XYZ conversion matrix.
    M = np.array([[0.4124564, 0.3575761, 0.1804375],
                  [0.2126729, 0.7151522, 0.0721750],
                  [0.0193339, 0.1191920, 0.9503041]])
    for sq in range(len(df)):
        RGB_val = df.loc[sq, ['red', 'grn', 'blu']].values
        df.loc[sq, ['X', 'Y', 'Z']] = np.matmul(M, RGB_val)
        mag = df.loc[sq, ['X', 'Y', 'Z']].sum()
        # Guard against divide-by-zero for a pure-black row.
        if mag == 0: mag = eps
        df.loc[sq, ['x', 'y', 'z']] = df.loc[sq, ['X', 'Y', 'Z']].values/mag
        # df.loc[sq, ['X', 'Y', 'Z']] = cv2.cvtColor( np.uint8([[RGB_val]] ), cv2.COLOR_RGB2XYZ)[0][0]
        # NOTE(review): cv2.COLOR_RGB2LAB assumes the values are ordered
        # R,G,B and fit in uint8 — confirm upstream gains stay in [0, 255].
        df.loc[sq, ['L', 'A', 'B']] = cv2.cvtColor( np.uint8([[RGB_val]] ), cv2.COLOR_RGB2LAB)[0][0]
    # plt.scatter(df.loc[:, 'x'], df.loc[:, 'y'])
    # plt.xlim(0, 0.8)
    # plt.ylim(0, 0.8)
    # plt.show()
    return df
# These classes implement a doctest runner plugin for nose, a "known failure"
# error class, and a customized TestProgram for NumPy.
# Because this module imports nose directly, it should not
# be used except by nosetester.py to avoid a general NumPy
# dependency on nose.
import os
import doctest
import nose
from nose.plugins import doctests as npd
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from nose.plugins.base import Plugin
from nose.util import src
import numpy
from nosetester import get_package_name
import inspect
# Some of the classes in this module begin with 'Numpy' to clearly distinguish
# them from the plethora of very similar names from nose/unittest/doctest
#-----------------------------------------------------------------------------
# Modified version of the one in the stdlib, that fixes a python bug (doctests
# not found in extension modules, http://bugs.python.org/issue3158)
class NumpyDocTestFinder(doctest.DocTestFinder):
    """DocTestFinder that also locates doctests in extension modules.

    Modified version of the stdlib finder working around
    http://bugs.python.org/issue3158 (doctests not found in extension
    modules).
    """

    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            #print '_fm C1' # dbg
            return True
        elif inspect.isfunction(object):
            #print '_fm C2' # dbg
            # NOTE(review): func_globals / im_class / im_func below are the
            # Python 2 spellings (__globals__ etc. in Python 3); this module
            # targets the Python 2 era nose stack.
            return module.__dict__ is object.func_globals
        elif inspect.isbuiltin(object):
            #print '_fm C2-1' # dbg
            return module.__name__ == object.__module__
        elif inspect.isclass(object):
            #print '_fm C3' # dbg
            return module.__name__ == object.__module__
        elif inspect.ismethod(object):
            # This one may be a bug in cython that fails to correctly set the
            # __module__ attribute of methods, but since the same error is easy
            # to make by extension code writers, having this safety in place
            # isn't such a bad idea
            #print '_fm C3-1' # dbg
            return module.__name__ == object.im_class.__module__
        elif inspect.getmodule(object) is not None:
            #print '_fm C4' # dbg
            #print 'C4 mod',module,'obj',object # dbg
            return module is inspect.getmodule(object)
        elif hasattr(object, '__module__'):
            #print '_fm C5' # dbg
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            #print '_fm C6' # dbg
            return True # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")

    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        doctest.DocTestFinder._find(self, tests, obj, name, module,
                                    source_lines, globs, seen)

        # Below we re-run pieces of the above method with manual modifications,
        # because the original code is buggy and fails to correctly identify
        # doctests in extension modules.

        # Local shorthands
        from inspect import isroutine, isclass, ismodule, isfunction, \
             ismethod

        # Look for tests in a module's contained objects.
        if ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                valname1 = '%s.%s' % (name, valname)
                if ( (isroutine(val) or isclass(val))
                     and self._from_module(module, val) ):
                    self._find(tests, val, valname1, module, source_lines,
                               globs, seen)

        # Look for tests in a class's contained objects.
        if isclass(obj) and self._recurse:
            #print 'RECURSE into class:',obj # dbg
            for valname, val in obj.__dict__.items():
                #valname1 = '%s.%s' % (name, valname) # dbg
                #print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg
                # Special handling for staticmethod/classmethod.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).im_func

                # Recurse to methods, properties, and nested classes.
                if ((isfunction(val) or isclass(val) or
                     ismethod(val) or isinstance(val, property)) and
                      self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
# second-chance checker; if the default comparison doesn't
# pass, then see if the expected output string contains flags that
# tell us to ignore the output
class NumpyOutputChecker(doctest.OutputChecker):
    """Output checker with second-chance comparisons for numpy doctests.

    If the default comparison fails: accept anything when the expected
    text contains "#random"; otherwise retry after normalizing
    endianness markers and the default 32/64-bit int dtype spellings.
    """

    def check_output(self, want, got, optionflags):
        ok = doctest.OutputChecker.check_output(self, want, got,
                                                optionflags)
        if ok:
            return ok
        if "#random" in want:
            return True
        # Normalize endianness so that big-endian machines don't fail
        # all the doctests wholesale: pretend everything is little endian.
        got = got.replace("'>", "'<")
        want = want.replace("'>", "'<")
        # Fold 32-bit and 64-bit default int dtype strings into "int".
        for width in (4, 8):
            token = "'<i%d'" % width
            got = got.replace(token, "int")
            want = want.replace(token, "int")
        return doctest.OutputChecker.check_output(self, want, got,
                                                  optionflags)
# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
# its constructor that blocks non-default arguments from being passed
# down into doctest.DocTestCase
class NumpyDocTestCase(npd.DocTestCase):
    """DocTestCase that actually forwards constructor arguments.

    nose's DocTestCase constructor blocks non-default arguments from
    reaching doctest.DocTestCase, so this subclass stores the
    nose-specific attributes itself and calls the stdlib base
    constructor directly.
    """

    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None, obj=None, result_var='_'):
        self._nose_obj = obj
        self._result_var = result_var
        # Deliberately bypass npd.DocTestCase.__init__ (it drops these
        # keyword arguments) and go straight to the stdlib class.
        doctest.DocTestCase.__init__(self, test,
                                     optionflags=optionflags,
                                     setUp=setUp, tearDown=tearDown,
                                     checker=checker)
print_state = numpy.get_printoptions()
class NumpyDoctest(npd.Doctest):
    """Nose doctest plugin customized for numpy / scipy.

    Installs the custom finder, output checker, and test-case classes
    defined above, and gives every doctest a numpy-flavoured namespace.
    """

    name = 'numpydoctest'   # call nosetests with --with-numpydoctest
    score = 1000    # load late, after doctest builtin

    # always use whitespace and ellipsis options for doctests
    doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS

    # files that should be ignored for doctests
    doctest_ignore = ['generate_numpy_api.py',
                      'scons_support.py',
                      'setupscons.py',
                      'setup.py']

    # Custom classes; class variables to allow subclassing
    doctest_case_class = NumpyDocTestCase
    out_check_class = NumpyOutputChecker
    test_finder_class = NumpyDocTestFinder

    # Don't use the standard doctest option handler; hard-code the option values
    def options(self, parser, env=os.environ):
        """Register options; force doctest collection in test files."""
        Plugin.options(self, parser, env)
        # Test doctests in 'test' files / directories. Standard plugin default
        # is False
        self.doctest_tests = True
        # Variable name; if defined, doctest results stored in this variable in
        # the top-level namespace.  None is the standard default
        self.doctest_result_var = None

    def configure(self, options, config):
        """Set up finder/parser; evict the builtin doctest plugin."""
        # parent method sets enabled flag from command line --with-numpydoctest
        Plugin.configure(self, options, config)
        self.finder = self.test_finder_class()
        self.parser = doctest.DocTestParser()
        if self.enabled:
            # Pull standard doctest out of plugin list; there's no reason to run
            # both.  In practice the Unplugger plugin above would cover us when
            # run from a standard numpy.test() call; this is just in case
            # someone wants to run our plugin outside the numpy.test() machinery
            config.plugins.plugins = [p for p in config.plugins.plugins
                                      if p.name != 'doctest']

    def set_test_context(self, test):
        """ Configure `test` object to set test context

        We set the numpy / scipy standard doctest namespace

        Parameters
        ----------
        test : test object
            with ``globs`` dictionary defining namespace

        Returns
        -------
        None

        Notes
        -----
        `test` object modified in place
        """
        # set the namespace for tests
        pkg_name = get_package_name(os.path.dirname(test.filename))

        # Each doctest should execute in an environment equivalent to
        # starting Python and executing "import numpy as np", and,
        # for SciPy packages, an additional import of the local
        # package (so that scipy.linalg.basic.py's doctests have an
        # implicit "from scipy import linalg" as well.
        #
        # Note:    __file__ allows the doctest in NoseTester to run
        # without producing an error
        test.globs = {'__builtins__': __builtins__,
                      '__file__': '__main__',
                      '__name__': '__main__',
                      'np': numpy}
        # add appropriate scipy import for SciPy tests
        if 'scipy' in pkg_name:
            p = pkg_name.split('.')
            p2 = p[-1]
            test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])

    # Override test loading to customize test context (with set_test_context
    # method), set standard docstring options, and install our own test output
    # checker
    def loadTestsFromModule(self, module):
        """Yield customized doctest cases found in *module*."""
        if not self.matches(module.__name__):
            npd.log.debug("Doctest doesn't want module %s", module)
            return
        try:
            tests = self.finder.find(module)
        except AttributeError:
            # nose allows module.__test__ = False; doctest does not and
            # throws AttributeError
            return
        if not tests:
            return
        tests.sort()
        module_file = src(module.__file__)
        for test in tests:
            if not test.examples:
                continue
            if not test.filename:
                test.filename = module_file
            # Set test namespace; test altered in place
            self.set_test_context(test)
            yield self.doctest_case_class(test,
                                          optionflags=self.doctest_optflags,
                                          checker=self.out_check_class(),
                                          result_var=self.doctest_result_var)

    # Add an afterContext method to nose.plugins.doctests.Doctest in order
    # to restore print options to the original state after each doctest
    def afterContext(self):
        numpy.set_printoptions(**print_state)

    # Ignore NumPy-specific build files that shouldn't be searched for tests
    def wantFile(self, file):
        bn = os.path.basename(file)
        if bn in self.doctest_ignore:
            return False
        return npd.Doctest.wantFile(self, file)
class Unplugger(object):
    """Nose plugin that removes another named plugin late in loading.

    By default the builtin "doctest" plugin is removed.
    """
    name = 'unplugger'
    enabled = True  # always enabled
    score = 4000  # load late in order to be after builtins

    def __init__(self, to_unplug='doctest'):
        self.to_unplug = to_unplug

    def options(self, parser, env):
        pass

    def configure(self, options, config):
        # Drop the named plugin from the active plugin list.
        survivors = [p for p in config.plugins.plugins
                     if p.name != self.to_unplug]
        config.plugins.plugins = survivors
class KnownFailureTest(Exception):
    """Raised inside a test to mark it as a known, expected failure."""
class KnownFailure(ErrorClassPlugin):
    '''Plugin that installs a KNOWNFAIL error class for the
    KnownFailureClass exception.  When KnownFailureTest is raised,
    the exception will be logged in the knownfail attribute of the
    result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
    exception will not be counted as an error or failure.'''
    enabled = True
    # ErrorClass maps the exception type onto nose's result bookkeeping.
    knownfail = ErrorClass(KnownFailureTest,
                           label='KNOWNFAIL',
                           isfailure=False)

    def options(self, parser, env=os.environ):
        """Add --no-knownfail (also settable via NOSE_WITHOUT_KNOWNFAIL)."""
        env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
        parser.add_option('--no-knownfail', action='store_true',
                          dest='noKnownFail', default=env.get(env_opt, False),
                          help='Disable special handling of KnownFailureTest '
                               'exceptions')

    def configure(self, options, conf):
        """Disable the plugin when --no-knownfail was given."""
        if not self.can_configure:
            return
        self.conf = conf
        disable = getattr(options, 'noKnownFail', False)
        if disable:
            self.enabled = False
# Class allows us to save the results of the tests in runTests - see runTests
# method docstring for details
class NumpyTestProgram(nose.core.TestProgram):
    """TestProgram that keeps the result object around after the run."""

    def runTests(self):
        """Run Tests. Returns true on success, false on failure, and
        sets self.success to the same value.

        Because nose currently discards the test result object, but we need
        to return it to the user, override TestProgram.runTests to retain
        the result
        """
        if self.testRunner is None:
            self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
                                                       verbosity=self.config.verbosity,
                                                       config=self.config)
        # Let plugins substitute their own runner if they want to.
        plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
        if plug_runner is not None:
            self.testRunner = plug_runner
        self.result = self.testRunner.run(self.test)
        self.success = self.result.wasSuccessful()
        return self.success
|
import numpy as np
import scipy.io as sio
np.random.seed(0)
VGG_MEAN = [103.939, 116.779, 123.68]
import os
import tensorflow as tf
import cv2
import json
from collections import namedtuple
from tqdm import tqdm
def read_mat(path):
    """Load and return the array stored at *path* (``.npy`` format)."""
    arr = np.load(path)
    return arr
def write_mat(path, m):
    """Save array *m* to *path* in ``.npy`` format."""
    np.save(path, m)
def read_ids(path):
    """Return the lines of *path* with trailing newlines stripped.

    The previous implementation opened the file without ever closing it
    (a file-handle leak); a ``with`` block guarantees release.
    """
    with open(path) as f:
        return [line.rstrip('\n') for line in f]
class Batch_Feeder:
    """Mini-batch provider for direction / distance-transform training data.

    Reads images plus precomputed distance-transform (``*DT*.npy``) and
    angle (``*angle*.npy``) ground truth from ``dataset_path/<subset>``,
    and polygon annotations from ``polygons.json`` at the dataset root.
    """

    def __init__(self, dataset_path, indices, subset, batchSize, padWidth=None, padHeight=None, flip=False, keepEmpty=True, train=True, img_shape=(384,384)):
        assert subset in ['train', 'val', 'test'], "wrong name of subset"
        self._epochs_completed = 0
        self._index_in_epoch = 0
        self._dataset_path = dataset_path
        self._indices = indices
        self._subset = subset
        self._train = train
        self._batchSize = batchSize
        self._padWidth = padWidth
        self._padHeight = padHeight
        self._flip = flip
        self._keepEmpty = keepEmpty
        self.img_shape = img_shape
        # Polygon annotations for the whole dataset, keyed by image name.
        mask_path = os.path.join(self._dataset_path, 'polygons.json')
        with open(mask_path) as f:
            self.polygons = json.load(f)
        # TO DO: implement shuffling
        # TO DO: support batch wise inference

    def set_paths(self):
        """Scan the subset folder and build (index, img, angle, dt) tuples."""
        self.root = os.path.join(self._dataset_path, self._subset)
        print('scanning {}'.format(self.root))
        self._paths = []
        # Sorted listings keep image and ground-truth files aligned by name.
        imgs = sorted([i for i in os.listdir(self.root) if i.endswith('.png')])
        gt_DT = sorted([i for i in os.listdir(self.root) if i.endswith('.npy') and 'DT' in i])
        gt_angle = sorted([i for i in os.listdir(self.root) if i.endswith('.npy') and 'angle' in i])
        # if self._train:
        # TO DO: support batch wise inference
        entry = namedtuple("gt", "index img angle dt")
        for index, (img, angle, dt) in enumerate(zip(imgs, gt_angle, gt_DT)):
            self._paths.append(entry(index, img, angle, dt))
        self.shuffle()
        # else:
        #     for id in idList:
        #         self._paths.append([id, imageDir + '/' + id + '_leftImg8bit.png',
        #                             ssDir + '/' + id + '_unified_ss.mat'])
        self._numData = len(self._paths)
        # Never ask for more samples than the subset contains.
        if self._numData < self._batchSize:
            self._batchSize = self._numData

    def shuffle(self):
        """Shuffle the sample list in place."""
        np.random.shuffle(self._paths)

    def next_batch(self):
        """Assemble one batch; returns (dirBatch, gtBatch, ssBatch) when
        training.

        NOTE(review): the flip branch and the non-training path reference
        names (imageBatch, weightBatch, ssMaskBatch) that are never
        assigned in this method and would raise NameError — confirm the
        intended code before calling with train=False or flip=True.
        """
        idBatch = []
        dirBatch = []
        # imageBatch = np.zeros((self._batchSize, self.img_shape[0], self.img_shape[1], 3), dtype=np.int32)
        # gtBatch = np.zeros((self._batchSize, self.img_shape[0], self.img_shape[1]), dtype=np.float32)
        # angleBatch = np.zeros((self._batchSize, self.img_shape[0], self.img_shape[1], 2), dtype=np.float32)
        # ssBatch = np.zeros((self._batchSize, self.img_shape[0], self.img_shape[1]))
        dirBatch = np.zeros((self._batchSize, self.img_shape[0], self.img_shape[1], 2), dtype=np.float32)
        gtBatch = np.zeros((self._batchSize, self.img_shape[0], self.img_shape[1]))
        ssBatch = np.zeros((self._batchSize, self.img_shape[0], self.img_shape[1]))
        tmp = 0
        if self._train:
            while(len(idBatch) < self._batchSize):
                current_tuple = self._paths[self._index_in_epoch]
                rgb = self.load_rgb(os.path.join(self.root, current_tuple.img))
                #angle = self.load_npy(os.path.join(self.root, current_tuple.angle))
                dt = self.load_npy(os.path.join(self.root, current_tuple.dt))
                # not using the calculated gt, check pount 3 of the paper, first equation
                t, t_norm = np.gradient(dt)
                unit_vector_angle = np.stack([t, t_norm], axis=-1)
                polygons = self.polygons[current_tuple.img]['polygons']
                mask = self.load_mask(rgb, polygons)
                dirBatch[tmp] = unit_vector_angle
                gtBatch[tmp] = dt
                ssBatch[tmp] = mask
                idBatch.append(current_tuple.index)
                tmp+=1
                # NOTE(review): slot counter resets one early
                # (== batchSize - 1), so the last slot gets overwritten —
                # confirm whether == self._batchSize was intended.
                if tmp==self._batchSize-1:
                    tmp=0
                self._index_in_epoch += 1
                if self._index_in_epoch == self._numData:
                    self._index_in_epoch = 0
                    self.shuffle()
            if self._flip and np.random.uniform() > 0.5:
                for i in range(len(imageBatch)):
                    for j in range(3):
                        imageBatch[i,:,:,j] = np.fliplr(imageBatch[i,:,:,j])
                    weightBatch[i] = np.fliplr(weightBatch[i])
                    ssBatch[i] = np.fliplr(ssBatch[i])
                    ssMaskBatch[i] = np.fliplr(ssMaskBatch[i])
                    for j in range(2):
                        gtBatch[i,:,:,j] = np.fliplr(gtBatch[i,:,:,j])
                    gtBatch[i,:,:,0] = -1 * gtBatch[i,:,:,0]
            return dirBatch, gtBatch, ssBatch
        else:
            pass
            # NOTE(review): imageBatch is undefined on this path.
            self._index_in_epoch += self._batchSize
            return imageBatch, ssBatch

    def total_samples(self):
        """Number of samples discovered by set_paths()."""
        return self._numData

    def image_scaling(self, rgb_in):
        """Subtract the VGG per-channel means and swap to BGR order."""
        if rgb_in.dtype == np.float32:
            rgb_in = rgb_in*255
        elif rgb_in.dtype == np.uint8:
            rgb_in = rgb_in.astype(np.float32)
        # VGG16 was trained using opencv which reads images as BGR, but skimage reads images as RGB
        rgb_out = np.zeros(rgb_in.shape).astype(np.float32)
        rgb_out[:,:,0] = rgb_in[:,:,2] - VGG_MEAN[2]
        rgb_out[:,:,1] = rgb_in[:,:,1] - VGG_MEAN[1]
        rgb_out[:,:,2] = rgb_in[:,:,0] - VGG_MEAN[0]
        return rgb_out

    def pad(self, data):
        """Zero-pad *data* (2-D or 3-D) up to (padHeight, padWidth)."""
        if self._padHeight and self._padWidth:
            if data.ndim == 3:
                npad = ((0,self._padHeight-data.shape[0]),(0,self._padWidth-data.shape[1]),(0,0))
            elif data.ndim == 2:
                npad = ((0, self._padHeight - data.shape[0]), (0, self._padWidth - data.shape[1]))
            padData = np.pad(data, npad, mode='constant', constant_values=0)
        else:
            padData = data
        return padData

    @staticmethod
    def load_rgb(path):
        # NOTE(review): imread returns BGR; COLOR_RGB2BGR performs the same
        # channel swap as BGR2RGB, so this yields RGB — confirm intent.
        return cv2.cvtColor(cv2.imread(path), cv2.COLOR_RGB2BGR)

    @staticmethod
    def load_mask(img, polygons):
        """
        Transforms polygons of a single image into a 2D binary numpy array
        :param img: just to get the corresponding shape of the output array
        :param polygons: - dict
        :return mask: numpy array with drawn over and touching polygons
        """
        mask = np.zeros([img.shape[0], img.shape[1]], dtype=np.uint8)
        for curr_pol in polygons:
            cv2.fillPoly(mask, [np.array(curr_pol, 'int32')], 255)
        return mask

    @staticmethod
    def load_npy(path):
        # Load a pickled/raw numpy array from disk.
        with open(path, 'rb') as f:
            depth = np.load(f)
        return depth
|
from launcher import Task
import json
import os
import argparse
import pprint
class ConfigureTask(Task):
    """Launcher task that builds a job configuration and writes it to disk."""

    name = 'configure'

    def __init__(self, args):
        self.args = args
        dataset = args["dataset"]
        # Per-dataset data and output locations.
        self.data_dir = os.path.join(os.path.abspath(args["data_dir"]), dataset)
        self.output_dir = os.path.join(os.path.abspath(args["output_dir"]), dataset)

    @staticmethod
    def arguments(parser) -> None:
        # Register the "configure" sub-command and its positional argument.
        sub = parser.add_parser(ConfigureTask.name,
                                help='Construct the model.')
        sub.add_argument(
            "dataset",
            type=str,
            help='the dataset to use when running the command.')

    @staticmethod
    def arguments_to_cli(args) -> str:
        return args["dataset"]

    def execute(self) -> None:
        """Build the config from the environment and persist it as JSON."""
        from segmenter.config import config_from_env

        self.job_config, self.job_hash = config_from_env(self.data_dir)
        pprint.pprint(self.job_config)
        print(self.job_hash)
        job_dir = os.path.join(self.output_dir, self.job_hash)
        os.makedirs(job_dir, exist_ok=True)
        config_location = os.path.join(job_dir, "config.json")
        with open(config_location, "w") as config_file:
            json.dump(self.job_config, config_file)
tasks = [ConfigureTask] |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Alstec.24xx.get_version
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
"""
"""
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
class Script(BaseScript):
    """Alstec.24xx version probe.

    Parses "show sysinfo" (newer firmware layout) and falls back to
    "show hardware" fields for older devices.
    """

    name = "Alstec.24xx.get_version"
    interface = IGetVersion
    cache = True

    # Newer firmware: platform/bootloader/version all in "show sysinfo".
    rx_ver = re.compile(
        r"^System Description(\s+|\.+)\s*(:|)\s*(?P<platform>\S+).+?"
        r"^Bootloader\sVersion\s+:\s+(CFE |U-Boot )?(?P<bootprom>\S+)(\s\(.+\)|)\s*\n"
        r"^OS Version.+\n"
        r"^Software version\s+:\s+(?P<version>\S+)\s*\n"
        r"Software type\s+:\s+(?P<fwt>\S+)\s*\n",
        re.MULTILINE,
    )
    # FIX: these three patterns were plain strings containing "\." and
    # "\S" — invalid escape sequences that raise DeprecationWarning and
    # will become a SyntaxError; raw strings are byte-identical regexes.
    rx_serial = re.compile(r"Serial Number\.+ (?P<serial>\S+)")
    rx_platform = re.compile(r"Machine Model\.+ (?P<platform>\S+)")
    rx_version = re.compile(r"Software Version\.+ (?P<version>\S+)")
    # NOTE(review): rx_ver2 is not referenced by execute() below — possibly
    # used by a subclass or dead; left in place.
    rx_ver2 = re.compile(
        r"^Machine Model\.+\s*(?P<platform>\S+)"
        r"^Serial Number\.+\s*(?P<serial>\S+).*"
        r"^Software Version\.+\s*(?P<version>\S+)"
        r"^Operating System\.+\s*(?P<os_version>.+)"
        r"^Network",
        re.MULTILINE | re.DOTALL,
    )

    def execute(self):
        """Return vendor/platform/version (and attributes) for the device."""
        v = self.cli("show sysinfo", cached=True)
        match = self.rx_ver.search(v)
        if match:
            r = {
                "vendor": "Alstec",
                "platform": match.group("platform"),
                "version": match.group("version"),
                "attributes": {
                    "Boot PROM": match.group("bootprom"),
                    "Firmware Type": match.group("fwt"),
                },
            }
            # Serial number lives in "show hardware" even on new firmware.
            v = self.cli("show hardware", cached=True)
            match = self.rx_serial.search(v)
            if match:
                r["attributes"]["Serial Number"] = match.group("serial")
        else:
            # Older firmware: everything comes from "show hardware".
            v = self.cli("show hardware", cached=True)
            r = {
                "vendor": "Alstec",
                "platform": self.rx_platform.search(v).group("platform"),
                "version": self.rx_version.search(v).group("version"),
            }
            match = self.rx_serial.search(v)
            if match:
                r["attributes"] = {}
                r["attributes"]["Serial Number"] = match.group("serial")
        return r
|
# -*- coding: utf-8 -*-
import pytest
import sys
import random
from sfm.timer import DateTimeTimer
import mongoengine
from mongoengine_mate import ExtendedDocument
# Collection name is suffixed with the interpreter version so parallel
# test runs on different Pythons do not clobber each other's data.
py_ver = "%s.%s" % (sys.version_info.major, sys.version_info.minor)
user_col_name = "user_%s" % py_ver
class User(ExtendedDocument):
    """Minimal user document used by the smart_update tests below."""
    # _id doubles as the MongoDB primary key.
    _id = mongoengine.IntField(primary_key=True)
    name = mongoengine.StringField()
    # Date-of-birth string; tests use the "YYYY-MM-DD" form.
    dob = mongoengine.StringField()
    meta = {
        "collection": user_col_name
    }
def test_smart_update_correctness(connect):
    """Verify smart_update semantics for a single document and for a
    batch, both with and without upsert.

    ``connect`` is a pytest fixture (defined elsewhere) that provides a
    live mongoengine connection.
    """
    # single document
    User.objects.delete()
    User(_id=1, name="Alice").save()
    # _id=2 does not exist yet: without upsert, nothing is written.
    User.smart_update(User(_id=2, name="Bob"), upsert=False)
    assert User.objects.count() == 1
    # With upsert, the missing document is inserted.
    User.smart_update(User(_id=2, name="Bob"), upsert=True)
    assert User.objects.count() == 2
    # Partial update: only the fields that are set get changed.
    User.smart_update(User(_id=1, dob="1990-01-01"))
    assert User.objects(_id=1).get().dob == "1990-01-01"
    # batch update
    User.objects.delete()
    User.objects.insert(User(_id=2, name="Bob", dob="1990-01-01"))
    # upsert = False
    data = [
        User(_id=1, name="Alice"),
        User(_id=2, name="Bryan"),
        User(_id=3, name="Cathy"),
    ]
    User.smart_update(data, upsert=False)
    # Only the pre-existing _id=2 is touched; its other field survives.
    assert User.objects.count() == 1
    assert [
        obj.to_dict()
        for obj in User.objects()
    ] == [
        {"_id": 2, "name": "Bryan", "dob": "1990-01-01"},
    ]
    # upsert = True
    data = [
        User(_id=1, name="Alice"),
        User(_id=2, name="Bruce"),
        User(_id=3, name="Cathy"),
    ]
    User.smart_update(data, upsert=True)
    # Existing _id=2 is updated in place; the others are inserted after it.
    assert User.objects.count() == 3
    assert [
        obj.to_dict()
        for obj in User.objects()
    ] == [
        {"_id": 2, "name": "Bruce", "dob": "1990-01-01"},
        {"_id": 1, "name": "Alice", "dob": None},
        {"_id": 3, "name": "Cathy", "dob": None},
    ]
def test_smart_update_performance(connect):
    """Time smart_update's two upsert strategies when a quarter of the
    documents already exist; correctness of the final count is asserted,
    timings are just printed via DateTimeTimer."""
    n_total = 100
    n_breaker = 25
    total_user_ids = list(range(1, 1 + n_total))
    random.shuffle(total_user_ids)
    breaker_user_ids = total_user_ids[:n_breaker]
    User.objects.delete()
    total_users = [User(_id=_id, name="Bob") for _id in total_user_ids]
    breaker_users = [User(_id=_id, name="Alice") for _id in breaker_user_ids]
    # Pre-insert a subset ("breakers") so some updates hit existing docs.
    User.smart_insert(breaker_users)
    assert User.objects.count() == n_breaker
    with DateTimeTimer(title="just upsert"):
        User.smart_update(total_users, upsert=True, _insert_after_update=False)
    assert User.objects.count() == n_total
    # insert_after_update strategy
    User.objects.delete()
    total_users = [User(_id=_id, name="Bob") for _id in total_user_ids]
    breaker_users = [User(_id=_id, name="Alice") for _id in breaker_user_ids]
    User.smart_insert(breaker_users)
    assert User.objects.count() == n_breaker
    with DateTimeTimer(title="insert after update"):
        User.smart_update(total_users, upsert=True, _insert_after_update=True)
    assert User.objects.count() == n_total
if __name__ == "__main__":
    # Allow running this test module directly: delegates to pytest on itself.
    import os
    basename = os.path.basename(__file__)
    pytest.main([basename, "-s", "--tb=native"])
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸 (Blueking) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
# Generated by Django 2.2.6 on 2020-12-10 08:40
import apps.utils.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the process/config management app.

    Creates the config template/version/instance tables, job tracking
    tables, process cache tables and the BSCP integration tables.
    Auto-generated by Django; the operation bodies must not be edited.
    """
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="ConfigInstance",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("config_version_id", models.IntegerField(db_index=True, verbose_name="模板版本ID")),
                ("config_template_id", models.IntegerField(db_index=True, verbose_name="模板ID")),
                ("bk_process_id", models.IntegerField(db_index=True, verbose_name="进程实例ID")),
                ("inst_id", models.IntegerField(db_index=True, verbose_name="实例ID")),
                ("content", apps.utils.models.CompressedTextField(verbose_name="实例内容")),
                ("path", models.CharField(max_length=256, verbose_name="文件绝对路径")),
                ("is_latest", models.BooleanField(db_index=True, default=True, verbose_name="是否最新")),
                ("is_released", models.BooleanField(db_index=True, default=False, verbose_name="是否已发布")),
                ("sha256", models.CharField(max_length=64, verbose_name="SHA256")),
                ("expression", models.CharField(max_length=256, verbose_name="实例表达式")),
                ("created_at", models.DateTimeField(auto_now_add=True, verbose_name="创建时间")),
                ("created_by", models.CharField(default="", max_length=32, verbose_name="创建者")),
            ],
            options={"verbose_name": "配置实例(已渲染)", "verbose_name_plural": "配置实例(已渲染)", "ordering": ["-id"]},
        ),
        migrations.CreateModel(
            name="ConfigTemplateBindingRelationship",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("created_at", models.DateTimeField(auto_now_add=True, verbose_name="创建时间")),
                ("created_by", models.CharField(default="", max_length=32, verbose_name="创建者")),
                ("updated_at", models.DateTimeField(auto_now=True, null=True, verbose_name="更新时间")),
                ("updated_by", models.CharField(blank=True, default="", max_length=32, verbose_name="修改者")),
                ("bk_biz_id", models.IntegerField(db_index=True, verbose_name="业务ID")),
                ("config_template_id", models.IntegerField(db_index=True, verbose_name="模板ID")),
                (
                    "process_object_type",
                    models.CharField(
                        choices=[("INSTANCE", "进程实例"), ("TEMPLATE", "进程模板")],
                        db_index=True,
                        max_length=16,
                        verbose_name="进程对象类型",
                    ),
                ),
                ("process_object_id", models.IntegerField(db_index=True, verbose_name="进程实例ID/进程模板ID")),
            ],
            options={"verbose_name": "配置模板与进程的绑定关系", "verbose_name_plural": "配置模板与进程的绑定关系"},
        ),
        migrations.CreateModel(
            name="ConfigTemplateVersion",
            fields=[
                ("created_at", models.DateTimeField(auto_now_add=True, verbose_name="创建时间")),
                ("created_by", models.CharField(default="", max_length=32, verbose_name="创建者")),
                ("updated_at", models.DateTimeField(auto_now=True, null=True, verbose_name="更新时间")),
                ("updated_by", models.CharField(blank=True, default="", max_length=32, verbose_name="修改者")),
                ("config_version_id", models.AutoField(primary_key=True, serialize=False, verbose_name="模板版本ID")),
                ("config_template_id", models.IntegerField(db_index=True, verbose_name="模板ID")),
                ("description", models.CharField(blank=True, default="", max_length=256, verbose_name="版本描述")),
                ("content", models.TextField(blank=True, default="", verbose_name="配置模板内容")),
                ("is_draft", models.BooleanField(db_index=True, default=True, verbose_name="是否草稿")),
                ("is_active", models.BooleanField(db_index=True, default=False, verbose_name="是否可用")),
                (
                    "file_format",
                    models.CharField(blank=True, default=None, max_length=16, null=True, verbose_name="文件风格"),
                ),
            ],
            options={"verbose_name": "配置模板版本", "verbose_name_plural": "配置模板版本", "ordering": ["-config_version_id"]},
        ),
        migrations.CreateModel(
            name="Job",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("bk_biz_id", models.IntegerField(db_index=True, verbose_name="业务ID")),
                ("expression", models.CharField(default="待完善", max_length=256, null=True, verbose_name="实例表达式")),
                ("scope", models.JSONField(default=dict, verbose_name="范围")),
                (
                    "job_object",
                    models.CharField(
                        choices=[("configfile", "配置文件"), ("process", "进程")],
                        db_index=True,
                        max_length=16,
                        verbose_name="任务对象",
                    ),
                ),
                (
                    "job_action",
                    models.CharField(
                        choices=[
                            ("generate", "生成"),
                            ("release", "下发"),
                            ("start", "启动"),
                            ("stop", "停止"),
                            ("restart", "重启"),
                            ("reload", "重载"),
                            ("force_stop", "强制停止"),
                            ("set_auto", "托管"),
                            ("unset_auto", "取消托管"),
                        ],
                        db_index=True,
                        max_length=16,
                        verbose_name="动作",
                    ),
                ),
                (
                    "status",
                    models.CharField(
                        choices=[("pending", "等待中"), ("running", "执行中"), ("succeeded", "执行成功"), ("failed", "执行失败")],
                        db_index=True,
                        default="pending",
                        max_length=16,
                        verbose_name="任务状态",
                    ),
                ),
                ("created_by", models.CharField(db_index=True, max_length=64, verbose_name="执行账户")),
                ("is_ready", models.BooleanField(default=False, verbose_name="是否已准备(子任务是否全部创建完成)")),
                ("start_time", models.DateTimeField(auto_now_add=True, db_index=True, verbose_name="开始时间")),
                ("end_time", models.DateTimeField(null=True, verbose_name="结束时间")),
                ("pipeline_id", models.CharField(db_index=True, max_length=32, verbose_name="PIPELINE ID")),
                ("extra_data", models.JSONField(default=dict, verbose_name="额外数据")),
            ],
            options={"verbose_name": "任务历史", "verbose_name_plural": "任务历史", "ordering": ("-id",)},
        ),
        migrations.CreateModel(
            name="JobProcInstStatusStatistics",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("job_id", models.IntegerField(db_index=True, verbose_name="任务ID")),
                ("bk_process_id", models.IntegerField(db_index=True, verbose_name="进程ID")),
                ("proc_inst_total_num", models.IntegerField(verbose_name="进程实例数量")),
                ("proc_inst_terminated_num", models.IntegerField(default=0, verbose_name="进程终止数量")),
                ("proc_inst_running_num", models.IntegerField(default=0, verbose_name="进程运行数量")),
                ("proc_inst_auto_num", models.IntegerField(default=0, verbose_name="进程托管数量")),
                ("proc_inst_noauto_num", models.IntegerField(default=0, verbose_name="进程未托管数量")),
            ],
            options={"verbose_name": "任务进程实例状态统计", "verbose_name_plural": "任务进程实例状态统计"},
        ),
        migrations.CreateModel(
            name="JobTask",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("job_id", models.IntegerField(db_index=True, verbose_name="任务ID")),
                ("bk_process_id", models.IntegerField(db_index=True, verbose_name="进程ID")),
                (
                    "status",
                    models.CharField(
                        choices=[("pending", "等待中"), ("running", "执行中"), ("succeeded", "执行成功"), ("failed", "执行失败")],
                        db_index=True,
                        default="pending",
                        max_length=16,
                        verbose_name="任务状态",
                    ),
                ),
                ("start_time", models.DateTimeField(auto_now_add=True, db_index=True, verbose_name="开始时间")),
                ("end_time", models.DateTimeField(null=True, verbose_name="结束时间")),
                ("pipeline_id", models.CharField(db_index=True, max_length=32, verbose_name="PIPELINE ID")),
                ("extra_data", models.JSONField(default=dict, verbose_name="额外数据")),
            ],
            options={"verbose_name": "任务详情", "verbose_name_plural": "任务详情"},
        ),
        migrations.CreateModel(
            name="Process",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("bk_biz_id", models.IntegerField(db_index=True, verbose_name="业务ID")),
                ("expression", models.CharField(db_index=True, default="待完善", max_length=256, verbose_name="实例表达式")),
                ("bk_host_innerip", models.GenericIPAddressField(db_index=True, verbose_name="主机IP")),
                ("bk_cloud_id", models.IntegerField(db_index=True, verbose_name="云区域ID")),
                (
                    "bk_set_env",
                    models.CharField(
                        choices=[("0", "全部"), ("1", "测试"), ("2", "体验"), ("3", "正式")],
                        db_index=True,
                        max_length=4,
                        verbose_name="集群环境类型",
                    ),
                ),
                ("bk_set_id", models.IntegerField(db_index=True, verbose_name="集群ID")),
                ("bk_module_id", models.IntegerField(db_index=True, verbose_name="模块ID")),
                (
                    "service_template_id",
                    models.IntegerField(blank=True, db_index=True, null=True, verbose_name="服务模板ID"),
                ),
                ("service_instance_id", models.IntegerField(db_index=True, verbose_name="服务实例ID")),
                (
                    "bk_process_name",
                    models.CharField(blank=True, db_index=True, max_length=64, null=True, verbose_name="进程名称"),
                ),
                ("bk_process_id", models.IntegerField(db_index=True, verbose_name="进程ID")),
                ("process_template_id", models.IntegerField(db_index=True, verbose_name="进程模板ID")),
                ("process_status", models.IntegerField(db_index=True, default=0, verbose_name="进程状态")),
                ("is_auto", models.BooleanField(db_index=True, default=False, verbose_name="托管状态")),
            ],
            options={"verbose_name": "业务进程缓存", "verbose_name_plural": "业务进程缓存"},
        ),
        migrations.CreateModel(
            name="ProcessInst",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("bk_biz_id", models.IntegerField(db_index=True, verbose_name="业务ID")),
                ("bk_host_num", models.IntegerField(db_index=True, verbose_name="主机编号")),
                ("bk_host_innerip", models.GenericIPAddressField(db_index=True, verbose_name="主机IP")),
                ("bk_cloud_id", models.IntegerField(db_index=True, verbose_name="云区域ID")),
                ("bk_process_id", models.IntegerField(db_index=True, verbose_name="进程ID")),
                ("bk_module_id", models.IntegerField(db_index=True, verbose_name="模块ID")),
                ("bk_process_name", models.CharField(db_index=True, max_length=64, verbose_name="进程名称")),
                ("inst_id", models.IntegerField(db_index=True, verbose_name="InstID")),
                ("process_status", models.IntegerField(db_index=True, default=0, verbose_name="进程状态")),
                ("is_auto", models.BooleanField(db_index=True, default=False, verbose_name="托管状态")),
                ("local_inst_id", models.IntegerField(db_index=True, verbose_name="LocalInstID")),
                (
                    "local_inst_id_uniq_key",
                    models.CharField(db_index=True, default="", max_length=256, verbose_name="进程实例唯一标识"),
                ),
                ("proc_num", models.IntegerField(default=1, verbose_name="启动数量")),
            ],
            options={
                "verbose_name": "InstID",
                "verbose_name_plural": "InstID",
                "unique_together": {
                    ("bk_host_innerip", "bk_cloud_id", "bk_process_name", "local_inst_id"),
                    ("bk_module_id", "bk_process_name", "inst_id"),
                },
            },
        ),
        migrations.CreateModel(
            name="ConfigTemplate",
            fields=[
                ("created_at", models.DateTimeField(auto_now_add=True, verbose_name="创建时间")),
                ("created_by", models.CharField(default="", max_length=32, verbose_name="创建者")),
                ("updated_at", models.DateTimeField(auto_now=True, null=True, verbose_name="更新时间")),
                ("updated_by", models.CharField(blank=True, default="", max_length=32, verbose_name="修改者")),
                ("config_template_id", models.AutoField(primary_key=True, serialize=False, verbose_name="模板ID")),
                ("bk_biz_id", models.IntegerField(db_index=True, verbose_name="业务ID")),
                ("template_name", models.CharField(db_index=True, max_length=32, verbose_name="模板名称")),
                ("file_name", models.CharField(max_length=64, verbose_name="文件名称")),
                ("abs_path", models.CharField(max_length=256, verbose_name="文件绝对路径")),
                ("owner", models.CharField(max_length=32, verbose_name="文件所有者")),
                ("group", models.CharField(max_length=32, verbose_name="文件归属群组")),
                ("filemode", models.CharField(max_length=8, verbose_name="文件权限")),
                (
                    "line_separator",
                    models.CharField(
                        choices=[("CR", "MacOs(\\r)"), ("LF", "Unix(\\n)"), ("CRLF", "Windows(\\r\\n)")],
                        max_length=8,
                        verbose_name="换行符格式",
                    ),
                ),
            ],
            options={
                "verbose_name": "配置模板",
                "verbose_name_plural": "配置模板",
                "ordering": ["-config_template_id"],
                "unique_together": {("bk_biz_id", "template_name")},
            },
        ),
        migrations.CreateModel(
            name="BscpConfig",
            fields=[
                ("app_id", models.CharField(db_index=True, max_length=64, verbose_name="BSCP应用ID")),
                ("config_template_id", models.IntegerField(db_index=True, verbose_name="模板ID")),
                ("cfg_id", models.CharField(max_length=64, primary_key=True, serialize=False, verbose_name="BSCP配置ID")),
            ],
            options={
                "verbose_name": "BSCP配置",
                "verbose_name_plural": "BSCP配置",
                "db_table": "bscp_config",
                "unique_together": {("app_id", "config_template_id")},
            },
        ),
        migrations.CreateModel(
            name="BscpApplication",
            fields=[
                ("biz_id", models.IntegerField(db_index=True, verbose_name="业务ID")),
                (
                    "biz_name",
                    models.CharField(max_length=64, primary_key=True, serialize=False, verbose_name="BSCP业务ID"),
                ),
                ("app_id", models.CharField(db_index=True, max_length=64, verbose_name="BSCP应用ID")),
                (
                    "process_object_type",
                    models.CharField(
                        choices=[("INSTANCE", "进程实例"), ("TEMPLATE", "进程模板")],
                        db_index=True,
                        max_length=16,
                        verbose_name="进程对象类型",
                    ),
                ),
                ("process_object_id", models.IntegerField(db_index=True, verbose_name="进程实例ID/进程模板ID")),
            ],
            options={
                "verbose_name": "BSCP应用",
                "verbose_name_plural": "BSCP应用",
                "db_table": "bscp_application",
                "unique_together": {("app_id", "process_object_type", "process_object_id")},
            },
        ),
    ]
|
import hassapi as hass
import globals
#
# App to send notification when a sensor changes state
#
# Args:
#
# sensor: sensor to monitor e.g. sensor.upstairs_smoke
# idle_state - normal state of sensor e.g. Idle
# turn_on - scene or device to activate when sensor changes e.g. scene.house_bright
# Release Notes
#
# Version 1.0:
# Initial Version
class SensorNotification(hass.Hass):
    """AppDaemon app: notify (and optionally activate a scene/device)
    whenever a monitored sensor changes state."""
    def initialize(self):
        """Register a state listener for each entity in the ``sensor`` arg."""
        if "sensor" in self.args:
            for sensor in self.split_device_list(self.args["sensor"]):
                self.listen_state(self.state_change, sensor)
    def state_change(self, entity, attribute, old, new, kwargs):
        # Ignore empty state values.
        if new != "":
            if "input_select" in self.args:
                # First entry of "input_select" is the input_select entity;
                # the remaining entries are the modes in which "turn_on" fires.
                valid_modes = self.split_device_list(self.args["input_select"])
                select = valid_modes.pop(0)
                is_state = self.get_state(select)
            else:
                is_state = None
                valid_modes = ()
                # NOTE(review): without "input_select" configured, is_state
                # (None) is never in valid_modes (empty), so the "turn_on"
                # branch below can never fire — confirm this is intended.
            self.log("{} changed to {}".format(self.friendly_name(entity), new))
            self.notify("{} changed to {}".format(self.friendly_name(entity), new), name = globals.notify)
            if "idle_state" in self.args:
                if new != self.args["idle_state"] and "turn_on" in self.args and is_state in valid_modes:
                    self.turn_on(self.args["turn_on"])
import time
from datetime import datetime
from dateparser import parse
def parse_timestamp(timestamp, utc=False):
    """Convert a POSIX timestamp to a naive datetime.

    When *utc* is true the conversion is done in UTC, otherwise in the
    local timezone; the result carries no tzinfo either way.
    """
    converter = datetime.utcfromtimestamp if utc else datetime.fromtimestamp
    return converter(timestamp)
def datetime_to_timestamp(dt):
    """Convert a local-time datetime to a POSIX timestamp (float)."""
    local_time_tuple = dt.timetuple()
    return time.mktime(local_time_tuple)
def datetime_to_str(dt, format='%Y-%m-%d %H:%M:%S:%f'):
    """Format *dt* with *format*; return None when *dt* is falsy."""
    return dt.strftime(format) if dt else None
def str_to_datetime(string, format='%Y-%m-%d %H:%M:%S:%f'):
    """Parse *string* into a datetime.

    Tries the explicit *format* first; on failure falls back to the fuzzy
    ``dateparser.parse``. Returns None for falsy input.

    Fix: only ValueError/TypeError trigger the fallback — the previous
    bare ``except:`` also swallowed unrelated errors such as
    KeyboardInterrupt and SystemExit.
    """
    if string:
        try:
            dt = datetime.strptime(string, format)
        except (ValueError, TypeError):
            # Non-canonical format: delegate to dateparser's fuzzy parsing.
            dt = parse(string)
        return dt
    return None
def timestamp_to_str(timestamp, datetime_fmt="%Y/%m/%d %H:%M:%S:%f"):
    """Render a POSIX timestamp as a local-time string in *datetime_fmt*."""
    local_dt = datetime.fromtimestamp(timestamp)
    return local_dt.strftime(datetime_fmt)
|
# Canned reply strings, all themed around "Assault Vest"; presumably one is
# selected elsewhere (the selection logic is not in this module).
Lines = [
    'Needs more Assault Vest.',
    'Only with Assault Vest equipped.',
    'Yes, because you have an Assault Vest.',
    '#BT approved.',
    "I don't like it, there's no Assault Vest.",
    "Doesn't seem like there's any Assault Vest here, so probably not.",
    'If Assault Vest is included: Unquestionably yes, otherwise: probably not.'
]
|
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TF C++ logs (errors only); must be set before importing TF
import sys
sys.path.append(".")  # allow importing sibling modules when run as a script
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
import numpy as np
from datasets import DataSet
import utils
import likelihoods
from sdt_rff_gpu import SdtRff
import losses
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)  # silence TF Python-side logging too
def import_dataset(dataset, fold):
    """Load the train/test split for *dataset* at cross-validation *fold*.

    Expects whitespace-delimited matrices under ``FOLDS/`` named
    ``<dataset>_ARD_<part>__FOLD_<fold>`` and returns a (train, test)
    pair of DataSet objects.
    """
    def _load(part):
        path = 'FOLDS/{}_ARD_{}__FOLD_{}'.format(dataset, part, fold)
        return np.loadtxt(path, delimiter=' ')

    data = DataSet(_load('Xtrain'), _load('ytrain'))
    test = DataSet(_load('Xtest'), _load('ytest'))
    return data, test
if __name__ == '__main__':
    # Command-line flags (dataset, fold, seed, optimizer settings, ...).
    FLAGS = utils.get_flags()
    ## Set random seed for tensorflow and numpy operations
    tf.set_random_seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)
    data, test = import_dataset(FLAGS.dataset, FLAGS.fold)
    ## Here we define a custom loss for RFSDT to show
    error_rate = losses.ZeroOneLoss(data.Dout)
    ## Likelihood
    like = likelihoods.Softmax()
    ## Optimizer
    optimizer = utils.get_optimizer(FLAGS.optimizer, FLAGS.learning_rate)
    ## Main RFSDT object
    sdt = SdtRff(like, data.num_examples, data.X.shape[1], data.Y.shape[1], FLAGS.h_tree, FLAGS.n_rff, FLAGS.kernel_type, FLAGS.ard_type, FLAGS.local_reparam, FLAGS.q_Omega_fixed, FLAGS.theta_fixed, FLAGS.likelihood_type, FLAGS.dataset, FLAGS.fold)
    ## Learning
    sdt.learn(data, FLAGS.learning_rate, FLAGS.mc_train, FLAGS.batch_size, FLAGS.n_iterations, optimizer,
              FLAGS.display_step, test, FLAGS.mc_test, error_rate, FLAGS.duration, FLAGS.less_prints)
|
class ListNode:
    """Singly-linked list node."""
    def __init__(self, x):
        self.val = x
        self.next = None
def detectCycle(head: ListNode) -> ListNode:
    """Return the node where the cycle begins, or None if the list is acyclic.

    Floyd's tortoise-and-hare: once fast and slow pointers meet inside the
    cycle, restarting one pointer from the head and advancing both a single
    step at a time makes them meet exactly at the cycle entry.
    """
    def _meeting_point(node):
        # Phase 1: find any node inside the cycle, or prove there is none.
        hare = tortoise = node
        while hare and hare.next:
            hare = hare.next.next
            tortoise = tortoise.next
            if hare is tortoise:
                return tortoise
        return None
    if not head:
        return None
    inside = _meeting_point(head)
    if inside is None:
        return None
    # Phase 2: walk from the head and from the meeting point in lockstep.
    walker = head
    while walker is not inside:
        walker = walker.next
        inside = inside.next
    return walker
if __name__ == "__main__":
    # Build 3 -> 2 -> 0 -> -4 with the tail linked back to the second node,
    # so the cycle starts at the node holding 2.
    node = ListNode(3)
    node.next = ListNode(2)
    node.next.next = ListNode(0)
    node.next.next.next = ListNode(-4)
    node.next.next.next.next = node.next
    result = detectCycle(node)
    print(result.val)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Class that implements the MQTT server (broker).
"""
import tornado.gen
import tornado.ioloop
import tornado.locks
from slugify import slugify
from wotpy.codecs.enums import MediaTypes
from wotpy.protocols.enums import Protocols, InteractionVerbs
from wotpy.protocols.mqtt.handlers.action import ActionMQTTHandler
from wotpy.protocols.mqtt.handlers.event import EventMQTTHandler
from wotpy.protocols.mqtt.handlers.ping import PingMQTTHandler
from wotpy.protocols.mqtt.handlers.property import PropertyMQTTHandler
from wotpy.protocols.mqtt.runner import MQTTHandlerRunner
from wotpy.protocols.server import BaseProtocolServer
from wotpy.wot.enums import InteractionTypes
from wotpy.wot.form import Form
class MQTTServer(BaseProtocolServer):
    """MQTT binding server implementation."""
    DEFAULT_SERVIENT_ID = 'wotpy'
    def __init__(self, broker_url, property_callback_ms=None, event_callback_ms=None, servient_id=None):
        # port=None: an MQTT binding connects out to a broker instead of
        # listening on a local port.
        super(MQTTServer, self).__init__(port=None)
        self._broker_url = broker_url
        self._server_lock = tornado.locks.Lock()
        self._servient_id = servient_id
        def build_runner(handler):
            # Each handler gets its own runner managing its broker connection.
            return MQTTHandlerRunner(broker_url=self._broker_url, mqtt_handler=handler)
        self._handler_runners = [
            build_runner(PingMQTTHandler(mqtt_server=self)),
            build_runner(PropertyMQTTHandler(mqtt_server=self, callback_ms=property_callback_ms)),
            build_runner(EventMQTTHandler(mqtt_server=self, callback_ms=event_callback_ms)),
            build_runner(ActionMQTTHandler(mqtt_server=self)),
        ]
    @property
    def servient_id(self):
        """Servient ID that is used to avoid topic collisions
        when multiple Servients are connected to the same broker."""
        return slugify(self._servient_id) if self._servient_id else self.DEFAULT_SERVIENT_ID
    @property
    def protocol(self):
        """Protocol of this server instance.
        A member of the Protocols enum."""
        return Protocols.MQTT
    def _build_forms_property(self, proprty):
        """Builds and returns the MQTT Form instances for the given Property interaction."""
        # Read and write share one request topic; observe uses a separate
        # updates topic.
        href_rw = "{}/{}/property/requests/{}/{}".format(
            self._broker_url.rstrip("/"),
            self.servient_id,
            proprty.thing.url_name,
            proprty.url_name)
        form_read = Form(
            interaction=proprty,
            protocol=self.protocol,
            href=href_rw,
            content_type=MediaTypes.JSON,
            op=InteractionVerbs.READ_PROPERTY)
        form_write = Form(
            interaction=proprty,
            protocol=self.protocol,
            href=href_rw,
            content_type=MediaTypes.JSON,
            op=InteractionVerbs.WRITE_PROPERTY)
        href_observe = "{}/{}/property/updates/{}/{}".format(
            self._broker_url.rstrip("/"),
            self.servient_id,
            proprty.thing.url_name,
            proprty.url_name)
        form_observe = Form(
            interaction=proprty,
            protocol=self.protocol,
            href=href_observe,
            content_type=MediaTypes.JSON,
            op=InteractionVerbs.OBSERVE_PROPERTY)
        return [form_read, form_write, form_observe]
    def _build_forms_action(self, action):
        """Builds and returns the MQTT Form instances for the given Action interaction."""
        href = "{}/{}/action/invocation/{}/{}".format(
            self._broker_url.rstrip("/"),
            self.servient_id,
            action.thing.url_name,
            action.url_name)
        form = Form(
            interaction=action,
            protocol=self.protocol,
            href=href,
            content_type=MediaTypes.JSON,
            op=InteractionVerbs.INVOKE_ACTION)
        return [form]
    def _build_forms_event(self, event):
        """Builds and returns the MQTT Form instances for the given Event interaction."""
        href = "{}/{}/event/{}/{}".format(
            self._broker_url.rstrip("/"),
            self.servient_id,
            event.thing.url_name,
            event.url_name)
        form = Form(
            interaction=event,
            protocol=self.protocol,
            href=href,
            content_type=MediaTypes.JSON,
            op=InteractionVerbs.SUBSCRIBE_EVENT)
        return [form]
    def build_forms(self, hostname, interaction):
        """Builds and returns a list with all Forms that are
        linked to this server for the given Interaction."""
        # hostname is unused for MQTT: topics are rooted at the broker URL.
        intrct_type_map = {
            InteractionTypes.PROPERTY: self._build_forms_property,
            InteractionTypes.ACTION: self._build_forms_action,
            InteractionTypes.EVENT: self._build_forms_event
        }
        if interaction.interaction_type not in intrct_type_map:
            raise ValueError("Unsupported interaction")
        return intrct_type_map[interaction.interaction_type](interaction)
    def build_base_url(self, hostname, thing):
        """Returns the base URL for the given Thing in the context of this server."""
        return self._broker_url
    @tornado.gen.coroutine
    def start(self):
        """Starts the MQTT broker and all the MQTT clients
        that handle the WoT clients requests."""
        with (yield self._server_lock.acquire()):
            yield [runner.start() for runner in self._handler_runners]
    @tornado.gen.coroutine
    def stop(self):
        """Stops the MQTT broker and the MQTT clients."""
        with (yield self._server_lock.acquire()):
            yield [runner.stop() for runner in self._handler_runners]
|
'''
Starting with a 1-indexed array of zeros and a list of operations, for each operation add a value to each of the array element between two given indices, inclusive. Once all operations have been performed, return the maximum value in your array.
For example, the length of your array of zeros n=10. Your list of queries is as follows:
a b k
1 5 3
4 8 7
6 9 1
Add the values of k between the indices a and b inclusive:
index-> 1 2 3 4 5 6 7 8 9 10
[0,0,0, 0, 0,0,0,0,0, 0]
[3,3,3, 3, 3,0,0,0,0, 0]
[3,3,3,10,10,7,7,7,0, 0]
[3,3,3,10,10,8,8,8,1, 0]
The largest value is 10 after all operations are performed.
Function Description
Complete the function arrayManipulation in the editor below. It must return an integer, the maximum value in the resulting array.
arrayManipulation has the following parameters:
n - the number of elements in your array
queries - a two dimensional array of queries where each queries[i] contains three integers, a, b, and k.
Input Format
The first line contains two space-separated integers n and m, the size of the array and the number of operations.
Each of the next m lines contains three space-separated integers a, b and k, the left index, right index and summand.
Constraints
Output Format
Return the integer maximum value in the finished array.
Sample Input
5 3
1 2 100
2 5 100
3 4 100
Sample Output
200
Explanation
After the first update list will be 100 100 0 0 0.
After the second update list will be 100 200 100 100 100.
After the third update list will be 100 200 200 200 100.
The required answer will be 200.
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the arrayManipulation function below.
def arrayManipulation(n, queries):
    """Return the maximum value in a 1-indexed array of *n* zeros after
    applying every (a, b, k) query, each adding k to elements a..b inclusive.

    The original stub had an empty body (a syntax error as written).
    Implemented with a difference array so the whole job is O(n + m)
    instead of the naive O(n * m).
    """
    # diff[i] holds the change in running value when stepping onto index i.
    diff = [0] * (n + 2)  # +2: indices are 1-based and b + 1 can reach n + 1
    for a, b, k in queries:
        diff[a] += k
        diff[b + 1] -= k
    best = current = 0
    for delta in diff:
        current += delta
        if current > best:
            best = current
    return best
if __name__ == '__main__':
    # HackerRank harness: reads the problem input from stdin and writes the
    # result to the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nm = input().split()
    n = int(nm[0])  # array size
    m = int(nm[1])  # number of queries
    queries = []
    for _ in range(m):
        queries.append(list(map(int, input().rstrip().split())))
    result = arrayManipulation(n, queries)
    fptr.write(str(result) + '\n')
    fptr.close()
|
import numpy as np
class MSE:
    """Mean squared error; samples are the columns of ``y`` and ``a``."""
    @staticmethod
    def f(y, a):
        """Average of 0.5 * ||y - a||^2 across the batch."""
        per_sample = 0.5 * np.square(y - a).sum(axis=0)
        return per_sample.mean()
    @staticmethod
    def df_da(y, a):
        """Return partial derivative wrt `a` (element-wise)."""
        return -(y - a)
class CrossEntropy:
    """Binary cross-entropy summed over outputs, averaged over the batch."""
    @staticmethod
    def f(y, a):
        per_output = y * np.log(a) + (1 - y) * np.log(1 - a)
        return np.mean(-per_output.sum(axis=0))
    @staticmethod
    def df_da(y, a):
        """Return partial derivative wrt `a` (element-wise)."""
        # Algebraically identical to -(y/a + (y-1)/(1-a)).
        return (1 - y) / (1 - a) - y / a
class LogLikelihood:
    """Negative log-likelihood of the true class (one-hot targets in y)."""
    @staticmethod
    def f(y, a):
        true_rows = np.argmax(y, axis=0)
        cols = np.arange(a.shape[1])
        # Fill a float64 buffer, matching the original per-sample loop.
        losses = np.zeros(cols.shape)
        losses[:] = -np.log(a[true_rows, cols])
        return np.mean(losses)
    @staticmethod
    def df_da(y, a):
        """Return partial derivative wrt `a` (element-wise)."""
        true_rows = np.argmax(y, axis=0)
        cols = np.arange(a.shape[1])
        # Broadcast fills each column with one constant, exactly as the
        # original column-by-column assignment did.
        r = np.zeros(a.shape)
        r[:, :] = -1 / a[true_rows, cols]
        return r
|
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.cross_validation import train_test_split
# Kaggle Allstate-style pipeline: continuous features + binary-encoded
# two-level categoricals, fed to xgboost with early stopping.
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
# Start from the continuous features only.
usecols = []
for c in df_train.columns:
    if 'cont' in c:
        usecols.append(c)
x_train = df_train[usecols]
x_test = df_test[usecols]
# Binary-encode every categorical feature with exactly two levels.
# NOTE(review): x_train/x_test are slices of the original frames, so these
# assignments may trigger pandas SettingWithCopyWarning — consider .copy().
for c in df_train.columns:
    if 'cat' in c:
        if len(df_train[c].unique()) == 2:
            uni = df_train[c].unique()[0]
            x_train[c + '_numeric'] = (df_train[c].values == uni)
            x_test[c + '_numeric'] = (df_test[c].values == uni)
y_train = df_train['loss']
id_test = df_test['id']
# Hold out 10% of the training data for early stopping.
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=4242)
d_train = xgb.DMatrix(x_train, y_train)
d_valid = xgb.DMatrix(x_valid, y_valid)
d_test = xgb.DMatrix(x_test)
params = {}
params['eta'] = 0.0202048 # Brings back memories, doesn't it?
params['colsample_bylevel'] = 0.9
params['subsample'] = 0.9
params['silent'] = 1
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
clf = xgb.train(params, d_train, 1000, watchlist, early_stopping_rounds=20)
p_test = clf.predict(d_test)
# Write the submission file.
sub = pd.DataFrame()
sub['id'] = id_test
sub['loss'] = p_test
sub.to_csv('testsub.csv', index=False)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ansible.module_utils.basic import *
from ansible.module_utils.dci_common import *
from ansible.module_utils.dci_base import *
# dciclient is an optional runtime dependency; main() fails gracefully with
# a clear message when it is missing.
try:
    from dciclient.v1.api import test as dci_test
except ImportError:
    dciclient_found = False
else:
    dciclient_found = True
# Ansible module documentation blocks. Typo fixes: "Wether of not" ->
# "Whether or not", "criterias" -> "criteria", and the `id` option described
# the wrong resource ("team" instead of "test" — copy-paste from dci_team).
DOCUMENTATION = '''
---
module: dci_test
short_description: module to interact with the tests endpoint of DCI
description:
- DCI module to manage the test resources
version_added: 2.2
options:
state:
required: false
description: Desired state of the resource
dci_login:
required: false
description: User's DCI login
dci_password:
required: false
description: User's DCI password
dci_cs_url:
required: false
description: DCI Control Server URL
id:
required: false
description: ID of the test to interact with
name:
required: false
description: Test name
data:
required: false
description: Test data
team_id:
required: false
description: Team to which the test will be attached
active:
required: false
description: Whether or not the resource should be active
embed:
required: false
description:
- List of field to embed within the retrieved resource
where:
required: false
description: Specific criteria for search
'''
EXAMPLES = '''
- name: Create a new test
dci_test:
name: 'tempest'
data: {"url": "http://www.redhat.com"}
team_id: XXXX
- name: Get test information
dci_test:
id: XXXXX
- name: Delete a test
dci_test:
state: absent
id: XXXXX
'''
# TODO
RETURN = '''
'''
class DciTest(DciBase):
    """Maps Ansible module params onto the DCI "test" resource."""

    def __init__(self, params):
        super(DciTest, self).__init__(dci_test)
        self.id = params.get('id')
        self.name = params.get('name')
        self.team_id = params.get('team_id')
        self.data = params.get('data')
        self.active = params.get('active')
        # Extra criteria forwarded to list/search calls.
        self.search_criterias = {
            'embed': params.get('embed'),
            'where': params.get('where')
        }
        # Fields compared to decide whether an update is required.
        self.deterministic_params = ['name', 'team_id', 'data', 'active']

    def do_delete(self, context):
        """Delete the test identified by self.id."""
        return self.resource.delete(context, self.id)

    def do_create(self, context):
        """Create the test; 'name' and 'team_id' are mandatory.

        Raises DciParameterError when a mandatory parameter is missing.
        """
        for param in ['name', 'team_id']:
            if not getattr(self, param):
                raise DciParameterError(
                    # Typo fix: message previously read "speficied".
                    '%s parameter must be specified' % param
                )
        return super(DciTest, self).do_create(context)
def main():
    """Ansible entry point: build the DCI context, dispatch the requested
    action on the test resource and exit with the parsed result."""
    resource_argument_spec = dict(
        state=dict(
            default='present',
            choices=['present', 'absent'],
            type='str'),
        id=dict(type='str'),
        name=dict(type='str'),
        data=dict(type='dict'),
        team_id=dict(type='str'),
        active=dict(default=True, type='bool'),
        embed=dict(type='str'),
        where=dict(type='str'),
    )
    # Add the shared dci_login/dci_password/dci_cs_url options.
    resource_argument_spec.update(authentication_argument_spec())
    module = AnsibleModule(
        argument_spec=resource_argument_spec,
        required_if=[['state', 'absent', ['id']]]
    )
    if not dciclient_found:
        module.fail_json(msg='The python dciclient module is required')
    context = build_dci_context(module)
    # Resolve state/params to one of DciTest's do_* methods.
    action_name = get_standard_action(module.params)
    test = DciTest(module.params)
    action_func = getattr(test, 'do_%s' % action_name)
    http_response = run_action_func(action_func, context, module)
    result = parse_http_response(http_response, dci_test, context, module)
    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
from bs4 import BeautifulSoup
import logging
from nltk import tokenize
from ArticlesDataDownloader.text_utilities import format_text_and_split_into_sentences
def science_direct_html_to_json(textHTML):
    """Parse a ScienceDirect article HTML page into a list of section dicts.

    Each entry has the shape:
        {'title': <str>, 'paragraphs': [{'sentences': [<str>, ...]}, ...]}
    The first entry is always the abstract.

    Parameters:
        textHTML: the raw HTML of the article page.

    Raises:
        ValueError: when the page has no abstract, no body, or contains
            nothing beyond the abstract.
    """
    logger = logging.getLogger("scienceDirectHtmlToJson")
    logger.info("Start reading ScienceDirect file")
    soup = BeautifulSoup(textHTML, "html.parser")
    outputJson = []

    logger.info("Reading section : Abstract")
    # Guard against a missing abstract instead of raising a bare IndexError.
    abstracts = soup.findAll('div', {'class': 'Abstracts u-font-serif'})
    if not abstracts:
        logger.error('Article has no abstract')
        raise ValueError("article has no abstract")
    outputJson.append({
        'title': 'Abstract',
        'paragraphs': [{"sentences": format_text_and_split_into_sentences(abstracts[0].text)}]
    })
    logger.debug("Abstract read correctly")

    body = soup.find('div', {'id': 'body'})
    if not body:
        logger.error('Article has no body')
        raise ValueError("article has no body")

    for index, sec in enumerate(body.findAll('section')):
        # Prefer the section's own heading; otherwise synthesize one.
        titles = sec.findAll('h2') + sec.findAll('h3')
        if titles:
            title = titles[0].text
        else:
            title = "Section " + str(index)
            logger.warning("Found section with unknown title")
        logger.info("Reading section : " + title)
        paragraphs = [
            {"sentences": format_text_and_split_into_sentences(par.text)}
            for par in sec.findAll('p')
        ]
        outputJson.append({
            'title': title,
            'paragraphs': paragraphs
        })

    # An article consisting solely of its abstract is considered unusable.
    if len(outputJson) < 2:
        raise ValueError("Only abstract found")
    return outputJson
import marshal
import mmap
import os
import os.path
import sys
import logging
# from profilehooks import profile
from cog.cache import Cache
import xxhash
RECORD_SEP = b'\xFD'  # byte terminating a marshalled record in the store file
UNIT_SEP = b'\xAC'  # byte separating a record's length header from its payload
class TableMeta:
    """Bundle of identifying metadata for a table (name, namespace, instance, mode)."""

    def __init__(self, name, namespace, db_instance_id, column_mode):
        self.name = name
        self.namespace = namespace
        self.db_instance_id = db_instance_id
        # column_mode: flag stored verbatim; its effect is applied elsewhere.
        self.column_mode = column_mode
class Table:
    """A named table: an Indexer (key lookup) plus a Store (record payloads)."""

    def __init__(self, name, namespace, db_instance_id, config, column_mode=False, shared_cache=None):
        self.logger = logging.getLogger('table')
        self.config = config
        self.shared_cache = shared_cache
        self.table_meta = TableMeta(name, namespace, db_instance_id, column_mode)
        # Order matters: the indexer and store both open files on disk.
        self.indexer = self.__create_indexer()
        self.store = self.__create_store(shared_cache)

    def __create_indexer(self):
        # Build the key index for this table's metadata.
        return Indexer(self.table_meta, self.config, self.logger)

    def __create_store(self, shared_cache):
        # Build the record store, optionally sharing a cache across tables.
        return Store(self.table_meta, self.config, self.logger, shared_cache=shared_cache)

    def close(self):
        """Close the index and store files held by this table."""
        self.indexer.close()
        self.store.close()
        self.logger.info("closed table: "+self.table_meta.name)
class Record:
    """A key/value record and its on-disk (marshalled) representation.

    On-disk layout produced by marshal():
        [key_link, right-justified to 16 bytes][tombstone, 1 byte]
        [value_type, 1 byte][len(payload) as ASCII digits][UNIT_SEP]
        [payload = marshal.dumps((key, value))]
        [value_link digits, only when value_type == 'l'][RECORD_SEP]
    """

    RECORD_LINK_LEN = 16  # fixed width (bytes) of the key_link field
    RECORD_LINK_NULL = -1  # sentinel: no next record in the hash-bucket chain
    VALUE_LINK_NULL = -1  # sentinel: no next value node in a list chain

    def __init__(self, key, value, tombstone='0', store_position=None, value_type="s", key_link=-1, value_link=-1):
        self.key = key
        self.value = value
        # tombstone: '0' appears to mean "live"; other values presumably mark
        # deletion -- TODO confirm against the callers that set it.
        self.tombstone = tombstone
        # Byte offset of this record in the store file (None until saved).
        self.store_position = store_position
        # Offset of the next record sharing this record's hash bucket.
        self.key_link = key_link
        # Offset of the next value node when the value is a linked list.
        self.value_link = value_link
        # "s" = single value; "l" = head of a linked list of values.
        self.value_type = value_type

    def set_store_position(self, pos):
        """Set the record's store offset; must be an int."""
        if type(pos) is not int:
            raise ValueError("store position must be int but provided : "+str(pos))
        self.store_position = pos

    def set_value_link(self, pos):
        self.value_link = pos

    def set_value(self, value):
        self.value = value

    def is_equal_val(self, other_record):
        """True when key and value both match (links/positions ignored)."""
        return self.key == other_record.key and self.value == other_record.value

    def get_kv_tuple(self):
        return self.key, self.value

    def serialize(self):
        # Payload is the (key, value) pair in Python's marshal format.
        return marshal.dumps((self.key, self.value))

    def marshal(self):
        """Return the full on-disk byte representation of this record."""
        # key_link is right-justified into a fixed-width field so it can be
        # overwritten in place later (see Store.update_record_link_inplace).
        key_link_bytes = str(self.key_link).encode().rjust(Record.RECORD_LINK_LEN)
        serialized = self.serialize()
        # print("string:" + str(self) + " serialized: " + str(serialized))
        m_record = key_link_bytes \
            + self.tombstone.encode() \
            + self.value_type.encode() \
            + str(len(serialized)).encode() \
            + UNIT_SEP \
            + serialized
        if self.value_type == "l":
            # List values append the link to the next value node.
            if self.value_link is not None:
                m_record += str(self.value_link).encode()
        m_record += RECORD_SEP
        # print("marshall: "+str(m_record))
        return m_record

    def is_empty(self):
        return self.key is None and self.value is None

    def __str__(self):
        return "key: {}, value: {}, tombstone: {}, store_position: {}, key_link: {}, value_link: {}, value_type: {}".format(self.key, self.value, self.tombstone, self.store_position, self.key_link, self.value_link, self.value_type)

    @classmethod
    def __read_until(cls, start, sbytes, separtor=UNIT_SEP):
        # Note: parameter name 'separtor' is a typo kept for compatibility.
        # Returns (bytes before separator, index of the separator byte).
        buff = b''
        i = 0  # default
        for i in range(start, len(sbytes)):
            s_byte = sbytes[i: i + 1]
            if s_byte == separtor:
                break
            buff += s_byte
        return buff, i

    @classmethod
    def unmarshal(cls, store_bytes):
        """reads from bytes and creates object
        """
        # Walk the fixed-width header first, then the length-prefixed payload.
        base_pos = 0
        key_link = int(store_bytes[base_pos: base_pos+Record.RECORD_LINK_LEN])
        next_base_pos = Record.RECORD_LINK_LEN
        tombstone = store_bytes[next_base_pos:next_base_pos + 1].decode()
        value_type = store_bytes[next_base_pos + 1: next_base_pos + 2].decode()
        value_len, end_pos = cls.__read_until(next_base_pos + 2, store_bytes)
        value_len = int(value_len.decode())
        value = store_bytes[end_pos+1: end_pos+1 + value_len]
        record = marshal.loads(value)
        value_link = None
        if value_type == 'l':
            # List records carry a trailing link to the next value node.
            value_link, end_pos = cls.__read_until(end_pos + value_len + 1, store_bytes, RECORD_SEP)
            value_link = int(value_link.decode())
        return cls(record[0], record[1], tombstone, store_position=None, value_type=value_type, key_link=key_link, value_link=value_link)

    @classmethod
    def __load_value(cls, store_pointer, val_list, store):
        """loads value from the store"""
        # Follow value_link pointers until the null sentinel, accumulating values.
        while store_pointer != Record.VALUE_LINK_NULL:
            rec = Record.unmarshal(store.read(store_pointer))
            val_list.append(rec.value)
            store_pointer = rec.value_link
        return val_list

    @classmethod
    # @profile
    def load_from_store(cls, position: int, store):
        """Read and unmarshal the record at 'position', expanding list values."""
        record = cls.unmarshal(store.read(position))
        if record.value_type == 'l':
            values = cls.__load_value(record.value_link, [record.value], store)
            record.set_value(values)
        return record
class Index:
    """mmap-backed hash index mapping keys to store-file offsets.

    The index file is a flat array of INDEX_CAPACITY fixed-width slots
    (INDEX_BLOCK_LEN bytes each). A slot holds either the empty sentinel
    ('-1' zero-filled) or the store offset of the bucket's head record;
    collisions are chained through each record's key_link field.
    """

    def __init__(self, table_meta, config, logger, index_id=0):
        self.logger = logging.getLogger('index')
        self.table = table_meta
        self.config = config
        self.name = self.config.cog_index(table_meta.namespace, table_meta.name, table_meta.db_instance_id, index_id)
        self.empty_block = '-1'.zfill(self.config.INDEX_BLOCK_LEN).encode()
        if not os.path.exists(self.name):
            # First use: pre-allocate the whole file filled with empty slots.
            self.logger.info("creating index...")
            f = open(self.name, 'wb+')
            i = 0
            e_blocks = []
            while i < config.INDEX_CAPACITY:
                e_blocks.append(self.empty_block)
                i += 1
            f.write(b''.join(e_blocks))
            self.file_limit = f.tell()
            f.close()
            self.logger.info("new index with capacity" + str(config.INDEX_CAPACITY) + "created: " + self.name)
        else:
            self.logger.info("Index: "+self.name+" already exists.")
        # Memory-map the whole file for in-place slot reads/writes.
        self.db = open(self.name, 'r+b')
        self.db_mem = mmap.mmap(self.db.fileno(), 0)
        self.db_mem.seek(0)
        # NOTE(review): 'current_block' is read but never used.
        current_block = self.db_mem.read(self.config.INDEX_BLOCK_LEN)

    def close(self):
        """Close the underlying index file."""
        self.db.close()

    def get_index_key(self, int_store_position):
        # Encode a store offset as a fixed-width slot value.
        return str(int_store_position).encode().rjust(self.config.INDEX_BLOCK_LEN)

    # @profile
    def put(self, key, store_position, store):
        """
        key chain
        :param key:
        :param store_position:
        :param store:
        :return:
        """
        """
        k5 -> k4 -> k3 -> k2 -> k1
        add: k6
        k6 -> k5 -> k4 -> k3 -> k2 -> k1
        add/update: k4
        1. k4 -> k6 -> k5 -> k4 -> k3 -> k2 -> k1
        2. k4 -> k6 -> k5 -> k3 -> k2 -> k1
        """
        orig_position, orig_hash = self.get_index(key)
        data_at_prob_position = self.db_mem[orig_position: orig_position + self.config.INDEX_BLOCK_LEN]
        self.logger.debug('writing : '+str(key) + ' current data at store position: '+ str(data_at_prob_position))
        if data_at_prob_position == self.empty_block:
            # Empty bucket: the new record becomes the head with no successor.
            # point next link to record null
            store.update_record_link_inplace(store_position, Record.RECORD_LINK_NULL)
            self.db_mem[orig_position: orig_position + self.config.INDEX_BLOCK_LEN] = self.get_index_key(store_position)
        else:
            # read existing record and update pointers
            record = Record.load_from_store(int(data_at_prob_position), store)
            record.set_store_position(int(data_at_prob_position))
            if record.key == key:
                """ update existing record """
                # New record inherits the old head's successor, shadowing it.
                store.update_record_link_inplace(store_position, int(record.key_link))
            else:
                # set next link to the record at the top of the bucket
                store.update_record_link_inplace(store_position, record.store_position)
                # check if this record exists in the bucket, if yes remove pointer.
                prev_record = None
                while record.key_link != Record.RECORD_LINK_NULL:
                    record = Record.load_from_store(record.key_link, store)
                    record.set_store_position(record.key_link)
                    if record.key == key and prev_record is not None:
                        """
                        if same key found in bucket, update previous record in chain to point to key_link of this record
                        prev_rec -> current rec.key_link
                        curr_rec will not be linked in the bucket anymore.
                        """
                        #update in place the key link pointer of pervios record, ! need to add fixed length padding.
                        store.update_record_link_inplace(prev_record.store_position, record.key_link)
                    prev_record = record
            # In all non-empty cases the new record becomes the bucket head.
            self.db_mem[orig_position: orig_position + self.config.INDEX_BLOCK_LEN] = self.get_index_key(store_position)

    def get_index(self, key):
        """Return (byte offset of the key's slot, raw hash value)."""
        # NOTE(review): cog_hash already reduces modulo INDEX_CAPACITY, so the
        # extra "% ((sys.maxsize + 1) * 2)" looks like a no-op -- confirm.
        num = self.cog_hash(key) % ((sys.maxsize + 1) * 2)
        self.logger.debug("hash for: " + key + " : " + str(num))
        # there may be diff when using mem slice vs write (+1 needed)
        index = (self.config.INDEX_BLOCK_LEN *
                 (max((num % self.config.INDEX_CAPACITY) - 1, 0)))
        self.logger.debug("offset : " + key + " : " + str(index))
        return index, num

    def cog_hash(self, string):
        # Stable 32-bit hash reduced to a slot number.
        return xxhash.xxh32(string, seed=2).intdigest() % self.config.INDEX_CAPACITY

    # @profile
    def get(self, key, store):
        """Return the Record for 'key', or None if absent."""
        self.logger.debug("GET: Reading index: " + self.name)
        index_position, raw_hash = self.get_index(key)
        data_at_index_position = self.db_mem[index_position:index_position + self.config.INDEX_BLOCK_LEN]
        if data_at_index_position == self.empty_block:
            return None
        data_at_index_position = int(data_at_index_position)
        record = Record.load_from_store(data_at_index_position, store)
        record.set_store_position(data_at_index_position)
        self.logger.debug("read record " + str(record))
        if record.key == key:
            return record
        else:
            # Walk the bucket's key_link chain until found or exhausted.
            while record.key_link != Record.RECORD_LINK_NULL:
                self.logger.debug("record.key_link: "+str(record.key_link))
                record = Record.load_from_store(record.key_link, store)
                record.set_store_position(record.key_link)
                if record.key == key:
                    return record
        return None

    '''
    Iterates through record in itr_store.
    '''
    def scanner(self,store):
        """Yield one Record per non-empty slot (bucket heads only).

        NOTE(review): chained records and tombstoned records are not
        filtered or followed here -- confirm this is the intended scan.
        """
        scan_cursor = 0
        while True:
            data_at_position = self.db_mem[scan_cursor:scan_cursor + self.config.INDEX_BLOCK_LEN]
            if len(data_at_position) == 0:#EOF index
                self.logger.info("Index EOF reached! Scan terminated.")
                return
            if data_at_position == self.empty_block:
                scan_cursor += self.config.INDEX_BLOCK_LEN
                self.logger.debug("GET: skipping empty block during iteration.")
                continue
            record = Record.load_from_store(int(data_at_position), store)
            if record is None:#EOF store
                self.logger.error("Store EOF reached! Iteration terminated.")
                return
            yield Record(record.key, record.value, record.tombstone)
            scan_cursor += self.config.INDEX_BLOCK_LEN

    def delete(self, key, store):
        """
        k5 -> k4 -> k3 -> k2 -> k1
        del: k3
        k6 -> k5 -> k4 -> k2 -> k1
        """
        # NOTE(review): several suspicious behaviors to confirm:
        #  - deleting the bucket head empties the WHOLE slot, dropping any
        #    chained records behind it;
        #  - if the key matches on the first chain hop, prev_record is still
        #    None and the update below would raise AttributeError (put() guards
        #    this case, delete() does not);
        #  - the method returns True even when the key was never found.
        self.logger.debug("GET: Reading index: " + self.name)
        index_position, raw_hash = self.get_index(key)
        data_at_index_position = self.db_mem[index_position:index_position + self.config.INDEX_BLOCK_LEN]
        if data_at_index_position == self.empty_block:
            return False
        data_at_index_position = int(data_at_index_position)
        record = Record.load_from_store(data_at_index_position, store)
        record.set_store_position(data_at_index_position)
        self.logger.debug("read record " + str(record))
        if record.key == key:
            """delete bucket => map hash table to empty block"""
            self.db_mem[index_position:index_position + self.config.INDEX_BLOCK_LEN] = self.empty_block
        else:
            """search bucket"""
            prev_record = None
            while record.key_link != Record.RECORD_LINK_NULL:
                record = Record.load_from_store(record.key_link, store)
                record.set_store_position(record.key_link)
                if record.key == key:
                    """
                    if same key found in bucket, update previous record in chain to point to key_link of this record
                    prev_rec -> current rec.key_link
                    curr_rec will not be linked in the bucket anymore.
                    """
                    # update in place the key link pointer of pervios record, ! need to add fixed length padding.
                    store.update_record_link_inplace(prev_record.store_position, record.key_link)
                prev_record = record
        return True

    def flush(self):
        """Flush pending mmap writes to disk."""
        self.db_mem.flush()
class Store:
    """Append-only record store backed by a single file, with optional caching.

    Records are written by Record.marshal() and terminated by RECORD_SEP;
    read() returns the raw marshalled bytes starting at a given offset.
    """

    def __init__(self, tablemeta, config, logger, caching_enabled=True, shared_cache=None):
        self.caching_enabled = caching_enabled
        self.logger = logging.getLogger('store')
        self.tablemeta = tablemeta
        self.config = config
        self.empty_block = '-1'.zfill(self.config.INDEX_BLOCK_LEN).encode()
        self.store = self.config.cog_store(
            tablemeta.namespace, tablemeta.name, tablemeta.db_instance_id)
        self.store_cache = Cache(self.store, shared_cache)
        # Touch the file so it exists before opening it for read/write.
        with open(self.store, 'a'):
            pass
        self.store_file = open(self.store, 'rb+')
        logger.info("Store for file init: " + self.store)

    def close(self):
        """Close the store file."""
        self.store_file.close()

    def save(self, record):
        """Append 'record' to the store; returns its byte offset in the file."""
        self.store_file.seek(0, 2)
        store_position = self.store_file.tell()
        record.set_store_position(store_position)
        marshalled_record = record.marshal()
        self.store_file.write(marshalled_record)
        self.store_file.flush()
        if self.caching_enabled:
            self.store_cache.put(store_position, marshalled_record)
        return store_position

    def update_record_link_inplace(self, start_pos, int_value):
        """Overwrite the fixed-width key_link field of the record at start_pos."""
        if type(int_value) is not int:
            # Fixed: the message previously reported start_pos instead of the
            # offending value.
            raise ValueError("store position must be int but provided : " + str(int_value))
        byte_value = str(int_value).encode().rjust(Record.RECORD_LINK_LEN)
        self.logger.debug('update_record_link_inplace: ' + str(byte_value))
        self.store_file.seek(start_pos)
        self.store_file.write(byte_value)
        if self.caching_enabled:
            self.store_cache.partial_update_from_zero_index(start_pos, byte_value)
        self.store_file.flush()

    # @profile
    def read(self, position):
        """Return the marshalled record bytes starting at 'position'."""
        self.logger.debug("store read request at position: " + str(position))
        if self.caching_enabled:
            cached_record = self.store_cache.get(position)
            if cached_record is not None:
                return cached_record
        self.store_file.seek(position)
        record = self.__read_until(RECORD_SEP)
        if self.caching_enabled:
            self.store_cache.put(position, record)
        return record

    # @profile
    def __read_until(self, separator):
        """Read buffered chunks from the current position up to 'separator'.

        Returns the bytes read, including the separator, or whatever was read
        when EOF was hit first (None if already at EOF).
        """
        data = None
        while True:
            chunk = self.store_file.read(self.config.STORE_READ_BUFFER_SIZE)
            if len(chunk) == 0:
                return data
                # raise Exception("EOF store file! Data read error.")
            i = chunk.find(separator)
            # Fixed: the original tested 'i > 0', so a separator landing at
            # offset 0 of a chunk was missed and the read ran into the next
            # record. find() returns -1 when the separator is absent.
            if i >= 0:
                chunk = chunk[:i + 1]
                if data is None:
                    data = chunk
                else:
                    data += chunk
                break
            if data is None:
                data = chunk
            else:
                data += chunk
        self.logger.debug("store __read_until: " + str(data))
        return data
class Indexer:
    '''
    Manages indexes. Creates new index when an index is full.
    Searches all indexes for get requests.
    Provides same get/put/del method as single index but over multiple files.
    '''

    def __init__(self, tablemeta, config, logger):
        self.tablemeta = tablemeta
        self.config = config
        self.logger = logging.getLogger('indexer')
        self.index_list = []  # future range index.
        self.index_id = 0
        self.load_indexes()
        # If no index currently exists, create a new live index.
        if len(self.index_list) == 0:
            self.index_list.append(Index(tablemeta, config, logger, self.index_id))
        self.live_index = self.index_list[self.index_id]

    def close(self):
        """Close every index file managed by this indexer."""
        for idx in self.index_list:
            idx.close()

    def load_indexes(self):
        """Scan the namespace data dir and load every index file of this table."""
        for f in os.listdir(self.config.cog_data_dir(self.tablemeta.namespace)):
            if self.config.INDEX in f:
                if self.tablemeta.name == self.config.get_table_name(f):
                    self.logger.info("loading index file: " + f)
                    # Renamed local from 'id' to avoid shadowing the builtin.
                    idx_id = self.config.index_id(f)
                    index = Index(self.tablemeta, self.config, self.logger, idx_id)
                    self.index_list.append(index)
                    # Make the latest (highest-numbered) index the live index.
                    if idx_id >= self.index_id:
                        self.index_id = idx_id
                        self.live_index = index

    def put(self, key, store_position, store):
        """Index 'key' -> store_position in the live index."""
        self.live_index.put(key, store_position, store)
        self.logger.debug("Key: " + key + " indexed in: " + self.live_index.name)

    # @profile
    def get(self, key, store):
        """Look up 'key'; currently reads only the first index file."""
        idx = self.index_list[0]  # only one index file.
        return idx.get(key, store)

    def scanner(self, store):
        """Yield records from every index, one index at a time."""
        for idx in self.index_list:
            self.logger.debug("SCAN: index: " + idx.name)
            for r in idx.scanner(store):
                yield r

    def delete(self, key, store):
        """Delete 'key' from the first index that contains it.

        Bug fix: the original returned False as soon as the FIRST index did
        not contain the key, never consulting the remaining indexes. Now all
        indexes are tried before giving up.
        """
        for idx in self.index_list:
            if idx.delete(key, store):
                return True
        return False
|
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import os
import numpy as np
import random
from itertools import permutations
from gym.spaces import Discrete, Box
"""
Simple example for environment tests
"""
class SimpleCorridor(gym.Env):
    """Toy corridor environment for environment plumbing tests.

    The agent starts at position 0 and must walk right to end_pos.
    Actions: 0 = step left (clamped at 0), 1 = step right.
    Reward: 1 on reaching the end, 0 otherwise.
    """

    actions = []
    action_obj = []
    action_list = []
    action_space = None
    observation_space = None
    obs = []
    reward_range = [float(0), float(1)]

    def __init__(self):  # , config):
        self.end_pos = 9  # config["corridor_length"]
        self.cur_pos = 0
        self.action_space = Discrete(2)
        self.observation_space = Box(0.0, self.end_pos, shape=(1,), dtype=np.float32)

    def reset(self):
        """Put the agent back at the start and return the initial observation."""
        self.cur_pos = 0
        return [self.cur_pos]  # , 0, False

    def step(self, action):
        """Advance one step; returns (observation, reward, done, info)."""
        assert action in [0, 1], action
        if action == 1:
            self.cur_pos += 1
        elif self.cur_pos > 0:
            # Moving left is a no-op at the left wall.
            self.cur_pos -= 1
        done = self.cur_pos >= self.end_pos
        reward = 1 if done else 0
        return [self.cur_pos], reward, done, {}

    def render(self, mode='human', close=False):
        """Rendering simply reports the current position."""
        return self.cur_pos

    def close(self):
        return

    def seed(self, seed=None):
        """Seed the environment's RNG via gym's seeding helper."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
|
import numpy as np
def load_data():
    """Return a toy dataset: tokenized postings plus class labels (1 = abusive)."""
    posts = [
        ['my', 'dog', 'has', 'flea', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
    ]
    labels = [0, 1, 0, 1]
    return posts, labels
def createVocabList(postingList):
    """Return the (unordered) list of unique words across all postings."""
    vocab = set()
    for posting in postingList:
        vocab.update(posting)
    return list(vocab)
def setOfWord2Vec(vocalList, input):
    """Convert a document to a set-of-words binary vector over the vocabulary.

    Parameters:
        vocalList: vocabulary list defining the vector positions.
        input: the document's words. NOTE: the parameter name shadows the
            builtin ``input``; kept unchanged for interface compatibility.

    Returns:
        A list of 0/1 flags, one per vocabulary word (1 = word present).
    """
    returnVec = [0] * len(vocalList)
    for item in input:
        if item in vocalList:
            returnVec[vocalList.index(item)] = 1
        else:
            # Fixed message: the original concatenation lacked a separating
            # space ("...wordis not in vocalList!").
            print(item + " is not in vocalList!")
    return returnVec
def train(trainMatrix, label):
    """Train a two-class naive Bayes model with Laplace smoothing.

    Parameters:
        trainMatrix: sequence of word-count/word-presence vectors, one per document.
        label: sequence of class labels (0 or 1), one per document.

    Returns:
        (p0Vec, p1Vec, pAbusive): per-word log-probability vectors for class 0
        and class 1, and the prior probability of class 1.
    """
    docuNum = len(trainMatrix)
    wordsNum = len(trainMatrix[0])
    pAbusive = np.sum(label) / docuNum
    # Laplace smoothing: counts start at 1 and denominators at 2 so a word
    # unseen in a class never yields a zero probability product.
    p0Num = np.ones(wordsNum)
    p1Num = np.ones(wordsNum)
    p0Denom = 2.0
    p1Denom = 2.0
    for i in range(docuNum):
        if label[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += np.sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += np.sum(trainMatrix[i])
    # Work in log space to avoid floating-point underflow when the classifier
    # later sums many per-word terms.
    p1Vec = np.log(p1Num / p1Denom)
    p0Vec = np.log(p0Num / p0Denom)
    return p0Vec, p1Vec, pAbusive
def classsifyNB(vec2classify, p0Vec, p1Vec, pclass1):
p1 = np.sum(vec2classify * p1Vec) + np.log(pclass1)
|
import unittest
import os
from FSharp.lib.fsac.server import Server
from FSharp.lib import const
import sublime
THIS_DIR = os.path.dirname(os.path.dirname(__file__))
DATA_DIR = os.path.join(sublime.packages_path(), 'FSharp_Tests/data')
class ServerTests(unittest.TestCase):
    """Integration tests driving the fsautocomplete (fsac) server process."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # On osx/linux the binary must run under mono; windows runs it natively.
        if sublime.platform() in ('osx', 'linux'):
            self.cmd_line = ('mono', (const.path_to_fs_ac_binary(),))
        else:
            assert sublime.platform() == 'windows'
            self.cmd_line = (const.path_to_fs_ac_binary(), ())

    def _new_server(self):
        """Instantiate (but do not start) a Server from self.cmd_line."""
        cmd, args = self.cmd_line
        return Server(cmd, *args)

    def _stop(self, s):
        """Stop the server if it was actually created.

        Guarding against None fixes a latent bug in the original tests: if
        Server(...) raised, the finally block referenced an unbound 's' and
        masked the real failure with a NameError.
        """
        if s is not None:
            s.stop()

    def _parse_file(self, s, name):
        """Ask the server to parse DATA_DIR/name, discard the response, return the path."""
        p = os.path.join(DATA_DIR, name)
        s.parse(p)
        _ = s.read_line()
        return p

    def testCanInstantiate(self):
        s = None
        try:
            s = self._new_server()
            self.assertEqual(None, s.proc)
        finally:
            self._stop(s)

    def testCanStart(self):
        s = None
        try:
            s = self._new_server()
            s.start()
            self.assertTrue(s.proc.stdin)
        finally:
            self._stop(s)

    def testCanGetHelp(self):
        s = None
        try:
            s = self._new_server()
            s.start()
            s.help()
            data = s.read_all(eof=bytes(' \n', 'ascii'))
            self.assertEqual(data['Kind'], '_UNPARSED')
            self.assertEqual(data['Data'].strip()[:len('Supported')], 'Supported')
        finally:
            self._stop(s)

    def testCanSetProject(self):
        s = None
        try:
            s = self._new_server()
            s.start()
            p = os.path.join(DATA_DIR, 'FindDecl.fsproj')
            self.assertTrue(os.path.exists(p))
            s.project(p)
            response = s.read_line()
            self.assertEqual(response['Kind'], 'project')
        finally:
            self._stop(s)

    def testCanParseFile(self):
        s = None
        try:
            s = self._new_server()
            s.start()
            p = os.path.join(DATA_DIR, 'FileTwo.fs')
            s.parse(p)
            response = s.read_line()
            # XXX: Why in all caps?
            self.assertEqual(response['Kind'], 'INFO')
        finally:
            self._stop(s)

    def testCanRetrieveErrors(self):
        s = None
        try:
            s = self._new_server()
            s.start()
            self._parse_file(s, 'FileTwo.fs')
            s.errors()
            response = s.read_line()
            self.assertEqual(response['Kind'], 'errors')
        finally:
            self._stop(s)

    def testCanRetrieveDeclarations(self):
        s = None
        try:
            s = self._new_server()
            s.start()
            p = self._parse_file(s, 'FileTwo.fs')
            s.declarations(p)
            response = s.read_line()
            self.assertEqual(response['Kind'], 'declarations')
        finally:
            self._stop(s)

    def testCanRetrieveCompletions(self):
        s = None
        try:
            s = self._new_server()
            s.start()
            p = self._parse_file(s, 'FileTwo.fs')
            s.completions(p, 12, 9)
            helptext = s.read_line()
            completions = s.read_line()
            self.assertEqual(helptext['Kind'], 'helptext')
            self.assertEqual(completions['Kind'], 'completion')
        finally:
            self._stop(s)

    def testCanRetrieveTooltip(self):
        s = None
        try:
            s = self._new_server()
            s.start()
            p = self._parse_file(s, 'FileTwo.fs')
            s.tooltip(p, 12, 9)
            response = s.read_line()
            self.assertEqual(response['Kind'], 'tooltip')
        finally:
            self._stop(s)

    def testCanFindDeclaration(self):
        s = None
        try:
            s = self._new_server()
            s.start()
            p = os.path.join(DATA_DIR, 'FileTwo.fs')
            s.project(p)
            _ = s.read_line()
            self._parse_file(s, 'FileTwo.fs')
            self._parse_file(s, 'Script.fsx')
            p3 = self._parse_file(s, 'Program.fs')
            s.find_declaration(p3, 5, 15)
            response = s.read_line()
            self.assertEqual(response['Kind'], 'finddecl')
        finally:
            self._stop(s)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import pdc.apps.compose.models
class Migration(migrations.Migration):
    # Adds BuildImage.test_result as a foreign key to
    # ComposeAcceptanceTestingState, defaulting existing rows via the
    # get_untested callable (resolved when the migration runs).

    dependencies = [
        ('compose', '0007_auto_20151120_0336'),
        ('package', '0007_auto_20151208_1011'),
    ]

    operations = [
        migrations.AddField(
            model_name='buildimage',
            name='test_result',
            field=models.ForeignKey(default=pdc.apps.compose.models.ComposeAcceptanceTestingState.get_untested, to='compose.ComposeAcceptanceTestingState'),
        ),
    ]
|
import npyscreen
class MainList(npyscreen.MultiLineAction):
    """Top-level menu list: selecting an entity opens its list, or offers to
    create a first record when the entity is empty."""

    def __init__(self, *args, **keywords):
        super(MainList, self).__init__(*args, **keywords)
        self.name = "Main List"

    def display_value(self, vl):
        # Center each entry inside a fixed-width bordered row.
        return "|{:^76}|".format(str(vl))

    def actionHighlighted(self, act_on_this, keypress):
        # Empty entity -> offer creation; otherwise jump to its list form
        # (form names follow the "<ENTITY>LIST" convention).
        if self.parent.parentApp.database.get_count_of_an_entity(act_on_this) == 0:
            self.spawn_notify_popup(act_on_this)
        else:
            self.parent.parentApp.switchForm(f"{act_on_this.upper()}LIST")

    def spawn_notify_popup(self, entity):
        # Yes/no dialog: create a first record for the empty entity?
        message_to_display = f'{entity} is empty. \n\t Do you wanna create some?'
        notify_result = npyscreen.notify_yes_no(message_to_display, title='Info box')
        if notify_result:
            # NOTE(review): this always resets the MUSICIANEDIT form even when
            # the target edit form is for another entity (e.g. RELEASEEDIT) --
            # confirm whether the hardcoded form name is intended.
            self.parent.parentApp.getForm('MUSICIANEDIT').value = None
            self.parent.parentApp.switchForm(f"{entity.upper()[:-1]}EDIT")
        else:
            self.parent.parentApp.switchForm("MAIN")
class MainListDisplay(npyscreen.FormMutt):
    """Form hosting the MainList widget; Ctrl-Q terminates the application."""

    MAIN_WIDGET_CLASS = MainList

    def __init__(self, *args, **keywords):
        super().__init__(*args, **keywords)
        self.add_handlers({"^Q": self.exit})

    def beforeEditing(self):
        # Refresh the menu entries every time the form is (re)entered.
        self.wMain.values = ['Musicians', 'Releases', 'Listeners']
        self.wMain.display()

    def exit(self, *args, **keywords):
        # Switching to form None tells npyscreen to end the application loop.
        self.parentApp.switchForm(None)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Flask-APIBlueprint
-------------
Route inheritance for Flask Blueprints.
"""
try:
    from setuptools import setup, find_packages
except ImportError:
    # Fixed typo: the fallback imported from 'disutils', a nonexistent module,
    # so it could never have worked. The correct module is 'distutils'.
    from distutils.core import setup

# Prefer the long description from disk; fall back to a one-liner if the file
# is missing or unreadable.
try:
    with open('LONG_DESCRIPTION.rst') as f:
        long_description = f.read()
except IOError:  # narrowed from a bare 'except:' that hid unrelated errors
    long_description = 'Route inheritance for Flask Blueprints'

requirements = [
    "flask>=0.11.1, <=1.0.2",
    "six>=1.10.0, <2.0"
]

setup(
    name='Flask-APIBlueprint',
    version='1.0.0',
    url='https://github.com/gwongz/flask-apiblueprint',
    license='BSD',
    author='Grace Wong',
    author_email='gwongz@gmail.com',
    description='Route inheritance for Flask Blueprints',
    long_description=long_description,
    packages=['flask_apiblueprint'],
    zip_safe=False,
    include_package_data=True,
    download_url='https://github.com/gwongz/flask-apiblueprint/tarball/v1.0.0',
    platforms='any',
    install_requires=requirements,
    test_suite='test_apiblueprint',
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3'
    ]
)
|
# -*- coding: utf-8 -*-
# Fixed: removed the C-style trailing semicolons, which are not Python idiom
# and were applied inconsistently (some dicts had them, some did not).

# Root directory where all data can be found : str
ROOT_DIRECTORY = r''

# Type of platform in use : str
# Examples: 'airplane', 'uav'
PLATFORM = 'airplane'

# Sensor used to make the acquisition : str
# Examples: 'LiDAR', 'UX5HP_RGB15mm', 'EBEEX_MSREM'
SENSOR = 'LiDAR'

# Name of the site where the analysis should be done : str
# Examples: 'Groenendijk', 'DeHaan', 'Mablethorpe', 'Dunkirk'
SITE_NAME = 'Groenendijk'

###############################################################################
RASTER_PREPROCESSING = {
    # File name of the shapefile : str or None
    # File name of the shapefile used to crop the input raster (incl. file extension)
    # If None, no cropping is performed on the input raster
    'CROP_ACCORDING_SHAPE_FILE': 'Groenendijk_LiDAR.shp',

    # Filter settings based on topography : dict of None or dict of float
    # Lowest / highest topography value to take into account. Values lower/higher
    # than this threshold are masked
    # If None, no threshold is set
    'FILTER_BEACH_AREA_ON_RASTER': {
        'TOPOGRAPHY_THRESHOLD_LOW': 0.5,  # expressed in meter
        'TOPOGRAPHY_THRESHOLD_HIGH': 8.0  # expressed in meter
    },

    'TOPOGRAPHY_GAUSSIAN_SMOOTHING': {
        # Should smoothing be done? : bool
        'DO': True,
        # Standard deviation of the Gaussian kernel : float
        'SIGMA': 2,
        # If the SIGMA parameter is defined in pixels (True) or meter (False) : bool
        'SIGMA_IN_PIXEL': False
    }
}

###############################################################################
CROSS_SECTION_SEARCH_BAR_FEATURES = {
    # Activation of the module? : bool
    'DO': True,

    # If the sea is located on the eastern side of the beach grid (e.g. east coast of UK),
    # this parameter should be set to False, otherwise put True.
    'IS_SEA_AT_WESTERN_SIDE_OF_BEACH': True,

    # Orientation of the cross-sections relative to the beach, expressed in degree : float
    # 0 = Parallel to the beach
    # 90 = Perpendicular to the beach
    'RELATIVE_ORIENTATION_DEGREE': 90,

    # Distance in meter between subsequent cross-section profile lines : float
    'INTER-SPACING_METER': 1,

    # Minimum length of valid data on the cross-section to take it into account: float
    'MINIMUM_LENGTH_CROSSSECTION_METER': 30,

    # Minimum distance between crest and trough points in meter : float
    'MINIMUM_DISTANCE_CREST_TROUGH_METER': 3,

    # Minimum topography with lower values not to take into account : float or None
    # If None, RASTER_PREPROCESSING['FILTER_BEACH_AREA_ON_RASTER']['TOPOGRAPHY_THRESHOLD_LOW'] is used
    # If RASTER_PREPROCESSING['FILTER_BEACH_AREA_ON_RASTER']['TOPOGRAPHY_THRESHOLD_LOW'] is None, 'LOW_REFERENCE_LINE'[0] is used
    # If 'LOW_REFERENCE_LINE'[0] is None, a default value of -5 is used
    'MINIMUM_TOPOGRAPHY_NOT_TO_TAKE_INTO_ACCOUNT_METER': None,

    # Frequency for plotting crest/trough points found on the cross-section profiles
    # (expressed in number of cross-section profiles) : int
    # If -1, no plotting will be done
    'FREQUENCY_PLOTTING_CROSSSECTIONS': 50,

    # Topography intervals where to search for marker reference points : None or tuple of float
    # If None, no searching is performed
    'LOW_REFERENCE_LINE': (0.6, 1.0),  # expressed in meter
    'MEAN_LOW_WATER': None,  # expressed in meter
    'MEAN_INTERTIDAL': None,  # expressed in meter
    'MEAN_HIGH_WATER': None,  # expressed in meter
    'HIGHEST_ASTRONOMICAL_TIDE': None  # expressed in meter
}

###############################################################################
# Point filtering to remove isolated feature points on the raster
FILTERING_POINTS = {
    # Activation of the module? : bool
    'DO': False,

    # Topography feature point type where filter is applied on : str
    # options: 'CrestPoints', 'TroughPoints' or 'InflectionPoints'
    'BEACH_FEATURE_TYPE': 'CrestPoints',

    # Dimensions of the filter window (width, height) in meter : tuple of float
    'FILTER_DIMENSIONS_METER': (20.0, 10.0),

    # Rotation angle (in degree) of the filtering window: None or float
    # (+ in clockwise direction)
    # if None, the main orientation of the beach is used (automatically determined)
    'FILTER_ROTATION_DEGREE': None,

    # Number of iterations the filter should be applied : int
    'NUMBER_ITERATIONS_TO_APPLY_FILTER_ON_RASTER': 3,

    # Number of points within filter window >= (k/100) * largest dimension of filter window
    'THRESHOLD_POINT_COUNT_IN_NEIGHBOURHOOD_PCT': 10,  # =k [%] : int

    # Number of points within filter window with a topography difference > p (in meter)
    # <= (q/100) * largest dimension of filter window
    'THRESHOLD_TOPOGRAPHY_DIFFERENCE_IN_NEIGHBOURHOOD': 0.10,  # =p [m] : float
    'THRESHOLD_TOPOGRAPHY_DIFFERENCE_IN_NEIGHBOURHOOD_POINT_COUNT_PCT': 5  # =q [%] : int
}

###############################################################################
# Allocating a label number to each bar feature point on the raster, with points of the same
# label thought to belong to the same individual intertidal bar / trough
GROUPING_POINTS_IN_LABELS = {
    # Activation of the module? : bool
    'DO': False,

    # Number of times the binary dilation should be repeated for respectively
    # crest, trough and inflection points: list of int
    'NUMBER_ITERATIONS_DILATION_TO_GROUP_POINTS_METER': [10, 10, 3]
}

# Allocating a cluster label number to each bar feature point on the raster, with points of the
# same cluster label thought to belong to the same cluster of intertidal bars / troughs
# The clustering is based on the DBSCAN algorithm (https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html)
# and uses two features to cluster on:
# 1. Topography
# 2. Distance to a reference line parallel to the main direction of the intertidal bars
GROUPING_POINTS_IN_CLUSTERS = {
    # Activation of the module? : bool
    'DO': True,

    # Slope of the reference line to calculate the distance from : float or None
    # Orientation (+) in clockwise direction
    # If None, the main orientation of the beach is used
    'REFERENCE_LINE_SLOPE_DEGREE': None,

    # Point on the reference line to calculate the distance from : tuple of float
    # Coordinates of the point (Easting, Northing) where the reference line
    # should go through. Coordinate Reference System is the same as the raster
    'REFERENCE_LINE_POINT_COORDS': (55500, 219100),

    # Two main parameters of the DBSCAN clustering algorithm:
    # First: eps
    # Second: min_samples
    # For more information, please take a look at: scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html
    'DBSCAN_PARAMETERS': (0.10, 10),

    # Distance in topography to merge clusters, expressed in meter : float
    # If two clusters are separated in topography (median value) within this distance,
    # they are grouped together into one single cluster
    'MERGE_CLUSTERS_TOPOGRAPHY_INTERVAL': 0.3,

    # Percentage of total length of beach a cluster should be : float
    # e.g. if parameter is 40 and a beach is 2000 meter in length,
    # each cluster of points should be at least 800 meter in length
    'MINIMUM_SPATIAL_EXTENT_OF_CLUSTER_PCT': 0
}

###############################################################################
CROSS_SECTION_SEARCH_CHANNEL_FEATURES = {
    # bool
    'DO': True,

    # Orientation of the cross-sections relative to the beach, expressed in degree
    # 0 = Parallel to the beach
    # 90 = Perpendicular to the beach
    'RELATIVE_ORIENTATION_DEGREE': 0,

    # Distance in meter between subsequent cross-section profile lines
    'INTER-SPACING_METER': 2,

    # Minimum length of valid data on the cross-section to take it into account: float
    'MINIMUM_LENGTH_CROSSSECTION_METER': 100,

    # Minimum width to consider a depression in topography as a channel, expressed in meter
    # (measured between the top parts on both sides of the depression)
    'MINIMUM_WIDTH_METER': 3,  # 5

    # Minimum depth to consider a depression in topography as a channel , expressed in meter
    # (measured from the maximum to the minimum topography of the depression)
    'MINIMUM_DEPTH_METER': 0.2,

    'FREQUENCY_PLOTTING_CROSSSECTIONS': 10,
}
#!/usr/bin/env python
import sys, glob
sys.path.insert(0, './gen-py')
sys.path.insert(0, glob.glob('../../lib/py/build/lib.*')[0])
from ThriftTest.ttypes import *
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
import unittest
import time
class AbstractTest(unittest.TestCase):
    """Round-trip struct-versioning tests shared by all protocol flavours.

    Subclasses must define a ``protocol_factory`` class attribute providing
    the Thrift protocol factory under test.
    """

    def setUp(self):
        # Old-style struct: fields shared by V1 and V2, plus one V1-only field.
        self.v1obj = VersioningTestV1(
            begin_in_both=12345,
            old_string='aaa',
            end_in_both=54321,
        )
        # New-style struct: the same shared fields plus several V2-only fields.
        self.v2obj = VersioningTestV2(
            begin_in_both=12345,
            newint=1,
            newbyte=2,
            newshort=3,
            newlong=4,
            newdouble=5.0,
            newstruct=Bonk(message="Hello!", type=123),
            newlist=[7, 8, 9],
            newset=[42, 1, 8],
            newmap={1: 2, 2: 3},
            newstring="Hola!",
            end_in_both=54321,
        )

    def _serialize(self, obj):
        """Serialize *obj* with this class's protocol and return the raw bytes."""
        trans = TTransport.TMemoryBuffer()
        prot = self.protocol_factory.getProtocol(trans)
        obj.write(prot)
        return trans.getvalue()

    def _deserialize(self, objtype, data):
        """Deserialize *data* into a fresh instance of *objtype*."""
        prot = self.protocol_factory.getProtocol(TTransport.TMemoryBuffer(data))
        ret = objtype()
        ret.read(prot)
        return ret

    def testForwards(self):
        # A newer reader must accept data written by an older writer.
        # NOTE: assertEquals is a deprecated alias (removed in Python 3.12);
        # assertEqual is the supported spelling.
        obj = self._deserialize(VersioningTestV2, self._serialize(self.v1obj))
        self.assertEqual(obj.begin_in_both, self.v1obj.begin_in_both)
        self.assertEqual(obj.end_in_both, self.v1obj.end_in_both)

    def testBackwards(self):
        # An older reader must skip fields added by a newer writer.
        obj = self._deserialize(VersioningTestV1, self._serialize(self.v2obj))
        self.assertEqual(obj.begin_in_both, self.v2obj.begin_in_both)
        self.assertEqual(obj.end_in_both, self.v2obj.end_in_both)
class NormalBinaryTest(AbstractTest):
    # Runs the shared versioning tests with the pure-Python binary protocol.
    protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
class AcceleratedBinaryTest(AbstractTest):
    # Runs the shared versioning tests with the C-accelerated binary protocol.
    protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
class AcceleratedFramedTest(unittest.TestCase):
    def testSplit(self):
        """Test FramedTransport and BinaryProtocolAccelerated.

        Tests that TBinaryProtocolAccelerated and TFramedTransport
        play nicely together when a read spans a frame.
        """
        protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
        bigstring = "".join(chr(byte) for byte in range(ord("a"), ord("z") + 1))

        databuf = TTransport.TMemoryBuffer()
        prot = protocol_factory.getProtocol(databuf)
        prot.writeI32(42)
        prot.writeString(bigstring)
        prot.writeI16(24)
        data = databuf.getvalue()
        # Floor division: plain "/" yields a float under Python 3 and a
        # float slice index raises TypeError.
        cutpoint = len(data) // 2
        parts = [data[:cutpoint], data[cutpoint:]]

        framed_buffer = TTransport.TMemoryBuffer()
        framed_writer = TTransport.TFramedTransport(framed_buffer)
        for part in parts:
            framed_writer.write(part)
            framed_writer.flush()
        # Two flushes produce two frames, i.e. two 4-byte length headers.
        self.assertEqual(len(framed_buffer.getvalue()), len(data) + 8)

        # Recreate framed_buffer so we can read from it.
        framed_buffer = TTransport.TMemoryBuffer(framed_buffer.getvalue())
        framed_reader = TTransport.TFramedTransport(framed_buffer)
        prot = protocol_factory.getProtocol(framed_reader)
        self.assertEqual(prot.readI32(), 42)
        self.assertEqual(prot.readString(), bigstring)
        self.assertEqual(prot.readI16(), 24)
def suite():
    """Collect every binary-protocol test case into a single suite."""
    loader = unittest.TestLoader()
    combined = unittest.TestSuite()
    for case in (NormalBinaryTest, AcceleratedBinaryTest, AcceleratedFramedTest):
        combined.addTest(loader.loadTestsFromTestCase(case))
    return combined
if __name__ == "__main__":
    # Run the full suite() with verbose per-test output.
    unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
|
# For compat
from .hdemucs import HDemucs

# Backwards-compatible alias: code that imports the old ``WDemucs`` name
# now gets the ``HDemucs`` implementation.
WDemucs = HDemucs
|
from .elasticsearch import ElasticSearch
from . import register
try:
    # AWS request signing is optional: the query runner below reports itself
    # enabled only when requests_aws4auth is installed.
    from requests_aws4auth import AWS4Auth
    enabled = True
except ImportError:
    enabled = False
class AmazonElasticsearchService(ElasticSearch):
    """Elasticsearch query runner for AWS-hosted domains.

    Behaves like the plain ElasticSearch runner, except every request is
    signed with AWS SigV4 credentials.
    """

    @classmethod
    def name(cls):
        return "Amazon Elasticsearch Service"

    @classmethod
    def enabled(cls):
        # Module-level flag set by the optional requests_aws4auth import.
        return enabled

    @classmethod
    def type(cls):
        return "aws_es"

    @classmethod
    def configuration_schema(cls):
        properties = {
            'server': {'type': 'string', 'title': 'Endpoint'},
            'region': {'type': 'string'},
            'access_key': {'type': 'string', 'title': 'Access Key'},
            'secret_key': {'type': 'string', 'title': 'Secret Key'},
        }
        return {
            'type': 'object',
            'properties': properties,
            "secret": ["secret_key"],
            "order": ["server", "region", "access_key", "secret_key"],
            "required": ['server', 'region', 'access_key', 'secret_key'],
        }

    def __init__(self, configuration):
        super(AmazonElasticsearchService, self).__init__(configuration)
        # SigV4 signer scoped to the 'es' service in the configured region.
        self.auth = AWS4Auth(
            configuration['access_key'],
            configuration['secret_key'],
            configuration['region'],
            'es',
        )
# Make this query runner discoverable through the registry.
register(AmazonElasticsearchService)
|
import pandas as pd
import pytest
from operational_analysis.toolkits import reanalysis_downloading as rd
def test_get_dataset_names():
    """_get_dataset_names maps friendly names to PlanetOS dataset ids."""
    expected = {"merra2": "nasa_merra2_global", "era5": "ecmwf_era5_v2"}
    # Canonical spellings, plus case and surrounding-whitespace variants,
    # should all resolve to the same dataset id.
    for query, key in [
        ("merra2", "merra2"),
        ("era5", "era5"),
        ("MERRA2", "merra2"),
        (" era5 ", "era5"),
    ]:
        assert rd._get_dataset_names(query) == expected[key]
    # Unknown dataset names are rejected.
    with pytest.raises(KeyError):
        rd._get_dataset_names("ERAI")
def test_default_var_dicts_planetos():
    """_get_default_var_dicts_planetos returns the per-dataset variable maps."""
    expected = {
        "merra2": {
            "U50M": "u_ms",
            "V50M": "v_ms",
            "T2M": "temperature_K",
            "PS": "surf_pres_Pa",
        },
        "era5": {
            "eastward_wind_at_100_metres": "u_ms",
            "northward_wind_at_100_metres": "v_ms",
            "air_temperature_at_2_metres": "temperature_K",
            "surface_air_pressure": "surf_pres_Pa",
        },
    }
    # Canonical spellings plus case/whitespace variants all resolve correctly.
    for query, key in [
        ("merra2", "merra2"),
        ("era5", "era5"),
        ("MERRA2", "merra2"),
        (" era5 ", "era5"),
    ]:
        assert rd._get_default_var_dicts_planetos(query) == expected[key]
    # Unknown dataset names are rejected.
    with pytest.raises(ValueError):
        rd._get_default_var_dicts_planetos("ERAI")
def test_get_start_end_dates_planetos():
    """Exercise _get_start_end_dates_planetos over its date-clamping rules.

    Consolidates 18 previously copy-pasted call/assert pairs into one case
    table. Each case supplies (start_date, end_date, num_years, dataset
    bounds) and the expected (start, end) tuple. The rules under test:
    requested dates outside the data set bounds are clamped to it, the end
    date gains one hour so the final timestamp is retrieved, the end-date
    minute is aligned with the start so an integer number of hours is
    requested, and Feb. 29 dates map to Feb. 28 in non-leap years.
    """
    # Data set start and end dates from PlanetOS (actual values as of November 2021).
    merra2 = (
        pd.to_datetime("1980-01-01 00:30:00"),
        pd.to_datetime("2021-08-31 23:30:00"),
    )
    era5 = (
        pd.to_datetime("1979-01-01 00:00:00"),
        pd.to_datetime("2021-10-28 18:00:00"),
    )

    # (start_date, end_date, num_years, (ds_start, ds_end), expected_start, expected_end)
    cases = [
        # Both dates given, in bounds: end date gains 1 hour.
        ("1980-01-01 00:30:00", "2021-08-31 23:30", 20, merra2,
         "1980-01-01 00:30", "2021-09-01 00:30"),
        # End date out of bounds: clamped to data set end + 1 hour.
        ("1980-01-01 00:30:00", "2021-08-31 23:31", 20, merra2,
         "1980-01-01 00:30", "2021-09-01 00:30"),
        # Start date out of bounds: clamped to data set start.
        ("1980-01-01 00:29", "2021-08-31 23:30", 20, merra2,
         "1980-01-01 00:30", "2021-09-01 00:30"),
        # Start clamp changes the minute value; the end minute follows so an
        # integer number of hours is still requested.
        ("1980-01-01 00:00", "2021-08-31 23:00", 20, merra2,
         "1980-01-01 00:30", "2021-09-01 00:30"),
        # Both dates out of bounds.
        ("1980-01-01 00:29:00", "2021-09-01 00:00", 20, merra2,
         "1980-01-01 00:30", "2021-09-01 00:30"),
        # Start only, in bounds: end = start + num_years (may pass the data
        # set end since the final timestamp is not actually retrieved).
        ("2001-09-01 00:30", None, 20, merra2,
         "2001-09-01 00:30", "2021-09-01 00:30"),
        # Start only, implied end out of bounds: data set end + 1 hour.
        ("2001-09-01 01:00", None, 20, merra2,
         "2001-09-01 01:00", "2021-09-01 00:30"),
        # Start only, start out of bounds: data set start and + num_years.
        ("1980-01-01 00:29", None, 20, merra2,
         "1980-01-01 00:30", "2000-01-01 00:30"),
        # Start only, both ends out of bounds.
        ("1980-01-01 00:29", None, 42, merra2,
         "1980-01-01 00:30", "2021-09-01 00:30"),
        # End only, in bounds: end + 1 hour, start = end - num_years.
        (None, "2021-08-31 23:30", 20, merra2,
         "2001-09-01 00:30", "2021-09-01 00:30"),
        # End only, end out of bounds: data set end + 1 hour.
        (None, "2021-09-01 00:00", 20, merra2,
         "2001-09-01 00:30", "2021-09-01 00:30"),
        # End only, implied start out of bounds: data set start and + num_years.
        (None, "1999-12-31 23:00", 20, merra2,
         "1980-01-01 00:30", "2000-01-01 00:30"),
        # End only, both ends out of bounds.
        (None, "2021-09-01 00:00", 42, merra2,
         "1980-01-01 00:30", "2021-09-01 00:30"),
        # Neither date: end = 1 hour after the last full month, start 20
        # years before that.
        (None, None, 20, era5,
         "2001-10-01 00:00", "2021-10-01 00:00"),
        # Neither date, implied start out of bounds: data set start.
        (None, None, 43, era5,
         "1979-01-01 00:00", "2021-10-01 00:00"),
        # Neither date; the merra2 data set ends exactly on a month boundary,
        # so the end is the start of the next month.
        (None, None, 20, merra2,
         "2001-09-01 00:00", "2021-09-01 00:00"),
        # End on Feb. 29 of a leap year: implied start becomes Feb. 28.
        (None, "2016-02-29 12:00", 5, era5,
         "2011-02-28 13:00", "2016-02-29 13:00"),
        # Start on Feb. 29 of a leap year: implied end becomes Feb. 28.
        ("2016-02-29 12:00", None, 5, era5,
         "2016-02-29 12:00", "2021-02-28 12:00"),
    ]

    for start, end, num_years, (ds_start, ds_end), exp_start, exp_end in cases:
        result = rd._get_start_end_dates_planetos(
            start_date=start,
            end_date=end,
            num_years=num_years,
            start_date_ds=ds_start,
            end_date_ds=ds_end,
        )
        assert result == (pd.to_datetime(exp_start), pd.to_datetime(exp_end)), (
            start, end, num_years)
|
#
# Builtin Definitions
#
from __future__ import absolute_import
from .Symtab import BuiltinScope, StructOrUnionScope, CppClassScope
from .Code import UtilityCode
from .TypeSlots import Signature
from . import PyrexTypes
from . import Options
# C-level implementations of builtin types, functions and methods
# Utility-code snippets loaded once at module import and attached to the
# builtin declarations below.
iter_next_utility_code = UtilityCode.load("IterNext", "ObjectHandling.c")
getattr_utility_code = UtilityCode.load("GetAttr", "ObjectHandling.c")
getattr3_utility_code = UtilityCode.load("GetAttr3", "Builtins.c")
pyexec_utility_code = UtilityCode.load("PyExec", "Builtins.c")
pyexec_globals_utility_code = UtilityCode.load("PyExecGlobals", "Builtins.c")
globals_utility_code = UtilityCode.load("Globals", "Builtins.c")
builtin_utility_code = {
    'StopAsyncIteration': UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"),
}
# mapping from builtins to their C-level equivalents
class _BuiltinOverride(object):
    """Description of a Python builtin backed by a C-level implementation.

    Maps the Python name ``py_name`` to the C function ``cname``, together
    with its signature and any utility code the C implementation requires.
    """

    def __init__(self, py_name, args, ret_type, cname, py_equiv="*",
                 utility_code=None, sig=None, func_type=None,
                 is_strict_signature=False, builtin_return_type=None,
                 nogil=None):
        # args/ret_type are Signature format strings; a given func_type
        # overrides the signature-derived C function type entirely.
        self.py_name, self.cname, self.py_equiv = py_name, cname, py_equiv
        self.args, self.ret_type = args, ret_type
        self.func_type, self.sig = func_type, sig
        self.builtin_return_type = builtin_return_type
        self.is_strict_signature = is_strict_signature
        self.utility_code = utility_code
        self.nogil = nogil

    def build_func_type(self, sig=None, self_arg=None):
        """Build and return the CFuncType for this builtin.

        When *sig* is not supplied, one is constructed from the args/ret_type
        format strings given at construction time.
        """
        if sig is None:
            sig = Signature(self.args, self.ret_type, nogil=self.nogil)
        sig.exception_check = False  # not needed for the current builtins
        func_type = sig.function_type(self_arg)
        if self.is_strict_signature:
            func_type.is_strict_signature = True
        if self.builtin_return_type:
            # builtin_types is a module-level mapping — presumably populated
            # elsewhere in this module before this is called; not visible here.
            func_type.return_type = builtin_types[self.builtin_return_type]
        return func_type
class BuiltinAttribute(object):
    """A plain (non-method) attribute exposed on a builtin type."""

    def __init__(self, py_name, cname=None, field_type=None, field_type_name=None):
        self.py_name = py_name
        self.cname = cname or py_name
        self.field_type_name = field_type_name  # can't do the lookup before the type is declared!
        self.field_type = field_type

    def declare_in_type(self, self_type):
        """Declare this attribute as a private variable in *self_type*'s scope."""
        if self.field_type_name is not None:
            # lazy type lookup
            field_type = builtin_scope.lookup(self.field_type_name).type
        else:
            field_type = self.field_type or PyrexTypes.py_object_type
        entry = self_type.scope.declare(self.py_name, self.cname, field_type, None, 'private')
        entry.is_variable = True
class BuiltinFunction(_BuiltinOverride):
    """A free-standing builtin function; declares itself into a scope."""

    def declare_in_scope(self, scope):
        cfunc_type = self.func_type
        if cfunc_type is None:
            # No explicit C function type: derive one from the signature.
            cfunc_type = self.build_func_type(self.sig)
        scope.declare_builtin_cfunction(
            self.py_name, cfunc_type, self.cname, self.py_equiv, self.utility_code)
class BuiltinMethod(_BuiltinOverride):
    """A method of a builtin type; declares itself on the owning type."""

    def declare_in_type(self, self_type):
        cfunc_type = self.func_type
        if cfunc_type is None:
            # Derive a function type whose first ('self') argument is the
            # builtin type that owns this method.
            first_arg = PyrexTypes.CFuncTypeArg("", self_type, None)
            first_arg.not_none = True
            first_arg.accept_builtin_subtypes = True
            cfunc_type = self.build_func_type(self.sig, first_arg)
        self_type.scope.declare_builtin_cfunction(
            self.py_name, cfunc_type, self.cname, utility_code=self.utility_code)
builtin_function_table = [
# name, args, return, C API func, py equiv = "*"
BuiltinFunction('abs', "d", "d", "fabs",
is_strict_signature=True, nogil=True),
BuiltinFunction('abs', "f", "f", "fabsf",
is_strict_signature=True, nogil=True),
BuiltinFunction('abs', "i", "i", "abs",
is_strict_signature=True, nogil=True),
BuiltinFunction('abs', "l", "l", "labs",
is_strict_signature=True, nogil=True),
BuiltinFunction('abs', None, None, "__Pyx_abs_longlong",
utility_code = UtilityCode.load("abs_longlong", "Builtins.c"),
func_type = PyrexTypes.CFuncType(
PyrexTypes.c_longlong_type, [
PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_longlong_type, None)
],
is_strict_signature = True, nogil=True)),
] + list(
BuiltinFunction('abs', None, None, "/*abs_{0}*/".format(t.specialization_name()),
func_type = PyrexTypes.CFuncType(
t,
[PyrexTypes.CFuncTypeArg("arg", t, None)],
is_strict_signature = True, nogil=True))
for t in (PyrexTypes.c_uint_type, PyrexTypes.c_ulong_type, PyrexTypes.c_ulonglong_type)
) + list(
BuiltinFunction('abs', None, None, "__Pyx_c_abs{0}".format(t.funcsuffix),
func_type = PyrexTypes.CFuncType(
t.real_type, [
PyrexTypes.CFuncTypeArg("arg", t, None)
],
is_strict_signature = True, nogil=True))
for t in (PyrexTypes.c_float_complex_type,
PyrexTypes.c_double_complex_type,
PyrexTypes.c_longdouble_complex_type)
) + [
BuiltinFunction('abs', "O", "O", "__Pyx_PyNumber_Absolute",
utility_code=UtilityCode.load("py_abs", "Builtins.c")),
#('all', "", "", ""),
#('any', "", "", ""),
#('ascii', "", "", ""),
#('bin', "", "", ""),
BuiltinFunction('callable', "O", "b", "__Pyx_PyCallable_Check",
utility_code = UtilityCode.load("CallableCheck", "ObjectHandling.c")),
#('chr', "", "", ""),
#('cmp', "", "", "", ""), # int PyObject_Cmp(PyObject *o1, PyObject *o2, int *result)
#('compile', "", "", ""), # PyObject* Py_CompileString( char *str, char *filename, int start)
BuiltinFunction('delattr', "OO", "r", "PyObject_DelAttr"),
BuiltinFunction('dir', "O", "O", "PyObject_Dir"),
BuiltinFunction('divmod', "OO", "O", "PyNumber_Divmod"),
BuiltinFunction('exec', "O", "O", "__Pyx_PyExecGlobals",
utility_code = pyexec_globals_utility_code),
BuiltinFunction('exec', "OO", "O", "__Pyx_PyExec2",
utility_code = pyexec_utility_code),
BuiltinFunction('exec', "OOO", "O", "__Pyx_PyExec3",
utility_code = pyexec_utility_code),
#('eval', "", "", ""),
#('execfile', "", "", ""),
#('filter', "", "", ""),
BuiltinFunction('getattr3', "OOO", "O", "__Pyx_GetAttr3", "getattr",
utility_code=getattr3_utility_code), # Pyrex legacy
BuiltinFunction('getattr', "OOO", "O", "__Pyx_GetAttr3",
utility_code=getattr3_utility_code),
BuiltinFunction('getattr', "OO", "O", "__Pyx_GetAttr",
utility_code=getattr_utility_code),
BuiltinFunction('hasattr', "OO", "b", "__Pyx_HasAttr",
utility_code = UtilityCode.load("HasAttr", "Builtins.c")),
BuiltinFunction('hash', "O", "h", "PyObject_Hash"),
#('hex', "", "", ""),
#('id', "", "", ""),
#('input', "", "", ""),
BuiltinFunction('intern', "O", "O", "__Pyx_Intern",
utility_code = UtilityCode.load("Intern", "Builtins.c")),
BuiltinFunction('isinstance', "OO", "b", "PyObject_IsInstance"),
BuiltinFunction('issubclass', "OO", "b", "PyObject_IsSubclass"),
BuiltinFunction('iter', "OO", "O", "PyCallIter_New"),
BuiltinFunction('iter', "O", "O", "PyObject_GetIter"),
BuiltinFunction('len', "O", "z", "PyObject_Length"),
BuiltinFunction('locals', "", "O", "__pyx_locals"),
#('map', "", "", ""),
#('max', "", "", ""),
#('min', "", "", ""),
BuiltinFunction('next', "O", "O", "__Pyx_PyIter_Next",
utility_code = iter_next_utility_code), # not available in Py2 => implemented here
BuiltinFunction('next', "OO", "O", "__Pyx_PyIter_Next2",
utility_code = iter_next_utility_code), # not available in Py2 => implemented here
#('oct', "", "", ""),
#('open', "ss", "O", "PyFile_FromString"), # not in Py3
] + [
BuiltinFunction('ord', None, None, "__Pyx_long_cast",
func_type=PyrexTypes.CFuncType(
PyrexTypes.c_long_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)],
is_strict_signature=True))
for c_type in [PyrexTypes.c_py_ucs4_type, PyrexTypes.c_py_unicode_type]
] + [
BuiltinFunction('ord', None, None, "__Pyx_uchar_cast",
func_type=PyrexTypes.CFuncType(
PyrexTypes.c_uchar_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)],
is_strict_signature=True))
for c_type in [PyrexTypes.c_char_type, PyrexTypes.c_schar_type, PyrexTypes.c_uchar_type]
] + [
BuiltinFunction('ord', None, None, "__Pyx_PyObject_Ord",
utility_code=UtilityCode.load_cached("object_ord", "Builtins.c"),
func_type=PyrexTypes.CFuncType(
PyrexTypes.c_long_type, [
PyrexTypes.CFuncTypeArg("c", PyrexTypes.py_object_type, None)
],
exception_value="(long)(Py_UCS4)-1")),
BuiltinFunction('pow', "OOO", "O", "PyNumber_Power"),
BuiltinFunction('pow', "OO", "O", "__Pyx_PyNumber_Power2",
utility_code = UtilityCode.load("pow2", "Builtins.c")),
#('range', "", "", ""),
#('raw_input', "", "", ""),
#('reduce', "", "", ""),
BuiltinFunction('reload', "O", "O", "PyImport_ReloadModule"),
BuiltinFunction('repr', "O", "O", "PyObject_Repr"), # , builtin_return_type='str'), # add in Cython 3.1
#('round', "", "", ""),
BuiltinFunction('setattr', "OOO", "r", "PyObject_SetAttr"),
#('sum', "", "", ""),
#('sorted', "", "", ""),
#('type', "O", "O", "PyObject_Type"),
BuiltinFunction('unichr', "l", "O", "PyUnicode_FromOrdinal", builtin_return_type='unicode'),
#('unicode', "", "", ""),
#('vars', "", "", ""),
#('zip', "", "", ""),
# Can't do these easily until we have builtin type entries.
#('typecheck', "OO", "i", "PyObject_TypeCheck", False),
#('issubtype', "OO", "i", "PyType_IsSubtype", False),
# Put in namespace append optimization.
BuiltinFunction('__Pyx_PyObject_Append', "OO", "O", "__Pyx_PyObject_Append"),
# This is conditionally looked up based on a compiler directive.
BuiltinFunction('__Pyx_Globals', "", "O", "__Pyx_Globals",
utility_code=globals_utility_code),
]
# Builtin types
# bool
# buffer
# classmethod
# dict
# enumerate
# file
# float
# int
# list
# long
# object
# property
# slice
# staticmethod
# super
# str
# tuple
# type
# xrange
builtin_types_table = [
("type", "PyType_Type", []),
# This conflicts with the C++ bool type, and unfortunately
# C++ is too liberal about PyObject* <-> bool conversions,
# resulting in unintuitive runtime behavior and segfaults.
# ("bool", "PyBool_Type", []),
("int", "PyInt_Type", []),
("long", "PyLong_Type", []),
("float", "PyFloat_Type", []),
("complex", "PyComplex_Type", [BuiltinAttribute('cval', field_type_name = 'Py_complex'),
BuiltinAttribute('real', 'cval.real', field_type = PyrexTypes.c_double_type),
BuiltinAttribute('imag', 'cval.imag', field_type = PyrexTypes.c_double_type),
]),
("basestring", "PyBaseString_Type", [
BuiltinMethod("join", "TO", "T", "__Pyx_PyBaseString_Join",
utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
]),
("bytearray", "PyByteArray_Type", [
]),
("bytes", "PyBytes_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("join", "TO", "O", "__Pyx_PyBytes_Join",
utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
]),
("str", "PyString_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("join", "TO", "O", "__Pyx_PyString_Join",
builtin_return_type='basestring',
utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
]),
("unicode", "PyUnicode_Type", [BuiltinMethod("__contains__", "TO", "b", "PyUnicode_Contains"),
BuiltinMethod("join", "TO", "T", "PyUnicode_Join"),
]),
("tuple", "PyTuple_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
]),
("list", "PyList_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("insert", "TzO", "r", "PyList_Insert"),
BuiltinMethod("reverse", "T", "r", "PyList_Reverse"),
BuiltinMethod("append", "TO", "r", "__Pyx_PyList_Append",
utility_code=UtilityCode.load("ListAppend", "Optimize.c")),
BuiltinMethod("extend", "TO", "r", "__Pyx_PyList_Extend",
utility_code=UtilityCode.load("ListExtend", "Optimize.c")),
]),
("dict", "PyDict_Type", [BuiltinMethod("__contains__", "TO", "b", "PyDict_Contains"),
BuiltinMethod("has_key", "TO", "b", "PyDict_Contains"),
BuiltinMethod("items", "T", "O", "__Pyx_PyDict_Items",
utility_code=UtilityCode.load("py_dict_items", "Builtins.c")),
BuiltinMethod("keys", "T", "O", "__Pyx_PyDict_Keys",
utility_code=UtilityCode.load("py_dict_keys", "Builtins.c")),
BuiltinMethod("values", "T", "O", "__Pyx_PyDict_Values",
utility_code=UtilityCode.load("py_dict_values", "Builtins.c")),
BuiltinMethod("iteritems", "T", "O", "__Pyx_PyDict_IterItems",
utility_code=UtilityCode.load("py_dict_iteritems", "Builtins.c")),
BuiltinMethod("iterkeys", "T", "O", "__Pyx_PyDict_IterKeys",
utility_code=UtilityCode.load("py_dict_iterkeys", "Builtins.c")),
BuiltinMethod("itervalues", "T", "O", "__Pyx_PyDict_IterValues",
utility_code=UtilityCode.load("py_dict_itervalues", "Builtins.c")),
BuiltinMethod("viewitems", "T", "O", "__Pyx_PyDict_ViewItems",
utility_code=UtilityCode.load("py_dict_viewitems", "Builtins.c")),
BuiltinMethod("viewkeys", "T", "O", "__Pyx_PyDict_ViewKeys",
utility_code=UtilityCode.load("py_dict_viewkeys", "Builtins.c")),
BuiltinMethod("viewvalues", "T", "O", "__Pyx_PyDict_ViewValues",
utility_code=UtilityCode.load("py_dict_viewvalues", "Builtins.c")),
BuiltinMethod("clear", "T", "r", "__Pyx_PyDict_Clear",
utility_code=UtilityCode.load("py_dict_clear", "Optimize.c")),
BuiltinMethod("copy", "T", "T", "PyDict_Copy")]),
("slice", "PySlice_Type", [BuiltinAttribute('start'),
BuiltinAttribute('stop'),
BuiltinAttribute('step'),
]),
# ("file", "PyFile_Type", []), # not in Py3
("set", "PySet_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("clear", "T", "r", "PySet_Clear"),
# discard() and remove() have a special treatment for unhashable values
BuiltinMethod("discard", "TO", "r", "__Pyx_PySet_Discard",
utility_code=UtilityCode.load("py_set_discard", "Optimize.c")),
BuiltinMethod("remove", "TO", "r", "__Pyx_PySet_Remove",
utility_code=UtilityCode.load("py_set_remove", "Optimize.c")),
# update is actually variadic (see Github issue #1645)
# BuiltinMethod("update", "TO", "r", "__Pyx_PySet_Update",
# utility_code=UtilityCode.load_cached("PySet_Update", "Builtins.c")),
BuiltinMethod("add", "TO", "r", "PySet_Add"),
BuiltinMethod("pop", "T", "O", "PySet_Pop")]),
("frozenset", "PyFrozenSet_Type", []),
("Exception", "((PyTypeObject*)PyExc_Exception)[0]", []),
("StopAsyncIteration", "((PyTypeObject*)__Pyx_PyExc_StopAsyncIteration)[0]", []),
]
# Builtin types whose constructor calls always return an instance of
# exactly that type (useful for type inference); some builtins do not.
types_that_construct_their_instance = {
    'type', 'bool', 'long', 'float', 'complex',
    'bytes', 'unicode', 'bytearray',
    'tuple', 'list', 'dict', 'set', 'frozenset',
    # 'str',   # only in Py3.x
    # 'file',  # only in Py2.x
}
# C-level structs exposed as builtins: (python name, cname, [(field, type), ...]).
builtin_structs_table = [
    ('Py_buffer', 'Py_buffer',
     [("buf", PyrexTypes.c_void_ptr_type),
      ("obj", PyrexTypes.py_object_type),
      ("len", PyrexTypes.c_py_ssize_t_type),
      ("itemsize", PyrexTypes.c_py_ssize_t_type),
      ("readonly", PyrexTypes.c_bint_type),
      ("ndim", PyrexTypes.c_int_type),
      ("format", PyrexTypes.c_char_ptr_type),
      ("shape", PyrexTypes.c_py_ssize_t_ptr_type),
      ("strides", PyrexTypes.c_py_ssize_t_ptr_type),
      ("suboffsets", PyrexTypes.c_py_ssize_t_ptr_type),
      ("smalltable", PyrexTypes.CArrayType(PyrexTypes.c_py_ssize_t_type, 2)),
      ("internal", PyrexTypes.c_void_ptr_type),
      ]),
    ('Py_complex', 'Py_complex',
     [('real', PyrexTypes.c_double_type),
      ('imag', PyrexTypes.c_double_type),
      ])
]
# inject cyobject
def inject_cy_object(self):
    """Declare the extern ``CyObject`` base type in *self*.

    NOTE(review): defined at module level yet takes ``self`` — presumably
    intended to be called with (or bound to) the builtin scope object;
    confirm against callers.
    """
    global cy_object_type

    def init_scope(scope):
        # Mark the scope as a C++/cypclass class scope with no inherited entries.
        scope.is_cpp_class_scope = 1
        scope.is_cyp_class_scope = 1
        scope.inherited_var_entries = []
        scope.inherited_type_entries = []

    cy_object_scope = CppClassScope("CyObject", self, None)
    init_scope(cy_object_scope)
    cy_object_type = PyrexTypes.cy_object_type
    cy_object_scope.type = PyrexTypes.cy_object_type
    # Bind type and scope together before declaring the extern entry.
    cy_object_type.set_scope(cy_object_scope)
    cy_object_entry = self.declare("CyObject", "CyObject", cy_object_type, None, "extern")
    cy_object_entry.is_type = 1
# inject acthon interfaces
def inject_acthon_interfaces(self):
    """Declare the built-in Acthon actor-model interface cypclasses.

    ``self`` is the scope (the builtin scope — see the call at module end)
    that receives extern declarations for the Result, Message, Sync, Queue
    and ActivableClass interfaces.  The corresponding globals are bound so
    the rest of the compiler can reference the types.

    Fixes relative to the previous revision:
    - the ActhonQueueInterface entry passed ``self`` as the ``pos``
      argument of ``declare()`` (every sibling call passes ``None``);
    - the ActhonActivableClass entry had its ``declare()`` arguments
      scrambled (cname/pos swapped), yielding an entry with cname ``None``.
    """
    global acthon_result_type, acthon_message_type, acthon_sync_type, acthon_queue_type, acthon_activable_type
    def init_scope(scope):
        # Mark as a cypclass scope and prepare inheritance bookkeeping.
        scope.is_cpp_class_scope = 1
        scope.is_cyp_class_scope = 1
        scope.inherited_var_entries = []
        scope.inherited_type_entries = []
    # cypclass ActhonResultInterface(CyObject):
    #     void pushVoidStarResult(void* result){}
    #     void* getVoidStarResult(){}
    #     void pushIntResult(int result){}
    #     int getIntResult(){}
    #     operator int() { return this->getIntResult(); }
    result_scope = CppClassScope("ActhonResultInterface", self, None)
    init_scope(result_scope)
    acthon_result_type = result_type = PyrexTypes.CypClassType(
        "ActhonResultInterface", result_scope, "ActhonResultInterface", (PyrexTypes.cy_object_type,),
        activable=False)
    result_scope.type = result_type
    #result_type.set_scope is required because parent_type is used when doing scope inheritance
    result_type.set_scope(result_scope)
    result_entry = self.declare("ActhonResultInterface", "ActhonResultInterface", result_type, None, "extern")
    result_entry.is_type = 1
    result_pushVoidStar_arg_type = PyrexTypes.CFuncTypeArg("result", PyrexTypes.c_void_ptr_type, None)
    result_pushVoidStar_type = PyrexTypes.CFuncType(PyrexTypes.c_void_type, [result_pushVoidStar_arg_type], nogil = 1)
    result_pushVoidStar_entry = result_scope.declare("pushVoidStarResult", "pushVoidStarResult",
        result_pushVoidStar_type, None, "extern")
    result_pushVoidStar_entry.is_cfunction = 1
    result_pushVoidStar_entry.is_variable = 1
    result_scope.var_entries.append(result_pushVoidStar_entry)
    result_getVoidStar_type = PyrexTypes.CFuncType(PyrexTypes.c_void_ptr_type, [], nogil = 1)
    result_getVoidStar_type.is_const_method = 1
    result_getVoidStar_entry = result_scope.declare("getVoidStarResult", "getVoidStarResult",
        result_getVoidStar_type, None, "extern")
    result_getVoidStar_entry.is_cfunction = 1
    result_getVoidStar_entry.is_variable = 1
    result_scope.var_entries.append(result_getVoidStar_entry)
    result_pushInt_arg_type = PyrexTypes.CFuncTypeArg("result", PyrexTypes.c_int_type, None)
    result_pushInt_type = PyrexTypes.CFuncType(PyrexTypes.c_void_type, [result_pushInt_arg_type], nogil = 1)
    result_pushInt_entry = result_scope.declare("pushIntResult", "pushIntResult",
        result_pushInt_type, None, "extern")
    result_pushInt_entry.is_cfunction = 1
    result_pushInt_entry.is_variable = 1
    result_scope.var_entries.append(result_pushInt_entry)
    result_getInt_type = PyrexTypes.CFuncType(PyrexTypes.c_int_type, [], nogil = 1)
    result_getInt_type.is_const_method = 1
    result_getInt_entry = result_scope.declare("getIntResult", "getIntResult",
        result_getInt_type, None, "extern")
    result_getInt_entry.is_cfunction = 1
    result_getInt_entry.is_variable = 1
    result_scope.var_entries.append(result_getInt_entry)
    # Conversion operators so a result object can be read as int / void*.
    result_int_typecast_type = PyrexTypes.CFuncType(PyrexTypes.c_int_type, [], nogil = 1)
    result_int_typecast_entry = result_scope.declare("operator int", "operator int",
        result_int_typecast_type, None, "extern")
    result_int_typecast_entry.is_cfunction = 1
    result_int_typecast_entry.is_variable = 1
    result_scope.var_entries.append(result_int_typecast_entry)
    result_voidStar_typecast_type = PyrexTypes.CFuncType(PyrexTypes.c_void_ptr_type, [], nogil = 1)
    result_voidStar_typecast_entry = result_scope.declare("operator void *", "operator void *",
        result_voidStar_typecast_type, None, "extern")
    result_voidStar_typecast_entry.is_cfunction = 1
    result_voidStar_typecast_entry.is_variable = 1
    result_scope.var_entries.append(result_voidStar_typecast_entry)
    # cypclass ActhonMessageInterface
    # (scope/type created up-front because the Sync interface methods and
    # the Queue interface reference message_type before its members exist)
    message_scope = CppClassScope("ActhonMessageInterface", self, None)
    init_scope(message_scope)
    acthon_message_type = message_type = PyrexTypes.CypClassType(
        "ActhonMessageInterface", message_scope, "ActhonMessageInterface", (PyrexTypes.cy_object_type,),
        activable=False)
    message_type.set_scope(message_scope)
    message_scope.type = message_type
    # cypclass ActhonSyncInterface(CyObject):
    #     bool isActivable(){}
    #     bool isCompleted(){}
    #     void insertActivity(ActhonMessageInterface msg){}
    #     void removeActivity(ActhonMessageInterface msg){}
    sync_scope = CppClassScope("ActhonSyncInterface", self, None)
    init_scope(sync_scope)
    acthon_sync_type = sync_type = PyrexTypes.CypClassType(
        "ActhonSyncInterface", sync_scope, "ActhonSyncInterface", (PyrexTypes.cy_object_type,),
        activable=False)
    sync_type.set_scope(sync_scope)
    sync_scope.type = sync_type
    sync_entry = self.declare("ActhonSyncInterface", "ActhonSyncInterface", sync_type, None, "extern")
    sync_entry.is_type = 1
    sync_isActivable_type = PyrexTypes.CFuncType(PyrexTypes.c_bint_type, [], nogil = 1)
    sync_isActivable_type.is_const_method = 1
    sync_isActivable_entry = sync_scope.declare("isActivable", "isActivable",
        sync_isActivable_type, None, "extern")
    sync_isActivable_entry.is_cfunction = 1
    sync_isActivable_entry.is_variable = 1
    sync_scope.var_entries.append(sync_isActivable_entry)
    sync_isCompleted_type = PyrexTypes.CFuncType(PyrexTypes.c_bint_type, [], nogil = 1)
    sync_isCompleted_type.is_const_method = 1
    sync_isCompleted_entry = sync_scope.declare("isCompleted", "isCompleted",
        sync_isCompleted_type, None, "extern")
    sync_isCompleted_entry.is_cfunction = 1
    sync_isCompleted_entry.is_variable = 1
    sync_scope.var_entries.append(sync_isCompleted_entry)
    # NOTE(review): the doc comment above says insert/removeActivity take an
    # ActhonMessageInterface argument, but these C function types declare no
    # arguments — confirm against the runtime headers before changing.
    sync_insertActivity_type = PyrexTypes.CFuncType(PyrexTypes.c_void_type, [], nogil = 1)
    sync_removeActivity_type = PyrexTypes.CFuncType(PyrexTypes.c_void_type, [], nogil = 1)
    sync_insertActivity_entry = sync_scope.declare("insertActivity", "insertActivity",
        sync_insertActivity_type, None, "extern")
    sync_insertActivity_entry.is_cfunction = 1
    sync_insertActivity_entry.is_variable = 1
    sync_scope.var_entries.append(sync_insertActivity_entry)
    sync_removeActivity_entry = sync_scope.declare("removeActivity", "removeActivity",
        sync_removeActivity_type, None, "extern")
    sync_removeActivity_entry.is_cfunction = 1
    sync_removeActivity_entry.is_variable = 1
    sync_scope.var_entries.append(sync_removeActivity_entry)
    # cypclass ActhonMessageInterface(CyObject):
    #     ActhonSyncInterface _sync_method
    #     ActhonResultInterface _result
    #     bool activate(){}
    message_entry = self.declare("ActhonMessageInterface", "ActhonMessageInterface", message_type, None, "extern")
    message_entry.is_type = 1
    message_sync_attr_entry = message_scope.declare("_sync_method", "_sync_method",
        PyrexTypes.cyp_class_qualified_type(sync_type, 'lock'), None, "extern")
    message_sync_attr_entry.is_variable = 1
    message_scope.var_entries.append(message_sync_attr_entry)
    message_result_attr_entry = message_scope.declare("_result", "_result",
        PyrexTypes.cyp_class_qualified_type(result_type, 'lock'), None, "extern")
    message_result_attr_entry.is_variable = 1
    message_scope.var_entries.append(message_result_attr_entry)
    message_activate_type = PyrexTypes.CFuncType(PyrexTypes.c_bint_type, [], nogil = 1)
    message_activate_entry = message_scope.declare("activate", "activate",
        message_activate_type, None, "extern")
    message_activate_entry.is_cfunction = 1
    message_activate_entry.is_variable = 1
    message_scope.var_entries.append(message_activate_entry)
    # cypclass ActhonQueueInterface(CyObject):
    #     void push(ActhonMessageInterface message){}
    #     bool activate(){}
    queue_scope = CppClassScope("ActhonQueueInterface", self, None)
    init_scope(queue_scope)
    acthon_queue_type = queue_type = PyrexTypes.CypClassType(
        "ActhonQueueInterface", queue_scope, "ActhonQueueInterface", (PyrexTypes.cy_object_type,),
        activable=False)
    queue_type.set_scope(queue_scope)
    queue_scope.type = queue_type
    # Fix: pos argument must be None (was erroneously the scope itself).
    queue_entry = self.declare("ActhonQueueInterface", "ActhonQueueInterface", queue_type, None, "extern")
    queue_entry.is_type = 1
    queue_msg_arg = PyrexTypes.CFuncTypeArg("msg", message_type, None)
    queue_push_type = PyrexTypes.CFuncType(PyrexTypes.c_void_type, [queue_msg_arg], nogil = 1, self_qualifier = 'locked')
    queue_push_entry = queue_scope.declare("push", "push", queue_push_type,
        None, "extern")
    queue_push_entry.is_cfunction = 1
    queue_push_entry.is_variable = 1
    queue_scope.var_entries.append(queue_push_entry)
    queue_activate_type = PyrexTypes.CFuncType(PyrexTypes.c_bint_type, [], nogil = 1)
    queue_activate_entry = queue_scope.declare("activate", "activate",
        queue_activate_type, None, "extern")
    queue_activate_entry.is_cfunction = 1
    queue_activate_entry.is_variable = 1
    queue_scope.var_entries.append(queue_activate_entry)
    queue_is_empty_type = PyrexTypes.CFuncType(PyrexTypes.c_bint_type, [], nogil = 1)
    queue_is_empty_type.is_const_method = 1
    queue_is_empty_entry = queue_scope.declare("is_empty", "is_empty",
        queue_is_empty_type, None, "extern")
    queue_is_empty_entry.is_cfunction = 1
    queue_is_empty_entry.is_variable = 1
    queue_scope.var_entries.append(queue_is_empty_entry)
    # cdef cypclass ActivableClass:
    #     ResultInterface (*_active_result_class)()
    #     QueueInterface _active_queue_class
    activable_scope = CppClassScope("ActhonActivableClass", self, None)
    init_scope(activable_scope)
    acthon_activable_type = activable_type = PyrexTypes.CypClassType(
        "ActhonActivableClass", activable_scope, "ActhonActivableClass", (PyrexTypes.cy_object_type,),
        activable=False)
    activable_type.set_scope(activable_scope)
    # Fix: declare(name, cname, type, pos, visibility) — the arguments were
    # scrambled (cname=None, pos="ActhonActivableClass"), producing an entry
    # without a C name.
    activable_entry = self.declare("ActhonActivableClass", "ActhonActivableClass", activable_type, None, "extern")
    activable_entry.is_type = 1
    # _active_result_class is a C function pointer returning a result object.
    activable_result_attr_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(result_entry.type, []))
    activable_result_attr_entry = activable_scope.declare("_active_result_class", "_active_result_class",
        activable_result_attr_type, None, "extern")
    activable_result_attr_entry.is_variable = 1
    activable_scope.var_entries.append(activable_result_attr_entry)
    activable_queue_attr_entry = activable_scope.declare("_active_queue_class", "_active_queue_class",
        PyrexTypes.cyp_class_qualified_type(queue_type, 'lock'), None, "extern")
    activable_queue_attr_entry.is_variable = 1
    activable_scope.var_entries.append(activable_queue_attr_entry)
# set up builtin scope
builtin_scope = BuiltinScope()
def init_builtin_funcs():
    """Declare every entry of builtin_function_table in the builtin scope."""
    for builtin_function in builtin_function_table:
        builtin_function.declare_in_scope(builtin_scope)
builtin_types = {}
def init_builtin_types():
    """Declare all builtin Python types and their methods.

    Populates the module-level ``builtin_types`` mapping (name -> type) from
    ``builtin_types_table``, deriving each type's C object-struct name.
    """
    global builtin_types
    # Types whose object-struct name does not follow the default
    # 'Py<Name>Object' pattern ('bool' deliberately has none).
    special_objstruct_cnames = {
        'frozenset': 'PySetObject',
        'bytearray': 'PyByteArrayObject',
        'bool': None,
        'Exception': "PyBaseExceptionObject",
        'StopAsyncIteration': "PyBaseExceptionObject",
    }
    for name, cname, methods in builtin_types_table:
        utility = builtin_utility_code.get(name)
        if name in special_objstruct_cnames:
            objstruct_cname = special_objstruct_cnames[name]
        else:
            objstruct_cname = 'Py%sObject' % name.capitalize()
        the_type = builtin_scope.declare_builtin_type(name, cname, utility, objstruct_cname)
        builtin_types[name] = the_type
        for method in methods:
            method.declare_in_type(the_type)
def init_builtin_structs():
    """Declare the builtin C structs (e.g. Py_buffer, Py_complex)."""
    for struct_name, struct_cname, attribute_types in builtin_structs_table:
        member_scope = StructOrUnionScope(struct_name)
        for attr_name, attr_type in attribute_types:
            # allow_pyobject: some members (e.g. Py_buffer.obj) hold objects.
            member_scope.declare_var(attr_name, attr_type, None,
                                     attr_name, allow_pyobject=True)
        builtin_scope.declare_struct_or_union(
            struct_name, "struct", member_scope, 1, None, cname=struct_cname)
def inject_cypclass_refcount_macros():
    """Declare the cypclass reference-counting macros as builtin C functions.

    Cy_INCREF / Cy_DECREF / Cy_XDECREF / Cy_GETREF become callable names in
    the builtin scope, typed so the compiler can check their arguments.
    """
    # void (const CyObject) — for taking a new reference.
    incref_type = PyrexTypes.CFuncType(
        PyrexTypes.c_void_type,
        [
            PyrexTypes.CFuncTypeArg("obj", PyrexTypes.const_cy_object_type, None)
        ],
        nogil = 1)
    # void (const CyObject &) — decref takes a reference so the macro can
    # also clear the variable (Cy_XDECREF shares this signature).
    decref_type = PyrexTypes.CFuncType(
        PyrexTypes.c_void_type,
        [
            PyrexTypes.CFuncTypeArg("obj", PyrexTypes.CReferenceType(PyrexTypes.const_cy_object_type), None)
        ],
        nogil = 1)
    # int (const CyObject) — returns the current reference count.
    getref_type = PyrexTypes.CFuncType(
        PyrexTypes.c_int_type,
        [
            PyrexTypes.CFuncTypeArg("obj", PyrexTypes.const_cy_object_type, None)
        ],
        nogil = 1)
    for macro, macro_type in [("Cy_INCREF", incref_type), ("Cy_DECREF", decref_type), ("Cy_XDECREF", decref_type), ("Cy_GETREF", getref_type)]:
        builtin_scope.declare_builtin_cfunction(macro, macro_type, macro)
def inject_cypclass_lock_macros():
    """Declare the cypclass read/write lock macros as builtin C functions.

    Blocking variants (Cy_RLOCK, Cy_WLOCK and their unlock counterparts)
    return void; the try-lock variants return an int success flag.
    """
    blocking_macro_type = PyrexTypes.CFuncType(
        PyrexTypes.c_void_type,
        [
            PyrexTypes.CFuncTypeArg("obj", PyrexTypes.const_cy_object_type, None)
        ],
        nogil = 1)
    for macro in ("Cy_RLOCK", "Cy_WLOCK", "Cy_UNWLOCK", "Cy_UNRLOCK"):
        builtin_scope.declare_builtin_cfunction(macro, blocking_macro_type, macro)
    # int return value: whether the lock was acquired (non-blocking).
    nonblocking_macro_type = PyrexTypes.CFuncType(PyrexTypes.c_int_type,
        [
            PyrexTypes.CFuncTypeArg("obj", PyrexTypes.const_cy_object_type, None)
        ],
        nogil = 1)
    for macro in ("Cy_TRYRLOCK", "Cy_TRYWLOCK"):
        builtin_scope.declare_builtin_cfunction(macro, nonblocking_macro_type, macro)
def inject_cypclass_typecheck_functions():
    """Declare the builtin ``isinstanceof`` runtime type-check function.

    Declared as a C++ function template: int isinstanceof[T](const CyObject,
    T) — the type to test against is passed as a template parameter.
    """
    template_placeholder_type = PyrexTypes.TemplatePlaceholderType("T")
    isinstanceof_type = PyrexTypes.CFuncType(
        PyrexTypes.c_int_type,
        [
            PyrexTypes.CFuncTypeArg("obj", PyrexTypes.const_cy_object_type, None),
            PyrexTypes.CFuncTypeArg("type", template_placeholder_type, None)
        ],
        nogil = 1,
        templates = [template_placeholder_type]
    )
    builtin_scope.declare_builtin_cfunction("isinstanceof", isinstanceof_type, "isinstanceof")
def init_builtins():
    """Populate the builtin scope and bind the module-level type globals.

    Runs once at import time (see the call at the bottom of the module):
    declares structs, types and functions, the ``__debug__`` constant, then
    caches frequently-used builtin types in module globals and injects the
    cypclass macros and typecheck helpers.
    """
    init_builtin_structs()
    init_builtin_types()
    init_builtin_funcs()
    # __debug__ mirrors CPython: true unless running with -O.
    builtin_scope.declare_var(
        '__debug__', PyrexTypes.c_const_type(PyrexTypes.c_bint_type),
        pos=None, cname='(!Py_OptimizeFlag)', is_cdef=True)
    global list_type, tuple_type, dict_type, set_type, frozenset_type
    global bytes_type, str_type, unicode_type, basestring_type, slice_type
    global float_type, bool_type, type_type, complex_type, bytearray_type
    type_type = builtin_scope.lookup('type').type
    list_type = builtin_scope.lookup('list').type
    tuple_type = builtin_scope.lookup('tuple').type
    dict_type = builtin_scope.lookup('dict').type
    set_type = builtin_scope.lookup('set').type
    frozenset_type = builtin_scope.lookup('frozenset').type
    slice_type = builtin_scope.lookup('slice').type
    bytes_type = builtin_scope.lookup('bytes').type
    str_type = builtin_scope.lookup('str').type
    unicode_type = builtin_scope.lookup('unicode').type
    basestring_type = builtin_scope.lookup('basestring').type
    bytearray_type = builtin_scope.lookup('bytearray').type
    float_type = builtin_scope.lookup('float').type
    bool_type = builtin_scope.lookup('bool').type
    complex_type = builtin_scope.lookup('complex').type
    inject_cypclass_refcount_macros()
    inject_cypclass_lock_macros()
    inject_cypclass_typecheck_functions()
inject_acthon_interfaces(builtin_scope)
inject_cy_object(builtin_scope)
init_builtins()
|
"""
wordpress_username_enumeration.py
Copyright 2011 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import re
import w3af.core.controllers.output_manager as om
import w3af.core.data.kb.knowledge_base as kb
from w3af.core.controllers.plugins.crawl_plugin import CrawlPlugin
from w3af.core.controllers.exceptions import RunOnce
from w3af.core.controllers.core_helpers.fingerprint_404 import is_404
from w3af.core.data.kb.info import Info
class wordpress_enumerate_users(CrawlPlugin):
    """
    Finds users in a WordPress installation.

    Enumerates users by requesting the author archive ("?author=ID") for
    incrementing IDs and extracting the username either from the redirect
    target (TALSOFT-2011-0526) or from the archive page title.

    :author: Andres Tarantini ( atarantini@gmail.com )
    """
    def __init__(self):
        CrawlPlugin.__init__(self)
        # Internal variables
        # _exec gates crawl(): set to False after the first enumeration run.
        self._exec = True
    def crawl(self, fuzzable_request):
        """
        Find users in a WordPress installation

        :param fuzzable_request: A fuzzable_request instance that contains
                                 (among other things) the URL to test.
        :raise RunOnce: when the enumeration was already performed.
        """
        if not self._exec:
            raise RunOnce()
        # Check if there is a wordpress installation in this directory
        # (wp-login.php is used as the fingerprint; a 404 means no WordPress)
        domain_path = fuzzable_request.get_url().get_domain_path()
        wp_unique_url = domain_path.url_join('wp-login.php')
        response = self._uri_opener.GET(wp_unique_url, cache=True)
        if is_404(response):
            return
        self._enum_users(fuzzable_request)
    def _enum_users(self, fuzzable_request):
        """Iterate author IDs and record any usernames found in the KB."""
        # Only run once
        self._exec = False
        # First user ID, will be incremented until 404
        uid = 0
        # Save the last title for non-redirection scenario
        self._title_cache = ''
        # Tolerance for user ID gaps in the sequence (this gaps are present
        # when users are deleted and new users created)
        gap_tolerance = 10
        gap = 0
        domain_path = fuzzable_request.get_url().get_domain_path()
        # Loop into authors and increment user ID
        while gap <= gap_tolerance:
            uid += 1
            gap += 1
            domain_path.querystring = [(u'author', [u'%s' % uid])]
            wp_author_url = domain_path
            response_author = self._uri_opener.GET(wp_author_url, cache=True)
            if is_404(response_author):
                continue
            # A hit resets the gap counter so enumeration continues past
            # deleted-user holes in the ID sequence.
            if response_author.was_redirected():
                extracted_from_redir = self._extract_from_redir(response_author)
                if extracted_from_redir:
                    gap = 0
                    continue
            extracted_from_body = self._extract_from_body(response_author)
            if extracted_from_body:
                gap = 0
                continue
    def _extract_from_body(self, response_author):
        """No redirect was made, try to fetch username from
        title of the author's archive page"""
        # Example strings:
        #        <title>admin | moth</title>
        #        <title>admin | Bonsai - Information Security Blog</title>
        title_search = re.search('<title>(.*?)</title>',
                                 response_author.get_body(), re.I)
        if title_search:
            title = title_search.group(1)
            # If the title is the same than the last user
            # ID requested, there are no new users
            if title == self._title_cache:
                return False
            else:
                # The title changed, username probably found
                self._title_cache = title
                # assumes the title starts with "<username> | <blog name>"
                # — TODO confirm for localized WordPress themes.
                username = title.split(' ')[0]
                self._kb_info_user(response_author.get_url(),
                                   response_author.id, username)
                return True
        return False
    def _extract_from_redir(self, response_author):
        """Extract the username from a /author/<username>/ redirect.

        :return: True when a username was extracted and stored in the KB.
        """
        path = response_author.get_redir_uri().get_path()
        if 'author' in path:
            # A redirect to /author/<username> was made, username probably found
            # NOTE(review): [-2] assumes the redirect path ends with a
            # trailing slash ("/author/admin/") — verify against targets
            # that redirect without one.
            username = path.split("/")[-2]
            self._kb_info_user(response_author.get_uri(),
                               response_author.id, username)
            return True
        return False
    def _kb_info_user(self, url, response_id, username):
        """
        Put user in Kb

        :param url: URL where the username was found.
        :param response_id: id of the HTTP response that revealed it.
        :param username: the enumerated WordPress username.
        :return: None, everything is saved in kb
        """
        desc = 'WordPress user "%s" found during username enumeration.'
        desc = desc % username
        i = Info('Identified WordPress user', desc, response_id,
                 self.get_name())
        i.set_url(url)
        kb.kb.append(self, 'users', i)
        om.out.information(i.get_desc())
    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
        This plugin finds user names in WordPress installations.
        The author's archive page is tried using "?author=ID" query and
        incrementing the ID for each request until a 404 response is received.
        If the response is a redirect, the blog is affected by TALSOFT-2011-0526
        (http://seclists.org/fulldisclosure/2011/May/493) advisory. If no
        redirect is done, the plugin will try to fetch the username from title.
        """
|
from django.db import models
class YearsOfExperience(models.Model):
    """A years-of-experience entry, stored as a short free-text label."""
    # Human-readable label for the experience range (max 100 chars).
    text = models.CharField(max_length=100)
    def __str__(self):
        """Return the label, used by the admin and shell representations."""
        return self.text
|
import pytest
from rotkehlchen.exchanges.coinbasepro import Coinbasepro
from rotkehlchen.tests.utils.factories import make_api_key, make_api_secret
class MockCoinbasepro(Coinbasepro):
    """Coinbase Pro exchange subclass used as a test double (no overrides yet)."""
    pass
@pytest.fixture(scope='session')
def coinbasepro_passphrase():
    """Session-wide dummy Coinbase Pro API passphrase for tests."""
    return 'supersecretpassphrase'
@pytest.fixture(scope='session')
def coinbasepro(session_database, session_inquirer, messages_aggregator):
    """Session-scoped MockCoinbasepro wired to the session DB/aggregator.

    NOTE(review): unlike the function-scoped fixture below, no passphrase is
    passed here — confirm that is intentional.
    """
    mock = MockCoinbasepro(
        api_key=make_api_key(),
        secret=make_api_secret(),
        database=session_database,
        msg_aggregator=messages_aggregator,
    )
    return mock
@pytest.fixture(scope='function')
def function_scope_coinbasepro(
        database,
        inquirer,  # pylint: disable=unused-argument,
        function_scope_messages_aggregator,
        coinbasepro_passphrase,
):
    """Fresh MockCoinbasepro per test, using function-scoped DB/aggregator."""
    mock = MockCoinbasepro(
        api_key=make_api_key(),
        secret=make_api_secret(),
        database=database,
        msg_aggregator=function_scope_messages_aggregator,
        passphrase=coinbasepro_passphrase,
    )
    return mock
|
# -*- coding: utf-8 -*-
"""
Benchmarking script to make sure the porting of the code to Python
produces correct results and compare the execution times.
Based on example: Commands/Steps.py
Created on Sun Apr 19 13:18:33 2020
@author: Leonard.Doyle
"""
import numpy as np
import matplotlib.pyplot as plt
from LightPipes import tictoc
from LightPipes.units import * #m, mm, ...
from LightPipes import plotutils
import LightPipes as lp
"""reference LightPipes (Cpp) renamed and installed with "setup.py develop" as
oldLightPipes"""
import oldLightPipes as olp
#******** Simulation parameters *************
wavelength=632.8*nm  # HeNe laser wavelength
size=4*mm  # side length of the square grid
N=200  # grid points per axis
R=1*mm  # radius of the circular aperture
dz=10*mm  # propagation distance per Steps() call
Nsteps=100  # number of propagation steps
f=50*cm  # focal length of the lens
# complex refractive index, uniform over the grid (absorbing medium)
n=(1.0 + 0.1j)*np.ones((N,N))
# n[int(N/4):int(N*3/4),int(N/4):int(N*5/8)] = 1.5 #create asymmtery to see
# if code accidentally flips anything etc.
# coordinate grids for plotting the (z, x) cross-section
X=range(N)
Z=range(Nsteps)
X, Z=np.meshgrid(X,Z)
def system(lib):
    """The optical system run by LightPipes library lib.

    Builds a circular aperture + lens, then propagates the field in Nsteps
    Steps() calls, accumulating only the time spent inside Steps() so the
    two library implementations can be compared fairly.

    Returns the final field and the (Nsteps, N) array of center lineouts.
    """
    Icross=np.zeros((Nsteps,N))
    F = lib.Begin(size, wavelength, N)
    F = lib.CircAperture(R,0,0,F)
    F = lib.Lens(f,0,0,F)
    t_tot = 0.0
    for i in range(Nsteps):
        tictoc.tic()
        # with tictoc.printtimer('1 Steps'):
        F = lib.Steps(dz,1,n,F)
        t_tot += tictoc.toc()
        I = lib.Intensity(0,F)
        Icross[i,:] = I[int(N/2)][:] #center lineout
    print(t_tot)  # total seconds spent inside Steps() for this library
    return F, Icross
#********* Run for new python LP *******
F, Icross_lp = system(lp)
I = lp.Intensity(0, F)
phi = lp.Phase(F)
#****** Run for reference cpp OLP *******
F_o, Icross_olp = system(olp)
I_o = np.asarray(olp.Intensity(0, F_o))
phi_o = np.asarray(olp.Phase(F_o))
#*********** Plot results *******************
# Left: python cross-section; right: difference to the C++ reference.
fig, [ax1, ax2] = plt.subplots(1, 2, sharex=True, sharey=True)
ax1.imshow(Icross_lp)
im2 = ax2.imshow(Icross_lp - Icross_olp)
plt.colorbar(im2, ax=ax2)
# Final-plane intensity/phase differences between the two implementations.
Idiff = I - I_o
phidiff = phi - phi_o
fig=plt.figure(figsize=(15,6))
ax1 = fig.add_subplot(231)
ax1.set_title('I')
p1 = ax1.imshow(I,cmap='rainbow'); ax1.axis('off')
fig.colorbar(p1, ax=ax1)
ax2 = fig.add_subplot(232, sharex = ax1, sharey = ax1)
ax2.set_title('I_o')
p2 = ax2.imshow(I_o, cmap='rainbow'); ax2.axis('off')
fig.colorbar(p2, ax=ax2)
ax3 = fig.add_subplot(233, sharex = ax1, sharey = ax1)
ax3.set_title('I - I_o')
# Fix: each subplot hides its own axes (copy-paste bugs had ax2/ax3/ax4
# repeated here, leaving some panels with axes on and disabling others twice).
p3 = ax3.imshow(Idiff, cmap='rainbow'); ax3.axis('off')
fig.colorbar(p3, ax=ax3)
ax4 = fig.add_subplot(234, sharex = ax1, sharey = ax1)
ax4.set_title('Phi')
p4 = ax4.imshow(phi, cmap='rainbow'); ax4.axis('off')
fig.colorbar(p4, ax=ax4)
ax5 = fig.add_subplot(235, sharex = ax1, sharey = ax1)
ax5.set_title('Phi_o')
p5 = ax5.imshow(phi_o, cmap='rainbow'); ax5.axis('off')
fig.colorbar(p5, ax=ax5)
ax6 = fig.add_subplot(236, sharex = ax1, sharey = ax1)
ax6.set_title('Phi - Phi_olp')
p6 = ax6.imshow(phidiff, cmap='rainbow'); ax6.axis('off')
fig.colorbar(p6, ax=ax6)
plt.show()
|
# Copyright (C) 2017 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Authors: Gerald Hempel, Andres Goens
import sys
import numpy as np
from hydra.utils import instantiate
from mocasin.representations import MappingRepresentation
import mocasin.util.random_distributions.lp as lp
from mocasin.util import logging
log = logging.getLogger(__name__)
class Volume(object):
    """Base class for design-space search volumes (no-op default behavior)."""
    def __init__(self):
        log.debug("create default volume")
    # NOTE(review): 'vol' takes the place of 'self' in these two methods, so
    # instance.adapt() / instance.shrink() return the instance unchanged —
    # presumably intentional defaults for subclasses to override; confirm.
    def adapt(vol):
        log.debug("adapt volume")
        return vol
    def shrink(vol):
        log.debug("shrink volume")
        return vol
class Cube(Volume):
    """Axis-aligned hypercube volume centered on a design point.

    The cube is described by an integer-rounded ``center`` and a scalar
    ``radius``; ``extend``/``shrink`` grow and shrink the radius within
    the limits given at construction time.
    """

    def __init__(
        self,
        graph,
        platform,
        representation,
        center,
        radius=1.0,
        max_step=10,
        max_pe=16,
    ):
        # define initial cube with radius 1 at the given center
        self.center = center.to_list()
        self.radius = radius
        self.dim = len(center)
        # Fix: extend() reads self.max_step / self.max_pe, but the
        # constructor never stored them, raising AttributeError on the
        # first extension.
        self.max_step = max_step
        self.max_pe = max_pe

    # https://stackoverflow.com/questions/4984647/accessing-dict-keys-like-an-attribute
    def adapt_center(self, s_set):
        """Move the center to the (rounded) mean of the feasible samples."""
        fs_set = list(map(lambda s: s.sample, s_set.get_feasible()))
        if not fs_set:
            return self.center
        # take mean of feasible points as new center
        m = np.mean(fs_set, axis=0)
        self.center = np.around(m).tolist()
        return self.center

    def correct_center(self, s_set, center, old_center):
        """Return the feasible sample nearest to center, tie-broken by
        proximity to old_center, or None if there is no such sample."""
        # shortest points to center
        d_cur = list(map(lambda s: [s.dist(center), s], s_set.get_feasible()))
        d_cur = sorted(d_cur, key=lambda x: x[0])
        nearest_samples = []
        for s in d_cur:
            if s[0] == d_cur[0][0]:
                nearest_samples.append(s[1])
        # take (first) shortest point to old center from that result
        d_old = list(
            map(lambda s: [s.dist(old_center), s], s_set.get_feasible())
        )
        d_old = sorted(d_old, key=lambda x: x[0])
        for s in d_old:
            if s[1] in nearest_samples:
                return s[1].sample
        return None

    def adapt_volume(self, s_set, target_p, s_val):
        """Grow or shrink the cube based on the feasible-sample ratio.

        :return: the empirical feasibility ratio p of the sample set.
        """
        fs_set = list(map(lambda s: s.sample, s_set.get_feasible()))
        # adjust radius
        p = len(s_set.get_feasible()) / len(s_set.sample_set)
        log.debug("---------- adapt_volume() -----------")
        # NOTE(review): this logs the feasible sample list itself, not its
        # length — possibly meant to be len(s_set.get_feasible()).
        log.debug(
            "p-factors: {} {}".format(
                s_set.get_feasible(), len(s_set.sample_set)
            )
        )
        if p >= target_p:
            # simple adaptation: cube does not support shape adaption
            log.debug(
                "extend at p: {:f} target_p {:f} r: {:f}".format(
                    p, target_p, self.radius
                )
            )
            self.extend(s_val)
        else:
            log.debug(
                "shrink at p: {:f} target_p {:f} r: {:f}".format(
                    p, target_p, self.radius
                )
            )
            self.shrink(s_val)
        return p

    def shrink(self, step):
        # shink volume by one on each border, never below a radius of 1
        self.radius = self.radius - 1 if (self.radius - 1 > 0) else self.radius

    def extend(self, step):
        # extend volume by one on each border, capped at max_pe
        self.radius = (
            self.radius + step * self.max_step
            if (self.radius + step * self.max_step < self.max_pe)
            else self.radius
        )
class LPVolume(Volume):
    """Lp-norm ellipsoid volume with CMA-ES-style covariance adaptation.

    The volume is an Lp-ball around ``center`` in the chosen mapping
    representation, reshaped over time by rank-one and rank-mu updates of
    ``self.transformation`` (cf. CMA-ES); ``self.covariance`` holds a
    normalized square-root factor used to warp samples.
    """
    def __init__(
        self,
        graph,
        platform,
        representation,
        center,
        radius,
        adaptable_center_weights=True,
        aggressive_center_movement=False,
        adapt_samples=0,
    ):
        # This is a workaround until Hydra 1.1 (with recursive instantiaton!)
        if not issubclass(type(type(representation)), MappingRepresentation):
            representation = instantiate(representation, graph, platform)
        self.representation = representation
        self.graph = graph
        self.platform = platform
        self.adaptable_center_weights = adaptable_center_weights
        self.adapt_samples = adapt_samples
        self.aggressive_center_movement = aggressive_center_movement
        # center lives in representation coordinates, not mapping objects
        self.center = np.array(self.representation.toRepresentation(center))
        log.debug(f"Initializing center with representation:{self.center}")
        self.old_center = self.center
        self.radius = radius
        self.dim = len(self.center)
        self.true_dim = len(graph.processes())
        if not hasattr(self.representation, "p"):
            raise RuntimeError("Representation does not have a norm")
        self.norm_p = representation.p
        # weighting and learning constants follow the CMA-ES-like scheme
        self.weight_center = 1 / (np.exp(1) * self.dim)
        self.rk1_learning_constant = 1 / np.sqrt(self.true_dim)
        self.rk1_vec = np.zeros(self.dim)
        self.transformation = np.identity(self.dim) * self.radius ** 2
        self.adapt_covariance()
    def update_factors(self, p, num_samples):
        """Recompute learning rate and expansion/contraction factors."""
        self.learning_rate = 0.6 / (
            (self.true_dim + 1.3) ** 2 + p * num_samples
        )  # Beta
        self.expansion_factor = 1 + (self.learning_rate * (1 - p))  # f_e
        self.contraction_factor = 1 - (self.learning_rate * p)  # f_c
    def adapt_center(self, s_set):
        """Move the center towards the mean of the feasible samples.

        :return: the new center (representation coordinates).
        """
        # all feas. samples in s_set
        fs_set = list(map(lambda s: s.sample, s_set.get_feasible()))
        if fs_set == []:
            return self.center
        # take mean of feasible points to add weighted to the old center
        num_feasible = len(fs_set)  # mu
        if self.adaptable_center_weights:
            self.weight_center = min(
                0.5, num_feasible / (np.exp(1) * self.true_dim)
            )
        if self.aggressive_center_movement:
            # > 0.5 so the mean dominates the old center
            self.weight_center = 0.51
        mean_center = np.mean(fs_set, axis=0)
        mean_center_approx = self.representation.approximate(mean_center)
        log.debug("mean mapping {}".format(mean_center))
        new_center_vec = (
            1 - self.weight_center
        ) * self.center + self.weight_center * np.array(mean_center_approx)
        vector_of_distances = [
            lp.p_norm(self.center - v, self.norm_p) for v in fs_set
        ]
        if min(vector_of_distances) <= 0:
            log.warning("DC points did not move.")
        # approximate center
        new_center = self.representation.approximate(new_center_vec)
        dist1 = lp.p_norm(mean_center_approx - self.center, self.norm_p)
        if np.allclose(dist1, 0):
            log.warning("DC mean center unchanged.")
        else:
            log.info(f"DC mean center moved by {dist1}")
        self.old_center = self.center
        self.center = np.array(new_center)
        dist2 = lp.p_norm(self.old_center - self.center, self.norm_p)
        if np.allclose(dist2, 0):
            log.warning("DC Center unchanged.")
        else:
            log.info(f"DC center moved by {dist2}")
        return self.center
    def correct_center(self, s_set, center, old_center):
        # deprecated API kept only to fail loudly if still called
        log.error(
            "This function (correct_center) is deprecated and should not be called."
        )
        sys.exit(-1)
    def adapt_volume(self, s_set, target_p, s_val):
        """Adapt radius and transformation from the sample set.

        :return: the empirical feasibility ratio p_emp.
        """
        fs_set = list(map(lambda s: s.sample, s_set.get_feasible()))
        # adjust radius
        num_feasible = len(s_set.get_feasible())
        num_samples = len(s_set.sample_set)
        # NOTE(review): assert with `or log.error(...)` logs before failing,
        # but is stripped under -O.
        assert num_samples <= self.adapt_samples or log.error(
            f"number of samples produced ({num_samples}) exceeds self.configuration ({self.adapt_samples})"
        )
        self.update_factors(target_p, num_samples)
        if num_feasible != 0:
            p_emp = num_feasible / num_samples
        else:
            p_emp = 0
        log.debug("---------- adapt_volume() -----------")
        self.adapt_radius(num_feasible, num_samples)
        self.adapt_transformation(s_set)
        return p_emp
    def adapt_radius(self, num_feasible, num_samples):
        """Scale the radius once per feasible/infeasible sample."""
        factor = (
            self.expansion_factor ** num_feasible
            * self.contraction_factor ** (num_samples - num_feasible)
        )
        if factor > 1:
            log.debug(f"extend radius {self.radius} by factor: {factor}")
        else:
            log.debug(f"shrink radius {self.radius} by factor: {factor}")
        # print(f"radius: {self.radius} with p_emp = {num_feasible/num_samples} yields factor {factor}")
        self.radius = self.radius * factor
    def adapt_transformation(self, s_set):
        """
        This function adapts the transformation matrix of the ball around the center.
        It assumes it is called *after* adapt_center!
        """
        feasible = s_set.get_feasible()
        num_feasible = len(feasible)
        if num_feasible == 0:
            return
        # evolution-path (rank-one) update from the center displacement
        centers = self.center - self.old_center
        centers_factor = np.sqrt(
            self.rk1_learning_constant * (2 - self.rk1_learning_constant)
        )
        self.rk1_vec = (1 - self.rk1_learning_constant) * self.rk1_vec
        if np.dot(centers, centers.transpose()) != 0:
            centers_alpha = 1 / np.sqrt(np.dot(centers, centers))
            self.rk1_vec += centers_factor * centers_alpha * centers
        rank_one_update = np.array(self.rk1_vec).transpose() @ np.array(
            self.rk1_vec
        )
        rank_mu_update = np.zeros([self.dim, self.dim])
        try:
            Qinv = np.linalg.inv(self.covariance)
        except np.linalg.LinAlgError:
            Qinv = np.identity(self.dim)
        # arnorm[j]: inverse norm of sample j in the whitened coordinates
        arnorm = dict()
        for j, X in enumerate(feasible):
            V = Qinv @ (np.array(X.sample2tuple()) - self.old_center)
            # TODO: look up the alphas in original implementation, as not described in paper
            arnorm[j] = np.sqrt(np.dot(V, V))
            if arnorm[j] != 0:
                arnorm[j] = 1 / arnorm[j]
            else:
                arnorm[j] = 0
        for j, X in enumerate(feasible):
            alphai = np.sqrt(self.dim) * min(
                np.median(np.array(list(arnorm.values()))), 2.0 * arnorm[j]
            )
            # NOTE(review): V here is left over from the LAST iteration of
            # the previous loop, so every term uses the same vector —
            # presumably V should be recomputed per sample X; confirm
            # against the reference implementation before changing.
            rank_1_matrix = np.array(V).transpose() @ np.array(V)
            rank_mu_update += 1 / num_feasible * alphai * rank_1_matrix
        rk_1_weight = 0.6 / ((self.true_dim + 1.3) ** 2 + num_feasible)
        rk_mu_weight = (
            0.04
            * (num_feasible - 2 + (1 / num_feasible))
            / ((self.dim + 2) ** 2 + 0.2 * num_feasible)
        )
        self.transformation = (
            1 - rk_1_weight - rk_mu_weight
        ) * self.transformation
        self.transformation += rk_1_weight * rank_one_update
        self.transformation += rk_mu_weight * rank_mu_update
        self.adapt_covariance()
    def adapt_covariance(self):
        """Refresh self.covariance as a det-normalized sqrt factor of
        self.transformation, retrying normalization with loosening tolerance."""
        vals, vecs = np.linalg.eig(self.transformation)
        idx = (
            vals.argsort()
        )  # why would I sort them? #Josefine does in her matlab implementation...
        vals_sqrt_diag = np.sqrt(vals[idx])
        norm = np.prod(vals_sqrt_diag ** (1 / self.dim))
        vals_sqrt_diag = vals_sqrt_diag * 1 / norm
        Q = vecs[idx] * vals_sqrt_diag
        # Q @ Q.transpose() is approx. self.transformation (modulo norm)
        self.covariance = Q
        norm = np.abs(np.linalg.det(self.covariance))
        cnt = 0
        while not np.allclose(norm, 1, atol=0.1 ** (11 - cnt)) and cnt < 10:
            log.warning(f"covariance matrix not normed ({norm}), retrying.")
            norm = np.abs(np.linalg.det(self.covariance))
            cnt += 1
            self.covariance = np.real(
                1 / (norm ** (1 / self.dim)) * self.covariance
            )
        if not np.allclose(norm, 1, atol=0.1 ** (11 - cnt)):
            log.warning(
                f"failed to norm ({norm}) covariance matrix. Resetting to identity"
            )
            self.transformation = np.identity(self.dim) * self.radius ** 2
            self.covariance = np.identity(self.dim)
    # def draw_volume_projection(self,coordinates):
    #    assert(len(coordinates) == 2)
|
#!/usr/bin/env python
import logging
import os
from json.decoder import JSONDecodeError
from typing import Dict, List, Optional, Union
import requests
from gevent.lock import Semaphore
from rotkehlchen import typing
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.utils import rlk_jsondumps, rlk_jsonloads
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def data_up_todate(json_data: dict, start_ts: typing.Timestamp, end_ts: typing.Timestamp) -> bool:
    """Return True if the cached json covers the requested time range.

    The cache is usable only when it carries 'data', 'start_time' and
    'end_time' keys, all timestamps are non-None, and the requested
    [start_ts, end_ts] window lies entirely inside the cached window.
    """
    for required_key in ('data', 'start_time', 'end_time'):
        if required_key not in json_data:
            return False
    cached_start = json_data['start_time']
    cached_end = json_data['end_time']
    if start_ts is None or cached_start is None or start_ts < cached_start:
        return False
    if end_ts is None or cached_end is None or end_ts > cached_end:
        return False
    return True
class Exchange(object):
    """Base class for exchange connectors.

    Holds the API credentials, a shared requests session and a simple
    JSON-file trade cache stored under the user directory.
    """

    def __init__(
            self,
            name: str,
            api_key: typing.ApiKey,
            secret: typing.ApiSecret,
            user_directory: typing.FilePath,
    ):
        assert isinstance(api_key, typing.T_ApiKey), (
            'api key for {} should be a bytestring'.format(name)
        )
        assert isinstance(secret, typing.T_ApiSecret), (
            'secret for {} should be a bytestring'.format(name)
        )
        self.name = name
        self.user_directory = user_directory
        self.api_key = api_key
        self.secret = secret
        # No private API query has been made yet; subclasses flip this in
        # their first_connection() implementation.
        self.first_connection_made = False
        self.results_cache: dict = {}
        self.lock = Semaphore()
        self.session = requests.session()
        self.session.headers.update({'User-Agent': 'rotkehlchen'})
        log.info(f'Initialized {name} exchange')

    def _get_cachefile_name(self, special_name: str = None) -> str:
        """Path of the cache file: "<name>_trades.json" by default, or
        "<name>_<special_name>.json" for a named cache."""
        if special_name is not None:
            return os.path.join(self.user_directory, "%s_%s.json" % (self.name, special_name))
        return os.path.join(self.user_directory, "%s_trades.json" % self.name)

    def check_trades_cache(
            self,
            start_ts: typing.Timestamp,
            end_ts: typing.Timestamp,
            special_name: str = None,
    ) -> Optional[Union[List, Dict]]:
        """Return the cached data when it covers [start_ts, end_ts], else None."""
        cachefile = self._get_cachefile_name(special_name)
        if not os.path.isfile(cachefile):
            return None
        cache: dict = {}
        with open(cachefile, 'r') as f:
            try:
                cache = rlk_jsonloads(f.read())
            except JSONDecodeError:
                # Corrupt cache file: treat as a cache miss.
                pass
        if not data_up_todate(cache, start_ts, end_ts):
            return None
        # no need to query again
        return cache['data']

    def update_trades_cache(
            self,
            data: Union[List, Dict],
            start_ts: typing.Timestamp,
            end_ts: typing.Timestamp,
            special_name: str = None,
    ) -> None:
        """Overwrite the cache file with data covering [start_ts, end_ts]."""
        cachefile = self._get_cachefile_name(special_name)
        payload: Dict[str, Union[typing.Timestamp, List, Dict]] = {
            'start_time': start_ts,
            'end_time': end_ts,
            'data': data,
        }
        with open(cachefile, 'w') as f:
            f.write(rlk_jsondumps(payload))

    def query_balances(self):
        """Returns the balances held in the exchange in the following format:

            {
                'name' : {'amount': 1337, 'usd_value': 42},
                'ICN': {'amount': 42, 'usd_value': 1337}
            }

        The name must be the canonical name used by rotkehlchen
        """
        raise NotImplementedError("query_balances should only be implemented by subclasses")

    def query_deposits_withdrawals(
            self,
            start_ts: typing.Timestamp,
            end_ts: typing.Timestamp,
            end_at_least_ts: typing.Timestamp,
    ):
        """Query the exchange's deposit/withdrawal history. Subclass hook."""
        raise NotImplementedError(
            'query_deposits_withdrawals should only be implemented by subclasses',
        )

    def first_connection(self):
        """Performs actions that should be done in the first time coming online
        and attempting to query data from an exchange.
        """
        raise NotImplementedError('first_connection() should only be implemented by subclasses')

    def validate_api_key(self):
        """Tries to make the simplest private api query to the exchange in order to
        verify the api key's validity"""
        raise NotImplementedError('validate_api_key() should only be implemented by subclasses')
|
"""New table for notes
Revision ID: 6fe71f4f1aba
Revises: 9c829c41652e
Create Date: 2021-03-21 11:33:27.533851
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "6fe71f4f1aba"
down_revision = "9c829c41652e"
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # One free-text note per person, keyed by their rodne cislo (10 chars).
    notes_columns = [
        sa.Column("rodne_cislo", sa.String(length=10), nullable=False),
        sa.Column("note", sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint("rodne_cislo"),
    ]
    op.create_table("notes", *notes_columns)
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Revert this migration; any stored notes are lost.
    op.drop_table("notes")
    # ### end Alembic commands ###
|
import os.path
class Arguments(object):
    """Command-line arguments (positionals + options) parsed from a JSON dict."""

    def __init__(self,
            json_dict):
        # Missing keys simply mean "no arguments of that kind".
        self.positionals = json_dict.get("positionals", [])
        self.options = json_dict.get("options", {})

    @property
    def to_list(self):
        """Render the arguments as a list of shell-style argument strings.

        Positionals are double-quoted; options render as --key="value",
        or bare --key when the value is falsy.
        """
        rendered = ['"{}"'.format(positional) for positional in self.positionals]
        for name, val in self.options.items():
            if val:
                rendered.append('--{}="{}"'.format(name, val))
            else:
                rendered.append('--{}'.format(name))
        return rendered
class Experiment(object):
    """A single experiment: the command to run plus its run configuration."""

    def __init__(self,
            json,
            name,
            description):
        # Name (kind) of the experiment
        self.name = name
        self.description = description
        self.command_pathname = json["command_pathname"]
        self.nr_time_steps = json["nr_time_steps"]
        # Optional settings default to empty/None when absent from the JSON.
        self.arguments = Arguments(json.get("arguments", {}))
        self.max_duration = json.get("max_duration")
        self.max_tree_depth = json.get("max_tree_depth")
        self.program_name = os.path.basename(self.command_pathname)

    def to_json(self):
        """Serialize the configuration back to a JSON-ready dict,
        emitting the optional limits only when they are set."""
        serialized = {
            "command_pathname": self.command_pathname,
            "nr_time_steps": self.nr_time_steps,
        }
        if self.max_duration:
            serialized["max_duration"] = self.max_duration
        if self.max_tree_depth:
            serialized["max_tree_depth"] = self.max_tree_depth
        return serialized

    @property
    def argument_list(self):
        """Arguments rendered as a list of command-line strings."""
        return self.arguments.to_list

    def workspace_pathname(self,
            cluster_name,
            scenario_name):
        """
        Return pathname of directory in which or below which all
        experiment results must be stored
        """
        return os.path.join(
            os.path.abspath(cluster_name),
            self.program_name,
            scenario_name,
            self.name)

    def result_pathname(self,
            cluster_name,
            scenario_name,
            basename,
            extension):
        """Return "<workspace>/<basename>.<extension>" for this experiment."""
        workspace = self.workspace_pathname(cluster_name, scenario_name)
        return os.path.join(workspace, "{}.{}".format(basename, extension))
|
"""
pdb.py
Open pdb files
"""
import numpy as np
def open_pdb(file_location):
    """
    Open and read a .pdb file, returning atom symbols and coordinates.

    The file must specify the atom elements in the last column and follow
    the conventions outlined in the PDB format specification.

    Parameters
    ----------
    file_location : str
        The file location of the .pdb file

    Returns
    -------
    symbols : list
        The atomic symbols of the elements from the pdb file.
    coordinates : np.ndarray
        The coordinates of the elements from the pdb file.
    """
    symbols = []
    coordinates = []
    with open(file_location) as handle:
        for line in handle:
            # Record name lives in columns 1-6; only atom records carry coordinates.
            record = line[0:6]
            if 'ATOM' in record or 'HETATM' in record:
                # Element symbol from the trailing column, x/y/z from fixed columns.
                symbols.append(line[76:79].strip())
                coordinates.append([float(value) for value in line[30:55].split()])
    return symbols, np.array(coordinates)
|
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.orm import object_session, relationship
from sqlalchemy.sql.functions import current_timestamp
from .Base import Base
from .SnapshotAssociation import SnapshotAssociation
from .Symbol import Symbol
from .Timestamp import Timestamp
from .Version import Version
class Snapshot(Base):
    """A named set of symbol versions within one library."""

    __tablename__ = "snapshots"

    id = Column(Integer, primary_key=True)
    # Snapshot names are unique per library (see __table_args__).
    name = Column(String, index=True, nullable=False)
    timestamp = Column(Timestamp(timezone=True), server_default=current_timestamp())
    library_id = Column(Integer, ForeignKey("libraries.id"), nullable=False)
    library = relationship("Library", back_populates="snapshots")
    versions = relationship("SnapshotAssociation", back_populates="snapshot")
    __table_args__ = (UniqueConstraint("library_id", "name"),)

    def get_symbols(self):
        """Return all Symbol rows that have a version in this snapshot."""
        session = object_session(self)
        associations = session.query(SnapshotAssociation).with_parent(self)
        versions = session.query(Version).join(associations.subquery())
        symbols = session.query(Symbol).join(versions.subquery())
        symbols = symbols.all()
        return symbols

    def get_versions(self):
        """Return all Version rows recorded in this snapshot."""
        session = object_session(self)
        associations = session.query(SnapshotAssociation).with_parent(self)
        versions = session.query(Version).join(associations.subquery())
        versions = versions.all()
        return versions

    def get_version_of_symbol(self, symbol):
        """Return the Version of ``symbol`` recorded in this snapshot.

        ``symbol`` may be a Symbol instance or a symbol name (str);
        a name is resolved within this snapshot's library.
        """
        session = object_session(self)
        if isinstance(symbol, str):
            # BUG FIX: the original used Python's ``and`` inside filter(),
            # which evaluates to just the second expression, so the
            # library_id criterion was silently dropped. Passing both
            # criteria to filter() ANDs them in SQL.
            symbol = (
                session.query(Symbol)
                .filter(Symbol.library_id == self.library_id, Symbol.name == symbol)
                .one()
            )
        associations = session.query(SnapshotAssociation).with_parent(self)
        version = (
            session.query(Version)
            .with_parent(symbol)
            .join(associations.subquery())
            .one()
        )
        return version

    def delete(self):
        """Delete this snapshot and its version associations (not the versions)."""
        session = object_session(self)
        associations = session.query(SnapshotAssociation).with_parent(self)
        associations.delete()
        session.delete(self)
|
#!/usr/bin/env python3
# encoding=utf8
"""Android UI Automate"""
import uiautomator2
import ppadb.client
class RemoteAndroidUI:
    """Drive a remote Android device through adb + uiautomator2."""

    def __init__(self):
        self._adb = ppadb.client.Client()
        self._devices_dict = {}
        # Populate the device map once at construction time.
        self.get_devices()
        self._remote_ui: uiautomator2.Device or None = None

    @property
    def adb(self):
        """The underlying ppadb client."""
        return self._adb

    @property
    def devices(self):
        """Serial -> device mapping from the most recent scan."""
        return self._devices_dict

    @property
    def remote_ui(self):
        """The currently selected uiautomator2 device, or None."""
        return self._remote_ui

    def get_devices(self) -> dict:
        """Rescan adb and rebuild the serial -> device map."""
        scanned = {device.serial: device for device in self.adb.devices()}
        self._devices_dict = scanned
        return scanned

    def add_remote(self, addr: str) -> bool:
        """Connect adb to "host:port"; refresh the device map on success."""
        host, port = addr.split(':', maxsplit=1)
        connected = self.adb.remote_connect(host, int(port))
        if connected:
            self.get_devices()
        return connected

    def sel_remote_ui(self, addr: str) -> bool:
        """Select addr as the active uiautomator2 device, connecting if needed."""
        if addr not in self.devices and not self.add_remote(addr):
            return False
        self._remote_ui = uiautomator2.connect(addr)
        return True

    def click_object(self, prop: str, value: str, timeout=None, offset=None) -> bool:
        """Find the UI object matching prop=value and click it; False on failure."""
        if self.remote_ui is None:
            return False
        target = self.find_object(prop, value)
        if not target:
            return False
        try:
            target.click(timeout=timeout, offset=offset)
        except uiautomator2.exceptions.UiObjectNotFoundError:
            # Object vanished between lookup and click.
            return False
        return True

    def find_object(self, prop: str, value: str, timeout=None) -> uiautomator2.UiObject or None:
        """Return the UI object matching prop=value, or None when absent."""
        if self.remote_ui is None:
            return None
        candidate = self.remote_ui(**{prop: value})
        try:
            candidate.must_wait(timeout=timeout)
        except uiautomator2.exceptions.UiObjectNotFoundError:
            return None
        return candidate
|
#!/usr/bin/env python
# -*- coding: utf-8
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
# Timothy Berkelbach <tim.berkelbach@gmail.com>
#
import sys
import json
import ctypes
import warnings
import numpy as np
import scipy.linalg
import scipy.misc
import scipy.special
import scipy.optimize
import pyscf.lib.parameters as param
from pyscf import lib
from pyscf import dft
from pyscf.lib import logger
from pyscf.gto import mole
from pyscf.gto import moleintor
from pyscf.gto.mole import _symbol, _rm_digit, _atom_symbol, _std_symbol, _charge
from pyscf.gto.mole import conc_env, uncontract
from pyscf.pbc.gto import basis
from pyscf.pbc.gto import pseudo
from pyscf.pbc.tools import pbc as pbctools
from pyscf.gto.basis import ALIAS as MOLE_ALIAS
# For code compatiblity in python-2 and python-3
if sys.version_info >= (3,):
unicode = str
libpbc = lib.load_library('libpbc')
def M(**kwargs):
    r'''Shortcut to construct a :class:`Cell` and build it in one call.

    All keyword arguments are forwarded to :meth:`Cell.build`.

    Examples:

    >>> from pyscf.pbc import gto
    >>> cell = gto.M(a=numpy.eye(3)*4, atom='He 1 1 1', basis='6-31g', gs=[10]*3)
    '''
    new_cell = Cell()
    new_cell.build(**kwargs)
    return new_cell
C = M
def format_pseudo(pseudo_tab):
    r'''Convert the input :attr:`Cell.pseudo` (dict) to the internal data format::

        { atom: ( (nelec_s, nele_p, nelec_d, ...),
                  rloc, nexp, (cexp_1, cexp_2, ..., cexp_nexp),
                  nproj_types,
                  (r1, nproj1, ( (hproj1[1,1], hproj1[1,2], ..., hproj1[1,nproj1]),
                                 ...
                                 (hproj1[nproj1,1], hproj1[nproj1,2], ... ) )),
                  (r2, nproj2, ( ... ) )
                )
          ... }

    Args:
        pseudo_tab : dict
            Similar to :attr:`Cell.pseudo` (a dict), it **cannot** be a str

    Returns:
        Formatted :attr:`~Cell.pseudo`

    Examples:

    >>> pbc.format_pseudo({'H':'gth-blyp', 'He': 'gth-pade'})
    {'H': [[1],
        0.2, 2, [-4.19596147, 0.73049821], 0],
     'He': [[2],
        0.2, 2, [-9.1120234, 1.69836797], 0]}
    '''
    fmt_pseudo = {}
    for atom, pp in pseudo_tab.items():
        symb = _symbol(atom)
        rawsymb = _rm_digit(symb)
        stdsymb = _std_symbol(rawsymb)
        # Normalize the element part of the label while keeping any suffix.
        symb = symb.replace(rawsymb, stdsymb)
        if isinstance(pp, (str, unicode)):
            # Named pseudopotential: load from the built-in library.
            fmt_pseudo[symb] = pseudo.load(str(pp), stdsymb)
        else:
            # Already in internal format; keep as given.
            fmt_pseudo[symb] = pp
    return fmt_pseudo
def make_pseudo_env(cell, _atm, _pseudo, pre_env=None):
    '''Apply pseudopotential electron counts to the integral environment.

    For every atom with a pseudopotential entry, overwrite column 0 of its
    ``_atm`` row (the nuclear charge slot) with the number of valence
    electrons of the pseudopotential.

    Args:
        cell : Cell whose ``_atom`` list of (symbol, coords) is scanned.
        _atm : integer ndarray, one row per atom; modified in place.
        _pseudo : dict mapping atom symbol to pseudopotential data; the first
            entry holds the per-shell valence electron counts.
        pre_env : optional pre-existing env list. Defaults to a fresh empty
            list per call; the previous mutable default ``[]`` was shared
            between calls and could leak state between builds.

    Returns:
        (_atm, None, pre_env) — there is no pseudopotential basis block.
    '''
    if pre_env is None:
        pre_env = []
    for ia, atom in enumerate(cell._atom):
        symb = atom[0]
        if symb in _pseudo:
            # Effective nuclear charge = total valence electrons of the PP.
            _atm[ia,0] = sum(_pseudo[symb][0])
    _pseudobas = None
    return _atm, _pseudobas, pre_env
def format_basis(basis_tab):
    '''Convert the input :attr:`Cell.basis` to the internal data format::

        { atom: (l, kappa, ((-exp, c_1, c_2, ..), nprim, nctr, ptr-exps, ptr-contraction-coeff)), ... }

    Args:
        basis_tab : dict
            Similar to :attr:`Cell.basis`, it **cannot** be a str

    Returns:
        Formated :attr:`~Cell.basis`

    Examples:

    >>> pbc.format_basis({'H':'gth-szv'})
    {'H': [[0,
        (8.3744350009, -0.0283380461),
        (1.8058681460, -0.1333810052),
        (0.4852528328, -0.3995676063),
        (0.1658236932, -0.5531027541)]]}
    '''
    def load_named(basis_name, symb):
        # An 'unc' prefix requests an uncontracted version of the named basis.
        if basis_name.lower().startswith('unc'):
            return uncontract(basis.load(basis_name[3:], symb))
        return basis.load(basis_name, symb)

    fmt_basis = {}
    for atom, atom_basis in basis_tab.items():
        symb = _atom_symbol(atom)
        stdsymb = _std_symbol(symb)
        if stdsymb.startswith('GHOST-'):
            stdsymb = stdsymb[6:]
        if isinstance(atom_basis, (str, unicode)):
            # Only GTH sets live in the PBC basis library; anything else is
            # passed through for the molecular formatter to resolve.
            if 'gth' in atom_basis:
                bset = load_named(str(atom_basis), symb)
            else:
                bset = atom_basis
        else:
            bset = []
            for rawb in atom_basis:
                if isinstance(rawb, (str, unicode)) and 'gth' in rawb:
                    bset.append(load_named(str(rawb), stdsymb))
                else:
                    bset.append(rawb)
        fmt_basis[symb] = bset
    return mole.format_basis(fmt_basis)
def copy(cell):
    '''Deepcopy of the given :class:`Cell` object
    '''
    import copy
    # mole.copy handles the Mole part; only _pseudo needs an extra deep copy.
    cell_copy = mole.copy(cell)
    cell_copy._pseudo = copy.deepcopy(cell._pseudo)
    return cell_copy
def pack(cell):
    '''Pack the input args of :class:`Cell` to a dict, which can be serialized
    with :mod:`pickle`
    '''
    # Start from the molecular attributes, then add the PBC-specific ones.
    cldic = mole.pack(cell)
    for attr in ('a', 'gs', 'precision', 'pseudo', 'ke_cutoff',
                 'rcut', 'ew_eta', 'ew_cut', 'dimension'):
        cldic[attr] = getattr(cell, attr)
    return cldic
def unpack(celldic):
    '''Convert the packed dict back to a :class:`Cell` object, restoring the
    input arguments of the :class:`Cell`.
    '''
    cell = Cell()
    cell.__dict__.update(celldic)
    return cell
def dumps(cell):
    '''Serialize a Cell object to a JSON formatted str.

    ndarray attributes are converted to lists; atom/basis/pseudo/ecp are
    stored as their repr() so loads() can eval() them back. Attributes that
    are not JSON-serializable are dropped with a warning.
    '''
    exclude_keys = set(('output', 'stdout', '_keys'))
    celldic = dict(cell.__dict__)
    for k in exclude_keys:
        # pop() instead of del: do not crash when a key was never set.
        celldic.pop(k, None)
    for k in celldic:
        if isinstance(celldic[k], np.ndarray):
            celldic[k] = celldic[k].tolist()
    celldic['atom'] = repr(cell.atom)
    celldic['basis']= repr(cell.basis)
    celldic['pseudo'] = repr(cell.pseudo)
    celldic['ecp'] = repr(cell.ecp)
    try:
        return json.dumps(celldic)
    except TypeError:
        # BUG FIX: the original tuple referenced the builtin `long`, which
        # does not exist on Python 3 and raised NameError in this fallback.
        try:
            scalar_types = (str, unicode, bool, int, long, float)
        except NameError:  # Python 3
            scalar_types = (str, bool, int, float)
        def skip_value(dic):
            # Keep JSON-friendly values, coerce sets to lists, recurse into
            # dicts and warn about (and drop) everything else.
            dic1 = {}
            for k,v in dic.items():
                if (v is None or isinstance(v, scalar_types)):
                    dic1[k] = v
                elif isinstance(v, (list, tuple)):
                    dic1[k] = v   # Should I recursively skip_vaule?
                elif isinstance(v, set):
                    dic1[k] = list(v)
                elif isinstance(v, dict):
                    dic1[k] = skip_value(v)
                else:
                    msg =('Function cell.dumps drops attribute %s because '
                          'it is not JSON-serializable' % k)
                    warnings.warn(msg)
            return dic1
        return json.dumps(skip_value(celldic), skipkeys=True)
def loads(cellstr):
    '''Deserialize a str containing a JSON document to a Cell object.

    Inverse of :func:`dumps`: the atom/basis/pseudo/ecp fields were stored
    as repr() strings and are eval()-ed back, so only load trusted data.
    '''
    from numpy import array  # noqa: needed in scope for the eval() calls below
    celldic = json.loads(cellstr)
    if sys.version_info < (3,):
        # Convert to utf8 because the JSON loads function returns unicode.
        def byteify(inp):
            if isinstance(inp, dict):
                return dict([(byteify(k), byteify(v)) for k, v in inp.iteritems()])
            elif isinstance(inp, (tuple, list)):
                return [byteify(x) for x in inp]
            elif isinstance(inp, unicode):
                return inp.encode('utf-8')
            else:
                return inp
        celldic = byteify(celldic)
    cell = Cell()
    cell.__dict__.update(celldic)
    cell.atom = eval(cell.atom)
    cell.basis = eval(cell.basis)
    cell.pseudo = eval(cell.pseudo)
    # BUG FIX: the original assigned the evaluated ecp to cell.pseudo,
    # clobbering the pseudo attribute and leaving ecp as its repr string.
    cell.ecp = eval(cell.ecp)
    cell._atm = np.array(cell._atm, dtype=np.int32)
    cell._bas = np.array(cell._bas, dtype=np.int32)
    cell._env = np.array(cell._env, dtype=np.double)
    cell._ecpbas = np.array(cell._ecpbas, dtype=np.int32)
    return cell
def intor_cross(intor, cell1, cell2, comp=1, hermi=0, kpts=None, kpt=None):
    r'''1-electron integrals from two cells like

    .. math::

        \langle \mu | intor | \nu \rangle, \mu \in cell1, \nu \in cell2

    Args:
        intor : str
            Name of the 1-electron integral (normalized via ascint3).
        cell1, cell2 : Cell
            Bra and ket cells; their environments are concatenated.
        comp : int
            Number of integral components.
        hermi : int
            0 for no symmetry; nonzero triggers s2 symmetry and a
            hermitian fill of the triangular part.
        kpts / kpt : k-point array or single k-point (1/Bohr).

    Returns:
        A (comp,ni,nj) array per k-point (list over k-points), or a single
        array when a single k-point was requested.
    '''
    intor = moleintor.ascint3(intor)
    # Normalize the k-point input to an (nkpts, 3) array.
    if kpts is None:
        if kpt is not None:
            kpts_lst = np.reshape(kpt, (1,3))
        else:
            kpts_lst = np.zeros((1,3))
    else:
        kpts_lst = np.reshape(kpts, (-1,3))
    nkpts = len(kpts_lst)
    # Merge both cells into one integral environment; cell2's shells follow cell1's.
    atm, bas, env = conc_env(cell1._atm, cell1._bas, cell1._env,
                             cell2._atm, cell2._bas, cell2._env)
    atm = np.asarray(atm, dtype=np.int32)
    bas = np.asarray(bas, dtype=np.int32)
    env = np.asarray(env, dtype=np.double)
    natm = len(atm)
    nbas = len(bas)
    # Bra shells are [0, cell1.nbas), ket shells are [cell1.nbas, nbas).
    shls_slice = (0, cell1.nbas, cell1.nbas, nbas)
    ao_loc = moleintor.make_loc(bas, intor)
    ni = ao_loc[shls_slice[1]] - ao_loc[shls_slice[0]]
    nj = ao_loc[shls_slice[3]] - ao_loc[shls_slice[2]]
    out = np.empty((nkpts,comp,ni,nj), dtype=np.complex128)
    if hermi == 0:
        aosym = 's1'
    else:
        aosym = 's2'
    # Resolve the C driver entry points for this integral/symmetry combination.
    fill = getattr(libpbc, 'PBCnr2c_fill_k'+aosym)
    fintor = getattr(moleintor.libcgto, intor)
    intopt = lib.c_null_ptr()
    # Lattice translations within the larger of the two cutoff radii, and
    # the Bloch phase factors exp(i k.L) for each k-point.
    Ls = cell1.get_lattice_Ls(rcut=max(cell1.rcut, cell2.rcut))
    expkL = np.asarray(np.exp(1j*np.dot(kpts_lst, Ls.T)), order='C')
    drv = libpbc.PBCnr2c_drv
    drv(fintor, fill, out.ctypes.data_as(ctypes.c_void_p),
        ctypes.c_int(nkpts), ctypes.c_int(comp), ctypes.c_int(len(Ls)),
        Ls.ctypes.data_as(ctypes.c_void_p),
        expkL.ctypes.data_as(ctypes.c_void_p),
        (ctypes.c_int*4)(*(shls_slice[:4])),
        ao_loc.ctypes.data_as(ctypes.c_void_p), intopt,
        atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(natm),
        bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nbas),
        env.ctypes.data_as(ctypes.c_void_p))
    # Post-process each k-point block: complete the triangle for hermitian
    # integrals, squeeze the component axis, and drop the imaginary part at gamma.
    mat = []
    for k, kpt in enumerate(kpts_lst):
        v = out[k]
        if hermi != 0:
            for ic in range(comp):
                lib.hermi_triu(v[ic], hermi=hermi, inplace=True)
        if comp == 1:
            v = v[0]
        if abs(kpt).sum() < 1e-9:  # gamma_point
            v = v.real
        mat.append(v)
    if kpts is None or np.shape(kpts) == (3,):  # A single k-point
        mat = mat[0]
    return mat
def get_nimgs(cell, precision=None):
    r'''Choose number of basis function images in lattice sums
    to include for given precision in overlap, using

    precision ~ \int r^l e^{-\alpha r^2} (r-rcut)^l e^{-\alpha (r-rcut)^2}
              ~ (rcut^2/(2\alpha))^l e^{\alpha/2 rcut^2}

    where \alpha is the smallest exponent in the basis. Note
    that assumes an isolated exponent in the middle of the box, so
    it adds one additional lattice vector to be safe.
    '''
    if precision is None:
        precision = cell.precision
    # The largest per-shell cutoff radius determines the supercell size.
    rcut = max(cell.bas_rcut(ib, precision) for ib in range(cell.nbas))
    return cell.get_bounding_sphere(rcut)
def _estimate_rcut(alpha, l, c, precision=1e-8):
C = (c**2+1e-200)*(2*l+1)*alpha / precision
r0 = 20
r0 = np.sqrt(max(0, 2*np.log(C*(r0**2*alpha)**(l+1)).max()) / alpha)
rcut = np.sqrt(max(0, 2*np.log(C*(r0**2*alpha)**(l+1)).max()) / alpha)
return rcut
def bas_rcut(cell, bas_id, precision=1e-8):
    r'''Estimate the largest distance between the function and its image to
    reach the precision in overlap

    precision ~ \int g(r-0) g(r-R)
    '''
    ang = cell.bas_angular(bas_id)
    exps = cell.bas_exp(bas_id)
    # Worst-case contraction coefficient per primitive.
    coeffs = abs(cell.bas_ctr_coeff(bas_id)).max(axis=1)
    return _estimate_rcut(exps, ang, coeffs, precision).max()
def _estimate_ke_cutoff(alpha, l, c, precision=1e-8, weight=1.):
'''Energy cutoff estimation'''
log_k0 = 2.5 + np.log(alpha) / 2
l2fac2 = scipy.misc.factorial2(l*2+1)
log_rest = np.log(precision*l2fac2**2*(4*alpha)**(l*2+1) / (32*np.pi**2*c**4*weight))
Ecut = 2*alpha * (log_k0*(4*l+3) - log_rest)
Ecut[Ecut<0] = 1e-10
log_k0 = .5 * np.log(Ecut*2)
Ecut = 2*alpha * (log_k0*(4*l+3) - log_rest)
return Ecut.max()
def estimate_ke_cutoff(cell, precision=1e-8):
    '''Energy cutoff estimation: the largest per-shell cutoff over the cell.'''
    recip = cell.reciprocal_vectors()
    # Reciprocal-space integration weight depends on the periodic dimension.
    if cell.dimension == 0:
        w = 1
    elif cell.dimension == 1:
        w = np.linalg.norm(recip[0]) / (2*np.pi)
    elif cell.dimension == 2:
        w = np.linalg.norm(np.cross(recip[0], recip[1])) / (2*np.pi)**2
    else:
        w = abs(np.linalg.det(recip)) / (2*np.pi)**3
    shell_cutoffs = [
        _estimate_ke_cutoff(cell.bas_exp(i), cell.bas_angular(i),
                            abs(cell.bas_ctr_coeff(i)).max(axis=1),
                            precision, w)
        for i in range(cell.nbas)
    ]
    # Floor at 0, matching the original running-max that started from 0.
    return max(shell_cutoffs + [0])
def error_for_ke_cutoff(cell, ke_cutoff):
    '''Estimate the error introduced by truncating planewaves at *ke_cutoff*,
    as the maximum over all shells of the cell.
    '''
    b = cell.reciprocal_vectors()
    # Reciprocal-space integration weight for the periodic dimension.
    if cell.dimension == 0:
        w = 1
    elif cell.dimension == 1:
        w = np.linalg.norm(b[0]) / (2*np.pi)
    elif cell.dimension == 2:
        w = np.linalg.norm(np.cross(b[0], b[1])) / (2*np.pi)**2
    else:
        w = abs(np.linalg.det(b)) / (2*np.pi)**3
    kmax = np.sqrt(ke_cutoff*2)
    errmax = 0
    for i in range(cell.nbas):
        l = cell.bas_angular(i)
        es = cell.bas_exp(i)
        cs = abs(cell.bas_ctr_coeff(i)).max(axis=1)
        # BUG FIX: scipy.misc.factorial2 was removed in SciPy >= 1.0; use
        # scipy.special.factorial2 (already imported by this module).
        # An unreachable `if 0:` branch with an alternative (erfc-based)
        # error bound was removed as dead code.
        fac = 64*np.pi**2*cs**4*w / scipy.special.factorial2(l*2+1)**2
        efac = np.exp(-ke_cutoff/(2*es))
        err1 = .5*fac/(4*es)**(2*l+1) * kmax**(4*l+3) * efac
        errmax = max(errmax, err1.max())
        # Low-cutoff corrections for primitives whose exponent is not small
        # compared to the cutoff.
        if np.any(ke_cutoff < 5*es):
            err2 = (1.41*efac+2.51)*fac/(4*es)**(2*l+2) * kmax**(4*l+5)
            errmax = max(errmax, err2[ke_cutoff<5*es].max())
        if np.any(ke_cutoff < es):
            err2 = (1.41*efac+2.51)*fac/2**(2*l+2) * np.sqrt(2*es)
            errmax = max(errmax, err2[ke_cutoff<es].max())
    return errmax
def get_bounding_sphere(cell, rcut):
    '''Finds all the lattice points within a sphere of radius rcut.

    Defines a parallelipiped given by -N_x <= n_x <= N_x, with x in [1,3]
    See Martin p. 85

    Args:
        rcut : number
            real space cut-off for interaction

    Returns:
        cut : ndarray of 3 ints defining N_x
    '''
    # Unit-normalized reciprocal vectors give the inverse plane spacings,
    # so rcut * |b_i| images are needed along direction i.
    binv = cell.reciprocal_vectors(norm_to=1)
    heights_inv = lib.norm(binv, axis=1)
    nimgs = np.ceil(rcut*heights_inv).astype(int)
    # Single image along each non-periodic direction.
    for axis in range(cell.dimension, 3):
        nimgs[axis] = 1
    return nimgs
def get_Gv(cell, gs=None):
    '''Calculate three-dimensional G-vectors for the cell; see MH (3.8).

    Indices along each direction go as [0...cell.gs, -cell.gs...-1]
    to follow FFT convention. Note that, for each direction, ngs = 2*cell.gs+1.

    Args:
        cell : instance of :class:`Cell`

    Returns:
        Gv : (ngs, 3) ndarray of floats
            The array of G-vectors.
    '''
    if gs is None:
        gs = cell.gs
    # FFT ordering along each axis: 0..n, then -n..-1 (2n+1 points).
    axis_indices = [np.append(range(n+1), range(-n, 0)) for n in gs]
    gxyz = lib.cartesian_prod(axis_indices)
    return np.dot(gxyz, cell.reciprocal_vectors())
def get_Gv_weights(cell, gs=None):
    '''Calculate G-vectors and weights.

    For 3D-periodic cells a uniform FFT grid is used (constant weight);
    for lower dimensions the non-periodic directions are sampled with a
    Gauss-Chebyshev quadrature symmetrized around zero.

    Returns:
        Gv : (ngs, 3) ndarray of floats
            The array of G-vectors.
        Gvbase : tuple of three 1D arrays, the per-axis grids.
        weights : per-G integration weights (scalar for the 3D case).
    '''
    if gs is None:
        gs = cell.gs
    def plus_minus(n):
        # Symmetrize a positive-axis quadrature to cover both signs.
        #rs, ws = dft.delley(n)
        #rs, ws = dft.treutler_ahlrichs(n)
        #rs, ws = dft.mura_knowles(n)
        rs, ws = dft.gauss_chebyshev(n)
        #return np.hstack((0,rs,-rs[::-1])), np.hstack((0,ws,ws[::-1]))
        return np.hstack((rs,-rs[::-1])), np.hstack((ws,ws[::-1]))
    # Default, the 3D uniform grids (FFT ordering: 0..gs, -gs..-1 per axis).
    b = cell.reciprocal_vectors()
    rx = np.append(np.arange(gs[0]+1.), np.arange(-gs[0],0.))
    ry = np.append(np.arange(gs[1]+1.), np.arange(-gs[1],0.))
    rz = np.append(np.arange(gs[2]+1.), np.arange(-gs[2],0.))
    ngs = [i*2+1 for i in gs]
    if cell.dimension == 0:
        # All three axes quadrature-sampled; scale by the reciprocal lengths.
        rx, wx = plus_minus(gs[0])
        ry, wy = plus_minus(gs[1])
        rz, wz = plus_minus(gs[2])
        rx /= np.linalg.norm(b[0])
        ry /= np.linalg.norm(b[1])
        rz /= np.linalg.norm(b[2])
        weights = np.einsum('i,j,k->ijk', wx, wy, wz).reshape(-1)
    elif cell.dimension == 1:
        # Uniform along the periodic x axis, quadrature along y and z.
        wx = np.repeat(np.linalg.norm(b[0]), ngs[0])
        ry, wy = plus_minus(gs[1])
        rz, wz = plus_minus(gs[2])
        ry /= np.linalg.norm(b[1])
        rz /= np.linalg.norm(b[2])
        weights = np.einsum('i,j,k->ijk', wx, wy, wz).reshape(-1)
    elif cell.dimension == 2:
        # Uniform in the periodic xy plane, quadrature along z.
        area = np.linalg.norm(np.cross(b[0], b[1]))
        wxy = np.repeat(area, ngs[0]*ngs[1])
        rz, wz = plus_minus(gs[2])
        rz /= np.linalg.norm(b[2])
        weights = np.einsum('i,k->ik', wxy, wz).reshape(-1)
    else:
        # Fully periodic: constant weight = reciprocal-cell volume.
        weights = abs(np.linalg.det(b))
    Gvbase = (rx, ry, rz)
    Gv = np.dot(lib.cartesian_prod(Gvbase), b)
    # 1/cell.vol == det(b)/(2pi)^3
    weights *= 1/(2*np.pi)**3
    return Gv, Gvbase, weights
def get_SI(cell, Gv=None):
    '''Calculate the structure factor for all atoms; see MH (3.34).

    Args:
        cell : instance of :class:`Cell`

        Gv : (N,3) array
            G vectors

    Returns:
        SI : (natm, ngs) ndarray, dtype=np.complex128
            The structure factor for each atom at each G-vector.
    '''
    if Gv is None:
        Gv = cell.get_Gv()
    # S_I(G) = exp(-i G . R_I) for every atom position R_I.
    return np.exp(-1j*np.dot(cell.atom_coords(), Gv.T))
def get_ewald_params(cell, precision=1e-8, gs=None):
    r'''Choose a reasonable value of Ewald 'eta' and 'cut' parameters.

    Choice is based on largest G vector and desired relative precision.

    The relative error in the G-space sum is given by

        precision ~ 4\pi Gmax^2 e^{(-Gmax^2)/(4 \eta^2)}

    which determines eta. Then, real-space cutoff is determined by (exp.
    factors only)

        precision ~ erfc(eta*rcut) / rcut ~ e^{(-eta**2 rcut*2)}

    Returns:
        ew_eta, ew_cut : float
            The Ewald 'eta' and 'cut' parameters.
    '''
    if cell.dimension == 3:
        # Uniform grid: derive eta from the largest included G at this gs.
        if gs is None:
            gs = 5
        Gmax = min(np.asarray(gs) * lib.norm(cell.reciprocal_vectors(), axis=1))
        log_precision = np.log(precision/(4*np.pi*Gmax**2))
        ew_eta = np.sqrt(-Gmax**2/(4*log_precision))
        ew_cut = _estimate_rcut(ew_eta**2, 0, 1., precision)
        return ew_eta, ew_cut
    # Non-uniform PW grids are used for low-dimensional ewald summation. The
    # cutoff estimation for the long range part based on exp(G^2/(4*eta^2))
    # does not work for non-uniform grids. Smooth model density is preferred.
    ew_cut = cell.rcut
    ew_eta = np.sqrt(max(np.log(4*np.pi*ew_cut**2/precision)/ew_cut**2, .1))
    return ew_eta, ew_cut
def ewald(cell, ew_eta=None, ew_cut=None):
    '''Perform real (R) and reciprocal (G) space Ewald sum for the energy.

    Formulation of Martin, App. F2.

    Returns:
        float
            The Ewald energy consisting of overlap, self, and G-space sum.

    See Also:
        pyscf.pbc.gto.get_ewald_params
    '''
    if ew_eta is None: ew_eta = cell.ew_eta
    if ew_cut is None: ew_cut = cell.ew_cut
    chargs = cell.atom_charges()
    coords = cell.atom_coords()
    Lall = cell.get_lattice_Ls(rcut=ew_cut)
    # Real-space (overlap) part: screened Coulomb between distinct atom pairs,
    # summed over all lattice translations.
    ewovrl = 0.
    for i, qi in enumerate(chargs):
        ri = coords[i]
        for j in range(i):
            qj = chargs[j]
            rj = coords[j]
            r1 = ri-rj + Lall
            r = np.sqrt(np.einsum('ji,ji->j', r1, r1))
            ewovrl += (qi * qj / r * scipy.special.erfc(ew_eta * r)).sum()
    # Same-atom interactions with their own periodic images:
    # exclude the point where Lall == 0
    r = lib.norm(Lall, axis=1)
    r[r<1e-16] = 1e200
    ewovrl += .5 * (chargs**2).sum() * (1./r * scipy.special.erfc(ew_eta * r)).sum()
    # Self-energy of the Gaussian screening charges:
    # last line of Eq. (F.5) in Martin
    ewself = -.5 * np.dot(chargs,chargs) * 2 * ew_eta / np.sqrt(np.pi)
    if cell.dimension == 3:
        # Neutralizing background correction for charged 3D cells.
        ewself += -.5 * np.sum(chargs)**2 * np.pi/(ew_eta**2 * cell.vol)
    # g-space sum (using g grid) (Eq. (F.6) in Martin, but note errors as below)
    # Eq. (F.6) in Martin is off by a factor of 2, the
    # exponent is wrong (8->4) and the square is in the wrong place
    #
    # Formula should be
    #   1/2 * 4\pi / Omega \sum_I \sum_{G\neq 0} |ZS_I(G)|^2 \exp[-|G|^2/4\eta^2]
    # where
    #   ZS_I(G) = \sum_a Z_a exp (i G.R_a)
    # See also Eq. (32) of ewald.pdf at
    #   http://www.fisica.uniud.it/~giannozz/public/ewald.pdf
    gs = cell.gs
    Gv, Gvbase, weights = cell.get_Gv_weights(gs)
    absG2 = np.einsum('gi,gi->g', Gv, Gv)
    # Mask G = 0 by sending its 1/|G|^2 contribution to ~0.
    absG2[absG2==0] = 1e200
    coulG = 4*np.pi / absG2
    coulG *= weights
    JexpG2 = np.exp(-absG2/(4*ew_eta**2)) * coulG
    # Charge-weighted structure factor Z*S(G) and its squared modulus.
    ZSI = np.einsum("i,ij->j", chargs, cell.get_SI(Gv))
    ZSIG2 = np.abs(ZSI)**2
    ewg = .5 * np.dot(ZSIG2, JexpG2)
    logger.debug(cell, 'Ewald components = %.15g, %.15g, %.15g', ewovrl, ewself, ewg)
    return ewovrl + ewself + ewg
# The nuclear repulsion energy of a periodic cell IS the Ewald energy.
energy_nuc = ewald
def make_kpts(cell, nks, wrap_around=False, with_gamma_point=True):
    '''Given number of kpoints along x,y,z , generate kpoints

    Args:
        nks : (3,) ndarray

    Kwargs:
        wrap_around : bool
            To ensure all kpts are in first Brillouin zone.
        with_gamma_point : bool
            Whether to shift Monkhorst-pack grid to include gamma-point.

    Returns:
        kpts in absolute value (unit 1/Bohr). Gamma point is placed at the
        first place in the k-points list

    Examples:

    >>> cell.make_kpts((4,4,4))
    '''
    def axis_fractions(n):
        # Monkhorst-Pack fractions, optionally shifted to include gamma.
        if with_gamma_point:
            fracs = np.arange(n, dtype=float) / n
        else:
            fracs = (np.arange(n)+.5)/n-.5
        if wrap_around:
            # Fold fractions >= 1/2 back into the first Brillouin zone.
            fracs[fracs>=.5] -= 1
        return fracs

    scaled_kpts = lib.cartesian_prod([axis_fractions(n) for n in nks])
    return cell.get_abs_kpts(scaled_kpts)
def gen_uniform_grids(cell, gs=None):
    '''Generate a uniform real-space grid consistent w/ samp thm; see MH (3.19).

    Args:
        cell : instance of :class:`Cell`

    Returns:
        coords : (ngx*ngy*ngz, 3) ndarray
            The real-space grid point coordinates.
    '''
    if gs is None:
        gs = cell.gs
    ngs = 2*np.asarray(gs)+1
    # Integer grid indices along each axis, mapped into Cartesian space via
    # the fractional lattice vectors.
    qv = lib.cartesian_prod([np.arange(x) for x in ngs])
    a_frac = np.einsum('i,ij->ij', 1./ngs, cell.lattice_vectors())
    return np.dot(qv, a_frac)
# Check whether ecp keywords are presented in pp and whether pp keywords are
# presented in ecp. The return (ecp, pp) should have only the ecp keywords and
# pp keywords in each dict.
# The "misplaced" ecp/pp keywords have lowest priority, ie if the atom is
# defined in ecp, the misplaced ecp atom found in pp does NOT replace the
# definition in ecp, and versa vise.
def classify_ecp_pseudo(cell, ecp, pp):
    '''Sort misplaced ECP/pseudopotential keywords into the right dict.

    Checks whether ecp keywords are present in pp and whether pp keywords
    are present in ecp, and returns (ecp, pp) each holding only its own
    kind of keywords. Misplaced keywords have lowest priority: an atom
    already defined in the correct dict is NOT overwritten by a misplaced
    definition found in the other dict.
    '''
    def classify(ecp, pp_alias):
        # Return (kept, misplaced): entries of `ecp` whose names resolve in
        # `pp_alias` are misplaced and split off into the second dict.
        if isinstance(ecp, (str, unicode)):
            if pseudo._format_pseudo_name(ecp)[0] in pp_alias:
                return {}, str(ecp)
        elif isinstance(ecp, dict):
            ecp_as_pp = {}
            for atom in ecp:
                key = ecp[atom]
                if (isinstance(key, (str, unicode)) and
                    pseudo._format_pseudo_name(key)[0] in pp_alias):
                    ecp_as_pp[atom] = str(key)
            if ecp_as_pp:
                ecp_left = dict(ecp)
                for atom in ecp_as_pp:
                    ecp_left.pop(atom)
                return ecp_left, ecp_as_pp
        # Nothing misplaced: keep the input as-is.
        return ecp, {}
    ecp_left, ecp_as_pp = classify(ecp, pseudo.ALIAS)
    pp_left , pp_as_ecp = classify(pp, MOLE_ALIAS)

    # ecp = ecp_left + pp_as_ecp
    # pp = pp_left + ecp_as_pp
    ecp = ecp_left
    if pp_as_ecp and not isinstance(ecp_left, (str, unicode)):
        # If ecp is a str, all atoms have ecp definition. The misplaced ecp has no effects.
        logger.info(cell, 'PBC pseudo-potentials keywords for %s found in .ecp',
                    pp_as_ecp.keys())
        if ecp_left:
            # update() with ecp_left LAST so correct definitions win.
            pp_as_ecp.update(ecp_left)
        ecp = pp_as_ecp
    pp = pp_left
    if ecp_as_pp and not isinstance(pp_left, (str, unicode)):
        logger.info(cell, 'ECP keywords for %s found in PBC .pseudo',
                    ecp_as_pp.keys())
        if pp_left:
            # Same priority rule: existing pp definitions override misplaced ones.
            ecp_as_pp.update(pp_left)
        pp = ecp_as_pp
    return ecp, pp
class Cell(mole.Mole):
    '''A Cell object holds the basic information of a crystal.

    Attributes:
        a : (3,3) ndarray
            Lattice primitive vectors. Each row represents a lattice vector
            Reciprocal lattice vectors are given by b1,b2,b3 = 2 pi inv(a).T
        gs : (3,) list of ints
            The number of *positive* G-vectors along each direction.
            The default value is estimated based on :attr:`precision`
        pseudo : dict or str
            To define pseudopotential.
        precision : float
            To control Ewald sums and lattice sums accuracy
        rcut : float
            Cutoff radius (unit Bohr) in lattice summation. The default value
            is estimated based on the required :attr:`precision`.
        ke_cutoff : float
            If set, defines a spherical cutoff of planewaves, with .5 * G**2 < ke_cutoff
            The default value is estimated based on :attr:`precision`
        dimension : int
            Default is 3

    ** Following attributes (for experts) are automatically generated. **
        ew_eta, ew_cut : float
            The Ewald 'eta' and 'cut' parameters. See :func:`get_ewald_params`

    (See other attributes in :class:`Mole`)

    Examples:

    >>> mol = Mole(atom='H^2 0 0 0; H 0 0 1.1', basis='sto3g')
    >>> cl = Cell()
    >>> cl.build(a='3 0 0; 0 3 0; 0 0 3', gs=[8,8,8], atom='C 1 1 1', basis='sto3g')
    >>> print(cl.atom_symbol(0))
    C
    '''
    def __init__(self, **kwargs):
        mole.Mole.__init__(self, **kwargs)
        self.a = None  # lattice vectors, (a1,a2,a3)
        self.gs = None
        self.ke_cutoff = None  # if set, defines a spherical cutoff of fourier
                               # components, with .5 * G**2 < ke_cutoff
        self.precision = 1.e-8
        self.pseudo = None
        self.dimension = 3
        ##################################################
        # These attributes are initialized by build function if not given
        self.ew_eta = None
        self.ew_cut = None
        self.rcut = None
        ##################################################
        # don't modify the following variables, they are not input arguments
        self._pseudo = {}
        self._keys = set(self.__dict__.keys())

    # Note: Exclude dump_input, parse_arg, basis from kwargs to avoid parsing twice
    def build(self, dump_input=True, parse_arg=True,
              a=None, gs=None, ke_cutoff=None, precision=None, nimgs=None,
              ew_eta=None, ew_cut=None, pseudo=None, basis=None, h=None,
              dimension=None, rcut= None, ecp=None,
              *args, **kwargs):
        '''Setup Mole molecule and Cell and initialize some control parameters.
        Whenever you change the value of the attributes of :class:`Cell`,
        you need call this function to refresh the internal data of Cell.

        Kwargs:
            a : (3,3) ndarray
                The real-space unit cell lattice vectors. Each row represents
                a lattice vector.
            gs : (3,) ndarray of ints
                The number of *positive* G-vectors along each direction.
            pseudo : dict or str
                To define pseudopotential. If given, overwrite :attr:`Cell.pseudo`
        '''
        if h is not None: self.h = h
        if a is not None: self.a = a
        if gs is not None: self.gs = gs
        if nimgs is not None: self.nimgs = nimgs
        if ew_eta is not None: self.ew_eta = ew_eta
        if ew_cut is not None: self.ew_cut = ew_cut
        if pseudo is not None: self.pseudo = pseudo
        if basis is not None: self.basis = basis
        if dimension is not None: self.dimension = dimension
        if precision is not None: self.precision = precision
        if rcut is not None: self.rcut = rcut
        if ecp is not None: self.ecp = ecp
        if ke_cutoff is not None: self.ke_cutoff = ke_cutoff

        # Lattice vectors are mandatory input for a Cell.
        assert(self.a is not None)

        if 'unit' in kwargs:
            self.unit = kwargs['unit']
        if 'atom' in kwargs:
            self.atom = kwargs['atom']

        # Set-up pseudopotential if it exists
        # This must happen before build() because it affects
        # tot_electrons() via atom_charge()
        self.ecp, self.pseudo = classify_ecp_pseudo(self, self.ecp, self.pseudo)
        if self.pseudo is not None:
            if isinstance(self.pseudo, (str, unicode)):
                # specify global pseudo for whole molecule
                _atom = self.format_atom(self.atom, unit=self.unit)
                uniq_atoms = set([a[0] for a in _atom])
                self._pseudo = self.format_pseudo(dict([(a, str(self.pseudo))
                                                        for a in uniq_atoms]))
            else:
                self._pseudo = self.format_pseudo(self.pseudo)

        # Do regular Mole.build with usual kwargs
        _built = self._built
        mole.Mole.build(self, False, parse_arg, *args, **kwargs)

        if self.rcut is None:
            # Estimate the lattice-sum cutoff radius from the most diffuse shell.
            self.rcut = max([self.bas_rcut(ib, self.precision)
                             for ib in range(self.nbas)])

        _a = self.lattice_vectors()
        if np.linalg.det(_a) < 0 and self.dimension == 3:
            sys.stderr.write('''WARNING!
Lattice are not in right-handed coordinate system. This can cause wrong value for some integrals.
It's recommended to resort the lattice vectors to\na = %s\n\n''' % _a[[0,2,1]])

        # BUG FIX: a stray no-op statement ``ke_cutoff is None`` stood here;
        # its result was discarded, so it has been removed. If the original
        # intent was ``ke_cutoff = None``, confirm against upstream history.
        if self.gs is None:
            if self.ke_cutoff is None:
                ke_cutoff = estimate_ke_cutoff(self, self.precision)
            else:
                ke_cutoff = self.ke_cutoff
            self.gs = pbctools.cutoff_to_gs(_a, ke_cutoff)
            if self.dimension < 3:
                # prec ~ exp(-0.87278467*gs -2.99944305)*nelec
                gz = np.log(self.nelectron/self.precision)/0.8727847-3.4366358
                self.gs[self.dimension:] = int(gz)

        if self.ew_eta is None or self.ew_cut is None:
            self.ew_eta, self.ew_cut = self.get_ewald_params(self.precision, self.gs)

        if dump_input and not _built and self.verbose > logger.NOTE:
            self.dump_input()
            logger.info(self, 'lattice vectors a1 [%.9f, %.9f, %.9f]', *_a[0])
            logger.info(self, ' a2 [%.9f, %.9f, %.9f]', *_a[1])
            logger.info(self, ' a3 [%.9f, %.9f, %.9f]', *_a[2])
            logger.info(self, 'dimension = %s', self.dimension)
            logger.info(self, 'Cell volume = %g', self.vol)
            logger.info(self, 'rcut = %s (nimgs = %s)', self.rcut, self.nimgs)
            logger.info(self, 'lattice sum = %d cells', len(self.get_lattice_Ls()))
            logger.info(self, 'precision = %g', self.precision)
            logger.info(self, 'pseudo = %s', self.pseudo)
            if ke_cutoff is not None:
                logger.info(self, 'ke_cutoff = %s', ke_cutoff)
                logger.info(self, ' = gs (FFT-mesh) %s', self.gs)
            else:
                logger.info(self, 'gs (FFT-mesh) = %s', self.gs)
                Ecut = pbctools.gs_to_cutoff(self.lattice_vectors(), self.gs)
                logger.info(self, ' = ke_cutoff %s', Ecut[:self.dimension])
            logger.info(self, 'ew_eta = %g', self.ew_eta)
            logger.info(self, 'ew_cut = %s (nimgs = %s)', self.ew_cut,
                        self.get_bounding_sphere(self.ew_cut))
        return self
    kernel = build

    @property
    def h(self):
        # Deprecated column-based lattice vectors; transpose of .a
        return np.asarray(self.a).T
    @h.setter
    def h(self, x):
        sys.stderr.write('cell.h is deprecated. It is replaced by the '
                         '(row-based) lattice vectors cell.a: cell.a = cell.h.T\n')
        if isinstance(x, (str, unicode)):
            x = x.replace(';',' ').replace(',',' ').replace('\n',' ')
            self.a = np.asarray([float(z) for z in x.split()]).reshape(3,3).T
        else:
            self.a = np.asarray(x).T
    @property
    def _h(self):
        return self.lattice_vectors().T
    @property
    def vol(self):
        # Unit cell volume (Bohr^3)
        return abs(np.linalg.det(self.lattice_vectors()))
    @property
    def Gv(self):
        return self.get_Gv(self.gs)
    @lib.with_doc(format_pseudo.__doc__)
    def format_pseudo(self, pseudo_tab):
        return format_pseudo(pseudo_tab)
    @lib.with_doc(format_basis.__doc__)
    def format_basis(self, basis_tab):
        return format_basis(basis_tab)
    @property
    def nimgs(self):
        return self.get_bounding_sphere(self.rcut)
    @nimgs.setter
    def nimgs(self, x):
        # Deprecated; translate the number of images into an rcut value.
        b = self.reciprocal_vectors(norm_to=1)
        heights_inv = lib.norm(b, axis=1)
        self.rcut = max(np.asarray(x) / heights_inv)
        if self.nbas == 0:
            rcut_guess = _estimate_rcut(.05, 0, 1, 1e-8)
        else:
            rcut_guess = max([self.bas_rcut(ib, self.precision)
                              for ib in range(self.nbas)])
        if self.rcut > rcut_guess*1.5:
            msg = ('.nimgs is a deprecated attribute. It is replaced by .rcut '
                   'attribute for lattice sum cutoff radius. The given nimgs '
                   '%s is far over the estimated cutoff radius %s. ' %
                   (x, rcut_guess))
            warnings.warn(msg)

    def make_ecp_env(self, _atm, _ecp, pre_env=[]):
        '''Build the integral environment for ECP and pseudopotential.

        Raises RuntimeError when an atom has both an .ecp and a .pseudo entry.
        '''
        if _ecp and self._pseudo:
            conflicts = set(self._pseudo.keys()).intersection(set(_ecp.keys()))
            if conflicts:
                raise RuntimeError('Pseudo potential for atoms %s are defined '
                                   'in both .ecp and .pseudo.' % list(conflicts))
        _ecpbas, _env = np.zeros((0,8)), pre_env
        if _ecp:
            _atm, _ecpbas, _env = mole.make_ecp_env(self, _atm, _ecp, _env)
        if self._pseudo:
            _atm, _, _env = make_pseudo_env(self, _atm, self._pseudo, _env)
        return _atm, _ecpbas, _env

    def lattice_vectors(self):
        '''Convert the primitive lattice vectors.

        Return 3x3 array in which each row represents one direction of the
        lattice vectors (unit in Bohr)
        '''
        if isinstance(self.a, (str, unicode)):
            a = self.a.replace(';',' ').replace(',',' ').replace('\n',' ')
            a = np.asarray([float(x) for x in a.split()]).reshape(3,3)
        else:
            a = np.asarray(self.a, dtype=np.double)
        if isinstance(self.unit, (str, unicode)):
            if self.unit.startswith(('B','b','au','AU')):
                return a
            else:
                return a/param.BOHR
        else:
            # Numeric unit: interpreted as a Bohr-per-unit scale factor.
            return a/self.unit

    def reciprocal_vectors(self, norm_to=2*np.pi):
        r'''
        .. math::

            \begin{align}
            \mathbf{b_1} &= 2\pi \frac{\mathbf{a_2} \times \mathbf{a_3}}{\mathbf{a_1} \cdot (\mathbf{a_2} \times \mathbf{a_3})} \\
            \mathbf{b_2} &= 2\pi \frac{\mathbf{a_3} \times \mathbf{a_1}}{\mathbf{a_2} \cdot (\mathbf{a_3} \times \mathbf{a_1})} \\
            \mathbf{b_3} &= 2\pi \frac{\mathbf{a_1} \times \mathbf{a_2}}{\mathbf{a_3} \cdot (\mathbf{a_1} \times \mathbf{a_2})}
            \end{align}
        '''
        a = self.lattice_vectors()
        # Low-dimensional systems require the out-of-plane vectors to be
        # orthogonal to the periodic ones.
        if self.dimension == 1:
            assert(abs(np.dot(a[0], a[1])) < 1e-9 and
                   abs(np.dot(a[0], a[2])) < 1e-9 and
                   abs(np.dot(a[1], a[2])) < 1e-9)
        elif self.dimension == 2:
            assert(abs(np.dot(a[0], a[2])) < 1e-9 and
                   abs(np.dot(a[1], a[2])) < 1e-9)
        b = np.linalg.inv(a.T)
        return norm_to * b

    def get_abs_kpts(self, scaled_kpts):
        '''Get absolute k-points (in 1/Bohr), given "scaled" k-points in
        fractions of lattice vectors.

        Args:
            scaled_kpts : (nkpts, 3) ndarray of floats

        Returns:
            abs_kpts : (nkpts, 3) ndarray of floats
        '''
        return np.dot(scaled_kpts, self.reciprocal_vectors())

    def get_scaled_kpts(self, abs_kpts):
        '''Get scaled k-points, given absolute k-points in 1/Bohr.

        Args:
            abs_kpts : (nkpts, 3) ndarray of floats

        Returns:
            scaled_kpts : (nkpts, 3) ndarray of floats
        '''
        return 1./(2*np.pi)*np.dot(abs_kpts, self.lattice_vectors().T)

    make_kpts = make_kpts
    def copy(self):
        return copy(self)
    pack = pack
    @lib.with_doc(unpack.__doc__)
    def unpack(self, moldic):
        return unpack(moldic)
    def unpack_(self, moldic):
        self.__dict__.update(moldic)
        return self
    dumps = dumps
    @lib.with_doc(loads.__doc__)
    def loads(self, molstr):
        return loads(molstr)
    def loads_(self, molstr):
        self.__dict__.update(loads(molstr).__dict__)
        return self
    bas_rcut = bas_rcut
    get_lattice_Ls = pbctools.get_lattice_Ls
    get_nimgs = get_nimgs
    get_ewald_params = get_ewald_params
    get_bounding_sphere = get_bounding_sphere
    get_Gv = get_Gv
    get_Gv_weights = get_Gv_weights
    get_SI = get_SI
    ewald = ewald
    energy_nuc = ewald
    gen_uniform_grids = gen_uniform_grids

    def pbc_intor(self, intor, comp=1, hermi=0, kpts=None, kpt=None):
        '''One-electron integrals with PBC. See also Mole.intor'''
        return intor_cross(intor, self, self, comp, hermi, kpts, kpt)

    def from_ase(self, ase_atom):
        '''Update cell based on given ase atom object

        Examples:

        >>> from ase.lattice import bulk
        >>> cell.from_ase(bulk('C', 'diamond', a=LATTICE_CONST))
        '''
        from pyscf.pbc.tools import pyscf_ase
        self.a = ase_atom.cell
        self.atom = pyscf_ase.ase_atoms_to_pyscf(ase_atom)
        return self

    def to_mol(self):
        '''Return a Mole object using the same atoms and basis functions as
        the Cell object.
        '''
        mol = mole.Mole()
        cell_dic = [(key, getattr(self, key)) for key in mol.__dict__.keys()]
        mol.__dict__.update(cell_dic)
        return mol

    def has_ecp(self):
        '''Whether pseudo potential is used in the system.'''
        return self.pseudo or self._pseudo or (len(self._ecpbas) > 0)
|
import sys
sys.path.append("../..")
from sdf import *
### Define new types of Nodes
class Node(GenericNode):
    """Generic processing node with one typed input and one typed output."""

    def __init__(self, name, theType, inLength, outLength):
        super().__init__(name)
        # Input "i" consumes `inLength` samples, output "o" produces
        # `outLength` samples per execution.
        self.addInput("i", theType, inLength)
        self.addOutput("o", theType, outLength)
class Sink(GenericSink):
    """Terminal node that only consumes a typed input stream."""

    def __init__(self, name, theType, inLength):
        super().__init__(name)
        self.addInput("i", theType, inLength)

    @property
    def typeName(self):
        """Type name of this node."""
        return "Sink"
class Source(GenericSource):
    """Node that only produces a typed output stream."""

    def __init__(self, name, theType, inLength):
        super().__init__(name)
        self.addOutput("o", theType, inLength)

    @property
    def typeName(self):
        """Type name of this node."""
        return "Source"
class ProcessingNode(Node):
    """A `Node` that reports "ProcessingNode" as its type name."""
    @property
    def typeName(self):
        # Type name of this node.
        return "ProcessingNode"
### Define nodes
# All streams below carry 32-bit floats.
floatType=CType(F32)
# Source produces 5 samples per execution.
src=Source("source",floatType,5)
# "filter" consumes 7 samples and produces 5 samples per execution.
b=ProcessingNode("filter",floatType,7,5)
# Extra arguments passed to the generated node function: two literals and
# one variable forwarded from the scheduler's C interface.
b.addLiteralArg(4)
b.addLiteralArg("Test")
b.addVariableArg("someVariable")
sink=Sink("sink",floatType,5)
# Wire source -> filter -> sink.
g = Graph()
g.connect(src.o,b.i)
g.connect(b.o,sink.i)
print("Generate graphviz and code")
conf=Configuration()
conf.debugLimit=1
# Declares the C argument matching addVariableArg above.
conf.cOptionalArgs="int someVariable"
#conf.displayFIFOSizes=True
# Prefix for global FIFO buffers
#conf.prefix="sched1"
#print(g.nullVector())
# Compute the static dataflow schedule, then emit C code and a graphviz dump.
sched = g.computeSchedule()
#print(sched.schedule)
print("Schedule length = %d" % sched.scheduleLength)
print("Memory usage %d bytes" % sched.memory)
#
#conf.codeArray=True
sched.ccode("generated",conf)
with open("test.dot","w") as f:
    sched.graphviz(f)
|
import numpy as np
import torch
from torch import nn
import radam # pip install git+https://github.com/LiyuanLucasLiu/RAdam/
from run_nerf import DEBUG
import nerf
import os
import contextlib
# https://github.com/lioryariv/idr/blob/main/code/model/implicit_differentiable_renderer.py
class ImplicitNetwork(nn.Module):
    """MLP that maps a 3D coordinate to an SDF value plus a latent feature vector.

    The first output channel is the signed distance; the remaining
    `feature_vector_size` channels are features consumed by the rendering
    network. Ported from IDR (see URL above).
    """
    def __init__(
        self,
        feature_vector_size,
        d_in,
        d_out,
        dims,
        geometric_init=True,
        bias=1.0,
        skip_in=(),
        weight_norm=True,
        multires=0
    ):
        super().__init__()
        # Layer widths: input, hidden dims, then SDF value + features.
        dims = [d_in] + dims + [d_out + feature_vector_size]
        self.embed_fn = None
        if multires > 0:
            # Positional encoding replaces the raw coordinate input.
            embed_fn, input_ch = nerf.get_embedder(multires)
            self.embed_fn = embed_fn
            dims[0] = input_ch
        self.num_layers = len(dims)
        self.skip_in = skip_in
        for l in range(0, self.num_layers - 1):
            if l + 1 in self.skip_in:
                # Next layer re-concatenates the network input, so this
                # layer's output is shrunk to keep the total width.
                out_dim = dims[l + 1] - dims[0]
            else:
                out_dim = dims[l + 1]
            lin = nn.Linear(dims[l], out_dim)
            if geometric_init:
                # Geometric initialization (per IDR): biases the initial SDF
                # towards a sphere of radius `bias`.
                # NOTE(review): assumes the first 3 input dims are the raw
                # xyz coordinates — confirm when changing the embedding.
                if l == self.num_layers - 2:
                    torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)
                    torch.nn.init.constant_(lin.bias, -bias)
                elif multires > 0 and l == 0:
                    # Zero out the weights acting on the encoded part of the
                    # input; initialize only the raw-coordinate weights.
                    torch.nn.init.constant_(lin.bias, 0.0)
                    torch.nn.init.constant_(lin.weight[:, 3:], 0.0)
                    torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim))
                elif multires > 0 and l in self.skip_in:
                    torch.nn.init.constant_(lin.bias, 0.0)
                    torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))
                    # Zero the weights receiving the (encoded) skip input.
                    torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3):], 0.0)
                else:
                    torch.nn.init.constant_(lin.bias, 0.0)
                    torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))
            if weight_norm:
                lin = nn.utils.weight_norm(lin)
            setattr(self, "lin" + str(l), lin)
        # Smooth activation; beta=100 makes it close to ReLU while
        # keeping the SDF differentiable.
        self.softplus = nn.Softplus(beta=100)

    def forward(self, input):
        """Return the raw (sdf + features) prediction for `input` coordinates."""
        if self.embed_fn is not None:
            input = self.embed_fn(input)
        x = input
        for l in range(0, self.num_layers - 1):
            lin = getattr(self, "lin" + str(l))
            if l in self.skip_in:
                # Skip connection: concatenate the network input, rescaled to
                # keep the activation variance stable.
                x = torch.cat([x, input], -1) / np.sqrt(2)
            x = lin(x)
            if l < self.num_layers - 2:
                x = self.softplus(x)
        return x

    def predict_sdf(self, coord, compute_normal=False):
        """
        coord:
            `torch.Tensor`, shape = `(..., 3)`
        compute_normal:
            `bool`
            If `True`, also return SDF gradient.

        Returns `(sdf, latent_features)` or, when `compute_normal` is True,
        `(sdf, latent_features, sdf_normal)` where `sdf_normal` is the
        gradient of the SDF w.r.t. `coord`.
        """
        if compute_normal:
            # Gradients w.r.t. the input coordinates are needed even in
            # eval mode, hence the explicit enable_grad below.
            coord.requires_grad_(True)
            maybe_enable_grad = torch.enable_grad
        else:
            maybe_enable_grad = contextlib.nullcontext
        with maybe_enable_grad():
            prediction = self(coord)
            sdf, latent_features = prediction[..., :1], prediction[..., 1:]
            if compute_normal:
                # Prevent `coord.grad` from getting overwritten/accumulated
                old_coord_grad = coord.grad  # almost always `None`
                d_output = torch.ones_like(sdf)
                sdf.backward(
                    d_output, create_graph=self.training, inputs=[coord])
                sdf_normal = coord.grad
                coord.grad = old_coord_grad
                return sdf, latent_features, sdf_normal
            else:
                return sdf, latent_features
# https://github.com/lioryariv/idr/blob/44959e7aac267775e63552d8aac6c2e9f2918cca/code/model/implicit_differentiable_renderer.py#L99
class RenderingNetwork(nn.Module):
    """MLP mapping (point, normal, view direction, features) to an RGB value.

    `mode` selects which inputs are concatenated:
      - 'idr':         points + view_dirs + normals + feature_vectors
      - 'no_view_dir': points + normals + feature_vectors
      - 'no_normal':   points + view_dirs + feature_vectors
    """
    def __init__(
        self,
        feature_vector_size,
        mode,
        d_in,
        d_out,
        dims,
        weight_norm=True,
        multires_view=0
    ):
        super().__init__()
        self.mode = mode
        dims = [d_in + feature_vector_size] + dims + [d_out]
        self.embedview_fn = None
        if multires_view > 0:
            # Positional-encode the view direction; the encoding replaces the
            # raw 3-vector, hence the input-width adjustment below.
            embedview_fn, input_ch = nerf.get_embedder(multires_view)
            self.embedview_fn = embedview_fn
            dims[0] += (input_ch - 3)
        self.num_layers = len(dims)
        for l in range(0, self.num_layers - 1):
            out_dim = dims[l + 1]
            lin = nn.Linear(dims[l], out_dim)
            if weight_norm:
                lin = nn.utils.weight_norm(lin)
            setattr(self, "lin" + str(l), lin)
        self.relu = nn.ReLU()

    def forward(self, points, normals, view_dirs, feature_vectors):
        """Return per-point values in [-0.04, 1.04] (widened sigmoid output).

        Raises:
            ValueError: if `self.mode` is not a supported mode.
        """
        if self.embedview_fn is not None:
            view_dirs = self.embedview_fn(view_dirs)
        if self.mode == 'idr':
            rendering_input = torch.cat([points, view_dirs, normals, feature_vectors], dim=-1)
        elif self.mode == 'no_view_dir':
            rendering_input = torch.cat([points, normals, feature_vectors], dim=-1)
        elif self.mode == 'no_normal':
            rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1)
        else:
            # BUG FIX: an unknown mode previously fell through and raised an
            # opaque UnboundLocalError on `rendering_input`.
            raise ValueError(f"Unsupported rendering mode: {self.mode!r}")
        x = rendering_input
        for l in range(0, self.num_layers - 1):
            lin = getattr(self, "lin" + str(l))
            x = lin(x)
            if l < self.num_layers - 2:
                x = self.relu(x)
        x = x.sigmoid()
        # Widen the sigmoid output a little bit
        # to let the network yield 0.0 and 1.0 easier
        widen_factor = 0.04
        x = x * (1 + widen_factor * 2) - widen_factor
        return x
class VolSDF(nn.Module):
    """VolSDF model: SDF network `f` plus light-field (rendering) network `L`.

    Density is derived from the SDF through a scaled Laplace CDF,
    `alpha * CDF(-sdf)` with `alpha = 1/beta` (see comments below).
    """
    def __init__(self, args):
        super().__init__()
        assert args.use_viewdirs, "NYI"
        # NOTE(review): these embedders are created but not used in this
        # class's visible code paths — confirm whether they are consumed
        # elsewhere or are dead state.
        self.embedding_coords_fn, embedding_dims_coords = \
            nerf.get_embedder(args.multires, args.i_embed)
        self.embedding_viewdirs_fn, embedding_dims_viewdirs = \
            nerf.get_embedder(args.multires_views, args.i_embed)
        # Radius of the scene-bounding sphere used to clamp the SDF.
        self.r = args.r
        # TODO move to args
        feature_vector_size = 256
        self.f = ImplicitNetwork(
            feature_vector_size=feature_vector_size,
            d_in=3,
            d_out=1,
            dims=[256] * 8,
            geometric_init=True,
            bias=1.0,
            skip_in=(4,),
            weight_norm=True,
            multires=args.multires)
        self.L = RenderingNetwork(
            feature_vector_size=feature_vector_size,
            mode='idr',
            d_in=3+3+3, # coordinate, SDF normal, view direction
            d_out=3,
            dims=[256] * 4, # TODO move to args
            weight_norm=True,
            multires_view=args.multires_views)
        # Learnable Laplace scale parameter.
        self.beta = nn.Parameter(torch.tensor(1.0))

    def forward(self, coord, view_direction):
        """Return `(cat(light_field, density), sdf_normal)` at `coord`."""
        sdf, latent_features, sdf_normal = self.f.predict_sdf(coord, compute_normal=True)
        # Clamp the SDF by the distance to the bounding sphere of radius r.
        sdf = torch.minimum(sdf, self.r - coord.norm(2, dim=-1, keepdim=True)) # equation 19
        light_field = self.L(coord, sdf_normal, view_direction, latent_features)
        def laplace_cdf(x, beta):
            # CDF of a zero-mean Laplace distribution with scale `beta`,
            # computed piecewise for numerical stability.
            retval = torch.empty_like(x)
            mask = x > 0
            retval[mask] = 1.0 - 0.5 * (x[mask] / -beta).exp()
            mask = ~mask
            retval[mask] = 0.5 * (x[mask] / beta).exp()
            return retval
        # alpha and beta are Laplace CDF parameters
        alpha = self.beta.reciprocal() # hard setting from the paper
        density = alpha * laplace_cdf(-sdf, self.beta)
        return torch.cat((light_field, density), dim=-1), sdf_normal
def render_rays(ray_batch, # of length <= `args.chunk`
                network_fn,
                network_query_fn,
                N_samples,
                retraw=False,
                lindisp=False,
                perturb=0.,
                white_bkgd=False,
                raw_noise_std=0.,
                r=3.0):
    """Volumetric rendering.

    Args:
      ray_batch: array of shape [N, ...]. All information necessary
        for sampling along a ray, including: ray origin, ray direction, min
        dist, max dist, and unit-magnitude viewing direction.
      network_fn: not used, only needed for saving weights, TODO remove.
      network_query_fn: instance of `VolSDF` wrapped in `nerf.batchify()`
      N_samples: int. Number samples along each ray.
      retraw: bool. If True, include model's raw, unprocessed predictions.
      lindisp: bool. If True, sample linearly in inverse depth rather than in depth.
      perturb: float, 0 or 1. If non-zero, each ray is sampled at stratified
        random points in time.
      white_bkgd: bool. If True, assume a white background.
      raw_noise_std: random perturbation for density values (as in NeRF).
      r: radius of the scene-bounding sphere.
    Returns a dict with these keys:
      rgb_map: [num_rays, 3]. Estimated RGB color of a ray.
      disp_map: [num_rays]. Disparity map. 1 / depth.
      acc_map: [num_rays]. Accumulated opacity along each ray.
      raw: [num_rays, num_samples, 4]. Raw predictions from model (R, G, B, density).
    """
    N_rays = ray_batch.shape[0]
    rays_o, rays_d = ray_batch[..., 0:3], ray_batch[..., 3:6] # [N_rays, 3] each
    viewdirs = ray_batch[..., -3:] if ray_batch.shape[-1] > 8 else None # [N_rays, 3]
    # see "Modeling the background" in Section B.3
    M = 2 * r
    near, far = 0, M
    # Normalize ray directions. Just to be safe -- not sure if this is really
    # needed (e.g. this wasn't done in 'nerf-pytorch')
    rays_d_norm = rays_d.norm(dim=-1, keepdim=True).clamp(min=1e-4)
    rays_d = rays_d / rays_d_norm
    # TODO implement proper sampling Τ (see Algorithm 1)
    t_vals = torch.linspace(0., 1., steps=N_samples)
    if not lindisp:
        z_vals = near * (1.-t_vals) + far * (t_vals)
    else:
        # NOTE(review): with near == 0 this divides by zero — confirm that
        # lindisp is never used with a zero near plane.
        z_vals = 1./(1./near * (1.-t_vals) + 1./far * (t_vals))
    z_vals = z_vals.expand([N_rays, N_samples])
    if perturb > 0.:
        # get intervals between samples
        mids = .5 * (z_vals[...,1:] + z_vals[...,:-1])
        upper = torch.cat([mids, z_vals[...,-1:]], -1)
        lower = torch.cat([z_vals[...,:1], mids], -1)
        # stratified samples in those intervals
        t_rand = torch.rand(z_vals.shape)
        z_vals = lower + (upper - lower) * t_rand
    pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples, 3]
    # TODO expand only after applying positional encoding to avoid extra computations
    viewdirs = viewdirs[:, None].expand(pts.shape)
    # Predict light field and density at points sampled on rays
    rgb_and_density, _ = network_query_fn(pts, viewdirs)
    # Generate points uniformly in a ball
    # https://stats.stackexchange.com/a/30622
    points_for_eikonal_loss = torch.randn((N_rays, 3), device=pts.device)
    points_for_eikonal_loss /= points_for_eikonal_loss.norm(2, dim=-1, keepdim=True).clip(1e-5)
    # BUG FIX: allocate the radius samples on the same device as `pts`
    # (matching the randn above); previously this tensor was created on the
    # default device and crashed when `pts` lived on the GPU.
    points_for_eikonal_loss *= r * (torch.rand(N_rays, 1, device=pts.device) ** (1/3))
    _, _, sdf_normal_for_eikonal_loss = network_fn.f.predict_sdf(
        points_for_eikonal_loss, compute_normal=True)
    # rgb_and_density: [N_rays, N_samples, 4]
    # sdf_normal: [N_rays, N_samples, 3]
    # Convert predictions to pixel values with numerical integration
    rgb_map, disp_map, acc_map, weights, depth_map = nerf.raw2outputs(
        rgb_and_density, z_vals, rays_d,
        rgb_activation_fn=lambda x: x, density_activation_fn=lambda x: x,
        raw_noise_std=raw_noise_std, white_bkgd=white_bkgd)
    ret = {'rgb_map' : rgb_map, 'disp_map' : disp_map, 'acc_map' : acc_map}
    if retraw:
        ret['raw'] = rgb_and_density
    ret['sdf_normal'] = sdf_normal_for_eikonal_loss
    for k in ret:
        if DEBUG and (torch.isnan(ret[k]).any() or torch.isinf(ret[k]).any()):
            print(f"! [Numerical Error] {k} contains nan or inf.")
    return ret
def create_model(args):
    """Build the VolSDF model, its optimizer, and render kwargs.

    Optionally reloads the newest checkpoint from `<basedir>/<expname>`
    (or `args.ft_path`) unless `args.no_reload` is set.

    Returns:
        (render_kwargs_train, render_kwargs_test, start_iteration, optimizer)

    Raises:
        ValueError: if `args.optimizer` is not 'adam' or 'radam'.
    """
    model = VolSDF(args)
    # Create optimizer
    grad_vars = model.parameters()
    if args.optimizer == 'adam':
        optimizer = torch.optim.Adam(params=grad_vars, lr=args.lrate, betas=(0.9, 0.999))
    elif args.optimizer == 'radam':
        optimizer = radam.RAdam(params=grad_vars, lr=args.lrate)
    else:
        # BUG FIX: fail fast instead of hitting a NameError on `optimizer`
        # further below when an unsupported optimizer name is given.
        raise ValueError(f"Unknown optimizer: {args.optimizer!r}")
    start = args.start_iter
    # Load checkpoints
    basedir = args.basedir
    expname = args.expname
    if args.ft_path is not None and args.ft_path!='None':
        ckpts = [args.ft_path]
    else:
        experiment_dir = os.path.join(basedir, expname)
        ckpts = [os.path.join(experiment_dir, f) for f in sorted(os.listdir(experiment_dir)) if 'tar' in f]
    print('Found ckpts', ckpts)
    if len(ckpts) > 0 and not args.no_reload:
        ckpt_path = ckpts[-1]
        print('Reloading from', ckpt_path)
        ckpt = torch.load(ckpt_path, map_location=args.device)
        def remove_prefix(state_dict, prefix):
            # Strip a DataParallel-style "module." prefix when every key has
            # it; otherwise require that no key has it.
            if all(k.startswith(prefix) for k in state_dict):
                return {k[len(prefix):] : v for k, v in state_dict.items()}
            else:
                assert all(not k.startswith(prefix) for k in state_dict)
                return state_dict
        for k in 'network_fn_state_dict',:
            ckpt[k] = remove_prefix(ckpt[k], 'module.')
        start = ckpt['global_step'] + 1
        optimizer.load_state_dict(ckpt['optimizer_state_dict'])
        # Load model
        model.load_state_dict(ckpt['network_fn_state_dict'])
    ##########################
    render_kwargs_train = {
        'network_query_fn': nerf.batchify(model, args.netchunk),
        'perturb' : args.perturb,
        'N_samples' : args.N_samples,
        'network_fn' : model,
        'use_viewdirs' : args.use_viewdirs,
        'white_bkgd' : args.white_bkgd,
        'raw_noise_std' : args.raw_noise_std,
        'render_rays_fn': render_rays,
        'r': args.r,
    }
    # NDC only good for LLFF-style forward facing data
    if args.dataset_type != 'llff' or args.no_ndc:
        print('Not ndc!')
        render_kwargs_train['ndc'] = False
    render_kwargs_train['lindisp'] = args.lindisp
    render_kwargs_test = {k : render_kwargs_train[k] for k in render_kwargs_train}
    # render_kwargs_test['raw_noise_std'] = 0.
    return render_kwargs_train, render_kwargs_test, start, optimizer
|
# -*- coding: utf-8 -*-
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
"""DomainFlavour package contains DomainFlavour class representing domain_flavour table in db."""
from typing import Any
from sqlalchemy import Column, DateTime, Integer, String, event
from sqlalchemy.orm import relationship, session, sessionmaker
from sqlalchemy.sql import func
from neural_compressor.ux.components.db_manager.db_manager import Base, DBManager
from neural_compressor.ux.utils.consts import DomainFlavours
from neural_compressor.ux.utils.logger import log
# Module import side effects: log the table registration and create a session
# factory bound to the application's database engine.
log.debug("Initializing DomainFlavour table")
db_manager = DBManager()
Session = sessionmaker(bind=db_manager.engine)
class DomainFlavour(Base):
    """INC Bench DomainFlavour table class."""

    __tablename__ = "domain_flavour"

    id = Column(Integer, primary_key=True, index=True)
    name = Column(String(50), unique=True)
    created_at = Column(DateTime, nullable=False, server_default=func.now())
    modified_at = Column(DateTime, nullable=True, onupdate=func.now())
    models: Any = relationship("Model", back_populates="domain_flavour")

    @staticmethod
    def get_domain_flavour_id(db_session: session.Session, domain_flavour_name: str) -> int:
        """Get domain_flavour id from name.

        Raises:
            Exception: when no row, or more than one row, matches the name.
        """
        domain_flavour_ids = db_session.query(DomainFlavour.id).filter(
            DomainFlavour.name == domain_flavour_name,
        )
        matches = domain_flavour_ids.count()
        if matches != 1:
            # BUG FIX: the previous message claimed the name was "not unique"
            # even when it was simply missing (zero matches).
            raise Exception(
                f"Expected exactly one domain flavour named "
                f"'{domain_flavour_name}', found {matches}.",
            )
        return domain_flavour_ids[0].id

    @staticmethod
    def list(db_session: session.Session) -> dict:
        """List available domain flavours."""
        domain_flavours = []
        domain_flavour_instances = db_session.query(
            DomainFlavour.id,
            DomainFlavour.name,
        ).order_by(DomainFlavour.id)
        for domain_flavour in domain_flavour_instances:
            domain_flavours.append(
                {
                    "id": domain_flavour.id,
                    "name": domain_flavour.name,
                },
            )
        return {"domain_flavours": domain_flavours}
@event.listens_for(DomainFlavour.__table__, "after_create")
def fill_dictionary(*args: list, **kwargs: dict) -> None:
    """Fill dictionary with default values.

    Runs right after the table is created and seeds it with one row per
    known flavour in the `DomainFlavours` enum.
    """
    with Session.begin() as db_session:
        for domain_flavour in DomainFlavours:
            db_session.add(DomainFlavour(name=domain_flavour.value))
        # NOTE(review): `Session.begin()` commits on context exit, so this
        # explicit commit looks redundant — confirm against the SQLAlchemy
        # version in use before removing it.
        db_session.commit()
|
"""
This module handles interactions with the OpenWeather api
"""
import os
from typing import Any, Dict

import requests

from server.utils.logger import log_exception
OPEN_WEATHER_API_URL = "https://api.openweathermap.org/data/2.5"
def fetch_weather(lat: float, long: float) -> Dict[str, Any]:
    """
    Fetches the current weather from the OpenWeather api.

    :param lat: The latitude of user location
    :param long: The longitude of user location
    :returns: A dictionary of information of the current weather,
        as described in the OpenWeather api doc; an empty dict when the
        request fails
    """
    # the request parameters required for the api call
    req_params = {
        "lat": lat,
        "lon": long,
        "appid": os.environ["OPEN_WEATHER_API_KEY"],
        # metric is the superior unit of measurement
        "units": "metric",
    }
    try:
        # BUG FIX: a timeout keeps a stalled OpenWeather endpoint from
        # hanging the caller indefinitely.
        response = requests.get(
            f"{OPEN_WEATHER_API_URL}/weather", params=req_params, timeout=10
        )
        return response.json()
    except requests.RequestException as req_err:
        # BUG FIX: RequestException covers ConnectionError, Timeout, and the
        # other transport failures, preserving the best-effort contract of
        # logging and returning an empty dict (previously only
        # ConnectionError was caught).
        log_exception("fetch_weather", req_err)
        return {}
|
from rest_framework import permissions
from instructeurs.models import Administration
class AdministrationPermission(permissions.BasePermission):
    """Restrict access to Administration objects.

    Object-level access requires the target administration to be among the
    requesting user's administrations; view-level access maps the HTTP
    method to the corresponding Django model permission.
    """

    # One model permission per supported HTTP method.
    _PERMS_BY_METHOD = {
        "GET": "administration.view_administration",
        "PUT": "administration.change_administration",
        "POST": "administration.add_administration",
        "DELETE": "administration.delete_administration",
    }

    def has_object_permission(self, request, view, obj):
        # The lookup either resolves within the user's administrations or
        # raises DoesNotExist, which we translate into a denial.
        try:
            request.user.administrations().get(uuid=obj.uuid)
        except Administration.DoesNotExist:
            return False
        else:
            return True

    def has_permission(self, request, view):
        perm = self._PERMS_BY_METHOD.get(request.method)
        # Unsupported methods are denied outright.
        return request.user.has_perm(perm) if perm is not None else False
|
import torch
from deep_privacy import logger
import pathlib
def _get_map_location():
    """Return "cpu" (with a warning) when CUDA is unavailable, else None."""
    if torch.cuda.is_available():
        # None lets torch.load keep tensors on their saved device.
        return None
    logger.warn(
        "Cuda is not available. Forcing map checkpoint to be loaded into CPU.")
    return "cpu"
def load_checkpoint_from_url(model_url: str):
    """Download and return a checkpoint state dict, or None when no URL is given."""
    if model_url is None:
        return None
    state = torch.hub.load_state_dict_from_url(
        model_url, map_location=_get_map_location())
    return state
def load_checkpoint(ckpt_dir_or_file: pathlib.Path) -> dict:
    """Load a torch checkpoint from a file, or from a directory's
    `latest_checkpoint` pointer file.

    Raises FileNotFoundError when the resolved path is not a file.
    """
    ckpt_path = ckpt_dir_or_file
    if ckpt_dir_or_file.is_dir():
        # The first line of `latest_checkpoint` names the newest file.
        with open(ckpt_dir_or_file.joinpath('latest_checkpoint')) as f:
            ckpt_path = ckpt_dir_or_file.joinpath(f.readline().strip())
    if not ckpt_path.is_file():
        raise FileNotFoundError(f"Did not find path: {ckpt_path}")
    ckpt = torch.load(ckpt_path, map_location=_get_map_location())
    logger.info(f"Loaded checkpoint from {ckpt_path}")
    return ckpt
def _get_checkpoint_path(
output_dir: str, validation_checkpoint_step: int = None):
if validation_checkpoint_step is None:
return pathlib.Path(output_dir, "checkpoints")
step = validation_checkpoint_step * 10**6
path = pathlib.Path(
output_dir, "validation_checkpoints", f"step_{step}.ckpt")
return path
def get_checkpoint(
        output_dir: str, validation_checkpoint_step: int = None):
    """Resolve the checkpoint location under *output_dir* and load it."""
    return load_checkpoint(
        _get_checkpoint_path(output_dir, validation_checkpoint_step))
def get_previous_checkpoints(directory: pathlib.Path) -> list:
    """Return the checkpoint file names recorded in `latest_checkpoint`.

    Accepts either the checkpoint directory or any file inside it.
    """
    if directory.is_file():
        directory = directory.parent
    list_path = directory.joinpath("latest_checkpoint")
    # Create the bookkeeping file on first use so reading never fails.
    list_path.touch(exist_ok=True)
    with open(list_path) as fp:
        names = [line.strip() for line in fp.readlines()]
    return names
def get_checkpoint_step(output_dir: str, validation_checkpoint_step: int):
    """Return the global step of the checkpoint that would be loaded.

    If *validation_checkpoint_step* is given it is returned unchanged;
    otherwise the step is parsed from the newest file name recorded in the
    training checkpoint directory (IndexError when none is recorded).
    """
    if validation_checkpoint_step is not None:
        return validation_checkpoint_step
    directory = _get_checkpoint_path(output_dir)
    # First entry of `latest_checkpoint` is the most recently saved file.
    # BUG FIX: removed a stray debug print of the checkpoint path.
    ckpt_path = pathlib.Path(get_previous_checkpoints(directory)[0])
    step = ckpt_path.stem.replace("step_", "")
    step = step.replace(".ckpt", "")
    return int(step)
class Checkpointer:
    """Saves rolling training checkpoints under `<output_dir>/checkpoints`."""

    def __init__(self, output_dir: str):
        self.checkpoint_dir = pathlib.Path(
            output_dir, "checkpoints")
        self.checkpoint_dir.mkdir(exist_ok=True, parents=True)

    def save_checkpoint(
            self,
            state_dict: dict,
            filepath: pathlib.Path = None,
            max_keep=2):
        """Write *state_dict* to *filepath* and prune old checkpoints.

        Keeps at most *max_keep* file names in `latest_checkpoint`
        (newest first) and deletes files that fall off the list.
        """
        if filepath is None:
            # NOTE(review): `self.trainer` is never assigned in this class,
            # so this default-path branch raises AttributeError unless a
            # trainer is attached externally — confirm intended usage.
            global_step = self.trainer.global_step
            filename = f"step_{global_step}.ckpt"
            filepath = self.checkpoint_dir.joinpath(filename)
        list_path = filepath.parent.joinpath("latest_checkpoint")
        torch.save(state_dict, filepath)
        # get_previous_checkpoints accepts a file path and uses its parent.
        previous_checkpoints = get_previous_checkpoints(filepath)
        if filepath.name not in previous_checkpoints:
            # Prepend so the newest checkpoint is always first.
            previous_checkpoints = [filepath.name] + previous_checkpoints
        if len(previous_checkpoints) > max_keep:
            # Delete files that fall outside the retention window.
            for ckpt in previous_checkpoints[max_keep:]:
                path = self.checkpoint_dir.joinpath(ckpt)
                if path.exists():
                    logger.info(f"Removing old checkpoint: {path}")
                    path.unlink()
        previous_checkpoints = previous_checkpoints[:max_keep]
        with open(list_path, 'w') as fp:
            fp.write("\n".join(previous_checkpoints))
        logger.info(f"Saved checkpoint to: {filepath}")

    def checkpoint_exists(self) -> bool:
        """True when at least one .ckpt file exists in the checkpoint dir."""
        num_checkpoints = len(list(self.checkpoint_dir.glob("*.ckpt")))
        return num_checkpoints > 0

    def load_checkpoint(self) -> dict:
        """Load the checkpoint referenced by `latest_checkpoint`."""
        checkpoint = load_checkpoint(self.checkpoint_dir)
        return checkpoint
|
from .types.CMakeVariable import CMakeVariable
from .types.VariableCollection import VariableCollection
class CMakeLangs(VariableCollection):
    """CMake language-related variables.

    Fixed-name variables are exposed as class attributes; variables whose
    names embed a language token (e.g. ``C``, ``CXX``, ``Fortran``) are
    built on demand by the static factory methods below, each of which
    returns a ``CMakeVariable``.
    """

    CMAKE_C_COMPILE_FEATURES = ()
    CMAKE_C_EXTENSIONS = ()
    CMAKE_C_STANDARD = ()
    CMAKE_C_STANDARD_REQUIRED = ()
    CMAKE_CXX_COMPILE_FEATURES = ()
    CMAKE_CXX_EXTENSIONS = ()
    CMAKE_CXX_STANDARD = ()
    CMAKE_CXX_STANDARD_REQUIRED = ()
    CMAKE_Fortran_MODDIR_DEFAULT = ()
    CMAKE_Fortran_MODDIR_FLAG = ()
    CMAKE_Fortran_MODOUT_FLAG = ()
    CMAKE_INTERNAL_PLATFORM_ABI = ()

    @staticmethod
    def CMAKE_COMPILER_IS_GNU(lang: str):
        # The language token is appended directly, e.g. CMAKE_COMPILER_IS_GNUCC.
        return CMakeVariable(f"CMAKE_COMPILER_IS_GNU{lang}", lang)

    @staticmethod
    def CMAKE_LANG_ARCHIVE_APPEND(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_ARCHIVE_APPEND", lang)

    @staticmethod
    def CMAKE_LANG_ARCHIVE_CREATE(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_ARCHIVE_CREATE", lang)

    @staticmethod
    def CMAKE_LANG_ARCHIVE_FINISH(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_ARCHIVE_FINISH", lang)

    @staticmethod
    def CMAKE_LANG_COMPILE_OBJECT(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_COMPILE_OBJECT", lang)

    @staticmethod
    def CMAKE_LANG_COMPILER_ABI(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_COMPILER_ABI", lang)

    @staticmethod
    def CMAKE_LANG_COMPILER_ID(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_COMPILER_ID", lang)

    @staticmethod
    def CMAKE_LANG_COMPILER_LOADED(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_COMPILER_LOADED", lang)

    @staticmethod
    def CMAKE_LANG_COMPILER(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_COMPILER", lang)

    @staticmethod
    def CMAKE_LANG_COMPILER_EXTERNAL_TOOLCHAIN(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_COMPILER_EXTERNAL_TOOLCHAIN", lang)

    @staticmethod
    def CMAKE_LANG_COMPILER_TARGET(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_COMPILER_TARGET", lang)

    @staticmethod
    def CMAKE_LANG_COMPILER_VERSION(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_COMPILER_VERSION", lang)

    @staticmethod
    def CMAKE_LANG_CREATE_SHARED_LIBRARY(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_CREATE_SHARED_LIBRARY", lang)

    @staticmethod
    def CMAKE_LANG_CREATE_SHARED_MODULE(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_CREATE_SHARED_MODULE", lang)

    @staticmethod
    def CMAKE_LANG_CREATE_STATIC_LIBRARY(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_CREATE_STATIC_LIBRARY", lang)

    @staticmethod
    def CMAKE_LANG_FLAGS_DEBUG(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_FLAGS_DEBUG", lang)

    @staticmethod
    def CMAKE_LANG_FLAGS_MINSIZEREL(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_FLAGS_MINSIZEREL", lang)

    @staticmethod
    def CMAKE_LANG_FLAGS_RELEASE(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_FLAGS_RELEASE", lang)

    @staticmethod
    def CMAKE_LANG_FLAGS_RELWITHDEBINFO(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_FLAGS_RELWITHDEBINFO", lang)

    @staticmethod
    def CMAKE_LANG_FLAGS(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_FLAGS", lang)

    @staticmethod
    def CMAKE_LANG_GHS_KERNEL_FLAGS_DEBUG(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_GHS_KERNEL_FLAGS_DEBUG", lang)

    @staticmethod
    def CMAKE_LANG_GHS_KERNEL_FLAGS_MINSIZEREL(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_GHS_KERNEL_FLAGS_MINSIZEREL", lang)

    @staticmethod
    def CMAKE_LANG_GHS_KERNEL_FLAGS_RELEASE(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_GHS_KERNEL_FLAGS_RELEASE", lang)

    @staticmethod
    def CMAKE_LANG_GHS_KERNEL_FLAGS_RELWITHDEBINFO(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_GHS_KERNEL_FLAGS_RELWITHDEBINFO", lang)

    @staticmethod
    def CMAKE_LANG_IGNORE_EXTENSIONS(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_IGNORE_EXTENSIONS", lang)

    @staticmethod
    def CMAKE_LANG_IMPLICIT_INCLUDE_DIRECTORIES(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_IMPLICIT_INCLUDE_DIRECTORIES", lang)

    @staticmethod
    def CMAKE_LANG_IMPLICIT_LINK_DIRECTORIES(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_IMPLICIT_LINK_DIRECTORIES", lang)

    @staticmethod
    def CMAKE_LANG_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES", lang)

    @staticmethod
    def CMAKE_LANG_IMPLICIT_LINK_LIBRARIES(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_IMPLICIT_LINK_LIBRARIES", lang)

    @staticmethod
    def CMAKE_LANG_LIBRARY_ARCHITECTURE(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_LIBRARY_ARCHITECTURE", lang)

    @staticmethod
    def CMAKE_LANG_LINKER_PREFERENCE_PROPAGATES(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_LINKER_PREFERENCE_PROPAGATES", lang)

    @staticmethod
    def CMAKE_LANG_LINKER_PREFERENCE(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_LINKER_PREFERENCE", lang)

    @staticmethod
    def CMAKE_LANG_LINK_EXECUTABLE(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_LINK_EXECUTABLE", lang)

    @staticmethod
    def CMAKE_LANG_OUTPUT_EXTENSION(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_OUTPUT_EXTENSION", lang)

    @staticmethod
    def CMAKE_LANG_PLATFORM_ID(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_PLATFORM_ID", lang)

    @staticmethod
    def CMAKE_LANG_SIMULATE_ID(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_SIMULATE_ID", lang)

    @staticmethod
    def CMAKE_LANG_SIMULATE_VERSION(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_SIMULATE_VERSION", lang)

    @staticmethod
    def CMAKE_LANG_SIZEOF_DATA_PTR(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_SIZEOF_DATA_PTR", lang)

    @staticmethod
    def CMAKE_LANG_SOURCE_FILE_EXTENSIONS(lang: str):
        return CMakeVariable(f"CMAKE_{lang}_SOURCE_FILE_EXTENSIONS", lang)

    @staticmethod
    def CMAKE_USER_MAKE_RULES_OVERRIDE_LANG(lang: str):
        # The language token is a suffix here, not an infix.
        return CMakeVariable(f"CMAKE_USER_MAKE_RULES_OVERRIDE_{lang}", lang)
|
#!/usr/bin/python3
"""
Context Manager
---------------
Redo a transaction handler task in such a way that there is no necessity to wrap
each action requiring a transaction into a separate function.
E.g. in the following code snippet there are 3 actions wrapped into a
transaction, including nested transactions:
.. code-block:: Python
    def my_func(a, b, c):
        with transaction('root'):
            print(a)
            with transaction('nested successful'):
                print(b)
            with transaction('nested with error'):
                print(c)
                raise Exception
.. hint::
    Use the `contextlib.contextmanager` decorator to implement this task.
"""
import contextlib

# Module-level counter giving each transaction a unique, increasing id.
transaction_count = 0


@contextlib.contextmanager
def transaction(name):
    """Context manager that logs the lifecycle of a named transaction.

    Each entered transaction is assigned a unique sequential id.  On a
    normal exit the transaction is reported as ended; if the body raises,
    it is reported as cancelled and the exception is deliberately
    swallowed so that outer transactions continue (best-effort semantics
    required by the exercise).

    Parameters:
        name: human-readable label for the transaction.
    """
    global transaction_count
    transaction_count += 1
    local_transaction = transaction_count
    try:
        print("Transaction {} for {} started".format(local_transaction, name))
        yield
        print("Transaction {} for {} ended".format(local_transaction, name))
    except Exception:
        # Fix: was a bare `except:`, which would also swallow SystemExit,
        # KeyboardInterrupt and GeneratorExit; narrow to Exception so only
        # ordinary errors cancel the transaction.
        print("Transaction {} for {} cancelled\n".format(local_transaction, name))
def my_func(a, b, c):
    """Demonstrate nested transactions, mirroring the module docstring example.

    The root and the first nested transaction complete normally; the second
    nested transaction raises, which the `transaction` context manager
    reports as cancelled (the exception is swallowed there, so execution
    continues past it).
    """
    with transaction('root'):
        print(a)
        with transaction('nested successful'):
            print(b)
        with transaction('nested with error'):
            print(c)
            # Raised deliberately to trigger the "cancelled" branch.
            raise Exception


if __name__ == '__main__':
    # Example run: prints 1, 2, 3 interleaved with transaction messages.
    my_func(1, 2, 3)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.