content stringlengths 5 1.05M |
|---|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import imp
import os
import re
import functools
import sys
import socket
import time
import types
import unittest
import weakref
import warnings
from errors import *
from marionette import HTMLElement, Marionette
class SkipTest(Exception):
    """Raised inside a test body to mark that test as skipped.

    Prefer TestResult.skip() or one of the skipping decorators over
    raising this exception directly.
    """
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
super(_ExpectedFailure, self).__init__()
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
pass
def skip(reason):
    """
    Unconditionally skip a test.

    Decorator factory: a decorated function is replaced by a wrapper that
    raises SkipTest(reason); a decorated class is only flagged, so the
    runner skips it without instantiating it.
    """
    def decorator(test_item):
        # types.ClassType covers Python 2 old-style classes; ``type`` covers
        # new-style classes. Functions fall through to the wrapping branch.
        if not isinstance(test_item, (type, types.ClassType)):
            @functools.wraps(test_item)
            def skip_wrapper(*args, **kwargs):
                raise SkipTest(reason)
            test_item = skip_wrapper
        # unittest's runner inspects these attributes to report the skip.
        test_item.__unittest_skip__ = True
        test_item.__unittest_skip_why__ = reason
        return test_item
    return decorator
def expectedFailure(func):
    """Decorator marking *func* as expected to fail.

    When the wrapped call raises, the exception is converted into
    _ExpectedFailure carrying the original exc_info; when it passes,
    _UnexpectedSuccess is raised instead.
    """
    @functools.wraps(func)
    def expecting_failure(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception:
            raise _ExpectedFailure(sys.exc_info())
        raise _UnexpectedSuccess
    return expecting_failure
def skip_if_b2g(target):
    """Decorator: run *target* only when the marionette is not a B2G one.

    On a B2G marionette (truthy ``b2g`` attribute), the test body is not
    executed and 'skipping ... ' is written to stderr instead.
    """
    def wrapper(self, *args, **kwargs):
        b2g = getattr(self.marionette, 'b2g', None)
        if b2g:
            sys.stderr.write('skipping ... ')
        else:
            return target(self, *args, **kwargs)
    return wrapper
class CommonTestCase(unittest.TestCase):
    """Shared base class for Marionette-driven test cases.

    Provides the unittest ``run`` plumbing (skip / expected-failure
    handling), Marionette session setup and teardown, and helpers for
    matching test files to test-case classes.
    """

    # Subclasses set this to a compiled regex matching the filenames they
    # handle (see match()).
    match_re = None
    failureException = AssertionError

    def __init__(self, methodName):
        unittest.TestCase.__init__(self, methodName)
        self.loglines = None  # log lines harvested from the session at cleanup
        self.duration = 0     # wall-clock seconds of the last run

    def _addSkip(self, result, reason):
        # Prefer result.addSkip; fall back to addSuccess for TestResult
        # implementations that predate skip support.
        addSkip = getattr(result, 'addSkip', None)
        if addSkip is not None:
            addSkip(self, reason)
        else:
            warnings.warn("TestResult has no addSkip method, skips not reported",
                          RuntimeWarning, 2)
            result.addSuccess(self)

    def run(self, result=None):
        """Run the test, reporting to *result*.

        Mirrors unittest.TestCase.run but adds handling for SkipTest,
        _ExpectedFailure and _UnexpectedSuccess, and always invokes
        cleanTest() after tearDown().

        Fix vs. the original: the duplicated (and therefore unreachable)
        second ``except self.failureException`` clause has been removed.
        """
        orig_result = result
        if result is None:
            result = self.defaultTestResult()
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()

        result.startTest(self)

        testMethod = getattr(self, self._testMethodName)
        if (getattr(self.__class__, "__unittest_skip__", False) or
                getattr(testMethod, "__unittest_skip__", False)):
            # The class or method was marked skipped; report and bail out.
            try:
                skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
                            or getattr(testMethod, '__unittest_skip_why__', ''))
                self._addSkip(result, skip_why)
            finally:
                result.stopTest(self)
            return
        try:
            success = False
            try:
                self.setUp()
            except SkipTest as e:
                self._addSkip(result, str(e))
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, sys.exc_info())
            else:
                try:
                    testMethod()
                except self.failureException:
                    result.addFailure(self, sys.exc_info())
                except KeyboardInterrupt:
                    raise
                except _ExpectedFailure as e:
                    addExpectedFailure = getattr(result, 'addExpectedFailure', None)
                    if addExpectedFailure is not None:
                        addExpectedFailure(self, e.exc_info)
                    else:
                        warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
                                      RuntimeWarning)
                        result.addSuccess(self)
                except _UnexpectedSuccess:
                    addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
                    if addUnexpectedSuccess is not None:
                        addUnexpectedSuccess(self)
                    else:
                        warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failures",
                                      RuntimeWarning)
                        result.addFailure(self, sys.exc_info())
                except SkipTest as e:
                    self._addSkip(result, str(e))
                except:
                    result.addError(self, sys.exc_info())
                else:
                    success = True

                try:
                    self.tearDown()
                except KeyboardInterrupt:
                    raise
                except:
                    result.addError(self, sys.exc_info())
                    success = False

            # Here we could handle doCleanups() instead of calling cleanTest directly
            self.cleanTest()

            if success:
                result.addSuccess(self)

        finally:
            result.stopTest(self)
            if orig_result is None:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()

    @classmethod
    def match(cls, filename):
        """
        Determines if the specified filename should be handled by this
        test class; this is done by looking for a match for the filename
        using cls.match_re.
        """
        if not cls.match_re:
            return False
        m = cls.match_re.match(filename)
        return m is not None

    @classmethod
    def add_tests_to_suite(cls, mod_name, filepath, suite, testloader, marionette, testvars):
        """
        Adds all the tests in the specified file to the specified suite.
        """
        raise NotImplementedError

    @property
    def test_name(self):
        # JS test cases report the bare .js filename; Python ones a
        # module/class/method triple.
        if hasattr(self, 'jsFile'):
            return os.path.basename(self.jsFile)
        else:
            return '%s.py %s.%s' % (self.__class__.__module__,
                                    self.__class__.__name__,
                                    self._testMethodName)

    def set_up_test_page(self, emulator, url="test.html", permissions=None):
        """Navigate the emulator's content to *url* and optionally grant
        the given permissions to that URL's origin."""
        emulator.set_context("content")
        url = emulator.absolute_url(url)
        emulator.navigate(url)

        if not permissions:
            return

        emulator.set_context("chrome")
        # NOTE(review): the forEach body always grants the "sms" permission
        # regardless of the iterated ``perm`` value — confirm whether each
        # entry of ``permissions`` should be granted instead.
        emulator.execute_script("""
Components.utils.import("resource://gre/modules/Services.jsm");
let [url, permissions] = arguments;
let uri = Services.io.newURI(url, null, null);
permissions.forEach(function (perm) {
Services.perms.add(uri, "sms", Components.interfaces.nsIPermissionManager.ALLOW_ACTION);
});
""", [url, permissions])
        emulator.set_context("content")

    def setUp(self):
        # Convert the marionette weakref to an object, just for the
        # duration of the test; this is deleted in _deleteSession() to
        # prevent a persistent circular reference which in turn would
        # prevent proper garbage collection.
        self.start_time = time.time()
        self.marionette = self._marionette_weakref()
        if self.marionette.session is None:
            self.marionette.start_session()
        if self.marionette.timeout is not None:
            self.marionette.timeouts(self.marionette.TIMEOUT_SEARCH, self.marionette.timeout)
            self.marionette.timeouts(self.marionette.TIMEOUT_SCRIPT, self.marionette.timeout)
            self.marionette.timeouts(self.marionette.TIMEOUT_PAGE, self.marionette.timeout)
        else:
            # Default page-load timeout: 30 seconds.
            self.marionette.timeouts(self.marionette.TIMEOUT_PAGE, 30000)

    def tearDown(self):
        pass  # bug 874599

    def cleanTest(self):
        self._deleteSession()

    def _deleteSession(self):
        """Record duration, harvest logs, and tear the session down.

        Best-effort: a crashed Gecko (socket.error / MarionetteException)
        must not mask the test outcome.
        """
        if hasattr(self, 'start_time'):
            self.duration = time.time() - self.start_time
        if hasattr(self.marionette, 'session'):
            if self.marionette.session is not None:
                try:
                    self.loglines = self.marionette.get_logs()
                except Exception as inst:  # fixed: Py2-only "except E, v" syntax
                    self.loglines = [['Error getting log: %s' % inst]]
                try:
                    self.marionette.delete_session()
                except (socket.error, MarionetteException):
                    # Gecko has crashed?
                    self.marionette.session = None
                    try:
                        self.marionette.client.close()
                    except socket.error:
                        pass
        # Drop the strong reference taken in setUp().
        self.marionette = None
class MarionetteTestCase(CommonTestCase):
    """Python Marionette test case: loads ``test_*.py`` files and runs the
    unittest.TestCase subclasses found in them against a shared Marionette
    session."""

    # Filenames handled by this class (see CommonTestCase.match()).
    match_re = re.compile(r"test_(.*)\.py$")

    def __init__(self, marionette_weakref, methodName='runTest',
                 filepath='', **kwargs):
        # The Marionette instance is held only as a weakref; it is resolved
        # into a strong reference for the duration of each test in setUp().
        self._marionette_weakref = marionette_weakref
        self.marionette = None
        self.extra_emulator_index = -1
        self.methodName = methodName
        self.filepath = filepath
        # Per-run test variables injected by the runner (may be None).
        self.testvars = kwargs.pop('testvars', None)
        CommonTestCase.__init__(self, methodName, **kwargs)

    @classmethod
    def add_tests_to_suite(cls, mod_name, filepath, suite, testloader, marionette, testvars, **kwargs):
        """Load *filepath* as a module and add every test method of every
        unittest.TestCase subclass found in it to *suite*."""
        test_mod = imp.load_source(mod_name, filepath)

        for name in dir(test_mod):
            obj = getattr(test_mod, name)
            # types.ClassType covers Python 2 old-style classes.
            if (isinstance(obj, (type, types.ClassType)) and
                issubclass(obj, unittest.TestCase)):
                testnames = testloader.getTestCaseNames(obj)
                for testname in testnames:
                    suite.addTest(obj(weakref.ref(marionette),
                                  methodName=testname,
                                  filepath=filepath,
                                  testvars=testvars,
                                  **kwargs))

    def setUp(self):
        # Session setup happens in the base class; then announce the test.
        CommonTestCase.setUp(self)
        self.marionette.test_name = self.test_name
        # Backslashes are doubled so Windows paths survive the JS string.
        self.marionette.execute_script("log('TEST-START: %s:%s')" %
                                       (self.filepath.replace('\\', '\\\\'), self.methodName))

    def tearDown(self):
        self.marionette.set_context("content")
        self.marionette.execute_script("log('TEST-END: %s:%s')" %
                                       (self.filepath.replace('\\', '\\\\'), self.methodName))
        self.marionette.test_name = None
        CommonTestCase.tearDown(self)

    def get_new_emulator(self):
        """Return an additional emulator-backed Marionette, starting a new
        one (cloned from the current emulator's settings) on first use and
        reusing previously started ones afterwards."""
        self.extra_emulator_index += 1
        if len(self.marionette.extra_emulators) == self.extra_emulator_index:
            qemu = Marionette(emulator=self.marionette.emulator.arch,
                              emulatorBinary=self.marionette.emulator.binary,
                              homedir=self.marionette.homedir,
                              baseurl=self.marionette.baseurl,
                              noWindow=self.marionette.noWindow,
                              gecko_path=self.marionette.gecko_path)
            qemu.start_session()
            self.marionette.extra_emulators.append(qemu)
        else:
            qemu = self.marionette.extra_emulators[self.extra_emulator_index]
        return qemu

    def wait_for_condition(self, method, timeout=30):
        """Poll *method(marionette)* every 0.5s until it returns a truthy
        value (returned) or *timeout* seconds elapse (TimeoutException).

        The ``while/else`` runs the raise only when the loop exhausts
        without returning.
        """
        timeout = float(timeout) + time.time()
        while time.time() < timeout:
            value = method(self.marionette)
            if value:
                return value
            time.sleep(0.5)
        else:
            raise TimeoutException("wait_for_condition timed out")
class MarionetteJSTestCase(CommonTestCase):
    """JavaScript Marionette test case: executes one ``test_*.js`` file.

    The JS source may embed ``MARIONETTE_CONTEXT`` / ``MARIONETTE_TIMEOUT``
    directives, which select the execution context and the script timeout.
    """

    context_re = re.compile(r"MARIONETTE_CONTEXT(\s*)=(\s*)['|\"](.*?)['|\"];")
    timeout_re = re.compile(r"MARIONETTE_TIMEOUT(\s*)=(\s*)(\d+);")
    match_re = re.compile(r"test_(.*)\.js$")

    def __init__(self, marionette_weakref, methodName='runTest', jsFile=None):
        assert(jsFile)
        self.jsFile = jsFile
        self._marionette_weakref = marionette_weakref
        self.marionette = None
        CommonTestCase.__init__(self, methodName)

    @classmethod
    def add_tests_to_suite(cls, mod_name, filepath, suite, testloader, marionette, testvars):
        # One JS file == one test case.
        suite.addTest(cls(weakref.ref(marionette), jsFile=filepath))

    def runTest(self):
        """Run the JS file in the browser and assert on its reported
        pass/fail counts.

        Fix vs. the original: the three file handles (the test file and the
        two head.js files) were opened and never closed; they are now
        managed with ``with`` blocks.
        """
        if self.marionette.session is None:
            self.marionette.start_session()
        self.marionette.test_name = os.path.basename(self.jsFile)
        self.marionette.execute_script("log('TEST-START: %s');" % self.jsFile.replace('\\', '\\\\'))

        with open(self.jsFile, 'r') as f:
            js = f.read()
        args = []

        # if this is a browser_ test, prepend head.js to it
        if os.path.basename(self.jsFile).startswith('browser_'):
            with open(os.path.join(os.path.dirname(__file__), 'tests', 'head.js'), 'r') as local_head:
                js = local_head.read() + js
            with open(os.path.join(os.path.dirname(self.jsFile), 'head.js'), 'r') as head:
                for line in head:
                    # we need a bigger timeout than the default specified by the
                    # 'real' head.js
                    if 'const kDefaultWait' in line:
                        js += 'const kDefaultWait = 45000;\n'
                    else:
                        js += line

        context = self.context_re.search(js)
        if context:
            context = context.group(3)
            self.marionette.set_context(context)
            if context != "chrome":
                # Content tests need a page to run against.
                self.marionette.navigate('data:text/html,<html>test page</html>')

        timeout = self.timeout_re.search(js)
        if timeout:
            # NOTE(review): group(3) is a digit *string* and is passed
            # through unconverted — confirm set_script_timeout accepts it.
            timeout = timeout.group(3)
            self.marionette.set_script_timeout(timeout)

        try:
            results = self.marionette.execute_js_script(js,
                                                        args,
                                                        special_powers=True,
                                                        filename=os.path.basename(self.jsFile))

            self.assertTrue(not 'timeout' in self.jsFile,
                            'expected timeout not triggered')

            if 'fail' in self.jsFile:
                self.assertTrue(results['failed'] > 0,
                                "expected test failures didn't occur")
            else:
                fails = []
                for failure in results['failures']:
                    diag = "" if failure.get('diag') is None else "| %s " % failure['diag']
                    name = "got false, expected true" if failure.get('name') is None else failure['name']
                    fails.append('TEST-UNEXPECTED-FAIL | %s %s| %s' %
                                 (os.path.basename(self.jsFile), diag, name))
                self.assertEqual(0, results['failed'],
                                 '%d tests failed:\n%s' % (results['failed'], '\n'.join(fails)))

            self.assertTrue(results['passed'] + results['failed'] > 0,
                            'no tests run')

        except ScriptTimeoutException:
            if 'timeout' in self.jsFile:
                # expected exception
                pass
            else:
                self.loglines = self.marionette.get_logs()
                raise

        self.marionette.execute_script("log('TEST-END: %s');" % self.jsFile.replace('\\', '\\\\'))
        self.marionette.test_name = None
|
# coding: utf-8
import sys

# Platform flag: CPython reports sys.platform == "win32" on Windows builds.
IS_WINDOWS = (sys.platform == "win32")
|
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from apikey import apikey
import time
class YoutubeBot:
    """Posts a canned comment on the latest videos of a fixed channel.

    Fix vs. the original: the interactive OAuth console flow and the API
    client were created in the *class body*, so merely importing this
    module blocked on user input. They are now created per-instance in
    ``__init__``.
    """

    def __init__(self):
        # NOTE(review): from_client_secrets_file normally takes a *list* of
        # scopes; a bare string is passed here as in the original — confirm.
        flow = InstalledAppFlow.from_client_secrets_file(
            'client_secret.json',
            'https://www.googleapis.com/auth/youtube.force-ssl')
        self.credentials = flow.run_console()
        self.youtube = build('youtube', 'v3', credentials=self.credentials)

    def getVids(self):
        """Return [(videoId, channelId), ...] for the newest videos of the
        hard-coded channel, using an API-key client (no OAuth needed)."""
        ids = []  # stores the (videoId, channelId) pairs
        channelId = "UCiBfuUreTbKvBKtQbb6SIWQ"
        maxResults = 2
        youtube = build('youtube', 'v3', developerKey=apikey)
        request = youtube.search().list(
            part="snippet",
            channelId=channelId,
            maxResults=maxResults,
            order="date",
            type="video")
        response = request.execute()
        for item in response['items']:
            print(item['snippet']['title'])
            ids.append((item['id']['videoId'], item['snippet']['channelId']))
        return ids

    def insert_comment(self, channel_id, video_id, text):
        """Post *text* as a new top-level comment thread on *video_id*."""
        self.youtube.commentThreads().insert(
            part="snippet",
            body=dict(
                snippet=dict(
                    channelId=channel_id,
                    videoId=video_id,
                    topLevelComment=dict(
                        snippet=dict(
                            textOriginal=text
                        )
                    )
                )
            )
        ).execute()

    def commentVids(self):
        """Fetch the latest videos and leave the canned comment on each."""
        print("Startings CommentVideos")
        ids = self.getVids()
        message = "nice video"
        for id in ids:
            self.insert_comment(id[1], id[0], message)
        print("End CommentVideos ")
# NOTE(review): instantiating at import time triggers whatever setup the
# class performs (the OAuth flow in the original class body) — consider
# constructing lazily inside main().
bot = YoutubeBot()


def main():
    """Comment on the channel's latest videos once per day, forever."""
    while True:
        timeout = 86400  # seconds in a day
        bot.commentVids()
        time.sleep(timeout)


if __name__ == "__main__":
    main()
import os

# Rename every .kml file in the current directory to the same basename with
# an .xml extension (the trailing "kml" is swapped for "xml").
for entry in os.listdir('.'):
    if entry.endswith('.kml'):
        os.rename(entry, entry[:-3] + 'xml')
|
from fastapi import APIRouter

# Router to be mounted by the application via app.include_router().
router = APIRouter()


@router.get("/")
def test():
    # Simple demo/healthcheck endpoint returning a fixed JSON payload.
    return {"teste": "testado!"}
import json
import glob
import re

# onetime script to convert the data/people/*.txt files to data/people/*.json files
# Each txt file holds "key: value" lines; an optional __DESCRIPTION__ marker
# means the remainder of the file is a free-text description blob.
for txt_file in glob.glob('data/people/*.txt'):
    person = {}
    with open(txt_file, encoding="utf-8") as fh:
        for line in fh:
            line = line.rstrip('\n')
            # Skip blank / whitespace-only lines.
            if re.search(r'\A\s*\Z', line):
                continue
            if line == '__DESCRIPTION__':
                # Everything after the marker is the description.
                person['description'] = fh.read()
                break
            try:
                k, v = line.split(':', maxsplit=1)
            except Exception as e:
                # Malformed line: report it and abort the whole script.
                print(e)
                print(line)
                exit()
            v = v.strip(' ')
            if k in person:
                raise Exception("Duplicate field '{}' in {}".format(k, txt_file))
            person[k] = v
    # Split the comma-separated topics field into a list.
    if 'topics' in person:
        person['topics'] = re.split(r'\s*,\s*', person['topics'])
    json_file = txt_file[0:-3] + 'json'  # foo.txt -> foo.json
    with open(json_file, 'w', encoding="utf-8") as fh:
        json.dump(person, fh, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)
# splitup the topics
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
def _pad_tensors_to_same_length(x, y):
    """Zero-pad x and y along dimension 1 so both share the same length.

    x is padded in its middle dimension (rank-3 padding spec); y in its
    last dimension (rank-2 padding spec).
    """
    with tf.name_scope("pad_to_same_length"):
        len_x = tf.shape(x)[1]
        len_y = tf.shape(y)[1]
        target = tf.maximum(len_x, len_y)
        padded_x = tf.pad(x, [[0, 0], [0, target - len_x], [0, 0]])
        padded_y = tf.pad(y, [[0, 0], [0, target - len_y]])
        return padded_x, padded_y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
    """Calculate cross entropy loss while ignoring padding.

    Args:
      logits: Tensor of size [batch_size, length_logits, vocab_size]
      labels: Tensor of size [batch_size, length_labels]
      smoothing: Label smoothing constant, used to determine the on and off values
      vocab_size: int size of the vocabulary

    Returns:
      Returns the cross entropy loss and weight tensors: float32 tensors with
        shape [batch_size, max(length_logits, length_labels)]
    """
    with tf.name_scope("loss"):
        logits, labels = _pad_tensors_to_same_length(logits, labels)

        # Calculate smoothing cross entropy
        with tf.name_scope("smoothing_cross_entropy"):
            confidence = 1.0 - smoothing
            # Off-target probability mass spread evenly over the other classes.
            low_confidence = (1.0 - confidence) / tf.cast(vocab_size - 1, tf.float32)
            soft_targets = tf.one_hot(
                tf.cast(labels, tf.int32),
                depth=vocab_size,
                on_value=confidence,
                off_value=low_confidence)
            xentropy = tf.nn.softmax_cross_entropy_with_logits(
                logits=logits, labels=soft_targets)

            # Calculate the best (lowest) possible value of cross entropy, and
            # subtract from the cross entropy loss.
            # The 1e-20 guards log() against low_confidence == 0 (smoothing=0).
            normalizing_constant = -(
                confidence * tf.math.log(confidence) +
                tf.cast(vocab_size - 1, tf.float32) * low_confidence *
                tf.math.log(low_confidence + 1e-20))
            xentropy -= normalizing_constant

        # Mask out padding positions (label id 0).
        weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
        return xentropy * weights, weights
def padded_accuracy(logits, labels):
    """Per-token accuracy, with a weight mask zeroing padding (label == 0)."""
    with tf.name_scope("padded_accuracy"):
        logits, labels = _pad_tensors_to_same_length(logits, labels)
        predictions = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
        targets = tf.cast(labels, tf.int32)
        hits = tf.cast(tf.equal(predictions, targets), tf.float32)
        mask = tf.cast(tf.not_equal(labels, 0), tf.float32)
        return hits, mask
def padded_accuracy_topk(logits, labels, k):
    """Per-token top-k accuracy, with padding (label == 0) masked out."""
    with tf.name_scope("padded_accuracy_topk"):
        logits, labels = _pad_tensors_to_same_length(logits, labels)
        mask = tf.cast(tf.not_equal(labels, 0), tf.float32)
        # k may exceed the vocabulary dimension; clamp it.
        clamped_k = tf.minimum(k, tf.shape(logits)[-1])
        _, top_predictions = tf.nn.top_k(logits, k=clamped_k)
        top_predictions = tf.cast(top_predictions, tf.int32)
        # Broadcast the labels up to the top-k shape before comparing.
        targets = tf.expand_dims(tf.cast(labels, tf.int32), axis=-1)
        targets += tf.zeros_like(top_predictions)
        matches = tf.cast(tf.equal(top_predictions, targets), tf.float32)
        hit_in_topk = tf.reduce_sum(matches, axis=-1)
        return hit_in_topk, mask
def padded_accuracy_top5(logits, labels):
    """Convenience wrapper: padded top-5 accuracy."""
    return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
    """Fraction of sequences predicted correctly on every non-padding token."""
    with tf.name_scope("padded_sequence_accuracy"):
        logits, labels = _pad_tensors_to_same_length(logits, labels)
        mask = tf.cast(tf.not_equal(labels, 0), tf.float32)
        predictions = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
        targets = tf.cast(labels, tf.int32)
        # Per-token mistakes, with padding positions zeroed.
        mistakes = tf.cast(tf.not_equal(predictions, targets),
                           tf.float32) * mask
        # A sequence is correct iff it has zero mistakes across all
        # non-batch axes.
        reduce_axes = list(range(1, len(predictions.get_shape())))
        perfect = 1.0 - tf.minimum(1.0, tf.reduce_sum(mistakes, axis=reduce_axes))
        return perfect, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
    """Negative log-perplexity with padding excluded and no label smoothing."""
    xent, weights = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
    return -xent, weights
class MetricLayer(tf.keras.layers.Layer):
    """Custom a layer of metrics for Transformer model."""

    def __init__(self, vocab_size):
        super(MetricLayer, self).__init__()
        self.vocab_size = vocab_size
        # (Mean tracker, metric fn) pairs; populated lazily in build().
        self.metric_mean_fns = []

    def build(self, input_shape):
        """Builds metric layer."""
        # Bind vocab_size into the perplexity metric so all fns share the
        # (logits, targets) signature.
        neg_log_perplexity = functools.partial(
            padded_neg_log_perplexity, vocab_size=self.vocab_size)
        self.metric_mean_fns = [
            (tf.keras.metrics.Mean("accuracy"), padded_accuracy),
            (tf.keras.metrics.Mean("accuracy_top5"), padded_accuracy_top5),
            (tf.keras.metrics.Mean("accuracy_per_sequence"),
             padded_sequence_accuracy),
            (tf.keras.metrics.Mean("neg_log_perplexity"), neg_log_perplexity),
        ]
        super(MetricLayer, self).build(input_shape)

    def get_config(self):
        # Needed so the layer can be re-created via from_config().
        return {"vocab_size": self.vocab_size}

    def call(self, inputs):
        # inputs = (logits, targets); each metric fn returns (values,
        # weights), which feed the weighted Mean tracker. Logits pass
        # through unchanged.
        logits, targets = inputs[0], inputs[1]
        for mean, fn in self.metric_mean_fns:
            m = mean(*fn(logits, targets))
            self.add_metric(m)
        return logits
def transformer_loss(logits, labels, smoothing, vocab_size):
    """Calculates total loss containing cross entropy with padding ignored.

    Args:
      logits: Tensor of size [batch_size, length_logits, vocab_size]
      labels: Tensor of size [batch_size, length_labels]
      smoothing: Label smoothing constant, used to determine the on and off values
      vocab_size: int size of the vocabulary

    Returns:
      A scalar float tensor for loss.
    """
    xentropy, weights = padded_cross_entropy_loss(
        logits, labels, smoothing, vocab_size)
    total = tf.reduce_sum(xentropy)
    denominator = tf.reduce_sum(weights)
    return total / denominator
|
__author__ = 'Kalyan'
from placeholders import *
notes = '''
Tuples are yet another sequence type along the lines of strings and lists with
its own characteristics.
'''
def test_tuple_type():
    # A parenthesized, comma-separated literal produces a tuple.
    sample = (1, 2)
    assert type(sample).__name__ == 'tuple'
def test_tuple_length():
    # len() works on tuples just like any other sequence.
    rgb = ('red', 'blue', 'green')
    assert len(rgb) == 3
def test_tuple_with_no_elements():
    # () is the empty tuple: a genuine tuple instance of length zero.
    nothing = ()
    assert isinstance(nothing, tuple) == True
    assert len(nothing) == 0
def test_tuple_with_one_element():
    # Parentheses alone do NOT make a tuple...
    just_an_int = (1)
    assert type(just_an_int).__name__ == 'int'
    # ...a trailing comma does.
    singleton = (1,)
    assert type(singleton).__name__ == 'tuple'
def test_tuple_can_be_indexed():
    # Tuples support zero-based positional indexing.
    rgb = ('red', 'blue', 'green')
    for position, expected in enumerate(('red', 'blue', 'green')):
        assert rgb[position] == expected
def test_tuple_can_be_sliced():
    rgb = ('red', 'blue', 'green')
    # Slicing a tuple yields a new tuple.
    assert rgb[1:3] == ('blue', 'green')
    # A one-element slice is still a tuple (note the trailing comma).
    assert rgb[1:2] == ('blue',)
def test_tuples_are_immutable():
    # Item assignment on a tuple raises TypeError; the print shows the
    # exception message. (Python 2 print statement — this file is Py2.)
    colors = ('red', 'blue', 'green')
    try:
        colors[0] = 'orange'
    except TypeError as te:
        print te # note the exception
        assert True
def test_tuples_can_be_nested():
    # A rectangle as a pair of (x, y) corner tuples; chained indexing
    # digs into the nested structure.
    rectangle = ((10, 20), (40, 50))
    assert len(rectangle) == 2
    assert rectangle[0] == (10, 20)
    assert rectangle[0][0] == 10
    assert rectangle[1][1] == 50
def test_tuple_unpacking():
    # A 2-tuple unpacks cleanly into two names.
    pair = (10, 20)
    a, b = pair
    assert 10 == a
    assert 20 == b

    # Unpacking a 3-tuple into two names raises ValueError.
    triplet = (10, 20, 30)
    try:
        a, b = triplet
        assert False # should not come here.
    except ValueError as ve:
        print ve # observe what is printed here.
        assert True
def test_sequence_conversion():
    """
    sequences can be converted across forms using the builtin functions.
    """
    word = "testing"
    letters = ('t', 'e', 's', 't', 'i', 'n', 'g')
    # tuple()/list() split a string into its characters.
    assert tuple(word) == letters
    assert list(word) == ['t', 'e', 's', 't', 'i', 'n', 'g']
    assert list(letters) == ['t', 'e', 's', 't', 'i', 'n', 'g']
    # str() of a tuple is its repr, not the joined characters...
    assert str(letters) == "('t', 'e', 's', 't', 'i', 'n', 'g')"
    # ...join is how characters are reassembled into a string.
    assert "".join(letters) == "testing"
    assert "".join(list(word)) == "testing"
def min_max(input):
    """
    Returns a tuple of min and max of the input list. Assume input is a non empty numeric list
    Use only builtin functions from: https://docs.python.org/2/library/functions.html
    """
    smallest = min(input)
    largest = max(input)
    return smallest, largest
def test_min_max():
    # Works on any sequence, including range objects.
    lo, hi = min_max(range(1, 10))
    assert (lo, hi) == (1, 9)
    # Single-element input: min and max coincide.
    lo, hi = min_max([12])
    assert (lo, hi) == (12, 12)
three_things_i_learnt = """
-
-
-
""" |
from __future__ import annotations
import os
from getpass import getpass
from time import sleep
import pandas as pd
from notion_client import Client as NotionClient
def get_notion_token() -> str:
    """Gets a Notion API token.

    Either from the NOTION_TOKEN environment variable or interactively
    (input hidden via getpass).

    Returns
    -------
    str
        The Notion API token.
    """
    token = os.environ.get("NOTION_TOKEN")
    if token is not None:
        return token
    return getpass("Enter Notion API Integration Token: ")
def get_notion_client(token: str) -> NotionClient:
    """Gets a Notion API client.

    The client implementation is https://github.com/ramnes/notion-sdk-py

    Parameters
    ----------
    token
        The Notion API Integration Token.

    Returns
    -------
    NotionClient
        An authenticated Notion API client.
    """
    client = NotionClient(auth=token)
    return client
def _simplify_notion_property_value(value: dict):
"""Convert Notion Property Value to a simple/primitive data type
Note that "date" types return a 2-tuple of (start, end) Timestamps.
"""
type_ = value["type"]
obj = value[type_]
if obj is None:
return obj
elif type_ == "url":
return obj
elif type_ == "email":
return obj
elif type_ == "phone_number":
return obj
elif type_ == "number":
return obj
elif type_ == "created_time":
return obj
elif type_ == "last_edited_time":
return obj
elif type_ == "relation":
# extract the IDs of the relation (linked page)
return [v["id"] for v in obj]
elif type_ == "checkbox":
return bool(obj)
elif type_ == "date":
return (pd.Timestamp(obj["start"]), pd.Timestamp(obj["end"]))
elif type_ == "created_by":
return obj["name"]
elif type_ == "last_edited_by":
return obj.get("name")
elif type_ == "select":
return obj["name"]
elif type_ == "multi_select":
return [x["name"] for x in obj]
elif type_ == "people":
return [x["name"] for x in obj if "name" in x]
elif type_ == "files":
return [x[x["type"]]["url"] for x in obj]
elif type_ == "title":
return " ".join([x["plain_text"].strip() for x in obj])
elif type_ == "rich_text":
return " ".join([x["plain_text"].strip() for x in obj])
elif type_ == "formula":
return obj[obj["type"]]
elif type_ == "rollup":
if obj["type"] == "array":
return [_simplify_notion_property_value(x) for x in obj["array"]]
else:
return obj[obj["type"]]
else:
raise ValueError(f"I don't understand type {type_}")
def _page_to_simple_dict(
    page: dict,
    default_date_handler: str = "ignore_end",
    date_handlers: dict[str, str] = None,
) -> dict:
    """Convert a Notion Page object (``"object": "page"``) to a flat dict.

    Date properties are expanded per the handler chosen for that property:
    "ignore_end" keeps only the start; "mangle" emits ``<name>_start`` /
    ``<name>_end`` keys; "multiindex" emits (name, "start") / (name, "end")
    tuple keys for a hierarchical DataFrame.
    """
    if date_handlers is None:
        date_handlers = {}
    # Metadata defined by Notion itself comes first.
    record = {
        "_notion_id": page["id"],
        "_created_time": pd.Timestamp(page["created_time"]),
        "_last_edited_time": pd.Timestamp(page["last_edited_time"]),
        "_notion_url": page["url"],
    }
    for name, raw in page["properties"].items():
        simple = _simplify_notion_property_value(raw)
        if raw["type"] != "date":
            record[name] = simple
            continue
        # Empty dates normalize to a (None, None) pair.
        start, end = simple if simple is not None else (None, None)
        handler = date_handlers.get(name, default_date_handler)
        if handler == "ignore_end":
            record[name] = start
        elif handler == "mangle":
            record[f"{name}_start"] = start
            record[f"{name}_end"] = end
        elif handler == "multiindex":
            record[(name, "start")] = start
            record[(name, "end")] = end
    return record
def database_to_dataframe(
    notion_client: NotionClient,
    database_id: str,
    default_date_handler: str = "ignore_end",
    date_handlers: dict[str, str] = None,
) -> pd.DataFrame:
    """Extracts a Notion Database as a Pandas DataFrame.

    Parameters
    ----------
    notion_client
        The Notion API client.
    database_id
        The Notion Database ID. This identifier can be found in the URL of the
        database.
    default_date_handler : {"ignore_end", "mangle", "multiindex"}
        The default date handler. See Notes below on how to use this.
    date_handlers
        Specify per-column date handlers.

    Returns
    -------
    pd.DataFrame
        The Notion Database as a Pandas DataFrame.

    Notes
    -----
    Notion date properties are represented as 2-tuples with a start and end
    timestamps. If there is only a single date in the property, it is encoded as
    a start date with a null end date. There are several options on how to
    encode this into the resulting dataframe.

    "ignore_end":
        Keep only the start date of the date object and keep column name the
        same as the property name.
    "mangle":
        For each date property named "foo" in the Notion table, create a
        "foo_start" and a "foo_end" column.
    "multiindex":
        Create a MultiIndex for the columns where the top level contains the
        property names and the second level contains "start" and "end" for
        date properties.
    """
    # accumulate all the pages in the database
    # (cursor-based pagination; the short sleep is a courtesy rate limit)
    response = notion_client.databases.query(database_id)
    results = response["results"]
    while response["has_more"]:
        sleep(0.1)
        response = notion_client.databases.query(
            database_id, start_cursor=response["next_cursor"]
        )
        results += response["results"]

    # convert each page to a simplified dict => Pandas DataFrame
    records = map(
        lambda page: _page_to_simple_dict(
            page,
            default_date_handler=default_date_handler,
            date_handlers=date_handlers,
        ),
        results,
    )
    df = pd.DataFrame(records)

    # if any of the columns are tuples, it means those were date columns which
    # were handled with "multiindex" => the dataframe needs to be given
    # hierarchical columns
    if any(isinstance(col, tuple) for col in df.columns):
        multiindex = pd.MultiIndex.from_tuples(
            [col if isinstance(col, tuple) else (col, "") for col in df.columns]
        )
        df.columns = multiindex
    return df
def _user_to_simple_dict(user: dict) -> dict:
"""Convert Notion User objects to a "simple" dictionary suitable for Pandas.
This is suitable for objects that have `"object": "user"`
"""
record = {
"notion_id": user["id"],
"type": user["type"],
"name": user["name"],
"avatar_url": user["avatar_url"],
}
if user["type"] == "person":
record["email"] = user["person"]["email"]
return record
def users_to_dataframe(notion_client: NotionClient):
    """Extract all Notion users as a Pandas DataFrame.

    Parameters
    ----------
    notion_client
        The Notion API client.

    Returns
    -------
    pd.DataFrame
        The Notion users as a Pandas DataFrame.

    Notes
    -----
    If users are deleted from your Notion workspace, they will not be returned
    by the API, even if they are still present in Person properties in your
    databases.
    """
    # Cursor-based pagination; the short sleep is a courtesy rate limit.
    response = notion_client.users.list()
    results = response["results"]
    while response["has_more"]:
        sleep(0.1)
        response = notion_client.users.list(start_cursor=response["next_cursor"])
        results += response["results"]
    return pd.DataFrame(map(_user_to_simple_dict, results))
|
# Generated by Django 3.1.13 on 2021-07-21 18:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: updates verbose_name/choices metadata on
    users-app fields (no schema-breaking changes)."""

    dependencies = [
        ('users', '0005_auto_20210721_1547'),
    ]

    operations = [
        migrations.AlterField(
            model_name='college',
            name='name',
            field=models.CharField(max_length=50, verbose_name='Student College'),
        ),
        migrations.AlterField(
            model_name='department',
            name='name',
            field=models.CharField(max_length=100, verbose_name='Student Department'),
        ),
        migrations.AlterField(
            model_name='room',
            name='occupied',
            field=models.BooleanField(default=False, verbose_name='Is the room occupied?'),
        ),
        migrations.AlterField(
            model_name='room',
            name='room_condition',
            field=models.CharField(choices=[('GOOD', 'Good'), ('BAD', 'Bad'), ('MAINTENANCE', 'Maintenance')], default='GOOD', max_length=50, verbose_name='Room condition'),
        ),
        migrations.AlterField(
            model_name='room',
            name='room_status',
            field=models.CharField(choices=[('FULL', 'Full'), ('EMPTY', 'Empty')], default='EMPTY', max_length=50, verbose_name='Is the room full or empty'),
        ),
    ]
|
import uvicorn
from fastapi import FastAPI
from dotenv import load_dotenv
from layer_view import view

# Load environment variables from a local .env file before app creation.
load_dotenv()

app = FastAPI()
# Mount the routes defined in layer_view.view.
app.include_router(view.router)

if __name__ == "__main__":
    # Development server; reload=True watches the source for changes.
    uvicorn.run(
        "app:app", host="0.0.0.0", port=5111, reload=True
    )
import argparse
import os
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
from data.dataset import OmniglotReactionTimeDataset
from helpers.statistical_functions import calculate_base_statistics, display_base_statistics
if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Record the RNG seed so runs can be identified in the reported stats.
    seed: int = torch.random.seed()

    transform = transforms.Compose([
        # you can add other transformations in this list
        transforms.ToTensor()
    ])

    # The 100-class reaction-time dataset (CSV manifest) achieved better
    # results than the stock torchvision Omniglot download.
    dataset = OmniglotReactionTimeDataset('sigma_dataset.csv', transforms=transform)

    # Shuffle indices, then carve off the last `validation_split` fraction
    # for validation.
    validation_split = .2
    shuffle_dataset = True
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split * dataset_size))
    if shuffle_dataset:
        np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    # Creating PT data samplers and loaders over the disjoint index sets.
    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)
    train_loader = torch.utils.data.DataLoader(dataset, batch_size=16,
                                               sampler=train_sampler)
    validation_loader = torch.utils.data.DataLoader(dataset, batch_size=16,
                                                    sampler=valid_sampler)

    # Fine-tune an ImageNet-pretrained ResNet-50.
    # BUG FIX: the model must live on the same device as the inputs --
    # previously inputs were moved to CUDA while the model stayed on CPU.
    model = torchvision.models.resnet50(pretrained=True, progress=True).to(device)
    model.train()

    # BUG FIX: was `torch.abs_nn.CrossEntropyLoss()`, which does not exist.
    criterion = torch.nn.CrossEntropyLoss()
    optim = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

    for epoch in range(2):
        running_loss = 0.0
        correct = 0.0
        total = 0.0
        for idx, sample in enumerate(train_loader):
            # Each sample carries a pair of images with labels; concatenate
            # the two halves into one batch.
            image1 = sample['image1']
            image2 = sample['image2']
            label1 = sample['label1']
            label2 = sample['label2']
            inputs = torch.cat([image1, image2], dim=0).to(device)
            labels = torch.cat([label1, label2], dim=0).to(device)

            # Forward/backward/step. `outputs` is already on `device`; the
            # old extra `.to(device)` on the output was redundant.
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            optim.zero_grad()
            loss.backward()
            optim.step()

            running_loss += loss.item()

            # Training-batch accuracy bookkeeping.
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Finished Training')
    model.eval()

    # TODO: run validation_loader through the model to collect predictions
    # and ground-truth labels -- the `...` placeholders below must be
    # replaced before this reporting section can run.
    preds: list = model(...)
    accuracy, precision, recall, f1_score = calculate_base_statistics(preds, ...)
    display_base_statistics(seed, accuracy, precision, recall, f1_score)
|
#!/usr/bin/env python
#
# Copyright 2012 Jim Lawton. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based in part on pytrie, https://bitbucket.org/gsakkis/pytrie/
# Copyright (c) 2009, George Sakkis
# This code is part of gdrive-linux (https://code.google.com/p/gdrive-linux/).
from UserDict import DictMixin
# Singleton sentinel.
class _Null(object):
    # Sentinel distinguishing "no value stored" from a legitimate None value.
    pass
class _Node(dict):
    """A class representing a node in the directory tree.
    >>> n = _Node()
    >>> n[1] = 1
    >>> n[1]
    1
    >>> n = { 1: 1, 2: 2, 3: 3 }
    >>> n
    {1: 1, 2: 2, 3: 3}
    >>> 1 in n
    True
    >>> 4 in n
    False
    >>> n[5]
    Traceback (most recent call last):
      File "/usr/lib64/python2.7/doctest.py", line 1289, in __run
        compileflags, 1) in test.globs
      File "<doctest __main__._Node[7]>", line 1, in <module>
        n[5]
    KeyError: 5
    """
    def __init__(self, value=_Null):
        super(_Node, self).__init__() # The base dictionary object.
        self.path = None        # Stores the path to this node.
        self.value = value      # Payload; _Null marks an intermediate node.
        self.children = {}      # Maps path component -> child _Node.

    def numkeys(self):
        '''Return the number of keys in the subtree rooted at this node.'''
        # BUG FIX: the original only recursed when this node held a value
        # and never counted the node itself, so any tree whose inner nodes
        # were value-less reported 0 keys (breaking DirectoryTree.__len__).
        numk = 0
        if self.value is not _Null:
            numk = 1
        numk += sum(child.numkeys() for child in self.children.itervalues())
        return numk

    def __repr__(self):
        '''Debug form: (value-or-_Null, {component: child, ...}).'''
        valstr = '_Null'
        if self.value is not _Null:
            valstr = repr(self.value)
        return '(%s, {%s})' % (valstr, ', '.join('%r: %r' % cstr for cstr in self.children.iteritems()))

    def __getstate__(self):
        '''Pickle only value and children; path is transient.'''
        return (self.value, self.children)

    def __setstate__(self, state):
        '''Restore from the (value, children) pair.'''
        self.value, self.children = state
class DirectoryTree(DictMixin, object):
    """A prefix tree (Trie) implementation to represent a directory tree.
    >>> t = DirectoryTree()
    >>> t.add("/a/b/c/d")
    >>> t.add("/a/b/c/d/e")
    >>> t.add("/foo/bar")
    >>> print t
    DirectoryTree({'': '/', '/a/b/c/d': '/a/b/c/d', '/a/b/c/d/e': '/a/b/c/d/e', '/foo/bar': '/foo/bar'})
    >>> t.keys()
    ['', '/a/b/c/d', '/a/b/c/d/e', '/foo/bar']
    >>> t.values()
    ['/', '/a/b/c/d', '/a/b/c/d/e', '/foo/bar']
    >>> t.items()
    [('', '/'), ('/a/b/c/d', '/a/b/c/d'), ('/a/b/c/d/e', '/a/b/c/d/e'), ('/foo/bar', '/foo/bar')]
    >>> t.search("/a/b/c")
    ['/a/b/c/d', '/a/b/c/d/e']
    """
    def __init__(self, seq=None, **kwargs):
        # The root node represents '/'; keys are stored one path component
        # per tree level (split on '/').
        self._root = _Node('/')
        self.update(seq, **kwargs)

    def __len__(self):
        '''Number of keys (nodes holding a value) in the tree.'''
        return self._root.numkeys()

    def __iter__(self):
        return self.iterkeys()

    def __contains__(self, key):
        # Present only if the node exists AND actually stores a value.
        node = self._find(key)
        return node is not None and node.value is not _Null

    def __getitem__(self, key):
        node = self._find(key)
        if node is None or node.value is _Null:
            raise KeyError
        return node.value

    def __setitem__(self, key, value):
        # Walk/extend the tree one path component at a time.
        node = self._root
        for part in key.split('/'):
            next_node = node.children.get(part)
            if next_node is None:
                node = node.children.setdefault(part, _Node())
            else:
                node = next_node
        node.value = value

    def __delitem__(self, key):
        '''Remove `key`, pruning now-empty intermediate nodes on the way up.'''
        parts = []
        node = self._root
        for part in key.split('/'):
            # BUG FIX: list.append takes a single argument; the original
            # `parts.append(node, part)` raised TypeError on any delete.
            parts.append((node, part))
            node = node.children.get(part)
            if node is None:
                break
        if node is None or node.value is _Null:
            raise KeyError
        node.value = _Null
        # Prune childless, value-less nodes back toward the root.
        while node.value is _Null and not node.children and parts:
            node, part = parts.pop()
            del node.children[part]

    def __repr__(self):
        return '%s({%s})' % (self.__class__.__name__, ', '.join('%r: %r' % t for t in self.iteritems()))

    def __str__(self):
        lines = ["{"]
        for key, value in self.iteritems():
            lines.append("%s: %s" % (key, value))
        lines.append("}")
        return '\n'.join(lines)

    def _find(self, key):
        '''Return the node for `key`, or None when the path does not exist.'''
        node = self._root
        for part in key.split('/'):
            node = node.children.get(part)
            if node is None:
                break
        return node

    def keys(self, prefix=None):
        "Return a list of the trie keys."
        return list(self.iterkeys(prefix))

    def values(self, prefix=None):
        "Return a list of the trie values."
        return list(self.itervalues(prefix))

    def items(self, prefix=None):
        "Return a list of the trie (key, value) tuples."
        return list(self.iteritems(prefix))

    def iteritems(self, prefix=None):
        "Return an iterator over the trie (key, value) tuples."
        parts = []
        # Depth-first walk; `parts` accumulates the path components so the
        # full key can be rebuilt with '/'.join at each value-bearing node.
        def generator(node, parts=parts):
            if node.value is not _Null:
                yield ('/'.join(parts), node.value)
            for part, child in node.children.iteritems():
                parts.append(part)
                for subresult in generator(child):
                    yield subresult
                del parts[-1]
        node = self._root
        if prefix is not None:
            # Descend to the prefix first; a missing prefix yields nothing.
            for part in prefix.split('/'):
                parts.append(part)
                node = node.children.get(part)
                if node is None:
                    node = _Node()
                    break
        return generator(node)

    def iterkeys(self, prefix=None):
        "Return an iterator over the trie keys."
        return (key for key, value in self.iteritems(prefix))

    def itervalues(self, prefix=None):
        "Return an iterator over the trie values."
        return (value for key, value in self.iteritems(prefix))

    def add(self, path, value=None):
        "Add a path to the trie."
        # With no explicit value the path doubles as its own value.
        if value is not None:
            self[path] = value
        else:
            self[path] = path

    def search(self, prefix=None):
        "Return a list of keys in the trie matching the supplied prefix."
        return list(self.iterkeys(prefix))
if __name__ == "__main__":
    # Run the doctests embedded in the class docstrings above.
    import doctest
    doctest.testmod()
|
import os
import imutils
import pickle
import time
import cv2
import threading
import numpy as np
from PIL import ImageFont, ImageDraw, Image
import json
import datetime
import requests
from faced import FaceDetector
from faced.utils import annotate_image
from config_reader import read_config
# ZoneMinder server endpoints.
# NOTE(review): the host IP and admin credentials are hard-coded into the
# login URL; consider moving them to configuration/environment variables.
ZM_URL = 'http://18.179.207.49/zm'
ZM_STREAM_URL = f'{ZM_URL}/cgi-bin/nph-zms'
LOGIN_URL = f'{ZM_URL}/api/host/login.json?user=admin&pass=admin'
# Upper bound on still-image frames probed per event in detect_video().
MAX_RETRY_FRAME = 1000
def connect_stream(monitor, stream_url):
    """Open a cv2.VideoCapture for `monitor`.

    Logs into the ZoneMinder API, then opens the authenticated ZM stream;
    if that fails, falls back to the direct `stream_url`.
    """
    login_response = requests.post(url=LOGIN_URL)
    print('[INFO] openning video stream...')
    credentials = login_response.json()['credentials']
    zm_stream = f'{ZM_STREAM_URL}?mode=jpeg&maxfps=5&monitor={monitor}&{credentials}'
    capture = cv2.VideoCapture(zm_stream)
    if capture is None or not capture.isOpened():
        # Authenticated stream failed; try the raw camera URL instead.
        print('[ERROR] trying to open direct url...')
        capture = cv2.VideoCapture(stream_url)
    return capture
class Camera(object):
    """Per-monitor face detection service.

    Starts one background thread per ZoneMinder monitor that reads the
    video stream, runs face detection + recognition, and publishes the
    latest results. All shared state lives in class-level dictionaries
    keyed by str(monitor).
    """

    thread_list = {}      # str(monitor) -> worker thread (None once stopped)
    json_list = {}        # str(monitor) -> latest detection payload
    frame_list = {}       # str(monitor) -> latest encoded frame (currently unused)
    last_access = {}
    json_data = {}
    detector = None       # shared FaceDetector instance
    embedder = None       # OpenCV DNN producing face embeddings
    recognizer = None     # classifier over face embeddings
    le = None             # label encoder: class index -> person name
    max_retry_count = 0   # stream (re)connect attempts before giving up
    stream_url_list = {}  # str(monitor) -> fallback direct stream URL
    confidence = 0.90     # detector confidence threshold

    def initialize(self, monitor, stream_url):
        """Start the background detection thread for `monitor` if absent."""
        # BUG FIX: thread_list is keyed by str(monitor); the original
        # checked the raw value and could spawn a duplicate thread when
        # callers passed an int monitor id.
        if str(monitor) not in Camera.thread_list:
            thread = threading.Thread(target=self._thread, args=(
                stream_url,), kwargs={"monitor": monitor})
            thread.start()
            Camera.thread_list[str(monitor)] = thread

    def __init__(self):
        """Lazily load the shared models on first construction."""
        file_paths, configs = read_config()
        if Camera.detector is None:
            print('[INFO] loading face detector...')
            Camera.detector = FaceDetector()
        if Camera.embedder is None:
            # load our serialized face embedding model from disk
            print('[INFO] loading embedder from {}'.format(
                file_paths['embedder_path']))
            Camera.embedder = cv2.dnn.readNetFromTorch(
                file_paths['embedder_path'])
        if Camera.recognizer is None:
            # BUG FIX: load from the configured path -- the log message and
            # the previously hard-coded 'output/recognizer.pickle' disagreed.
            print('[INFO] loading face recognizer from {}'.format(
                file_paths['recognizer_path']))
            Camera.recognizer = pickle.loads(
                open(file_paths['recognizer_path'], 'rb').read())
        if Camera.le is None:
            # BUG FIX: same mismatch for the label encoder path.
            print('[INFO] loading le from {}'.format(file_paths['le_path']))
            Camera.le = pickle.loads(open(file_paths['le_path'], 'rb').read())
        print('[INFO] Confidence value is set to {}'.format(
            configs['confidence']))
        Camera.confidence = float(configs['confidence'])
        Camera.max_retry_count = int(configs['max_retry_count'])

    def get_json(self, monitor):
        """Return the latest detection payload for `monitor`.

        Falls back to an empty payload when nothing has been published yet.
        """
        try:
            return self.json_list[str(monitor)]
        except KeyError:  # narrowed from a bare except: only a missing key is expected
            return {'detection': []}

    def change_stream_url(self, monitor, stream_url):
        """Register the fallback URL for `monitor` and ensure its thread runs."""
        # BUG FIX: key lookup must use str(monitor) (see initialize()).
        if str(monitor) in Camera.thread_list:
            return None
        Camera.stream_url_list[str(monitor)] = stream_url
        self.initialize(monitor, stream_url)

    @classmethod
    def _thread(cls, stream_url, monitor=0):
        """Worker loop: read frames, detect/recognize faces, publish JSON."""
        # connect_stream() performs the ZoneMinder login itself; the old
        # duplicate login + unused URL construction here was removed.
        retry_count = 0
        cap = None
        # BUG FIX: isOpened is a method -- `not cap.isOpened` tested the
        # bound-method object (always truthy), so the retry loop never ran
        # more than once.
        while (cap is None or not cap.isOpened()) and retry_count < cls.max_retry_count:
            cap = connect_stream(monitor, cls.stream_url_list[str(monitor)])
            retry_count += 1
        if cap is None or not cap.isOpened():
            print('[ERROR] unable to open remote stream...')
            cls.thread_list[str(monitor)] = None
            return
        print('[INFO] starting face detection...')
        cap_failed_count = 0
        while True:
            try:
                response_data = {'detection': []}
                ret, frame = cap.read()
                if not ret:
                    cap_failed_count += 1
                    cls.json_list[str(monitor)] = response_data
                    if (cap_failed_count > cls.max_retry_count):
                        # Too many consecutive read failures: reconnect.
                        if cap.isOpened():
                            cap.release()
                        retry_count = 0
                        while (cap is None or not cap.isOpened()) and retry_count < cls.max_retry_count:
                            cap = connect_stream(
                                monitor, cls.stream_url_list[str(monitor)])
                            retry_count += 1
                        if cap is None or not cap.isOpened():
                            print('[ERROR] unable to open remote stream...')
                            cls.thread_list[str(monitor)] = None
                            return
                        # BUG FIX: reset the counter after a successful
                        # reconnect, otherwise it re-trips immediately.
                        cap_failed_count = 0
                    continue
                # Resize for speed (keeping aspect ratio) and convert
                # BGR -> RGB for the detector.
                frame = imutils.resize(frame, width=600)
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                bboxes = cls.detector.predict(frame, cls.confidence)
                print('[INFO] detected faces: {}'.format(len(bboxes)))
                for xb, yb, wb, hb, pb in bboxes:
                    # Convert center/size boxes to corner coordinates.
                    startX = int(xb - wb/2)
                    startY = int(yb - hb/2)
                    endX = int(xb + wb/2)
                    endY = int(yb + hb/2)
                    # extract the face ROI
                    face = frame[startY:endY, startX:endX]
                    # Embed the face crop, then classify the embedding.
                    faceBlob = cv2.dnn.blobFromImage(
                        face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
                    cls.embedder.setInput(faceBlob)
                    vec = cls.embedder.forward()
                    preds = cls.recognizer.predict_proba(vec)[0]
                    j = np.argmax(preds)
                    proba = preds[j]
                    name = cls.le.classes_[j]
                    json_data = {
                        'name': '{}'.format(name),
                        'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        'confidence': str(proba),
                    }
                    response_data['detection'].append(json_data)
                cls.json_list[str(monitor)] = response_data
            except Exception as e:
                # Previously an unexpected error propagated out of the loop
                # and killed the thread without ever reaching the release
                # code below; log it and stop cleanly instead.
                print('[ERROR] detection loop failed: {}'.format(e))
                break
            finally:
                time.sleep(0.02)
        print('[INFO] releasing stream resources...')
        if cap.isOpened():
            cap.release()
        cls.thread_list[str(monitor)] = None

    def detect_image(self, frame):
        """Run detection + recognition on one BGR frame.

        Returns (payload, names): payload is {'detection': [...]}, names is
        the deduplicated list of recognized identities. Best-effort: on any
        error whatever was collected so far is returned.
        """
        response_data = {'detection': []}
        response_list = []
        try:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            bboxes = Camera.detector.predict(frame, Camera.confidence)
            print('[INFO] detected faces: {}'.format(len(bboxes)))
            for xb, yb, wb, hb, pb in bboxes:
                # Convert center/size boxes to corner coordinates.
                startX = int(xb - wb/2)
                startY = int(yb - hb/2)
                endX = int(xb + wb/2)
                endY = int(yb + hb/2)
                # extract the face ROI
                face = frame[startY:endY, startX:endX]
                # Embed the face crop, then classify the embedding.
                faceBlob = cv2.dnn.blobFromImage(
                    face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
                Camera.embedder.setInput(faceBlob)
                vec = Camera.embedder.forward()
                preds = Camera.recognizer.predict_proba(vec)[0]
                j = np.argmax(preds)
                proba = preds[j]
                name = Camera.le.classes_[j]
                if name not in response_list:
                    response_list.append(name)
                json_data = {
                    'name': '{}'.format(name),
                    'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    'confidence': str(proba),
                }
                response_data['detection'].append(json_data)
        except Exception as e:
            # Keep the original best-effort semantics (the old try/finally
            # swallowed every error) but at least surface what failed.
            print('[ERROR] detect_image failed: {}'.format(e))
        return response_data, response_list

    def detect_video(self, event_id, monitor_id, event_date):
        """Scan a ZoneMinder event's analysis stills for known faces.

        Polls the event directory for numbered `-analyse.jpg` frames and
        returns {'detection': [names]} for the identities found.
        """
        response_data = {'detection': []}
        print(f'[INFO] starting face detection for event {event_id}...')
        result_list = []
        start_index = 1
        while True:
            print(f'[INFO] checking still image {start_index:05}-analyse.jpg...')
            img_path = f'/mnt/zoneminder/events/{monitor_id}/{event_date}/{event_id}/{start_index:05}-analyse.jpg'
            if not os.path.isfile(img_path):
                if start_index >= MAX_RETRY_FRAME:
                    break
                start_index += 1
                time.sleep(0.02)
                continue
            try:
                frame = cv2.imread(img_path)
                if frame is not None:
                    detect_data, detect_list = self.detect_image(frame)
                    for detect_id in detect_list:
                        if detect_id not in result_list:
                            result_list.append(detect_id)
            except Exception as e:
                print(
                    f'[INFO] failed to parsing frame {start_index} for event {event_id}...')
            finally:
                # NOTE(review): this break stops after the FIRST frame found
                # on disk, so only one still is analysed per event -- confirm
                # whether all frames should be processed instead.
                break
        print('[INFO] finish video detection...')
        response_data['detection'] = result_list
        return response_data
|
import cv2
from PIL import Image
import numpy as np
# Capture frames from the default webcam (device index 0).
cam = cv2.VideoCapture(0)
## Keys ##
class Keys(object):
    """Per-pixel sort keys; a pixel is an (R, G, B) triple."""

    @staticmethod
    def overall(px):
        # Sum of the three channels.
        return int(px[0]) + int(px[1]) + int(px[2])

    @staticmethod
    def red(px):
        return int(px[0])

    @staticmethod
    def green(px):
        return int(px[1])

    @staticmethod
    def blue(px):
        return int(px[2])

    @staticmethod
    def avg(px):
        # Mean of the three channels (true division).
        return (int(px[0]) + int(px[1]) + int(px[2])) / 3

    @staticmethod
    def lum(px):
        # Rec. 601 luma weighting of the channels.
        return round(int(px[0]) * 299 / 1000 + int(px[1]) * 587 / 1000 + int(px[2]) * 114 / 1000)
def avg(li):
    """Mean Rec. 601 luma over a sequence of RGB pixels."""
    lum_total = sum(
        round(int(px[0]) * 299 / 1000 + int(px[1]) * 587 / 1000 + int(px[2]) * 114 / 1000)
        for px in li
    )
    return lum_total / len(li)
def csort(inp):
    """Sort a row of pixels by Rec. 601 luma (darkest first)."""
    return sorted(inp, key=Keys.lum)
def clsort(img):
    """Sort the image's rows by their average luma (module-level avg)."""
    output = []
    # for row in img:
    return sorted(img, key=lambda x: (avg(x)))
## Load the image ##
# Pixel-sort effect: sort each row's pixels by luma, then sort the rows
# themselves by average luma, and display until Esc is pressed.
while True:
    ret_val, im = cam.read()
    ## Build colour map ##
    raw_im = np.asarray(im)
    sr_im = []
    # Sort the pixels within each row by luminance.
    for i, row in enumerate(raw_im):
        sr_im.append(sorted(row, key=Keys.lum))
        # sr_im.append((0,0,0))
    # Then order the rows by their average luminance.
    sr_im = clsort(sr_im)
    out_im = np.array(sr_im)
    cv2.imshow("out", out_im)
    if cv2.waitKey(1) == 27:
        break  # esc to quit
cv2.destroyAllWindows()
# Dictionary demo: two dicts with the same items in different insertion
# order compare equal (==) but are distinct objects (different id()).
d1 = {"tom":30, "bobe":3}
print("d1 =",d1)
print("ID of d1 is", id(d1))
d2 = {"bobe":3, "tom":30}
print("d2 =",d2)
print("ID of d2 is", id(d2))
# Membership operators test keys, not values.
print("tom" in d1) # True
print("tom" not in d1) # False
# Relational operators compare key/value pairs.
print(d1 == d2) # True
print(d1 != d2) # False
|
from django.core.mail import send_mail
from rest_framework.views import APIView
import bcrypt
from decouple import config
from ...models.user import User
from ...models.customer import Customer
from ...utils.helper import random_string_generator, send_email, customer_message
from ...verification.customer_validator import *
class CustomerRegistration(APIView):
    """Register a new customer account and send a verification email."""

    def post(self, request):
        """Validate the payload, create User + Customer rows, email a token.

        Returns the validation Response unchanged on failure, 201 on
        success, or 503 when the verification email could not be sent
        (the account is still created in that case).
        """
        data = request.data
        # `validate` returns True on success, or a DRF Response describing
        # the failure which is passed straight through to the client.
        validate_data = validate(data, User, Customer)
        if validate_data is not True:
            return validate_data
        # hash user password using bcrypt algorithm (salt embedded in hash)
        hashed = bcrypt.hashpw(
            data['password'].encode('utf-8'), bcrypt.gensalt())
        # Generate one-time token used to verify the email address
        email_token = random_string_generator()
        # create user -- objects.create() persists the row immediately, so
        # the explicit .save() calls the original made were redundant
        # second writes and have been removed.
        user = User.objects.create(
            user_name=data['user_name'].strip(),
            password=hashed,
            email_verification_token=email_token,
            user_type='customer',
        )
        # create customer linked to the user
        customer = Customer.objects.create(
            user=user,
            first_name=data['first_name'],
            last_name=data['last_name'],
            email=data['email']
        )
        email_verification_url = config("VERIFY_EMAIL_URL")
        message = "Registration was successful"
        customer_message_details = {
            'subject': 'Bouncer email verification',
            'text_content': "You are welcome on board.",
            'to': [data["email"]],
            'from_email': config("EMAIL_SENDER"),
            'html_content': 'Welcome on board, complete your registration by clicking the link below',
            'link_message': f'Welcome on board </br> Click on this <a href="{email_verification_url}/?token={email_token}">Link</a> to verify'
        }
        # send mail to the user
        send = send_email(customer_message_details)
        if send:
            return Response({'message': message, 'user_message': customer_message(data)}, status=status.HTTP_201_CREATED)
        return Response(dict(message='Network Error: Could not send email at the moment You are registered'), status=status.HTTP_503_SERVICE_UNAVAILABLE)
|
import mnist
import numpy as np
import matplotlib.pylab as plt
import umap
from persistence_diagram import persistence
from plot_persistent_homology import plot_diagrams
if __name__ == '__main__':
    # Persistent-homology comparison of two trained SOMs against the raw
    # input space, using MNIST digits projected to 7 dimensions with UMAP.
    np.random.seed(1)
    # Number of SOM neurons / sampled digits.
    num_neurons = 1024
    # File-name prefixes for diagrams written by earlier experiment runs.
    bc_base = './results/barcode-experiment-MNIST-'
    h0_base = './results/homology0-experiment-MNIST-'
    h1_base = './results/homology1-experiment-MNIST-'
    h2_base = './results/homology2-experiment-MNIST-'
    input_space, labels = mnist.read("training")
    # Sample num_neurons digits and flatten the 28x28 images to vectors.
    index = np.random.choice(np.arange(0, 50000), num_neurons)
    input_space = input_space[index].reshape(num_neurons, 28*28)
    # Trained SOM codebooks saved by previous experiments.
    regular_som = np.load("results/experiment-MNIST-regular.npy")
    random_som = np.load("results/experiment-MNIST-random.npy")
    # Fit UMAP on the inputs, then project all three point clouds with it.
    mapper = umap.UMAP(n_components=7).fit(input_space)
    input_mapper = mapper.transform(input_space)
    regular_mapper = mapper.transform(regular_som)
    random_mapper = mapper.transform(random_som)
    data = [input_mapper, regular_mapper, random_mapper]
    # data = [input_space, regular_som, random_som]
    case = ['input_space', 'regular', 'random']
    # Compute persistence (alpha complex, homology dims 0-2) per cloud.
    per = persistence(dimension=3, max_edge_length=1, max_alpha_square=4,
                      is_alpha_simplex_on=True)
    for i, d in enumerate(data):
        per.compute_persistence(d, case='MNIST-'+case[i])
    # Reload the diagrams written above for each homology dimension.
    homology0_input = per.read_pdgm(h0_base+'input_space.dat')
    homology1_input = per.read_pdgm(h1_base+'input_space.dat')
    homology2_input = per.read_pdgm(h2_base+'input_space.dat')
    homology0_regular = per.read_pdgm(h0_base+'regular.dat')
    homology1_regular = per.read_pdgm(h1_base+'regular.dat')
    homology2_regular = per.read_pdgm(h2_base+'regular.dat')
    homology0_random = per.read_pdgm(h0_base+'random.dat')
    homology1_random = per.read_pdgm(h1_base+'random.dat')
    homology2_random = per.read_pdgm(h2_base+'random.dat')
    # Distances between each SOM's diagrams and the input-space diagrams.
    regDH0, regDH1, regDH2 = per.compute_distances(homology0_input,
                                                   homology1_input,
                                                   homology0_regular,
                                                   homology1_regular,
                                                   homology2_input,
                                                   homology2_regular)
    ranDH0, ranDH1, ranDH2 = per.compute_distances(homology0_input,
                                                   homology1_input,
                                                   homology0_random,
                                                   homology1_random,
                                                   homology2_input,
                                                   homology2_random)
    print("=" * 30)
    print("Bootstrap distance (Regular) - H0: %f, H1: %f, H2: %f"
          % (regDH0, regDH1, regDH2))
    print("Bootstrap distance (Random) - H0: %f, H1: %f, H2: %f"
          % (ranDH0, ranDH1, ranDH2))
    print("=" * 30)
    # Plot the barcode diagrams side by side and save to PDF.
    dgm_input = per.read_pdgm(bc_base+'input_space.dat')
    dgm_regular = per.read_pdgm(bc_base+'regular.dat')
    dgm_random = per.read_pdgm(bc_base+'random.dat')
    plot_diagrams(dgm_input, dgm_regular, dgm_random)
    plt.savefig("experiment-MNIST-analysis.pdf")
    plt.show()
|
import torch
from .registry import DATASETS
from .base import BaseDataset
@DATASETS.register_module
class ContrastiveDataset(BaseDataset):
    """Dataset producing two augmented views of each image for
    contrastive learning.

    Each item is a dict with key 'img' holding the two pipeline outputs
    stacked along a new leading dimension (the positive pair).
    """

    def __init__(self, data_source, pipeline):
        super(ContrastiveDataset, self).__init__(data_source, pipeline)

    def __getitem__(self, idx):
        img = self.data_source.get_sample(idx)
        # Two independent passes through the (stochastic) augmentation
        # pipeline yield the positive pair used by contrastive objectives.
        img1 = self.pipeline(img)
        img2 = self.pipeline(img)
        img_cat = torch.cat((img1.unsqueeze(0), img2.unsqueeze(0)), dim=0)
        return dict(img=img_cat)

    def evaluate(self, scores, keyword, logger=None):
        # BUG FIX: `raise NotImplemented` raises a TypeError in Python 3
        # (NotImplemented is not an exception class); use the proper one.
        raise NotImplementedError
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-30 10:45
from __future__ import unicode_literals
from django.db import migrations
def load_user_data_fields(apps, schema_editor):
    """Seed the consent.UserDataFields table with the standard fields.

    Entries with default_position 0 are available but unordered by default.
    """
    UserDataFields = apps.get_model('consent', 'UserDataFields')
    seed_fields = [
        ('Roll Number', 'roll_number', 1),
        ('Name', 'name', 2),
        ('Date of Birth', 'date_of_birth', 3),
        ('Caste Category', 'caste_category', 4),
        ('Hometown', 'hometown', 5),
        ('Aggregate SSC %', 'ssc', 6),
        ('Aggregate HSC %', 'hsc', 7),
        ('CGPA Upto Semester', 'cgpa_upto_semester', 8),
        ('Email ID', 'email', 9),
        ('Phone Number', 'phone_number', 10),
        ('SSC Passing Year', 'ssc_passing_year', 0),
        ('HSC Passing Year', 'hsc_passing_year', 0),
        ('Entrance Exam', 'entrance_exam', 0),
        ('Entrance Exam Score', 'entrance_exam_score', 0),
        ('Branch', 'branch', 0),
        ('Current Backlogs', 'current_backlogs', 0),
        ('Total Backlogs', 'total_backlogs', 0),
        ('Current Address', 'current_address', 0),
        ('Permanent Address', 'permanent_address', 0),
        ('Current City', 'current_city', 0),
        ('Permanent City', 'permanent_city', 0),
        ('Current State', 'current_state', 0),
        ('Permanent State', 'permanent_state', 0),
        ('Current Address Pincode', 'current_pincode', 0),
        ('Permanent Address Pincode', 'permanent_pincode', 0),
        ('CGPA Of Semester', 'cgpa_of_semester', 0),
    ]
    for field_name, field_slug, position in seed_fields:
        UserDataFields(name=field_name, slug=field_slug,
                       default_position=position).save()
class Migration(migrations.Migration):
    """Data migration: populate UserDataFields via load_user_data_fields."""

    dependencies = [
        ('consent', '0019_auto_20170520_1850'),
    ]

    operations = [
        # Forward-only data load; no reverse function is provided.
        migrations.RunPython(load_user_data_fields),
    ]
|
import torch
from nndct_shared.nndct_graph import Tensor
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import process_inputs_and_params
from nndct_shared.quantization import post_quant_process
import pytorch_nndct.utils as py_utils
from pytorch_nndct.utils import TorchOpClassType
__all__ = ['Module']
def creat_module(torch_op_type, torch_op_attr, *args, **kwargs):
    """Build a quantization-aware wrapper module for a torch operation.

    Depending on how torch exposes the op, one of three wrapper classes is
    instantiated:
      * a ``torch.nn`` module        -> subclass of that module,
      * a ``torch.nn.functional`` /
        ``torch``-level function     -> nn.Module wrapping the callable,
      * a ``torch.Tensor`` method    -> nn.Module dispatching on the input.

    Each wrapper quantizes its inputs via ``process_inputs_and_params``
    before running the real op and, when ``need_quant_output`` is set,
    quantizes the result via ``post_quant_process``.

    Note: implicitly returns None when the op name cannot be resolved
    inside the matched category.
    """
    if torch_op_attr.op_class_type == TorchOpClassType.NN_MODULE:
        # create module for module
        module_cls = getattr(torch.nn, torch_op_type, None)
        if module_cls:

            class deephi_Module(module_cls):
                r"""quantizable operation"""

                def __init__(self, *args, **kwargs):
                    super().__init__(*args, **kwargs)
                    self.valid_inputs = None
                    self.valid_output = None
                    self.node = None
                    self.quant_mode, self.quantizer = maybe_get_quantizer()
                    self.need_quant_output = True

                def extra_repr(self):
                    return f"'{module_cls.__name__}'"

                def forward(self, *inputs, **kwargs):
                    if self.quantizer and self.quantizer.configer.is_node_quantizable(self.node, lstm=False):
                        inputs, _ = process_inputs_and_params(
                            self.node,
                            self.quant_mode,
                            self.quantizer,
                            inputs=list(inputs),
                            valid_inputs=self.valid_inputs)
                        output = super().forward(*inputs, **kwargs)
                        if (self.need_quant_output):
                            [output] = post_quant_process(self.node, self.valid_output, [output],
                                                          [output, output])
                    else:
                        output = super().forward(*inputs, **kwargs)
                    return output

            return deephi_Module(*args, **kwargs)
    elif torch_op_attr.op_class_type in [TorchOpClassType.NN_FUNCTION, TorchOpClassType.TORCH_FUNCTION]:
        # create module for function; torch.nn.functional takes priority.
        if getattr(torch.nn.functional, torch_op_type, None):
            caller = getattr(torch.nn.functional, torch_op_type)
        else:
            caller = getattr(torch, torch_op_type, None)
        if caller:

            class deephi_Func_Module(torch.nn.Module):
                r"""quantizable operation"""

                def __init__(self, caller, *args, **kwargs):
                    super().__init__()
                    self.valid_inputs = None
                    self.valid_output = None
                    self.node = None
                    self.quant_mode, self.quantizer = maybe_get_quantizer()
                    self.need_quant_output = True
                    self.caller = caller
                    # Names of the kwargs that carry this node's input
                    # tensors, discovered lazily on the first forward call.
                    self._match_inputs = []

                def extra_repr(self):
                    return f"'{caller.__name__}'"

                def forward(self, *args, **kwargs):
                    if len(self._match_inputs) == 0:

                        def _check_kwargs(value):
                            # True when `value` (or any nested element) is one
                            # of this node's input tensors.
                            if isinstance(value, Tensor) and value in self.node.in_tensors:
                                return True
                            elif isinstance(value, (tuple, list)):
                                check_result = [_check_kwargs(i) for i in value]
                                return any(check_result)
                            # Explicit for all other value types (the
                            # original fell off the end returning None).
                            return False

                        for key in kwargs.keys():
                            if _check_kwargs(self.node.node_config(key)):
                                self._match_inputs.append(key)
                    # Quantize the matched keyword inputs in place.
                    for key in self._match_inputs:
                        if isinstance(kwargs[key], (tuple, list)):
                            inputs = kwargs[key]
                        else:
                            inputs = [kwargs[key]]
                        if self.quantizer and self.quantizer.configer.is_node_quantizable(self.node, lstm=False):
                            # BUG FIX: the quantized result was assigned to a
                            # misspelled `inptus` and silently discarded, so
                            # the un-quantized inputs were always used.
                            inputs, _ = process_inputs_and_params(
                                self.node,
                                self.quant_mode,
                                self.quantizer,
                                inputs=inputs,
                                valid_inputs=self.valid_inputs)
                        if isinstance(kwargs[key], (tuple, list)):
                            kwargs[key] = inputs
                        else:
                            kwargs[key] = inputs[0]
                    output = caller(*args, **kwargs)
                    if (self.need_quant_output):
                        [output] = post_quant_process(self.node, self.valid_output, [output],
                                                      [output, output])
                    return output

            return deephi_Func_Module(caller, *args, **kwargs)
    elif torch_op_attr.op_class_type == TorchOpClassType.TENSOR:
        # create module for a Tensor method (dispatch on the first input).
        if getattr(torch.Tensor, torch_op_type, None):

            class deephi_Tensor_Module(torch.nn.Module):
                r"""quantizable operation"""

                def __init__(self, op_type, *args, **kwargs):
                    super().__init__()
                    self.valid_inputs = None
                    self.valid_output = None
                    self.node = None
                    self.quant_mode, self.quantizer = maybe_get_quantizer()
                    self.need_quant_output = True
                    self.op_type = op_type

                def extra_repr(self):
                    return f"'{self.op_type}'"

                def forward(self, input, *args, **kwargs):
                    if self.quantizer and self.quantizer.configer.is_node_quantizable(self.node, lstm=False):
                        [input], _ = process_inputs_and_params(
                            self.node,
                            self.quant_mode,
                            self.quantizer,
                            inputs=[input],
                            valid_inputs=self.valid_inputs)
                        output = getattr(input, self.op_type, None)(*args, **kwargs)
                        if (self.need_quant_output):
                            [output] = post_quant_process(self.node, self.valid_output, [output],
                                                          [output, output])
                    else:
                        output = getattr(input, self.op_type, None)(*args, **kwargs)
                    return output

            return deephi_Tensor_Module(torch_op_type, *args, **kwargs)
    else:
        # BUG FIX: the message was not an f-string, so the placeholder was
        # never interpolated (and "Unkown" was misspelled).
        raise RuntimeError(f"Unknown op type:{torch_op_type}")
def Module(nndct_type, *args, **kwargs):
    """Factory: map an nndct op type to a quantizable torch module.

    Resolves the torch-level op name and attributes for `nndct_type`,
    then delegates wrapper construction to creat_module().
    """
    # NOTE(review): quant_mode is unused here; maybe_get_quantizer() may be
    # called for side effects -- confirm before removing this line.
    quant_mode, _ = maybe_get_quantizer()
    torch_op_type = py_utils.get_torch_op_type(nndct_type)
    torch_op_attr = py_utils.get_torch_op_attr(torch_op_type)
    return creat_module(torch_op_type, torch_op_attr, *args, **kwargs)
|
# DOCUMENTATION
# IMPORTS
# FUNCTIONS
def find_names(contacts_dictionary, number):
    """Return a space-separated string of every name mapped to `number`."""
    matches = [contact for contact, phone in contacts_dictionary.items()
               if phone == number]
    return " ".join(matches)
def print_contacts(contacts_dictionary):
    """Print contacts alphabetically, numbers formatted as 'xxx-xxxx'."""
    for name in sorted(contacts_dictionary):
        digits = str(contacts_dictionary[name])
        formatted = "{}-{}".format(digits[:3], digits[3:])
        print("%-10s %s" % (name, formatted))
# main
def main():
    """Demonstrate the contact helpers on a small phone book."""
    contacts = {"Fred": 7235591, "Mary": 3841212, "Bob": 3841212,
                "Sarah": 2213278}
    if "Mary" in contacts:
        print("Mary:", contacts["Mary"])
    print(find_names(contacts, 3841212))
    print("All contacts:")
    print_contacts(contacts)
# PROGRAM RUN
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
"""
kaftools.sparsifiers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides sparsifier classes for Sparsification criteria.
Currently supports:
- Novelty criterion
- Approximate Linear Dependency (ALD)
Not all filters support all sparsifiers. Be sure to check the info sheet
for detailed comparisons.
"""
import numpy as np
from kaftools.filters import KrlsFilter
class SparsifyMethod(object):
    """Base class for sparsification criteria."""
    def __init__(self, params=None):
        # Keep ndarrays as-is; anything else is flattened into a 1-D array.
        if type(params) is np.ndarray:
            self.params = params
        else:
            self.params = np.array(params).ravel()
    def apply(self, kfilter):
        """Apply this sparsifier to a kernel filter (no-op in the base class).

        :param kfilter: filter.Filter object
        :return: None. The object itself is modified during iterations.
        """
        pass
class NoveltyCriterion(SparsifyMethod):
    """Novelty criterion for KLMS filters. Hasn't been thoroughly tested on KRLS."""
    def __init__(self, distance_delta, error_delta):
        # params[0]: distance threshold, params[1]: error threshold.
        super().__init__([distance_delta, error_delta])
    def apply(self, kfilter):
        far_enough = np.max(kfilter.similarity) <= self.params[0]
        error_large = np.abs(kfilter.error) >= self.params[1]
        if far_enough and error_large:
            # Novel sample: admit it to the dictionary with a zero coefficient.
            kfilter.support_vectors = np.append(
                kfilter.support_vectors, [kfilter.regressor], axis=0)
            kfilter.coefficients = np.append(kfilter.coefficients, [0.0])
class ApproximateLinearDependency(SparsifyMethod):
    """Approximate Linear Dependency (ALD) criterion for KRLS filters.

    Grows the support dictionary only when the new regressor cannot be
    well approximated from the current support vectors.
    """
    def __init__(self, threshold):
        # Single parameter: the ALD distance threshold.
        super().__init__([threshold])
    def apply(self, kfilter):
        # ALD is tied to KRLS's recursive inverse-kernel update, so reject
        # any other filter type up front.
        if type(kfilter) is not KrlsFilter:
            raise Exception("ALD is only implemented for KRLS filters.")
        else:
            # Kernel of the new sample with itself and of the dictionary with itself.
            kernel_regressor = kfilter.kernel(kfilter.regressor, kfilter.regressor)
            kernel_support_vectors = kfilter.kernel(kfilter.support_vectors, kfilter.support_vectors)
            # ALD distance of the new sample from the current dictionary.
            # NOTE(review): element-wise division here — assumes kfilter.h /
            # kernel_support_vectors shapes broadcast as intended; confirm
            # against KrlsFilter's update equations.
            distance = kernel_regressor - kfilter.h ** 2 / kernel_support_vectors
            if np.min(distance) > self.params[0]:
                # Novel sample: extend the inverse-kernel matrix q by one
                # bordered row/column, then admit the sample.
                q_row = np.asarray(-kfilter.z).reshape(-1, 1).T
                q_col = np.asarray(-kfilter.z).reshape(-1, 1)
                q_end = np.array([1]).reshape(-1, 1)
                kfilter.q = kfilter.q * kfilter.r + np.outer(kfilter.z, kfilter.z.T)
                kfilter.q = np.append(kfilter.q, q_row, axis=0)
                kfilter.q = np.append(kfilter.q, np.concatenate((q_col, q_end), axis=0), axis=1)
                kfilter.q *= kfilter.r ** (-1)
                kfilter.coefficients = np.append(kfilter.coefficients, kfilter.r**(-1) * kfilter.error)
                kfilter.support_vectors = np.vstack((kfilter.support_vectors, kfilter.regressor.reshape(1, -1)))
            else:
                # Not novel: refresh q in place; dictionary size unchanged.
                kfilter.q = kfilter.q * kfilter.r + np.outer(kfilter.z, kfilter.z.T)
                kfilter.q *= kfilter.r ** (-1)
|
"""
find options of deribit
"""
from archon.exchange.deribit.Wrapper import DeribitWrapper
import archon.config as config
import archon.broker as broker
import archon.exchange.exchanges as exc
from datetime import datetime
# Broker wired to the Deribit exchange; API keys are loaded from file.
abroker = broker.Broker(setAuto=False)
abroker.set_keys_exchange_file(exchanges=[exc.DERIBIT])
# Low-level Deribit client used by the queries below.
client = abroker.afacade.get_client(exc.DERIBIT)
def instr():
    """Fetch all Deribit instruments and print those of kind 'option'.

    Bug fix: the original printed every instrument whose kind was NOT
    'option', contradicting the module's stated purpose ("find options"),
    and built a list it never used.
    """
    for x in client.getinstruments():
        if x['kind'] == 'option':
            print(x)
if __name__=='__main__':
    instr()
|
''' Tensor initializers
The functions presented here are just tiny wrappers around
`numpy` functions, in order to make them compatible with nujo.
'''
from nujo.init.basic import *
from nujo.init.random import *
|
from abc import ABC, abstractmethod
import logging
import yaml
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
class LimitsReader(ABC):
    """Abstract interface for reading stock-screening limit values.

    Concrete implementations (e.g. ``LimitsYamlReader``) decide where the
    limit values come from.
    """
    @abstractmethod
    def get_p_e_max_limit(self):
        """Get P/E max limit."""
    @abstractmethod
    def get_roe_min_limit(self):
        """Get ROE min limit."""
    @abstractmethod
    def get_dividend_years(self):
        """Get dividend years."""
    @abstractmethod
    def get_p_bv_max_limit(self):
        """Get P/BV max limit."""
    @abstractmethod
    def get_p_pv_g_max_limit(self):
        """Get P/BV Graham max limit."""
    @abstractmethod
    def get_allowed_altman_ratings(self):
        """Get allowed altman ratings."""
    @abstractmethod
    def get_f_score_min_limit(self):
        """Get Piotroski F-Score max limit."""
class LimitsYamlReader(LimitsReader):
    """Reads screening limits from the ``limits`` section of a YAML file.

    The file is parsed once in ``__init__``; the getters below only do
    dictionary lookups. Improvement: all ``logger.info`` calls now use
    lazy %-style arguments instead of eager string interpolation, so
    messages are only formatted when INFO logging is enabled (output is
    byte-identical).
    """
    def __init__(self, path):
        # path: filesystem location of the YAML limits file.
        self._source_path = path
        self._limits = {}
        self._read()
    def _read(self):
        # Parse the YAML file and cache its 'limits' mapping.
        with open(self._source_path) as f:
            parsed_f = yaml.load(f, Loader=yaml.FullLoader)
            self._limits = parsed_f['limits']
    def get_p_e_max_limit(self):
        """Get P/E max limit."""
        p_e_max = float(self._limits['p_e_max_limit'])
        logger.info('Using P/E max limit: %s', p_e_max)
        return p_e_max
    def get_roe_min_limit(self):
        """Get ROE min limit."""
        roe_min = float(self._limits['roe_min_limit'])
        logger.info('Using ROE min limit: %s', roe_min)
        return roe_min
    def get_dividend_years(self):
        """Get dividend years."""
        dividend_yrs = self._limits['dividend_years']
        logger.info('Using dividend years: %s', dividend_yrs)
        return dividend_yrs
    def get_p_bv_max_limit(self):
        """Get P/BV max limit."""
        p_bv_max = float(self._limits['p_bv_max_limit'])
        logger.info('Using P/BV max limit: %s', p_bv_max)
        return p_bv_max
    def get_p_pv_g_max_limit(self):
        """Get P/BV Graham max limit."""
        p_bv_g_max = float(self._limits['p_bv_g_max_limit'])
        logger.info('Using P/BV Graham max limit: %s', p_bv_g_max)
        return p_bv_g_max
    def get_allowed_altman_ratings(self):
        """Get allowed altman ratings."""
        altmans = self._limits['ratings']
        logger.info('Using allowed Altman Rating values %s', altmans)
        return altmans
    def get_f_score_min_limit(self):
        """Get Piotroski F-Score min limit."""
        f_score_min = float(self._limits['f_score'])
        logger.info('Using Piotroski F-Score min limit: %s', f_score_min)
        return f_score_min
|
check_solution("console_hello.exe", f"int: {a_random_int}")
|
"""
This file defines a series of constants that represent the values used in
the API's "helper" tables.
Rather than define the values in the db setup scripts and then make db calls to
lookup the surrogate keys, we'll define everything here, in a file that can be
used by the db setup scripts *and* the application code.
"""
from collections import namedtuple, OrderedDict
from usaspending_api.accounts.models import AppropriationAccountBalances
from usaspending_api.accounts.v2.filters.account_download import account_download_filter
from usaspending_api.awards.models import Award, TransactionNormalized
from usaspending_api.awards.models import FinancialAccountsByAwards
from usaspending_api.download.helpers.elasticsearch_download_functions import (
AwardsElasticsearchDownload,
TransactionsElasticsearchDownload,
)
from usaspending_api.download.helpers.disaster_filter_functions import disaster_filter_function
from usaspending_api.search.models import AwardSearchView, TransactionSearch, SubawardView
from usaspending_api.awards.v2.filters.idv_filters import (
idv_order_filter,
idv_transaction_filter,
idv_treasury_account_funding_filter,
)
from usaspending_api.awards.v2.filters.award_filters import (
awards_transaction_filter,
awards_subaward_filter,
awards_treasury_account_funding_filter,
)
from usaspending_api.awards.v2.filters.search import (
universal_award_matview_filter,
transaction_search_filter,
)
from usaspending_api.awards.v2.filters.sub_award import subaward_download
from usaspending_api.financial_activities.models import FinancialAccountsByProgramActivityObjectClass
from usaspending_api.download.helpers.download_annotation_functions import (
transaction_search_annotations,
universal_award_matview_annotations,
subaward_annotations,
idv_order_annotations,
idv_transaction_annotations,
)
# Row descriptor for the helper/lookup tables defined in this module.
LookupType = namedtuple("LookupType", ["id", "name", "desc"])
# Lifecycle states for download jobs; ids are the surrogate keys used by
# the db setup scripts and application code alike.
JOB_STATUS = [
    LookupType(1, "ready", "job is ready to be run"),
    LookupType(2, "running", "job is currently in progress"),
    LookupType(3, "finished", "job is complete"),
    LookupType(4, "failed", "job failed to complete"),
    LookupType(5, "queued", "job sent to queue for async processing"),
    LookupType(6, "resumed", "job is being reprocessed after a failure"),
    LookupType(7, "created", "job product has been created and stored locally"),
    LookupType(8, "uploading", "job is being uploaded to public storage"),
]
# Convenience map from status name to its surrogate id.
JOB_STATUS_DICT = {item.name: item.id for item in JOB_STATUS}
# Maps each download "type" key to everything needed to produce it:
#   source_type / table / table_name — which model backs the download
#   type_name / download_name / zipfile_template — output naming templates
#   contract_data / assistance_data — ORM path prefixes to detail records
#   filter_function / annotations_function — queryset filter and column
#       annotations applied for that download flavor
#   is_for_idv / is_for_contract / is_for_assistance — award-category flags
VALUE_MAPPINGS = {
    # Award Level
    "awards": {
        "source_type": "award",
        "table": AwardSearchView,
        "table_name": "award",
        "type_name": "PrimeAwardSummaries",
        "download_name": "{agency}{type}_PrimeAwardSummaries_{timestamp}",
        "contract_data": "award__latest_transaction__contract_data",
        "assistance_data": "award__latest_transaction__assistance_data",
        "filter_function": universal_award_matview_filter,
        "annotations_function": universal_award_matview_annotations,
    },
    # Elasticsearch Award Level
    "elasticsearch_awards": {
        "source_type": "award",
        "table": AwardSearchView,
        "table_name": "award",
        "type_name": "PrimeAwardSummaries",
        "download_name": "{agency}{type}_PrimeAwardSummaries_{timestamp}",
        "contract_data": "award__latest_transaction__contract_data",
        "assistance_data": "award__latest_transaction__assistance_data",
        "filter_function": AwardsElasticsearchDownload.query,
        "annotations_function": universal_award_matview_annotations,
    },
    # Transaction Level
    "transactions": {
        "source_type": "award",
        "table": TransactionSearch,
        "table_name": "transaction",
        "type_name": "PrimeTransactions",
        "download_name": "{agency}{type}_PrimeTransactions_{timestamp}",
        "contract_data": "transaction__contract_data",
        "assistance_data": "transaction__assistance_data",
        "filter_function": transaction_search_filter,
        "annotations_function": transaction_search_annotations,
    },
    # Elasticsearch Transaction Level
    "elasticsearch_transactions": {
        "source_type": "award",
        "table": TransactionSearch,
        "table_name": "transaction",
        "type_name": "PrimeTransactions",
        "download_name": "{agency}{type}_PrimeTransactions_{timestamp}",
        "contract_data": "transaction__contract_data",
        "assistance_data": "transaction__assistance_data",
        "filter_function": TransactionsElasticsearchDownload.query,
        "annotations_function": transaction_search_annotations,
    },
    # SubAward Level
    "sub_awards": {
        "source_type": "award",
        "table": SubawardView,
        "table_name": "subaward",
        "type_name": "Subawards",
        "download_name": "{agency}{type}_Subawards_{timestamp}",
        "contract_data": "award__latest_transaction__contract_data",
        "assistance_data": "award__latest_transaction__assistance_data",
        "filter_function": subaward_download,
        "annotations_function": subaward_annotations,
    },
    # Appropriations Account Data
    "account_balances": {
        "source_type": "account",
        "table": AppropriationAccountBalances,
        "table_name": "account_balances",
        "download_name": "{data_quarters}_{agency}_{level}_AccountBalances_{timestamp}",
        "zipfile_template": "{data_quarters}_{agency}_{level}_AccountBalances_{timestamp}",
        "filter_function": account_download_filter,
    },
    # Object Class Program Activity Account Data
    "object_class_program_activity": {
        "source_type": "account",
        "table": FinancialAccountsByProgramActivityObjectClass,
        "table_name": "object_class_program_activity",
        "download_name": "{data_quarters}_{agency}_{level}_AccountBreakdownByPA-OC_{timestamp}",
        "zipfile_template": "{data_quarters}_{agency}_{level}_AccountBreakdownByPA-OC_{timestamp}",
        "filter_function": account_download_filter,
    },
    "award_financial": {
        "source_type": "account",
        "table": FinancialAccountsByAwards,
        "table_name": "award_financial",
        "download_name": "{data_quarters}_{agency}_{level}_AccountBreakdownByAward_{timestamp}",
        "zipfile_template": "{data_quarters}_{agency}_{level}_AccountBreakdownByAward_{timestamp}",
        "filter_function": account_download_filter,
    },
    # IDV (indefinite-delivery vehicle) downloads
    "idv_orders": {
        "source_type": "award",
        "table": Award,
        "table_name": "idv_orders",
        "download_name": "IDV_{piid}_Orders",
        "contract_data": "latest_transaction__contract_data",
        "filter_function": idv_order_filter,
        "is_for_idv": True,
        "annotations_function": idv_order_annotations,
    },
    "idv_federal_account_funding": {
        "source_type": "account",
        "table": FinancialAccountsByAwards,
        "table_name": "award_financial",
        "download_name": "IDV_{piid}_FederalAccountFunding",
        "filter_function": idv_treasury_account_funding_filter,
        "is_for_idv": True,
    },
    "idv_transaction_history": {
        "source_type": "award",
        "table": TransactionNormalized,
        "table_name": "idv_transaction_history",
        "download_name": "IDV_{piid}_TransactionHistory",
        "contract_data": "contract_data",
        "filter_function": idv_transaction_filter,
        "is_for_idv": True,
        "annotations_function": idv_transaction_annotations,
    },
    # Single-award (contract / assistance) downloads
    "contract_federal_account_funding": {
        "source_type": "account",
        "table": FinancialAccountsByAwards,
        "table_name": "award_financial",
        "download_name": "Contract_{piid}_FederalAccountFunding",
        "filter_function": awards_treasury_account_funding_filter,
        "is_for_contract": True,
    },
    "assistance_federal_account_funding": {
        "source_type": "account",
        "table": FinancialAccountsByAwards,
        "table_name": "award_financial",
        "download_name": "Assistance_{assistance_id}_FederalAccountFunding",
        "filter_function": awards_treasury_account_funding_filter,
        "is_for_assistance": True,
    },
    "sub_contracts": {
        "source_type": "award",
        "table": SubawardView,
        "table_name": "subaward",
        "download_name": "Contract_{piid}_Sub-Awards",
        "contract_data": "award__latest_transaction__contract_data",
        "filter_function": awards_subaward_filter,
        "is_for_contract": True,
        "annotations_function": subaward_annotations,
    },
    "sub_grants": {
        "source_type": "award",
        "table": SubawardView,
        "table_name": "subaward",
        "download_name": "Assistance_{assistance_id}_Sub-Awards",
        "assistance_data": "award__latest_transaction__assistance_data",
        "filter_function": awards_subaward_filter,
        "is_for_assistance": True,
        "annotations_function": subaward_annotations,
    },
    "contract_transactions": {
        "source_type": "award",
        "table": TransactionNormalized,
        "table_name": "idv_transaction_history",
        "download_name": "Contract_{piid}_TransactionHistory",
        "contract_data": "contract_data",
        "filter_function": awards_transaction_filter,
        "is_for_contract": True,
        "annotations_function": idv_transaction_annotations,
    },
    "assistance_transactions": {
        "source_type": "award",
        "table": TransactionNormalized,
        "table_name": "assistance_transaction_history",
        "download_name": "Assistance_{assistance_id}_TransactionHistory",
        "assistance_data": "assistance_data",
        "filter_function": awards_transaction_filter,
        "is_for_assistance": True,
        "annotations_function": idv_transaction_annotations,
    },
    "disaster_recipient": {
        "source_type": "disaster",
        "table": AwardSearchView,
        "table_name": "recipient",
        "download_name": "COVID-19_Recipients_{award_category}_{timestamp}",
        "filter_function": disaster_filter_function,
        "base_fields": ["recipient_name", "recipient_unique_id"],
    },
}
# Bulk Download still uses "prime awards" instead of "transactions"
VALUE_MAPPINGS["prime_awards"] = VALUE_MAPPINGS["transactions"]
# List of CFO CGACS for list agencies viewset in the correct order, names included for reference
# TODO: Find a solution that marks the CFO agencies in the database AND have the correct order
# Bug fix: Department of Labor's code was "1601"; CGAC codes are 3 digits and
# Labor's is "016".
CFO_CGACS_MAPPING = OrderedDict(
    [
        ("012", "Department of Agriculture"),
        ("013", "Department of Commerce"),
        ("097", "Department of Defense"),
        ("091", "Department of Education"),
        ("089", "Department of Energy"),
        ("075", "Department of Health and Human Services"),
        ("070", "Department of Homeland Security"),
        ("086", "Department of Housing and Urban Development"),
        ("015", "Department of Justice"),
        ("016", "Department of Labor"),
        ("019", "Department of State"),
        ("014", "Department of the Interior"),
        ("020", "Department of the Treasury"),
        ("069", "Department of Transportation"),
        ("036", "Department of Veterans Affairs"),
        ("068", "Environmental Protection Agency"),
        ("047", "General Services Administration"),
        ("080", "National Aeronautics and Space Administration"),
        ("049", "National Science Foundation"),
        ("031", "Nuclear Regulatory Commission"),
        ("024", "Office of Personnel Management"),
        ("073", "Small Business Administration"),
        ("028", "Social Security Administration"),
        ("072", "Agency for International Development"),
    ]
)
CFO_CGACS = list(CFO_CGACS_MAPPING.keys())
# Per-format export settings; "options" is the clause appended to the
# Postgres COPY command used when writing the file.
FILE_FORMATS = {
    "csv": {"delimiter": ",", "extension": "csv", "options": "WITH CSV HEADER"},
    "tsv": {"delimiter": "\t", "extension": "tsv", "options": r"WITH CSV DELIMITER E'\t' HEADER"},
    "pstxt": {"delimiter": "|", "extension": "txt", "options": "WITH CSV DELIMITER '|' HEADER"},
}
# Account download submission types accepted by the API.
VALID_ACCOUNT_SUBMISSION_TYPES = ("account_balances", "object_class_program_activity", "award_financial")
|
import random
import tempfile
from machi import MachiStore
def test_smoke():
    """End-to-end append/get/trim exercise against a temporary store."""
    testdir = tempfile.TemporaryDirectory()
    store = MachiStore(maxlen=37, temp=True)
    try:
        # Single-value round trip; trimming makes the key unreadable.
        first = store.append(b"1")
        assert b"1" == store.get(*first)
        store.trim(*first)
        assert store.get(*first) is None
        # Bulk round trip, writing and reading in random order.
        total = 683
        written = {}
        for value in random.sample(range(total), total):
            written[value] = store.append(str(value).encode())
        for value in random.sample(range(total), total):
            assert str(value).encode() == store.get(*written[value])
        for stored_key in store.keys():
            assert isinstance(stored_key, tuple)
        # Trim everything, then verify nothing is readable any more.
        for value in random.sample(range(total), total):
            store.trim(*written[value])
        for value in random.sample(range(total), total):
            assert store.get(*written[value]) is None
    finally:
        store.close()
        testdir.cleanup()
def test_persistence():
    """Data written by one store instance is visible after reopening."""
    with tempfile.TemporaryDirectory() as testdir:
        store = MachiStore(maxlen=29, temp=False, dir=testdir)
        try:
            for stored_key in store.keys():
                assert isinstance(stored_key, tuple)
            written = store.append(b"1")
            assert b"1" == store.get(*written)
        finally:
            store.close()
        # Reopen the same directory: the single record must still be there.
        store = MachiStore(maxlen=29, temp=False, dir=testdir)
        try:
            for stored_key in store.keys():
                assert isinstance(stored_key, tuple)
            remaining = list(store.keys())
            assert len(remaining) == 1
            assert b"1" == store.get(*remaining[0])
        finally:
            store.close()
import os
def test_file_deletion():
    """Trimming every key empties the store, and it stays empty on reopen."""
    with tempfile.TemporaryDirectory() as testdir:
        written = []
        with MachiStore(maxlen=29, temp=False, dir=testdir) as store:
            new_key = store.append(b"1")
            written.append(new_key)
            assert b"1" == store.get(*new_key)
        with MachiStore(maxlen=29, temp=False, dir=testdir) as store:
            for trimmed in written:
                store.trim(*trimmed)
            assert not list(store.keys())
        with MachiStore(maxlen=29, temp=False, dir=testdir) as store:
            assert not list(store.keys())
def test_file_deletion2():
    """Exercise trimming across multiple segment files.

    NOTE(review): the exact file names ('1.machi'/'1.machd') and the
    expected file count (6) depend on MachiStore's internal segment
    layout at maxlen=29 — confirm against the store implementation.
    """
    with tempfile.TemporaryDirectory() as testdir:
        keys = []
        with MachiStore(maxlen=29, temp=False, dir=testdir) as machi:
            key = machi.append(b"1")
            keys.append(key)
            assert b"1" == machi.get(*key)
        # Truncate the first segment's files to simulate an emptied segment.
        with open(os.path.join(testdir, '1.machi'), 'wb') as fp:
            pass
        with open(os.path.join(testdir, '1.machd'), 'wb') as fp:
            pass
        for f in os.scandir(testdir):
            print(f.name, f.stat().st_size)
        with MachiStore(maxlen=29, temp=False, dir=testdir) as machi:
            # Fill several segments, verifying each write reads back.
            for i in range(30):
                key = machi.append(str(i).encode())
                keys.append(key)
                assert str(i).encode() == machi.get(*key)
            assert 6 == len(os.listdir(testdir))
            for key in keys:
                machi.trim(*key)
            assert 0 == len(list(machi.keys()))
        with MachiStore(maxlen=29, temp=False, dir=testdir) as machi:
            assert 0 == len(list(machi.keys()))
|
class Agent:
    """A real-estate agent holding a list of property listings."""
    def __init__(self):
        # Listings managed by this agent; each item must expose display().
        self.property_list = []
    def display_properties(self):
        """Display every property in this agent's list.

        Bug fix: the method was declared without ``self``, so every call
        raised TypeError and the body's ``self`` reference was unbound.
        """
        for listing in self.property_list:
            listing.display()
|
from flask_restful import Resource, reqparse
from flask_jwt_extended import (jwt_optional, get_jwt_identity,
fresh_jwt_required, jwt_required,
get_jwt_claims)
from src.user import User
from src.db import db
from typing import List
# Many-to-many association table linking users to projects; the unique
# constraint prevents allocating the same user to a project twice.
proj_allocation = db.Table('proj_allocation',
                           db.Column('user_id', db.Integer,
                                     db.ForeignKey('users.id')),
                           db.Column('project_id', db.Integer,
                                     db.ForeignKey('projects.id')),
                           db.UniqueConstraint('user_id', 'project_id',
                                               name='UC_UID_PID')
                           )
class Project(db.Model):
    """SQLAlchemy model for a project with owner, members and tasks."""
    __tablename__ = 'projects'
    id = db.Column(db.Integer, primary_key=True)
    project_name = db.Column(db.String(80))
    project_desc = db.Column(db.String(80))
    owner_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    task = db.relationship("Task", backref="project", lazy='dynamic')
    members = db.relationship("User", secondary=proj_allocation,
                              backref=db.backref('curr_projects',
                                                 lazy='dynamic')
                              )
    def __init__(self, id: int, project_name: str,
                 project_desc: str, owner: int, **kwargs):
        self.id = id
        self.project_name = project_name
        self.project_desc = project_desc
        self.owner_id = owner
    @classmethod
    def find_by_project_id(cls, project_id):
        """Return the project with the given primary key, or None."""
        return cls.query.filter_by(id=project_id).first()
    @classmethod
    def find_by_project_name(cls, project_name):
        """Return the project with the given name, or None."""
        return cls.query.filter_by(project_name=project_name).first()
    def has_member(self, user):
        """Return True if *user* is allocated to this project."""
        # Simplified from an if/return True/return False chain.
        return user in self.members
    def create_project(self):
        """Persist this project (insert or update) and commit."""
        db.session.add(self)
        db.session.commit()
    def delete_project(self):
        """Delete this project and commit."""
        db.session.delete(self)
        db.session.commit()
    def json(self):
        """Return a JSON-serializable summary of this project."""
        return {'id': self.id,
                'project_name': self.project_name,
                'project_desc': self.project_desc,
                'owner': self.owner.basicDetails()['username'],
                # 'members': [usr.json() for usr in self.members],
                # 'task': [tsk.json() for tsk in self.task.all()]
                }
    def editMembers(self, members: List):
        """Replace the member list with *members* (list of {'username': ...}).

        Unknown usernames are silently skipped; the owner is always kept
        so they cannot lose access to their own project.
        """
        original_users = self.members
        updated_users = []
        if not members:
            return
        for mem in members:
            uname = mem['username']
            mem = User.find_by_username(uname)
            if mem:
                updated_users.append(mem)
        if self.owner not in updated_users:
            updated_users.append(self.owner)
        # Diff the old and new member sets, then apply the changes.
        deleted_members = set(original_users) - set(updated_users)
        new_members = set(updated_users) - set(original_users)
        for member in deleted_members:
            self.members.remove(member)
        for member in new_members:
            self.members.append(member)
class ProjectRes(Resource):
    """REST resource for listing, creating, updating and deleting projects."""
    parser = reqparse.RequestParser()
    parser.add_argument('project_name', type=str, required=True,
                        help='Project Name Required')
    parser.add_argument('project_desc', type=str, required=True,
                        help='Project Description Required')
    parser.add_argument('project_members', type=dict, required=False,
                        action="append", help='Project Members are Required')
    @jwt_optional
    def get(self):
        """Return the projects the logged-in user participates in."""
        user = get_jwt_identity()
        projects = []
        resp = {}
        for project in User.find_by_id(user).curr_projects:
            projects.append(project.json())
        resp['Projects'] = projects
        return resp, 200
    @jwt_required
    def post(self):
        """Create a new project owned by the logged-in user.

        Usernames that cannot be resolved are reported in the response's
        'err' list instead of failing the request.
        """
        user = get_jwt_identity()
        data = ProjectRes.parser.parse_args()
        # Removed a leftover debug print of the raw member payload.
        if Project.find_by_project_name(data['project_name']):
            return {'msg': 'Project already exists'}, 400
        proj = Project(id=None, **data, owner=user)
        proj.members.append(User.find_by_id(user))
        err = []
        resp = {'msg': 'Project created successfully', 'err': err}
        if data['project_members']:
            for member in data['project_members']:
                mem = User.find_by_username(member['username'])
                if mem:
                    proj.members.append(mem)
                else:
                    err.append(member['username'])
        proj.create_project()
        return resp, 201
    @fresh_jwt_required
    def put(self):
        """Update the description and members of a project the caller owns."""
        logged_in_user_id = get_jwt_identity()
        logged_in_user = User.find_by_id(logged_in_user_id)
        parser = reqparse.RequestParser()
        parser.add_argument('id', type=str, required=True,
                            help='Project ID Required')
        parser.add_argument('project_desc', type=str, required=True,
                            help='Project Description Required')
        parser.add_argument('project_members', type=dict, required=False,
                            action="append", help='Project Members are Required')
        data = parser.parse_args()
        project = Project.find_by_project_id(data['id'])
        if project:
            if logged_in_user is not project.owner:
                return {'msg': 'You can not update this project'}, 403
            project.editMembers(data['project_members'])
            project.project_desc = data['project_desc']
            project.create_project()
            return {'msg': 'Project updated successfully'}, 200
        return {'msg': 'No such project found in your account'}, 404
    @fresh_jwt_required
    def delete(self):
        """Delete a project the caller owns.

        Bug fix: existence is now checked BEFORE dereferencing
        ``project.owner`` — the original raised AttributeError for a
        missing project instead of returning 404.
        """
        logged_in_user_id = get_jwt_identity()
        logged_in_user = User.find_by_id(logged_in_user_id)
        parser = reqparse.RequestParser()
        parser.add_argument('id', type=str, required=True,
                            help='Project ID Required')
        data = parser.parse_args()
        project = Project.find_by_project_id(data['id'])
        if not project:
            return {'msg': 'No such project found in your account'}, 404
        if logged_in_user is not project.owner:
            return {'msg': 'You can not delete this project'}, 403
        project.delete_project()
        return {'msg': 'Project deleted successfully'}, 200
class ProjectAllocate(Resource):
    """REST resource for allocating an existing user to a project."""
    parser = reqparse.RequestParser()
    parser.add_argument('project_id', type=str, required=True,
                        help='Project ID Required')
    parser.add_argument('user_id', type=str, required=True,
                        help='User ID Required')
    @jwt_required
    def post(self):
        """Add the given user to the given project, if the caller owns it."""
        caller = User.find_by_id(get_jwt_identity())
        args = ProjectAllocate.parser.parse_args()
        target_project = Project.find_by_project_id(args['project_id'])
        target_user = User.find_by_id(args['user_id'])
        if not target_user:
            return {'msg': 'User not found'}, 404
        if not target_project:
            return {'msg': 'Project not found'}, 404
        if caller.has_project(target_project):
            target_project.members.append(target_user)
            target_project.create_project()
            return {'msg': 'Members added to project'}, 200
        return {'msg': 'Project not found in your account'}, 404
class ProjectMembers(Resource):
    """REST resource listing the members of a project the caller belongs to."""
    parser = reqparse.RequestParser()
    parser.add_argument('project_id', type=str, required=True,
                        help='Project ID Required')
    @jwt_required
    def get(self, project_id):
        """Return basic details for every member of *project_id*."""
        caller_id = get_jwt_identity()
        caller = User.find_by_id(caller_id)
        target = Project.find_by_project_id(project_id)
        if not target:
            return {'msg': 'Project not found'}, 404
        if not caller_id:
            return {'msg': 'User not found'}, 404
        if caller.has_project(target):
            details = [member.basicDetails() for member in target.members]
            return {'members': details}, 200
        return {'msg': 'Project not found in your account'}, 404
|
/* {"title":"multiplication de matrices","platform":"python","tags":["python"]} */
# Visualization hooks: the platform evaluates these strings, which reference
# the loop variables i, j, k by name — do not rename the loop variables.
_VIEW_A = "showArray2D(A, rowCursors=[i], colCursors=[k], rows=2, cols=2, width=.33)"
_VIEW_B = "showArray2D(B, rowCursors=[k], colCursors=[j], rows=2, cols=2, width=.33)"
_VIEW_C = "showArray2D(C, rowCursors=[i], colCursors=[j], rows=2, cols=2, width=.33)"
# A and B are 2-D rotation matrices (30 and 60 degrees); C = A x B.
A = [[0.866, -0.500], [0.500, 0.866]]
B = [[0.500, -0.866], [0.866, 0.500]]
C = [[0, 0], [0, 0]]
# Classic triple-loop matrix product, updating C incrementally so the
# visualizer can show each accumulation step.
for i in range(0, 2):
    for j in range(0, 2):
        C[i][j] = 0
        for k in range(0, 2):
            C[i][j] += A[i][k] * B[k][j]
# Print the product one entry per line, row by row, then a blank line.
for i in range(0, 2):
    for j in range(0, 2):
        print(C[i][j])
    print('')
|
"""Test for the longest_prefix_match definitions."""
from ipaddress import AddressValueError, NetmaskValueError
import pytest
from netutils.route import NoRouteFound, longest_prefix_match
def test_longest_prefix_match():
    """The /24 route wins for 10.1.1.245."""
    route_table = [{"network": "192.168.1.1", "mask": "255.255.255.255"}, {"network": "10.1.1.0", "mask": "24"}]
    best = longest_prefix_match("10.1.1.245", route_table)
    assert str(best) == "10.1.1.0/24"
def test_route_table_not_list():
    """A dict (not a list) routing table raises TypeError."""
    with pytest.raises(TypeError):
        longest_prefix_match("10.1.1.245", {"network": "192.168.1.1"})
def test_route_table_no_len():
    """An empty routing table raises IndexError."""
    with pytest.raises(IndexError):
        longest_prefix_match("10.1.1.245", [])
def test_route_table_ip_bad_type():
    """A list-typed lookup IP (not str/ipaddress) raises TypeError."""
    route_table = [{"network": "192.168.1.1", "mask": "255.255.255.255"}]
    with pytest.raises(TypeError):
        longest_prefix_match(["10.1.1.245"], route_table)
@pytest.mark.parametrize("test_input", ["/24", "/255.255.255.0"])
def test_route_table_except_address_value(test_input):
    """Masks written with a leading slash raise AddressValueError."""
    route_table = [
        {"network": "192.168.1.1", "mask": "255.255.255.255"},
        {"network": "10.1.1.0", "mask": test_input},
    ]
    with pytest.raises(AddressValueError):
        longest_prefix_match("10.1.1.245", route_table)
@pytest.mark.parametrize("test_input", ["100", "255.255.255.155"])
def test_route_table_except_mask_error(test_input):
    """Invalid mask values raise NetmaskValueError."""
    route_table = [
        {"network": "192.168.1.1", "mask": "255.255.255.255"},
        {"network": "10.1.1.0", "mask": test_input},
    ]
    with pytest.raises(NetmaskValueError):
        longest_prefix_match("10.1.1.245", route_table)
@pytest.mark.parametrize("test_input", ["259.1.1.0", "1.1.1.256"])
def test_route_table_bad_ip_address(test_input):
    """Network addresses with out-of-range octets raise AddressValueError."""
    route_table = [{"network": "192.168.1.1", "mask": "255.255.255.255"}, {"network": test_input, "mask": "24"}]
    with pytest.raises(AddressValueError):
        longest_prefix_match("10.1.1.245", route_table)
def test_route_bad_ip_addr():
    """An out-of-range lookup ('search') address raises ValueError."""
    route_table = [{"network": "192.168.1.1", "mask": "255.255.255.255"}, {"network": "10.1.1.0", "mask": "24"}]
    with pytest.raises(ValueError):
        longest_prefix_match("299.1.1.245", route_table)
def test_route_multiple_overlapping():
    """The most specific (/28) of several overlapping routes wins."""
    route_table = [
        {"network": "10.1.1.240", "mask": "255.255.255.240"},
        {"network": "10.1.1.128", "mask": "255.255.255.128"},
        {"network": "10.1.1.0", "mask": "24"},
    ]
    best = longest_prefix_match("10.1.1.245", route_table)
    assert str(best) == "10.1.1.240/28"
def test_route_no_best_route_found():
    """NoRouteFound is raised when nothing matches the lookup address."""
    route_table = [
        {"network": "10.1.1.240", "mask": "255.255.255.240"},
        {"network": "10.1.1.128", "mask": "255.255.255.128"},
        {"network": "10.1.1.0", "mask": "24"},
    ]
    with pytest.raises(NoRouteFound):
        longest_prefix_match("192.168.1.245", route_table)
def test_route_non_ip_sent():
    """An integer lookup value raises TypeError."""
    route_table = [
        {"network": "10.1.1.240", "mask": "255.255.255.240"},
    ]
    with pytest.raises(TypeError):
        longest_prefix_match(12345, route_table)
|
class Message1:
    """Simple two-field value object: a string field1 and an int field2."""
    _field1: str
    _field2: int
    def __init__(self, **kwargs):
        # Missing kwargs default to the type's zero value ('' and 0).
        self._field1 = kwargs.get('field1', str())
        self._field2 = kwargs.get('field2', int())
    @property
    def field(self) -> str:
        """The first (string) field."""
        return self._field1
    def __str__(self):
        # Bug fix: the closing parenthesis was missing from the template.
        return "Message1:(field1: {} field2:{})".format(self._field1, self._field2)
    def __repr__(self):
        return self.__str__()
    def __eq__(self, other):
        # Equal only to another Message1 with both fields equal.
        if isinstance(other, self.__class__):
            return self._field1 == other._field1 and self._field2 == other._field2
        else:
            return False
|
import staging_schemas as ss
from google.oauth2 import service_account
from google.cloud import bigquery
from google.cloud import exceptions
import argparse
import logging
logging.basicConfig(level=logging.INFO)
def get_client(project_id, _service_account):
    """
    Get a BigQuery client object for the given project.
    Parameters:
    - project_id (str): id of project
    - _service_account (str): path to a JSON service account file; when
      empty, application default authentication is used instead.
    """
    if not _service_account:
        logging.info('Getting client from application default authentication')
        return bigquery.Client(project_id)
    logging.info(f'Getting client from json file path {_service_account}')
    credentials = service_account.Credentials.from_service_account_file(
        _service_account)
    return bigquery.Client(project_id, credentials=credentials)
def create_dataset(dataset_id, _client):
    """
    Create a dataset in the project unless it already exists.
    Parameters:
    - dataset_id (str): ID of dataset to be created
    - _client (obj): BigQuery client object
    """
    try:
        # EAFP: probe for the dataset and only create on NotFound.
        _client.get_dataset(dataset_id)
        logging.info(f'Dataset not created. {dataset_id} already exists.')
    except exceptions.NotFound:
        logging.info(f'Creating dataset {dataset_id}')
        _client.create_dataset(dataset_id)
def create_table(_table_id, _schema, dataset_id, _client):
    """
    Create table *_table_id* with *_schema* inside *dataset_id*.
    A Conflict error from the backend is treated as "already exists"
    and only logged.
    """
    table_ref = _client.dataset(dataset_id).table(_table_id)
    new_table = bigquery.Table(table_ref, schema=_schema)
    try:
        _client.create_table(new_table)
    except exceptions.Conflict:
        logging.info(f'Table not created. {_table_id} already exists')
    else:
        logging.info(f'Created table {dataset_id}.{_table_id}')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Setup data-lake infrastructures')
    parser.add_argument('--project_id', default='cloud-data-lake')
    parser.add_argument('--service_account', default='')
    args = parser.parse_args()
    # Get client
    client = get_client(args.project_id, args.service_account)
    # Create datasets
    create_dataset('immigration_dwh_staging', client)
    create_dataset('immigration_dwh', client)
    # Create staging tables
    for table_id in ss.tables:
        # Look the schema up by attribute name instead of eval()'ing a
        # dynamically built expression string (same result, no code execution).
        schema = getattr(ss, table_id)
        create_table(table_id, schema, 'immigration_dwh_staging', client)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import csv
import datetime
import sys
import time
# Python 2 only: reload(sys) restores sys.setdefaultencoding (which the
# interpreter removes at startup) so implicit str<->unicode conversions
# use UTF-8 instead of ASCII.
# NOTE(review): reload() is a builtin only on Python 2; this module will
# not import under Python 3 as written.
reload(sys)
sys.setdefaultencoding("utf-8")
from googleapiclient.discovery import build
from healthtrends.decorators import retry, timeit
class TrendsSession(object):
    ''' Calls the Google Trends API on a list of search queries.
    The API is rate-limited so search queries are broken
    up into batches for separate API calls.

    NOTE(review): this module targets Python 2 (see the reload /
    setdefaultencoding calls at import time and the 'wb' CSV handling
    below); confirm the interpreter before reuse.
    '''
    # server constants
    SERVER = 'https://www.googleapis.com'
    API_NAME = 'trends'
    API_VERSION = 'v1beta'
    DISCOVERY_URL_SUFFIX = '/discovery/v1/apis/trends/' + API_VERSION + '/rest'
    DISCOVERY_URL = SERVER + DISCOVERY_URL_SUFFIX
    # set global start and end dates as defaults for requesting trends
    # (GLOBAL_END is evaluated once at import time, not per request)
    GLOBAL_START = '2004-01-04'
    GLOBAL_END = time.strftime('%Y-%m-%d')

    def __init__(self, api_key, query_lim=2):
        '''Build the API service object.

        api_key:   Google API key; required (ValueError when None).
        query_lim: maximum number of search terms per API call (batch size).
        '''
        print('------- Google Trends API session -------')
        print(datetime.datetime.now())
        if api_key is None:
            raise ValueError('API key not set.')
        self.api_key = api_key
        self.query_lim = query_lim
        self.output = {}
        # set up session
        self.service = build(self.API_NAME,
                             self.API_VERSION,
                             developerKey=self.api_key,
                             discoveryServiceUrl=self.DISCOVERY_URL)
        # request parameters, filled in by request_trends()
        self.geo_level = None
        self.geo_id = None
        self.start_date = None
        self.end_date = None
        self.freq = None

    @timeit
    def request_trends(self, term_list, geo_level, geo_id,
                       start_date=None, end_date=None, freq='week'):
        ''' performs a complete Google Trends request using a list of terms
        and relevant parameters.
        @Params:
        term_list: list of search term queries
        geo_level: geographic location, one of 'country', 'region', or 'dma'
        geo_id: name of the geographic location, e.g. 'US' (country),
        'US-NY' (region), '501' (dma)
        start_date: first day to download data from, in form 'YYYY-MM-DD'. Defaults to
        '2004-01-04'.
        end_date: last day to download data from, in form 'YYYY-MM-DD'. Defaults to
        today's date.
        freq: time interval of data, one of 'day', 'week', 'month', 'year'. Defaults
        to 'week'.
        '''
        # default behavior for download date range
        if start_date is None:
            start_date = self.GLOBAL_START
        if end_date is None:
            end_date = self.GLOBAL_END
        self.geo_level = geo_level
        self.geo_id = geo_id
        self.start_date = start_date
        self.end_date = end_date
        self.freq = freq
        print('Starting download:')
        print('\tgeo_level: ', geo_level, '\tgeo_id: ', geo_id)
        # split query list into batches and download each batch separately
        dat = {}
        for batch_start in range(0, len(term_list), self.query_lim):
            batch_end = min(batch_start + self.query_lim, len(term_list))
            batch = term_list[batch_start:batch_end]
            dat.update(self._batch_request(batch))
        # Convert dictionary to list of lists that will be written to file.
        # Rows are sorted by date; missing (term, date) pairs become 0.
        res = [['date'] + term_list]
        for date in sorted( list( set([x[1] for x in dat] )) ):
            vals = [dat.get((term, date), 0) for term in term_list]
            res.append([date] + vals)
        self.output = res
        print('Download completed.')

    def save_to_csv(self, full_path=None, directory=None, fname='default'):
        ''' save Google Trends output as csv file

        Either full_path or directory must be given; with directory only,
        the file name defaults to GTdata_<geo_id>.csv.
        '''
        if full_path:
            csv_out = open(full_path, 'wb')
        elif directory:
            if fname == 'default':
                csv_out = open(directory + '/GTdata_{0}.csv'.format(self.geo_id), 'wb')
            else:
                csv_out = open(directory + '/' + fname, 'wb')
        else:
            raise ValueError('Either full_path or directory must be specified to save file.')
        # NOTE(review): binary mode + csv.writer is the Python 2 idiom;
        # Python 3 would need text mode with newline=''.
        writr = csv.writer(csv_out)
        for row in self.output:
            writr.writerow(row)
        csv_out.close()

    @staticmethod
    def _date_to_ISO(datestring):
        ''' Default function from Google Trends documentation.
        Convert date from (eg) 'Jul 11 2004' to '2004-07-11'.
        Args:
            datestring: A date in the format 'Jul 11 2004', 'Jul 2004', or '2004'
        Returns:
            The same date in the format '2004-07-11'
        Raises:
            ValueError: when date doesn't match one of the three expected formats.
        '''
        # Try the three accepted formats from most to least specific.
        try:
            new_date = datetime.datetime.strptime(datestring, '%b %d %Y')
        except ValueError:
            try:
                new_date = datetime.datetime.strptime(datestring, '%b %Y')
            except ValueError:
                try:
                    new_date = datetime.datetime.strptime(datestring, '%Y')
                except:  # intentionally broad: re-raised with a clearer message
                    raise ValueError("Date doesn't match any of '%b %d %Y', '%b %Y', '%Y'.")
        return new_date.strftime('%Y-%m-%d')

    @retry(count=10, delay=2)
    def _batch_request(self, batch):
        ''' executes the API request on a batch of search terms.
        This is default code from Google Trends documentation.
        '''
        if self.geo_level == 'country':
            # Country format is ISO-3166-2 (2-letters), e.g. 'US'
            req = self.service.getTimelinesForHealth(terms=batch,
                                                     time_startDate=self.start_date,
                                                     time_endDate=self.end_date,
                                                     timelineResolution=self.freq,
                                                     geoRestriction_country=self.geo_id)
        elif self.geo_level == 'dma':
            # See https://support.google.com/richmedia/answer/2745487
            req = self.service.getTimelinesForHealth(terms=batch,
                                                     time_startDate=self.start_date,
                                                     time_endDate=self.end_date,
                                                     timelineResolution=self.freq,
                                                     geoRestriction_dma=self.geo_id)
        elif self.geo_level == 'region':
            # Region format is ISO-3166-2 (4-letters), e.g. 'US-NY' (see more examples
            # here: en.wikipedia.org/wiki/ISO_3166-2:US)
            req = self.service.getTimelinesForHealth(terms=batch,
                                                     time_startDate=self.start_date,
                                                     time_endDate=self.end_date,
                                                     timelineResolution=self.freq,
                                                     geoRestriction_region=self.geo_id)
        else:
            raise ValueError("geo_level must be one of 'country', 'region' or 'dma'")
        # execute command and sleep to avoid rate limiting
        res = req.execute()
        time.sleep(1.1)
        # Convert returned data into a dictionary of the form {(query, date): count, ...}
        res_dict = {(line['term'], self._date_to_ISO(point['date'])): point['value']
                    for line in res['lines']
                    for point in line['points']}
        return res_dict
|
from social.backends.appsfuel import AppsfuelOAuth2 as AppsfuelBackend, \
AppsfuelOAuth2Sandbox as AppsfuelSandboxBackend
|
"""
Server for people directory
"""
from datetime import datetime, timedelta
from functools import partial
import json
import logging
import re
import os
from tornado.web import RequestHandler, HTTPError
from tornado.escape import xhtml_escape
from rest_tools.server import RestServer, from_environment
from .people import People
CLEANR = re.compile('<.*?>')
def recursive_escape(data):
    """Strip HTML tags (everything matching CLEANR, i.e. ``<...>``) from
    every string inside a nested dict/list structure.

    Dicts and lists are rebuilt recursively; string leaves are cleaned.
    Non-string leaves (numbers, None, booleans) are returned unchanged —
    previously they crashed re.sub with a TypeError.
    """
    if isinstance(data, dict):
        return {recursive_escape(k): recursive_escape(v) for k,v in data.items()}
    elif isinstance(data, list):
        return [recursive_escape(v) for v in data]
    elif isinstance(data, str):
        return re.sub(CLEANR, '', data)
    else:
        # re.sub requires str/bytes; pass other leaf types through as-is.
        return data
def escape_json(data, key=None):
    """Escape every item in *data* via recursive_escape.

    With *key* given, returns a dict mapping item[key] -> item (items
    missing the key are skipped); otherwise returns a plain list.
    """
    ret = {} if key else []
    for item in data:
        if key and key not in item:
            continue
        item = recursive_escape(item)
        if key:
            ret[item[key]] = item
        else:
            # Bug fix: this previously appended the undefined name 'val',
            # raising NameError on the list-building path.
            ret.append(item)
    return ret
class Main(RequestHandler):
    """Landing-page handler: renders the people directory."""

    def initialize(self, people):
        # People cache shared across handlers (injected via add_route kwargs).
        self.people = people

    async def get(self, *args):
        # escape data, just in case
        insts = escape_json(self.people.institutions.values(), 'group_path')
        users = escape_json(self.people.users.values(), 'username')
        for u in users.values():
            # Log user records that carry no institution; they are still rendered.
            if 'institution' not in u:
                logging.info(f'{u}')
        self.render('index.html', json=json, insts=insts, users=users)
class Health(RequestHandler):
    """Health probe: reports the people-cache freshness."""

    def initialize(self, people):
        self.people = people

    async def get(self):
        # Always report timestamps; the status code signals staleness.
        self.write({
            'now': datetime.utcnow().isoformat(),
            'last_update': self.people.last_update.isoformat() if self.people.last_update else 'None',
        })
        # Data never loaded, or older than one hour, counts as unhealthy.
        # NOTE(review): 400 is unusual for a failing health check (503 is the
        # common choice) — left unchanged to preserve behavior.
        if (not self.people.last_update) or datetime.utcnow() - self.people.last_update > timedelta(hours=1):
            self.set_status(400)
def create_server():
    """Build, configure and start the REST server; returns the server object.

    Configuration is read from the environment (HOST, PORT, DEBUG,
    EXPERIMENT) with the defaults below. Static files and templates are
    served from the package-local ``static`` directory.
    """
    static_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static')
    default_config = {
        'HOST': 'localhost',
        'PORT': 8080,
        'DEBUG': False,
        'EXPERIMENT': 'IceCube',
    }
    config = from_environment(default_config)
    # NOTE(review): rest_config is built but never used below — confirm
    # whether it was meant to be passed to RestServer.
    rest_config = {
        'debug': config['DEBUG'],
    }
    # Handler kwargs: the shared People cache injected into each handler.
    kwargs = {}
    kwargs['people'] = People(config['EXPERIMENT'])
    server = RestServer(static_path=static_path, template_path=static_path, debug=config['DEBUG'])
    server.add_route('/healthz', Health, kwargs)
    # Catch-all route must be registered last.
    server.add_route(r'/(.*)', Main, kwargs)
    server.startup(address=config['HOST'], port=config['PORT'])
    return server
|
from smite import SmiteClient
from smite import Endpoint
# Create a new instance of the client (dev-id, auth key)
smite = SmiteClient(1700, '2djsa8231jlsad92ka9d2jkad912j')
# Print JSON data for all of the gods in the game on PC
print(smite.get_gods())
# Make the library use the Xbox endpoint for future requests
# NOTE(review): _switch_endpoint is underscore-private API — confirm the
# library offers no public way to select the endpoint.
smite._switch_endpoint(Endpoint.XBOX)
# Print JSON data for all of the gods in the game on Xbox
print(smite.get_gods())
import collections
import netaddr
from oslo_log import log
from oslo_config import cfg
from oslo_utils import timeutils
from neutronclient.common import exceptions as neutron_exc
from ceilometer.agent import plugin_base
from ceilometer import nova_client, neutron_client
from ceilometer import sample
from ceilometer.i18n import _
from pprint import pformat, pprint
LOG = log.getLogger(__name__)
class FitterFixedIPPollster(plugin_base.PollsterBase):
    """Pollster emitting one 'ip.fixed' gauge sample per fixed IP address
    attached to each discovered instance.

    Port/network/subnet data is fetched from Neutron; the instance list
    comes from the 'local_instances' discovery.
    """

    @property
    def default_discovery(self):
        # Poll the instances running on the local compute node.
        return 'local_instances'

    def list_resources_with_long_filters(self, list_method, filter_attr, filter_values, **params):
        """List neutron resources with handling RequestURITooLong exception.
        If filter parameters are long, list resources API request leads to
        414 error (URL is too long). For such case, this method splits
        list parameters specified by a list_field argument into chunks
        and calls the specified list_method repeatedly.
        :param list_method: Method used to retrieve resource list.
        :param filter_attr: attribute name to be filtered. The value corresponding
            to this attribute is specified by "filter_values".
            If you want to specify more attributes for a filter condition,
            pass them as keyword arguments like "attr2=values2".
        :param filter_values: values of "filter_attr" to be filtered.
            If filter_values are too long and the total URI length exceeds the
            maximum length supported by the neutron server, filter_values will
            be split into sub lists if filter_values is a list.
        :param params: parameters to pass a specified listing API call
            without any changes. You can specify more filter conditions
            in addition to a pair of filter_attr and filter_values.
        """
        try:
            params[filter_attr] = filter_values
            return list_method(**params)
        except neutron_exc.RequestURITooLong as uri_len_exc:
            # The URI is too long because of too many filter values.
            # Use the excess attribute of the exception to know how many
            # filter values can be inserted into a single request.
            # We consider only the filter condition from (filter_attr,
            # filter_values) and do not consider other filter conditions
            # which may be specified in **params.
            if type(filter_values) != list:
                filter_values = [filter_values]
            # Length of each query filter is:
            # <key>=<value>& (e.g., id=<uuid>)
            # The length will be key_len + value_maxlen + 2
            all_filter_len = sum(len(filter_attr) + len(val) + 2
                                 for val in filter_values)
            allowed_filter_len = all_filter_len - uri_len_exc.excess
            val_maxlen = max(len(val) for val in filter_values)
            filter_maxlen = len(filter_attr) + val_maxlen + 2
            # NOTE(review): '/' is float division under Python 3 and would
            # break the range() call below; use '//' if this ever runs on
            # Python 3 — TODO confirm target interpreter.
            chunk_size = allowed_filter_len / filter_maxlen
            resources = []
            for i in range(0, len(filter_values), chunk_size):
                params[filter_attr] = filter_values[i:i + chunk_size]
                resources.extend(list_method(**params))
            return resources

    def port_list(self, **params):
        # Raw Neutron port listing; returns the 'ports' payload.
        ports = self.neutron.list_ports(**params).get('ports')
        return ports

    def subnet_list(self, **params):
        # Raw Neutron subnet listing; returns the 'subnets' payload as a list.
        subnets = self.neutron.list_subnets(**params).get('subnets')
        return [s for s in subnets]

    def network_list(self, **params):
        """List networks with each network's subnet ids expanded to full
        subnet dicts."""
        networks = self.neutron.list_networks(**params).get('networks')
        # Get subnet list to expand subnet info in network list.
        subnets = self.subnet_list()
        subnet_dict = dict([(s['id'], s) for s in subnets])
        # Expand subnet list from subnet_id to values.
        for n in networks:
            # Due to potential timing issues, we can't assume the subnet_dict data
            # is in sync with the network data.
            n['subnets'] = [subnet_dict[s] for s in n.get('subnets', []) if
                            s in subnet_dict]
        return [n for n in networks]

    def list(self, all_tenants=False, **search_opts):
        """List floating IPs, annotated with the owning instance info."""
        if not all_tenants:
            # NOTE(review): self.request is not set anywhere in this class —
            # this branch looks copied from a Horizon API wrapper; confirm it
            # is ever reached in the pollster context.
            tenant_id = self.request.user.tenant_id
            # In Neutron, list_floatingips returns Floating IPs from
            # all tenants when the API is called with admin role, so
            # we need to filter them with tenant_id.
            search_opts['tenant_id'] = tenant_id
            port_search_opts = {'tenant_id': tenant_id}
        else:
            port_search_opts = {}
        fips = self.neutron.list_floatingips(**search_opts)
        fips = fips.get('floatingips')
        # Get port list to add instance_id to floating IP list
        # instance_id is stored in device_id attribute
        ports = self.port_list(**port_search_opts)
        port_dict = collections.OrderedDict([(p['id'], p) for p in ports])
        for fip in fips:
            self._set_instance_info(fip, port_dict.get(fip['port_id']))
        return [fip for fip in fips]

    def port_get(self, port_id, **params):
        # Fetch one port by id; returns the 'port' payload.
        port = self.neutron.show_port(port_id, **params).get('port')
        return port

    def _get_instance_type_from_device_owner(self, device_owner):
        # NOTE(review): self.device_owner_map is not defined in this class;
        # confirm the base class (or a mixin) provides it.
        for key, value in self.device_owner_map.items():
            if device_owner.startswith(key):
                return value
        return device_owner

    def _set_instance_info(self, fip, port=None):
        """Annotate a floating-ip dict with instance_id/instance_type taken
        from its attached port (fetched on demand when not supplied)."""
        if fip['port_id']:
            if not port:
                port = self.port_get(fip['port_id'])
            fip['instance_id'] = port['device_id']
            fip['instance_type'] = self._get_instance_type_from_device_owner(
                port['device_owner'])
        else:
            fip['instance_id'] = None
            fip['instance_type'] = None

    def _server_get_addresses(self, server, ports, floating_ips, network_names):
        """Build a nova-style ``addresses`` mapping
        {network_name: [address dicts]} for one server from its ports."""
        def _format_address(mac, ip, type, device_id, port_id):
            # Derive the IP version; re-raise unparsable addresses after logging.
            try:
                version = netaddr.IPAddress(ip).version
            except Exception as e:
                error_message = 'Unable to parse IP address %s.' % ip
                pprint(error_message)
                raise e
            return {u'OS-EXT-IPS-MAC:mac_addr': mac,
                    u'version': version,
                    u'addr': ip,
                    u'OS-EXT-IPS:type': type,
                    u'device_id': device_id,
                    u'port_id': port_id}
        addresses = collections.defaultdict(list)
        instance_ports = ports.get(server.id, [])
        for port in instance_ports:
            network_name = network_names.get(port['network_id'])
            if network_name is not None:
                # One entry per fixed IP on the port...
                for fixed_ip in port['fixed_ips']:
                    addresses[network_name].append(
                        _format_address(port['mac_address'],
                                        fixed_ip['ip_address'],
                                        u'fixed',
                                        port['device_id'],
                                        port['id']))
                # ...plus one per floating IP associated with the port.
                port_fips = floating_ips.get(port['id'], [])
                for fip in port_fips:
                    addresses[network_name].append(
                        _format_address(port['mac_address'],
                                        fip['floating_ip_address'],
                                        u'floating',
                                        port['device_id'],
                                        port['id']))
        return dict(addresses)

    def get_samples(self, manager, cache, resources):
        """Yield one 'ip.fixed' gauge sample per address of each instance."""
        self.neutron = neutron_client.Client().client
        for server in resources:
            search_opts = {'device_id': server.id}
            ports = self.port_list(**search_opts)
            networks = self.list_resources_with_long_filters(
                self.network_list, 'id', set([port['network_id'] for port in ports]))
            # Map instance to its ports
            instances_ports = collections.defaultdict(list)
            for port in ports:
                instances_ports[port['device_id']].append(port)
            # Map network id to its name
            network_names = dict(((network['id'], network['name']) for network in networks))
            try:
                # No floating-ip mapping is passed here ({}): only fixed
                # addresses end up in server.addresses.
                addresses = self._server_get_addresses(server, instances_ports, {}, network_names)
            except Exception as e:
                LOG.info("[FitterFixedIPPollster] Error: %s" % e)
            else:
                server.addresses = addresses
                for network_name, nets in server.addresses.items():
                    for net in nets:
                        yield sample.Sample(
                            name='ip.fixed',
                            unit='ip',
                            type=sample.TYPE_GAUGE,
                            volume=1,
                            user_id=server.user_id,
                            project_id=server.tenant_id,
                            resource_id=net['port_id'],
                            timestamp=timeutils.utcnow().isoformat(),
                            resource_metadata={
                                'address': net['addr'],
                            }
                        )
|
# A neural network implementation.
import numpy as np
def sigmoid(z):
    """Logistic function, applied element-wise."""
    return np.reciprocal(1.0 + np.exp(-z))
def softmax(logits):
    """Row-wise softmax of a 2-D array of logits.

    Each row is shifted by its maximum before exponentiating so that
    large logits cannot overflow np.exp; the shift cancels out in the
    ratio, so results are mathematically unchanged.
    """
    shifted = logits - np.max(logits, axis=1).reshape(-1, 1)
    exponentials = np.exp(shifted)
    return exponentials / np.sum(exponentials, axis=1).reshape(-1, 1)
def sigmoid_gradient(sigmoid):
    """Derivative of the logistic function, expressed via its output value."""
    return sigmoid * (1 - sigmoid)
def loss(Y, y_hat):
    """Cross-entropy between one-hot labels Y and predictions y_hat,
    averaged over the examples (rows)."""
    n_examples = Y.shape[0]
    return -np.sum(Y * np.log(y_hat)) / n_examples
def prepend_bias(X):
    """Return X with a leading column of ones (the bias input)."""
    bias_column = np.ones_like(X[:, :1])
    return np.concatenate((bias_column, X), axis=1)
def forward(X, w1, w2):
    """Single forward pass; returns (y_hat, hidden activations)."""
    hidden = sigmoid(np.matmul(prepend_bias(X), w1))
    y_hat = softmax(np.matmul(prepend_bias(hidden), w2))
    return (y_hat, hidden)
def back(X, Y, y_hat, w2, h):
    """Backpropagation: gradients of the loss w.r.t. w1 and w2."""
    n_examples = X.shape[0]
    output_error = y_hat - Y
    w2_gradient = np.matmul(prepend_bias(h).T, output_error) / n_examples
    # Skip the bias row of w2 when propagating the error back to the hidden layer.
    hidden_error = np.matmul(output_error, w2[1:].T) * sigmoid_gradient(h)
    w1_gradient = np.matmul(prepend_bias(X).T, hidden_error) / n_examples
    return (w1_gradient, w2_gradient)
def classify(X, w1, w2):
    """Predict a column vector of class indices for the rows of X."""
    probabilities, _ = forward(X, w1, w2)
    return np.argmax(probabilities, axis=1).reshape(-1, 1)
def initialize_weights(n_input_variables, n_hidden_nodes, n_classes):
    """Draw scaled random initial weights for both layers.

    Each weight matrix has one extra row for the bias input; entries are
    standard normals scaled by sqrt(1 / rows).
    """
    w1_rows = n_input_variables + 1
    w1 = np.sqrt(1 / w1_rows) * np.random.randn(w1_rows, n_hidden_nodes)
    w2_rows = n_hidden_nodes + 1
    w2 = np.sqrt(1 / w2_rows) * np.random.randn(w2_rows, n_classes)
    return (w1, w2)
def report(iteration, X_train, Y_train, X_test, Y_test, w1, w2):
    """Print the training loss and test accuracy for one iteration."""
    y_hat, _ = forward(X_train, w1, w2)
    training_loss = loss(Y_train, y_hat)
    predictions = classify(X_test, w1, w2)
    accuracy = np.average(predictions == Y_test) * 100.0
    print("Iteration: %5d, Loss: %.8f, Accuracy: %.2f%%" %
          (iteration, training_loss, accuracy))
def train(X_train, Y_train, X_test, Y_test, n_hidden_nodes, iterations, lr):
    """Full-batch gradient descent; reports progress every iteration and
    returns the trained weight matrices (w1, w2)."""
    w1, w2 = initialize_weights(X_train.shape[1], n_hidden_nodes,
                                Y_train.shape[1])
    for iteration in range(iterations):
        y_hat, h = forward(X_train, w1, w2)
        gradient1, gradient2 = back(X_train, Y_train, y_hat, w2, h)
        w1 = w1 - lr * gradient1
        w2 = w2 - lr * gradient2
        report(iteration, X_train, Y_train, X_test, Y_test, w1, w2)
    return (w1, w2)
|
# rez package definition for a pre-built Python 2.7.10 install.
name = 'python'
version = '2.7.10'
tools = ['python']
# Single supported build variant.
variants = [
    ['platform-linux', 'arch-x86_64', 'os-CentOS-6']
]
def commands():
    # 'env' is injected by rez at resolve time; expose the interpreter on PATH.
    env.PATH.append('/home/cmartin/opt/python/python2.7.10/bin')
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle import fluid
import paddle.fluid.dygraph as dg
import unittest
def sigmoid(x):
    """Element-wise logistic function (NumPy reference implementation)."""
    return np.reciprocal(1.0 + np.exp(-x))
def glu(x, dim=-1):
    """Gated linear unit: split x in two along *dim* and gate the first
    half with the sigmoid of the second."""
    linear_part, gate = np.split(x, 2, axis=dim)
    return linear_part * sigmoid(gate)
class TestGLUCase(unittest.TestCase):
    """Check that fluid.nets.glu matches the NumPy reference ``glu`` above."""

    def setUp(self):
        # Random (5, 20) input; glu splits the last axis in half.
        self.x = np.random.randn(5, 20)
        self.dim = -1
        self.out = glu(self.x, self.dim)

    def check_identity(self, place):
        # Run Paddle's glu in dygraph mode on the given device and compare
        # against the NumPy reference result.
        with dg.guard(place):
            x_var = dg.to_variable(self.x)
            y_var = fluid.nets.glu(x_var, self.dim)
            y_np = y_var.numpy()
            np.testing.assert_allclose(y_np, self.out)

    def test_case(self):
        self.check_identity(fluid.CPUPlace())
        # Repeat on GPU when this build has CUDA support.
        if fluid.is_compiled_with_cuda():
            self.check_identity(fluid.CUDAPlace(0))
if __name__ == '__main__':
    # Run the GLU identity tests when executed as a script.
    unittest.main()
|
from pygments.token import Token
from nubia import context
from nubia import statusbar
from pandas import DataFrame
from suzieq.version import SUZIEQ_VERSION
class NubiaSuzieqStatusBar(statusbar.StatusBar):
    """Nubia status bar for the Suzieq CLI: shows session context in the
    toolbar and the last command's error (if any) in the right prompt."""

    def __init__(self, ctx):
        # NOTE(review): statusbar.StatusBar.__init__ is not invoked here —
        # confirm the base class needs no initialization.
        self._last_status = None
        self.ctx = ctx

    def get_rprompt_tokens(self):
        # A DataFrame result means success; any other truthy status is
        # treated as an error message to display.
        if not isinstance(self._last_status, DataFrame) and self._last_status:
            return [(Token.RPrompt, "Error: {}".format(self._last_status))]
        return []

    def set_last_command_status(self, status):
        # Called by the shell after each command with its result/status.
        self._last_status = status

    def get_tokens(self):
        # One (token-type, text) pair per field, separated by spacers.
        spacer = (Token.Spacer, " ")
        if context.get_context().pager:
            is_pager = (Token.Warn, "ON")
        else:
            is_pager = (Token.Info, "OFF")
        return [
            (Token.Toolbar, "Suzieq"),
            spacer,
            (Token.Toolbar, "Version "),
            spacer,
            (Token.Info, SUZIEQ_VERSION),
            spacer,
            (Token.Toolbar, "Pager "),
            spacer,
            is_pager,
            spacer,
            (Token.Toolbar, "Namespace "),
            spacer,
            (Token.Info, ", ".join(self.ctx.namespace)),
            spacer,
            (Token.Toolbar, "Hostname "),
            spacer,
            (Token.Info, ", ".join(self.ctx.hostname)),
            spacer,
            (Token.Toolbar, "StartTime "),
            spacer,
            (Token.Info, self.ctx.start_time),
            spacer,
            (Token.Toolbar, "EndTime "),
            spacer,
            (Token.Info, self.ctx.end_time),
            spacer,
            (Token.Toolbar, "Engine "),
            spacer,
            (Token.Info, self.ctx.engine),
            spacer,
            (Token.Toolbar, "Query Time "),
            spacer,
            (Token.Info, self.ctx.exec_time),
        ]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Bayesian Blocks for Time Series Analysis
========================================
Dynamic programming algorithm for solving a piecewise-constant model for
various datasets. This is based on the algorithm presented in Scargle
et al 2012 [1]_. This code was ported from the astroML project [2]_.
Applications include:
- finding an optimal histogram with adaptive bin widths
- finding optimal segmentation of time series data
- detecting inflection points in the rate of event data
The primary interface to these routines is the :func:`bayesian_blocks`
function. This module provides fitness functions suitable for three types
of data:
- Irregularly-spaced event data via the :class:`Events` class
- Regularly-spaced event data via the :class:`RegularEvents` class
- Irregularly-spaced point measurements via the :class:`PointMeasures` class
For more fine-tuned control over the fitness functions used, it is possible
to define custom :class:`FitnessFunc` classes directly and use them with
the :func:`bayesian_blocks` routine.
One common application of the Bayesian Blocks algorithm is the determination
of optimal adaptive-width histogram bins. This uses the same fitness function
as for irregularly-spaced time series events. The easiest interface for
creating Bayesian Blocks histograms is the :func:`astropy.stats.histogram`
function.
References
----------
.. [1] http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
.. [2] http://astroML.org/ https://github.com/astroML/astroML/
"""
import warnings
import numpy as np
import types
#from funcsigs import signature
from inspect import getargspec
# TODO: implement other fitness functions from appendix B of Scargle 2012
__all__ = ['FitnessFunc', 'Events', 'RegularEvents', 'PointMeasures',
'bayesian_blocks']
def bayesian_blocks(t, x=None, sigma=None,
                    fitness='events', **kwargs):
    r"""Compute optimal segmentation of data with Scargle's Bayesian Blocks
    This is a flexible implementation of the Bayesian Blocks algorithm
    described in Scargle 2012 [1]_.
    Parameters
    ----------
    t : array_like
        data times (one dimensional, length N)
    x : array_like (optional)
        data values
    sigma : array_like or float (optional)
        data errors
    fitness : str or object
        the fitness function to use for the model.
        If a string, the following options are supported:
        - 'events' : binned or unbinned event data. Arguments are ``gamma``,
          which gives the slope of the prior on the number of bins, or
          ``ncp_prior``, which is :math:`-\ln({\tt gamma})`.
        - 'regular_events' : non-overlapping events measured at multiples of a
          fundamental tick rate, ``dt``, which must be specified as an
          additional argument. Extra arguments are ``p0``, which gives the
          false alarm probability to compute the prior, or ``gamma``, which
          gives the slope of the prior on the number of bins, or ``ncp_prior``,
          which is :math:`-\ln({\tt gamma})`.
        - 'measures' : fitness for a measured sequence with Gaussian errors.
          Extra arguments are ``p0``, which gives the false alarm probability
          to compute the prior, or ``gamma``, which gives the slope of the
          prior on the number of bins, or ``ncp_prior``, which is
          :math:`-\ln({\tt gamma})`.
        In all three cases, if more than one of ``p0``, ``gamma``, and
        ``ncp_prior`` is chosen, ``ncp_prior`` takes precedence over ``gamma``
        which takes precedence over ``p0``.
        Alternatively, the fitness parameter can be an instance of
        :class:`FitnessFunc` or a subclass thereof.
    **kwargs :
        any additional keyword arguments will be passed to the specified
        :class:`FitnessFunc` derived class.
    Returns
    -------
    edges : ndarray
        array containing the (N+1) edges defining the N bins
    Examples
    --------
    Event data:
    >>> t = np.random.normal(size=100)
    >>> edges = bayesian_blocks(t, fitness='events', p0=0.01)
    Event data with repeats:
    >>> t = np.random.normal(size=100)
    >>> t[80:] = t[:20]
    >>> edges = bayesian_blocks(t, fitness='events', p0=0.01)
    Regular event data:
    >>> dt = 0.05
    >>> t = dt * np.arange(1000)
    >>> x = np.zeros(len(t))
    >>> x[np.random.randint(0, len(t), len(t) // 10)] = 1
    >>> edges = bayesian_blocks(t, x, fitness='regular_events', dt=dt)
    Measured point data with errors:
    >>> t = 100 * np.random.random(100)
    >>> x = np.exp(-0.5 * (t - 50) ** 2)
    >>> sigma = 0.1
    >>> x_obs = np.random.normal(x, sigma)
    >>> edges = bayesian_blocks(t, x_obs, sigma, fitness='measures')
    References
    ----------
    .. [1] Scargle, J et al. (2012)
       http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
    See Also
    --------
    astropy.stats.histogram : compute a histogram using bayesian blocks
    """
    # Map the string shorthands onto their fitness classes; an unrecognized
    # string falls through unchanged and is rejected by the checks below.
    FITNESS_DICT = {'events': Events,
                    'regular_events': RegularEvents,
                    'measures': PointMeasures}
    fitness = FITNESS_DICT.get(fitness, fitness)
    if isinstance(fitness, type) and issubclass(fitness, FitnessFunc):
        # A FitnessFunc subclass: instantiate it with the extra kwargs.
        fitfunc = fitness(**kwargs)
    elif isinstance(fitness, FitnessFunc):
        # An already-constructed fitness function: use as-is.
        fitfunc = fitness
    else:
        raise ValueError("fitness parameter not understood")
    return fitfunc.fit(t, x, sigma)
class FitnessFunc(object):
    """Base class for bayesian blocks fitness functions
    Derived classes should overload the following method:
    ``fitness(self, **kwargs)``:
      Compute the fitness given a set of named arguments.
      Arguments accepted by fitness must be among ``[T_k, N_k, a_k, b_k, c_k]``
      (See [1]_ for details on the meaning of these parameters).
    Additionally, other methods may be overloaded as well:
    ``__init__(self, **kwargs)``:
      Initialize the fitness function with any parameters beyond the normal
      ``p0`` and ``gamma``.
    ``validate_input(self, t, x, sigma)``:
      Enable specific checks of the input data (``t``, ``x``, ``sigma``)
      to be performed prior to the fit.
    ``compute_ncp_prior(self, N)``: If ``ncp_prior`` is not defined explicitly,
      this function is called in order to define it before fitting. This may be
      calculated from ``gamma``, ``p0``, or whatever method you choose.
    ``p0_prior(self, N)``:
      Specify the form of the prior given the false-alarm probability ``p0``
      (See [1]_ for details).
    For examples of implemented fitness functions, see :class:`Events`,
    :class:`RegularEvents`, and :class:`PointMeasures`.
    References
    ----------
    .. [1] Scargle, J et al. (2012)
       http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
    """
    def __init__(self, p0=0.05, gamma=None, ncp_prior=None):
        self.p0 = p0
        self.gamma = gamma
        self.ncp_prior = ncp_prior

    def validate_input(self, t, x=None, sigma=None):
        """Validate inputs to the model.
        Parameters
        ----------
        t : array_like
            times of observations
        x : array_like (optional)
            values observed at each time
        sigma : float or array_like (optional)
            errors in values x
        Returns
        -------
        t, x, sigma : array_like, float or None
            validated and perhaps modified versions of inputs
        """
        # validate array input
        t = np.asarray(t, dtype=float)
        if x is not None:
            x = np.asarray(x)
        if sigma is not None:
            sigma = np.asarray(sigma)
        # find unique values of t
        t = np.array(t)
        if t.ndim != 1:
            raise ValueError("t must be a one-dimensional array")
        unq_t, unq_ind, unq_inv = np.unique(t, return_index=True,
                                            return_inverse=True)
        # if x is not specified, x will be counts at each time
        if x is None:
            if sigma is not None:
                raise ValueError("If sigma is specified, x must be specified")
            else:
                sigma = 1
            if len(unq_t) == len(t):
                x = np.ones_like(t)
            else:
                # repeated times: collapse them and count the multiplicity
                x = np.bincount(unq_inv)
            t = unq_t
        # if x is specified, then we need to simultaneously sort t and x
        else:
            # TODO: allow broadcasted x?
            x = np.asarray(x)
            if x.shape not in [(), (1,), (t.size,)]:
                raise ValueError("x does not match shape of t")
            # broadcast scalar/length-1 x up to the shape of t
            x += np.zeros_like(t)
            if len(unq_t) != len(t):
                raise ValueError("Repeated values in t not supported when "
                                 "x is specified")
            t = unq_t
            x = x[unq_ind]
        # verify the given sigma value
        if sigma is None:
            sigma = 1
        else:
            sigma = np.asarray(sigma)
            if sigma.shape not in [(), (1,), (t.size,)]:
                raise ValueError('sigma does not match the shape of x')
        return t, x, sigma

    def fitness(self, **kwargs):
        # Subclasses must implement the block fitness computation.
        raise NotImplementedError()

    def p0_prior(self, N):
        """
        Empirical prior, parametrized by the false alarm probability ``p0``
        See eq. 21 in Scargle (2012)
        Note that there was an error in this equation in the original Scargle
        paper (the "log" was missing). The following corrected form is taken
        from https://arxiv.org/abs/1304.2818
        """
        return 4 - np.log(73.53 * self.p0 * (N ** -0.478))

    # the fitness_args property will return the list of arguments accepted by
    # the method fitness(). This allows more efficient computation below.
    @property
    def _fitness_args(self):
        return getargspec(self.fitness)[0]

    def compute_ncp_prior(self, N):
        """
        If ``ncp_prior`` is not explicitly defined, compute it from ``gamma``
        or ``p0``.
        """
        if self.ncp_prior is not None:
            return self.ncp_prior
        elif self.gamma is not None:
            return -np.log(self.gamma)
        elif self.p0 is not None:
            return self.p0_prior(N)
        else:
            raise ValueError("``ncp_prior`` is not defined, and cannot compute "
                             "it as neither ``gamma`` nor ``p0`` is defined.")

    def fit(self, t, x=None, sigma=None):
        """Fit the Bayesian Blocks model given the specified fitness function.
        Parameters
        ----------
        t : array_like
            data times (one dimensional, length N)
        x : array_like (optional)
            data values
        sigma : array_like or float (optional)
            data errors
        Returns
        -------
        edges : ndarray
            array containing the (M+1) edges defining the M optimal bins
        """
        t, x, sigma = self.validate_input(t, x, sigma)
        # compute values needed for computation, below
        if 'a_k' in self._fitness_args:
            ak_raw = np.ones_like(x) / sigma ** 2
        if 'b_k' in self._fitness_args:
            bk_raw = x / sigma ** 2
        if 'c_k' in self._fitness_args:
            ck_raw = x * x / sigma ** 2
        # create length-(N + 1) array of cell edges
        edges = np.concatenate([t[:1],
                                0.5 * (t[1:] + t[:-1]),
                                t[-1:]])
        block_length = t[-1] - edges
        # arrays to store the best configuration
        N = len(t)
        best = np.zeros(N, dtype=float)
        last = np.zeros(N, dtype=int)
        # Compute ncp_prior if not defined.
        # Bug fix: the explicit value must also be copied into the local
        # variable — previously the loop below raised NameError whenever
        # ``ncp_prior`` was passed to the constructor.
        if self.ncp_prior is None:
            ncp_prior = self.compute_ncp_prior(N)
        else:
            ncp_prior = self.ncp_prior
        # ----------------------------------------------------------------
        # Start with first data cell; add one cell at each iteration
        # ----------------------------------------------------------------
        for R in range(N):
            # Compute fit_vec : fitness of putative last block (end at R)
            kwds = {}
            # T_k: width/duration of each block
            if 'T_k' in self._fitness_args:
                kwds['T_k'] = block_length[:R + 1] - block_length[R + 1]
            # N_k: number of elements in each block
            if 'N_k' in self._fitness_args:
                kwds['N_k'] = np.cumsum(x[:R + 1][::-1])[::-1]
            # a_k: eq. 31
            if 'a_k' in self._fitness_args:
                kwds['a_k'] = 0.5 * np.cumsum(ak_raw[:R + 1][::-1])[::-1]
            # b_k: eq. 32
            if 'b_k' in self._fitness_args:
                kwds['b_k'] = - np.cumsum(bk_raw[:R + 1][::-1])[::-1]
            # c_k: eq. 33
            if 'c_k' in self._fitness_args:
                kwds['c_k'] = 0.5 * np.cumsum(ck_raw[:R + 1][::-1])[::-1]
            # evaluate fitness function
            fit_vec = self.fitness(**kwds)
            A_R = fit_vec - ncp_prior
            A_R[1:] += best[:R]
            i_max = np.argmax(A_R)
            last[R] = i_max
            best[R] = A_R[i_max]
        # ----------------------------------------------------------------
        # Now find changepoints by iteratively peeling off the last block
        # ----------------------------------------------------------------
        change_points = np.zeros(N, dtype=int)
        i_cp = N
        ind = N
        while True:
            i_cp -= 1
            change_points[i_cp] = ind
            if ind == 0:
                break
            ind = last[ind - 1]
        change_points = change_points[i_cp:]
        return edges[change_points]
class Events(FitnessFunc):
    r"""Bayesian blocks fitness for binned or unbinned events.

    Parameters
    ----------
    p0 : float (optional)
        False alarm probability used to compute the prior on
        :math:`N_{\rm blocks}` (eq. 21 of Scargle 2012).  For event data
        ``p0`` is not an accurate estimate of the true false alarm rate;
        for trigger-like applications, calibrate ``gamma`` or
        ``ncp_prior`` on signal-free noise instead.
    gamma : float (optional)
        If given, compute the general prior form
        :math:`p \sim {\tt gamma}^{N_{\rm blocks}}` and ignore ``p0``.
    ncp_prior : float (optional)
        If given, use this value directly as the prior
        (:math:`{\tt ncp\_prior} = -\ln({\tt gamma})`), ignoring both
        ``gamma`` and ``p0``.
    """

    def __init__(self, p0=0.05, gamma=None, ncp_prior=None):
        # Warn only when the prior will actually be derived from p0,
        # i.e. neither gamma nor ncp_prior overrides it.
        prior_overridden = gamma is not None or ncp_prior is not None
        if p0 is not None and not prior_overridden:
            warnings.warn('p0 does not seem to accurately represent the false '
                          'positive rate for event data. It is highly '
                          'recommended that you run random trials on signal-'
                          'free noise to calibrate ncp_prior to achieve a '
                          'desired false positive rate.', AstropyUserWarning)
        super().__init__(p0, gamma, ncp_prior)

    def fitness(self, N_k, T_k):
        """Block fitness per eq. 19 of Scargle 2012."""
        # N_k * log(N_k / T_k), written as a log difference.
        log_density = np.log(N_k) - np.log(T_k)
        return N_k * log_density

    def validate_input(self, t, x, sigma):
        """Validate inputs; event counts must be integer-valued."""
        t, x, sigma = super().validate_input(t, x, sigma)
        if x is not None and np.any(x % 1 > 0):
            raise ValueError("x must be integer counts for fitness='events'")
        return t, x, sigma
class RegularEvents(FitnessFunc):
    r"""Bayesian blocks fitness for regular events.

    Intended for data with a fundamental "tick" length ``dt``: every
    measurement falls on a multiple of the tick, and each tick holds
    either zero or one count.

    Parameters
    ----------
    dt : float
        tick rate for data
    p0 : float (optional)
        False alarm probability used to compute the prior on
        :math:`N_{\rm blocks}` (eq. 21 of Scargle 2012); ignored when
        gamma is given.
    ncp_prior : float (optional)
        If given, use this value directly as the prior
        (:math:`{\tt ncp\_prior} = -\ln({\tt gamma})`), ignoring both
        ``gamma`` and ``p0``.
    """

    def __init__(self, dt, p0=0.05, gamma=None, ncp_prior=None):
        self.dt = dt
        super().__init__(p0, gamma, ncp_prior)

    def validate_input(self, t, x, sigma):
        """Validate inputs; each tick must contain 0 or 1 events."""
        t, x, sigma = super().validate_input(t, x, sigma)
        is_binary = (x == 0) | (x == 1)
        if not np.all(is_binary):
            raise ValueError("Regular events must have only 0 and 1 in x")
        return t, x, sigma

    def fitness(self, T_k, N_k):
        """Block fitness per eq. 75 of Scargle 2012."""
        eps = 1E-8
        # M_k: number of ticks in each block; ratio: occupancy N/M.
        M_k = T_k / self.dt
        ratio = N_k / M_k
        if np.any(ratio > 1 + eps):
            warnings.warn('regular events: N/M > 1. '
                          'Is the time step correct?', AstropyUserWarning)
        # Compute the complement before clamping, so the two clamps below
        # stay independent (order matters).
        complement = 1 - ratio
        # Clamp non-positive values to 1 so the logs are finite; the
        # corresponding coefficients (N_k or M_k - N_k) are zero there.
        ratio[ratio <= 0] = 1
        complement[complement <= 0] = 1
        return N_k * np.log(ratio) + (M_k - N_k) * np.log(complement)
class PointMeasures(FitnessFunc):
    r"""Bayesian blocks fitness for point measures

    Parameters
    ----------
    p0 : float (optional)
        False alarm probability, used to compute the prior on :math:`N_{\rm
        blocks}` (see eq. 21 of Scargle 2012). If gamma is specified, p0 is
        ignored.
    ncp_prior : float (optional)
        If specified, use the value of ``ncp_prior`` to compute the prior as
        above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt
        gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are
        ignored.
    """

    def __init__(self, p0=0.05, gamma=None, ncp_prior=None):
        # Zero-argument super() for consistency with the sibling fitness
        # classes (Events, RegularEvents) defined in this module.
        super().__init__(p0, gamma, ncp_prior)

    def fitness(self, a_k, b_k):
        # eq. 41 from Scargle 2012
        return (b_k * b_k) / (4 * a_k)

    def validate_input(self, t, x, sigma):
        """Validate inputs; point measures require measured values ``x``."""
        # Fail fast with a specific message before generic validation runs.
        if x is None:
            raise ValueError("x must be specified for point measures")
        return super().validate_input(t, x, sigma)
|
# Copyright (c) 2014 Thomas Scholtes
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import re
import os
import sys
from subprocess import Popen, PIPE, STDOUT, check_call
from hashlib import sha256
from optparse import OptionParser
from concurrent import futures
import beets
from beets import importer, config, logging
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, decargs, colorize, input_yn, UserError
from beets.library import ReadError
from beets.util import cpu_count, displayable_path, syspath
log = logging.getLogger('beets.check')
def set_checksum(item):
    """Compute the item's current file checksum and persist it."""
    checksum = compute_checksum(item)
    item['checksum'] = checksum
    item.store()
def compute_checksum(item):
    """Return the SHA-256 hex digest of the item's audio file.

    The file is read in fixed-size chunks so that arbitrarily large audio
    files do not need to fit in memory at once.
    """
    # Renamed from `hash`, which shadowed the builtin of the same name.
    hasher = sha256()
    with open(syspath(item.path), 'rb') as file:
        for chunk in iter(lambda: file.read(1 << 16), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
def verify_checksum(item):
    """Raise ``ChecksumError`` if the file no longer matches the stored checksum."""
    stored = item['checksum']
    current = compute_checksum(item)
    if stored != current:
        raise ChecksumError(item.path,
                            'checksum did not match value in library.')
def verify_integrity(item):
    """Run every available external integrity checker against *item*.

    Each checker raises ``IntegrityError`` on failure; the first failure
    propagates to the caller.
    """
    checkers = IntegrityChecker.allAvailable()
    for checker in checkers:
        checker.check(item)
class ChecksumError(ReadError):
    """Raised when a file's checksum no longer matches the library value."""
    pass
class CheckPlugin(BeetsPlugin):
    """Maintain and verify SHA-256 checksums of audio files in the library.

    Hooks into import, write and convert events so that checksums are
    created for newly imported items, verified before beets overwrites a
    file, and refreshed after beets itself modifies one.  Optionally runs
    the configured external integrity tools during import.
    """

    def __init__(self):
        super(CheckPlugin, self).__init__()
        self.config.add({
            'import': True,
            'write-check': True,
            'write-update': True,
            'integrity': True,
            'convert-update': True,
            'threads': cpu_count(),
            'external': {
                'mp3val': {
                    'cmdline': 'mp3val {0}',
                    'formats': 'MP3',
                    # Raw string: `\(` is an invalid escape sequence in a
                    # plain literal (DeprecationWarning on Python 3.6+);
                    # the resulting pattern is unchanged.
                    'error': r'^WARNING: .* \(offset 0x[0-9a-f]+\): (.*)$',
                    'fix': 'mp3val -nb -f {0}'
                },
                'flac': {
                    'cmdline': 'flac --test --silent {0}',
                    'formats': 'FLAC',
                    'error': '^.*: ERROR,? (.*)$'
                },
                'oggz-validate': {
                    'cmdline': 'oggz-validate {0}',
                    'formats': 'OGG'
                }
            }
        })
        if self.config['import']:
            self.register_listener('item_imported', self.item_imported)
            self.import_stages = [self.copy_original_checksum]
            self.register_listener('album_imported', self.album_imported)
        if self.config['write-check']:
            self.register_listener('write', self.item_before_write)
        if self.config['write-update']:
            self.register_listener('after_write', self.item_after_write)
        if self.config['convert-update']:
            self.register_listener('after_convert', self.after_convert)
        if self.config['integrity']:
            self.register_listener('import_task_choice',
                                   self.verify_import_integrity)

    def commands(self):
        """Expose the ``beet check`` CLI command."""
        return [CheckCommand(self.config)]

    def album_imported(self, lib, album):
        """Add checksums for all items of a freshly imported album."""
        for item in album.items():
            if not item.get('checksum', None):
                set_checksum(item)

    def item_imported(self, lib, item):
        """Add a checksum for a freshly imported singleton item."""
        if not item.get('checksum', None):
            set_checksum(item)

    def item_before_write(self, item, path, **kwargs):
        """Verify the stored checksum before beets writes tags to the file."""
        if path != item.path:
            return
        if item.get('checksum', None):
            verify_checksum(item)

    def item_after_write(self, item, path, **kwargs):
        """Refresh the checksum after beets wrote tags to the file."""
        if path != item.path:
            return
        set_checksum(item)

    def after_convert(self, item, dest, keepnew):
        """Refresh the checksum when the converted file replaced the original."""
        if keepnew:
            set_checksum(item)

    def copy_original_checksum(self, config, task):
        """Carry checksums over from items replaced during re-import."""
        for item in task.imported_items():
            checksum = None
            for replaced in task.replaced_items[item]:
                try:
                    checksum = replaced['checksum']
                except KeyError:
                    continue
                if checksum:
                    break
            if checksum:
                item['checksum'] = checksum
                item.store()

    def verify_import_integrity(self, session, task):
        """Run integrity checkers on a task; offer to skip on failures."""
        integrity_errors = []
        if not task.items:
            return
        for item in task.items:
            try:
                verify_integrity(item)
            except IntegrityError as ex:
                integrity_errors.append(ex)
        if integrity_errors:
            log.warning(u'Warning: failed to verify integrity')
            for error in integrity_errors:
                # Bug fix: report the path stored on each error.  The old
                # code reused the stale `item` loop variable, so every line
                # showed the last checked item's path instead of the
                # failing one's.
                log.warning(
                    u' {}: {}'.format(displayable_path(error.path), error)
                )
            if beets.config['import']['quiet'] \
                    or input_yn(u'Do you want to skip this album (Y/n)'):
                log.info(u'Skipping.')
                task.choice_flag = importer.action.SKIP
class CheckCommand(Subcommand):
    """The ``beet check`` CLI command.

    Verifies stored checksums, adds missing ones, updates them, exports
    them, runs external integrity tools, and fixes files those tools can
    repair.  Work is fanned out over a thread pool sized by the plugin's
    ``threads`` configuration.
    """

    def __init__(self, config):
        self.threads = config['threads'].get(int)
        self.check_integrity = config['integrity'].get(bool)
        parser = OptionParser(usage='%prog [options] [QUERY...]')
        parser.add_option(
            '-e', '--external',
            action='store_true', dest='external', default=False,
            help=u'run external tools'
        )
        parser.add_option(
            '-a', '--add',
            action='store_true', dest='add', default=False,
            help=u'add checksum for all files that do not already have one'
        )
        parser.add_option(
            '-u', '--update',
            action='store_true', dest='update', default=False,
            # Fixed help text: was "add the to the database".
            help=u'compute new checksums and add them to the database'
        )
        parser.add_option(
            '-f', '--force',
            action='store_true', dest='force', default=False,
            help=u'force updating the whole library or fixing all files'
        )
        parser.add_option(
            '--export',
            action='store_true', dest='export', default=False,
            help=u'print paths and corresponding checksum'
        )
        parser.add_option(
            '-x', '--fix',
            action='store_true', dest='fix', default=False,
            help=u'fix errors with external tools'
        )
        parser.add_option(
            '-l', '--list-tools',
            action='store_true', dest='list_tools', default=False,
            # Fixed help text: the noun "tools" was missing.
            help=u'list available third-party tools used to check integrity'
        )
        parser.add_option(
            '-q', '--quiet',
            action='store_true', dest='quiet', default=False,
            help=u'only show errors'
        )
        super(CheckCommand, self).__init__(
            parser=parser,
            name='check',
            help=u'compute and verify checksums'
        )

    def func(self, lib, options, arguments):
        """Dispatch to the sub-action selected by the CLI options."""
        self.quiet = options.quiet
        self.lib = lib
        arguments = decargs(arguments)
        self.query = arguments
        self.force_update = options.force
        if options.add:
            self.add()
        elif options.update:
            self.update()
        elif options.export:
            self.export()
        elif options.fix:
            self.fix(ask=not options.force)
        elif options.list_tools:
            self.list_tools()
        else:
            self.check(options.external)

    def add(self):
        """Compute and store checksums for items that do not have one yet."""
        self.log(u'Looking for files without checksums...')
        items = [i for i in self.lib.items(self.query)
                 if not i.get('checksum', None)]

        def add(item):
            log.debug(
                u'adding checksum for {0}'.format(displayable_path(item.path))
            )
            set_checksum(item)
            if self.check_integrity:
                try:
                    verify_integrity(item)
                except IntegrityError as ex:
                    log.warning(u'{} {}: {}'.format(
                        colorize('yellow', u'WARNING'), ex.reason,
                        displayable_path(item.path)))

        self.execute_with_progress(add, items, msg='Adding missing checksums')

    def check(self, external):
        """Verify stored checksums, or run external integrity tools.

        Exits the process with status 15 when any file fails.
        """
        if external and not IntegrityChecker.allAvailable():
            no_checkers_warning = u"No integrity checkers found. " \
                                  "Run 'beet check --list-tools'"
            raise UserError(no_checkers_warning)
        if external:
            progs = list(map(lambda c: c.name, IntegrityChecker.allAvailable()))
            plural = 's' if len(progs) > 1 else ''
            self.log(u'Using integrity checker{} {}'
                     .format(plural, ', '.join(progs)))
        items = list(self.lib.items(self.query))
        # Mutable cell so the nested closure can count failures.
        failures = [0]

        def check(item):
            try:
                if external:
                    verify_integrity(item)
                elif item.get('checksum', None):
                    verify_checksum(item)
                log.debug(u'{}: {}'.format(colorize('green', u'OK'),
                                           displayable_path(item.path)))
            except ChecksumError:
                log.error(u'{}: {}'.format(colorize('red', u'FAILED'),
                                           displayable_path(item.path)))
                failures[0] += 1
            except IntegrityError as ex:
                log.warning(u'{} {}: {}'.format(colorize('yellow', u'WARNING'),
                                                ex.reason,
                                                displayable_path(item.path)))
                failures[0] += 1
            except IOError as exc:
                log.error(u'{} {}'.format(colorize('red', u'ERROR'), exc))
                failures[0] += 1

        if external:
            msg = u'Running external tests'
        else:
            msg = u'Verifying checksums'
        self.execute_with_progress(check, items, msg)
        failures = failures[0]
        if external:
            if failures:
                self.log(u'Found {} integrity error(s)'.format(failures))
                sys.exit(15)
            else:
                self.log(u'Integrity successfully verified')
        else:
            if failures:
                self.log(u'Failed to verify checksum of {} file(s)'
                         .format(failures))
                sys.exit(15)
            else:
                self.log(u'All checksums successfully verified')

    def update(self):
        """Recompute and store checksums for all matched items."""
        # Overwriting every checksum destroys tamper evidence; confirm
        # unless a query restricts the scope or --force was given.
        if not self.query and not self.force_update:
            if not input_yn(u'Do you want to overwrite all '
                            'checksums in your database? (y/n)', require=True):
                return
        items = self.lib.items(self.query)

        def update(item):
            log.debug(u'updating checksum: {}'
                      .format(displayable_path(item.path)))
            try:
                set_checksum(item)
            except IOError as exc:
                log.error(u'{} {}'.format(colorize('red', u'ERROR'), exc))

        self.execute_with_progress(update, items, msg=u'Updating checksums')

    def export(self):
        """Print ``checksum *path`` lines (sha256sum-compatible format)."""
        for item in self.lib.items(self.query):
            if item.get('checksum', None):
                print(u'{} *{}'
                      .format(item.checksum, displayable_path(item.path)))

    def fix(self, ask=True):
        """Repair files whose configured external tool reports an error."""
        items = list(self.lib.items(self.query))
        failed = []

        def check(item):
            try:
                if 'checksum' in item:
                    verify_checksum(item)
                fixer = IntegrityChecker.fixer(item)
                if fixer:
                    fixer.check(item)
                    log.debug(u'{}: {}'.format(colorize('green', u'OK'),
                                               displayable_path(item.path)))
            except IntegrityError:
                failed.append(item)
            except ChecksumError:
                log.error(u'{}: {}'.format(colorize('red', u'FAILED checksum'),
                                           displayable_path(item.path)))
            except IOError as exc:
                log.error(u'{} {}'.format(colorize('red', u'ERROR'), exc))

        self.execute_with_progress(check, items, msg=u'Verifying integrity')

        if not failed:
            self.log(u'No MP3 files to fix')
            return
        for item in failed:
            log.info(displayable_path(item.path))
        # Bug fix: the prompt contained a stray `{}` placeholder that was
        # never filled in and was shown to the user verbatim.
        if ask and not input_yn(u'Do you want to fix these files? (y/n)',
                                require=True):
            return

        def fix(item):
            fixer = IntegrityChecker.fixer(item)
            if fixer:
                fixer.fix(item)
                log.debug(u'{}: {}'.format(colorize('green', u'FIXED'),
                                           displayable_path(item.path)))
                set_checksum(item)

        self.execute_with_progress(fix, failed, msg=u'Fixing files')

    def list_tools(self):
        """Print each configured external tool and whether it is installed."""
        checkers = [(checker.name, checker.available())
                    for checker in IntegrityChecker.all()]
        prog_length = max(map(lambda c: len(c[0]), checkers)) + 3
        for name, available in checkers:
            msg = name + (prog_length - len(name)) * u' '
            if available:
                msg += colorize('green', u'found')
            else:
                msg += colorize('red', u'not found')
            print(msg)

    def log(self, msg):
        """Print *msg* unless --quiet was given."""
        if not self.quiet:
            print(msg)

    def log_progress(self, msg, index, total):
        """Render an in-place progress line on a TTY (suppressed by --quiet)."""
        if self.quiet or not sys.stdout.isatty():
            return
        # Integer division: Python 3 `/` would render e.g.
        # "[33.333333333333336%]" instead of "[33%]".
        msg = u'{}: {}/{} [{}%]'.format(msg, index, total, index * 100 // total)
        sys.stdout.write(msg + '\r')
        sys.stdout.flush()
        if index == total:
            sys.stdout.write('\n')
        else:
            sys.stdout.write(len(msg) * ' ' + '\r')

    def execute_with_progress(self, func, args, msg=None):
        """Run `func` for each value in the iterator `args` in a thread pool.

        When the function has finished it logs the progress and the `msg`.
        """
        total = len(args)
        finished = 0
        with futures.ThreadPoolExecutor(max_workers=self.threads) as e:
            for _ in e.map(func, args):
                finished += 1
                self.log_progress(msg, finished, total)
class IntegrityError(ReadError):
    """Raised when an external integrity tool reports a damaged file."""
    pass
class IntegrityChecker(object):
    """Wrapper around one external integrity tool (mp3val, flac, ...).

    Instances are built from the plugin's ``check.external`` configuration
    and cached on the class; each knows how to probe for the executable,
    check an item, and optionally fix it.
    """

    @classmethod
    def all(cls):
        # Build one checker per configured tool and memoize the list on
        # the class itself.
        if hasattr(cls, '_all'):
            return cls._all
        cls._all = []
        for name, tool in config['check']['external'].items():
            cls._all.append(cls(name, tool))
        return cls._all

    @classmethod
    def allAvailable(cls):
        # Memoized subset of all() whose executables are actually installed.
        if not hasattr(cls, '_all_available'):
            cls._all_available = [c for c in cls.all() if c.available()]
        return cls._all_available

    def __init__(self, name, config):
        """Create a checker named *name* from its config subview.

        NOTE: the *config* parameter shadows the module-level ``config``
        import inside this method.
        """
        self.name = name
        # Command template; `{0}` is replaced by the shell-quoted file path.
        self.cmdline = config['cmdline'].get(str)
        # Formats this tool handles; True means "any format".
        if config['formats'].exists():
            self.formats = config['formats'].as_str_seq()
        else:
            self.formats = True
        # Optional multiline regex that extracts an error message from the
        # tool's output; False disables output parsing (exit code only).
        if config['error'].exists():
            self.error_match = re.compile(config['error'].get(str), re.M)
        else:
            self.error_match = False
        # Optional repair command template; False means this tool can't fix.
        if config['fix'].exists():
            self.fixcmd = config['fix'].get(str)
        else:
            self.fixcmd = False

    def available(self):
        """Return True if the tool's executable can be invoked.

        Probes by running ``<tool> -v``; a missing executable raises
        OSError.  A non-zero exit status is fine — only launch failure
        counts as unavailable.
        """
        try:
            with open(os.devnull, 'wb') as devnull:
                check_call([self.cmdline.split(' ')[0], '-v'],
                           stdout=devnull, stderr=devnull)
        except OSError:
            return False
        else:
            return True

    @classmethod
    def fixer(cls, item):
        """Return an `IntegrityChecker` instance that can fix this item.
        """
        for checker in cls.allAvailable():
            if checker.can_fix(item):
                return checker

    def can_check(self, item):
        # True when this tool handles the item's format (or all formats).
        return self.formats is True or item.format in self.formats

    def check(self, item):
        """Run the tool on *item* and raise IntegrityError on problems.

        Raises when the error regex matches the combined stdout/stderr,
        or — if no regex matched — when the tool exits non-zero.
        """
        if not self.can_check(item):
            return
        process = Popen(
            self.cmdline.format(self.shellquote(syspath(item.path).decode('utf-8'))),
            shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT
        )
        stdout = process.communicate()[0]
        if self.error_match:
            match = self.error_match.search(stdout.decode('utf-8'))
        else:
            match = False
        if match:
            raise IntegrityError(item.path, match.group(1))
        elif process.returncode:
            raise IntegrityError(item.path, "non-zero exit code for {}"
                                 .format(self.name))

    def can_fix(self, item):
        # Truthy when the format matches and a fix command is configured.
        return self.can_check(item) and self.fixcmd

    def fix(self, item):
        """Run the configured repair command on *item* in place.

        NOTE(review): check_call with stdout=PIPE and nothing reading the
        pipe can deadlock if the tool produces a lot of output — confirm
        whether the output should be consumed or sent to devnull.
        """
        check_call(self.fixcmd.format(self.shellquote(syspath(item.path).decode('utf-8'))),
                   shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)

    def shellquote(self, s):
        # Single-quote *s* for the shell, escaping embedded single quotes.
        return "'" + s.replace("'", r"'\''") + "'"
|
import logging
import os
import shutil
import sys
import gym
import os.path
import tensorflow as tf
from model import universeHead
from pretrain_cifar.data import Cifar10
from pretrain_cifar import summary, Experiment
logger = logging.getLogger(__name__)
def train(opt):
    """Supervised pre-training of the ``universeHead`` encoder on CIFAR-10.

    Builds a TF1 graph with switchable input pipelines (train/val string
    handles), trains with momentum SGD under an exponentially decaying
    learning rate, checkpoints every epoch, and — once a trained
    checkpoint exists — reports accuracy over the full train/val/test
    splits.  Training terminates the whole process via ``sys.exit()`` as
    soon as training accuracy reaches 0.95.

    ``opt`` is a pretrain_cifar ``Experiment`` carrying hyper-parameters
    (``opt.hyper``), paths (``opt.log_dir_base``, ``opt.name``) and the
    observation shape (``opt.ob_space``).
    """
    ################################################################################################
    # Read experiment to run
    ################################################################################################
    model_ctr = universeHead
    logger.info(opt.name)
    ################################################################################################

    ################################################################################################
    # Define training and validation datasets through Dataset API
    ################################################################################################
    # Initialize dataset and creates TF records if they do not exist
    dataset = Cifar10(opt)
    # Repeatable datasets for training
    train_dataset = dataset.create_dataset(augmentation=opt.hyper['augmentation'], standarization=True,
                                           set_name='train',
                                           repeat=True)
    val_dataset = dataset.create_dataset(augmentation=False, standarization=True, set_name='val', repeat=True)
    # No repeatable dataset for testing
    train_dataset_full = dataset.create_dataset(augmentation=False, standarization=True, set_name='train', repeat=False)
    val_dataset_full = dataset.create_dataset(augmentation=False, standarization=True, set_name='val', repeat=False)
    test_dataset_full = dataset.create_dataset(augmentation=False, standarization=True, set_name='test', repeat=False)
    # Handles to switch datasets without rebuilding the graph
    handle = tf.placeholder(tf.string, shape=[])
    iterator = tf.contrib.data.Iterator.from_string_handle(
        handle, train_dataset.output_types, train_dataset.output_shapes)
    train_iterator = train_dataset.make_one_shot_iterator()
    val_iterator = val_dataset.make_one_shot_iterator()
    train_iterator_full = train_dataset_full.make_initializable_iterator()
    val_iterator_full = val_dataset_full.make_initializable_iterator()
    test_iterator_full = test_dataset_full.make_initializable_iterator()
    ################################################################################################

    ################################################################################################
    # Declare DNN
    ################################################################################################
    # Get data from dataset
    image, y_ = iterator.get_next()
    # Resize CIFAR images to the RL observation size (opt.ob_space minus channels)
    image = tf.image.resize_images(image, opt.ob_space[:-1])
    if opt.extense_summary:
        tf.summary.image('input', image)
    # Call DNN
    # NOTE(review): dropout_rate is fed in every sess.run below but is not
    # visibly consumed by the graph built here — confirm whether
    # universeHead is expected to use it.
    dropout_rate = tf.placeholder(tf.float32)
    with tf.variable_scope("global"):
        y = model_ctr(image)
        # add linear readout
        # We don't apply softmax here because
        # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
        # and performs the softmax internally for efficiency.
        num_outs = len(dataset.list_labels) * 4
        y = tf.layers.dense(y, units=num_outs, activation=None)
    # parameters = list(y.trainable_variables())
    # Loss function
    with tf.name_scope('loss'):
        # weights_norm = tf.reduce_sum(
        #    input_tensor=opt.hyper['weight_decay'] * tf.stack(
        #        [tf.nn.l2_loss(i) for i in parameters]
        #    ),
        #    name='weights_norm')
        # tf.summary.scalar('weight_decay', weights_norm)
        cross_entropy = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=y))
        tf.summary.scalar('cross_entropy', cross_entropy)
        # total_loss = weights_norm + cross_entropy
        total_loss = cross_entropy
        tf.summary.scalar('total_loss', total_loss)
    # Incremented once per *epoch* (see inc_global_step below), not per batch.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    ################################################################################################

    ################################################################################################
    # Set up Training
    ################################################################################################
    # Learning rate
    # NOTE(review): num_batches_per_epoch is computed but unused; decay_steps
    # is expressed in epochs, which matches global_step being incremented
    # once per epoch — confirm this is intentional.
    num_batches_per_epoch = dataset.num_images_epoch / opt.hyper['batch_size']
    decay_steps = int(opt.hyper['num_epochs_per_decay'])
    lr = tf.train.exponential_decay(opt.hyper['learning_rate'],
                                    global_step,
                                    decay_steps,
                                    opt.hyper['learning_rate_factor_per_decay'],
                                    staircase=True)
    tf.summary.scalar('learning_rate', lr)
    tf.summary.scalar('weight_decay', opt.hyper['weight_decay'])
    # Accuracy
    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y, 1), y_)
        correct_prediction = tf.cast(correct_prediction, tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)
        tf.summary.scalar('accuracy', accuracy)
    ################################################################################################

    with tf.Session() as sess:
        ################################################################################################
        # Set up Gradient Descent
        ################################################################################################
        all_var = tf.trainable_variables()
        train_step = tf.train.MomentumOptimizer(learning_rate=lr, momentum=opt.hyper['momentum']).minimize(total_loss,
                                                                                                           var_list=all_var)
        inc_global_step = tf.assign_add(global_step, 1, name='increment')
        # Per-variable gradient summaries for TensorBoard.
        raw_grads = tf.gradients(total_loss, all_var)
        grads = list(zip(raw_grads, tf.trainable_variables()))
        for g, v in grads:
            summary.gradient_summaries(g, v, opt)
        ################################################################################################

        ################################################################################################
        # Set up checkpoints and data
        ################################################################################################
        saver = tf.train.Saver(max_to_keep=opt.max_to_keep_checkpoints, save_relative_paths=True)
        # Automatic restore model, or force train from scratch
        flag_testable = False
        # Set up directories and checkpoints
        if not os.path.isfile(opt.log_dir_base + opt.name + '/models/checkpoint'):
            sess.run(tf.global_variables_initializer())
        elif opt.restart:
            logger.info("RESTART")
            shutil.rmtree(opt.log_dir_base + opt.name + '/models/')
            shutil.rmtree(opt.log_dir_base + opt.name + '/train/')
            shutil.rmtree(opt.log_dir_base + opt.name + '/val/')
            sess.run(tf.global_variables_initializer())
        else:
            logger.info("RESTORE")
            saver.restore(sess, tf.train.latest_checkpoint(opt.log_dir_base + opt.name + '/models/'))
            flag_testable = True
        # Datasets
        # The `Iterator.string_handle()` method returns a tensor that can be evaluated
        # and used to feed the `handle` placeholder.
        training_handle = sess.run(train_iterator.string_handle())
        validation_handle = sess.run(val_iterator.string_handle())
        ################################################################################################

        ################################################################################################
        # RUN TRAIN
        ################################################################################################
        if not opt.test:
            # Prepare summaries
            merged = tf.summary.merge_all()
            train_writer = tf.summary.FileWriter(opt.log_dir_base + opt.name + '/train', sess.graph)
            val_writer = tf.summary.FileWriter(opt.log_dir_base + opt.name + '/val')
            logger.info("STARTING EPOCH = {}".format(sess.run(global_step)))
            ################################################################################################
            # Loop alternating between training and validation.
            ################################################################################################
            counter_stop = 0
            for iEpoch in range(int(sess.run(global_step)), opt.hyper['max_num_epochs']):
                # Save metadata every epoch
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                # NOTE(review): the fetched summary is discarded; this run
                # exists only to record full trace metadata for the epoch.
                summ = sess.run([merged], feed_dict={handle: training_handle,
                                                     dropout_rate: opt.hyper['drop_train']},
                                options=run_options, run_metadata=run_metadata)
                train_writer.add_run_metadata(run_metadata, 'epoch%03d' % iEpoch)
                saver.save(sess, opt.log_dir_base + opt.name + '/models/model', global_step=iEpoch)
                # Steps for doing one epoch
                for iStep in range(int(dataset.num_images_epoch / opt.hyper['batch_size'])):
                    # Epoch counter (images seen so far)
                    k = iStep * opt.hyper['batch_size'] + dataset.num_images_epoch * iEpoch
                    # Print accuracy and summaries + train steps
                    if iStep == 0:
                        # !train_step: evaluation only on the first step of each epoch
                        logger.info("* epoch: {}".format(float(k) / float(dataset.num_images_epoch)))
                        summ, acc_train = sess.run([merged, accuracy],
                                                   feed_dict={handle: training_handle,
                                                              dropout_rate: opt.hyper['drop_train']})
                        train_writer.add_summary(summ, k)
                        logger.info("train acc: {}".format(acc_train))
                        # NOTE(review): counter_stop is incremented but the
                        # process exits on the first batch reaching 0.95 —
                        # confirm whether a patience threshold was intended.
                        if acc_train >= 0.95:
                            counter_stop += 1
                            logger.info("Counter stop: {}".format(counter_stop))
                            logger.info('Done :)')
                            sys.exit()
                        else:
                            counter_stop = 0
                        sys.stdout.flush()
                        summ, acc_val = sess.run([merged, accuracy], feed_dict={handle: validation_handle,
                                                                                dropout_rate: opt.hyper['drop_test']})
                        val_writer.add_summary(summ, k)
                        logger.info("val acc: {}".format(acc_val))
                        sys.stdout.flush()
                    else:
                        sess.run([train_step], feed_dict={handle: training_handle,
                                                          dropout_rate: opt.hyper['drop_train']})
                sess.run([inc_global_step])
                logger.info("----------------")
                sys.stdout.flush()
            ################################################################################################
            flag_testable = True
            train_writer.close()
            val_writer.close()

        ################################################################################################
        # RUN TEST
        ################################################################################################
        if flag_testable:
            test_handle_full = sess.run(test_iterator_full.string_handle())
            validation_handle_full = sess.run(val_iterator_full.string_handle())
            train_handle_full = sess.run(train_iterator_full.string_handle())
            # Run one pass over a batch of the training dataset.
            # NOTE(review): train/val accuracies below are averaged over a
            # fixed 15 batches, not the full split — confirm this is intended.
            sess.run(train_iterator_full.initializer)
            acc_tmp = 0.0
            for num_iter in range(15):
                acc_val = sess.run([accuracy], feed_dict={handle: train_handle_full,
                                                          dropout_rate: opt.hyper['drop_test']})
                acc_tmp += acc_val[0]
            val_acc = acc_tmp / float(15)
            logger.info("Full train acc = {}".format(val_acc))
            sys.stdout.flush()
            # Run one pass over a batch of the validation dataset.
            sess.run(val_iterator_full.initializer)
            acc_tmp = 0.0
            for num_iter in range(15):
                acc_val = sess.run([accuracy], feed_dict={handle: validation_handle_full,
                                                          dropout_rate: opt.hyper['drop_test']})
                acc_tmp += acc_val[0]
            val_acc = acc_tmp / float(15)
            logger.info("Full val acc = {}".format(val_acc))
            sys.stdout.flush()
            # Run one pass over a batch of the test dataset.
            sess.run(test_iterator_full.initializer)
            acc_tmp = 0.0
            for num_iter in range(int(dataset.num_images_test / opt.hyper['batch_size'])):
                acc_val = sess.run([accuracy], feed_dict={handle: test_handle_full,
                                                          dropout_rate: opt.hyper['drop_test']})
                acc_tmp += acc_val[0]
            val_acc = acc_tmp / float(int(dataset.num_images_test / opt.hyper['batch_size']))
            logger.info("Full test acc: {}".format(val_acc))
            sys.stdout.flush()
            logger.info(":)")
        else:
            logger.info("MODEL WAS NOT TRAINED")
if __name__ == '__main__':
    # Route all logger output to stdout at DEBUG verbosity, replacing any
    # previously attached handlers.
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(
        logging.Formatter('%(asctime)s %(name)s:%(levelname)s:%(message)s'))
    logger.setLevel(logging.DEBUG)
    logger.handlers = []
    logger.addHandler(stream_handler)

    opt = Experiment()
    print(opt)
    # Observation-space shape for the encoder.  Other historical choices
    # (Breakout/Pong (210, 160, 3), doom (480, 640, 3), PongDeterministic-v3
    # (42, 42, 3) / (42, 42, 4) envWrap) are documented in version control.
    opt.ob_space = (42, 42, 1)  # PongDeterministic-v3 envWrap
    opt.dataset['n_channels'] = 1
    logger.info("Running with: {}".format(opt))
    train(opt)
|
import os
import shutil
import subprocess
import torch
from src import log_setup
from src.srt.infer import generate_srt
from src.media_convertor import media_conversion
LOGGER = log_setup.get_logger(__name__)
def noise_suppression(dir_name, denoiser_path):
    """Run the ``denoiser`` enhancement tool over *dir_name* in place.

    Temporarily changes the working directory to *denoiser_path* so the
    ``denoiser.enhance`` module resolves, writing enhanced audio back into
    *dir_name*.  The original working directory is restored even if the
    subprocess call raises (the original code leaked the chdir on error).
    """
    cwd = os.getcwd()
    os.chdir(denoiser_path)
    try:
        # NOTE(review): shell=True with str.format interpolation is shell-
        # injection prone if dir_name ever contains untrusted characters;
        # consider an argument list with shell=False.
        subprocess.call([
            "python -m denoiser.enhance --dns48 --noisy_dir {} --out_dir {} --sample_rate {} --num_workers {} --device cpu".format(
                dir_name, dir_name, 16000, 1)], shell=True)
    finally:
        os.chdir(cwd)
def get_srt(file, model, generator, dict_path, denoiser_path, audio_threshold=5, language='hi', half=False):
    """Convert a media file, denoise it, and generate SRT subtitles.

    The media is first converted/clipped (duration capped by
    *audio_threshold*), then enhanced with the external denoiser, and the
    enhanced WAV is fed to the SRT inference pipeline.  Runs on CUDA when
    available.
    """
    workdir = media_conversion(file, duration_limit=audio_threshold)
    noise_suppression(workdir, denoiser_path)
    enhanced_audio = workdir / 'clipped_audio_enhanced.wav'
    use_cuda = torch.cuda.is_available()
    return generate_srt(wav_path=enhanced_audio, language=language,
                        model=model, generator=generator, cuda=use_cuda,
                        dict_path=dict_path, half=half)
# Scene-description script: builds object, light and camera lists for a
# "wineglass" render and serializes them via scene_helper.dump_setup().
from scene_helper import *
import sys
print('=== Wineglass Scene ===',file=sys.stderr)
# Output resolution.
w=1920
h=1080
obj=[]
light=[]
# Materials.  Argument semantics live in scene_helper; presumably
# Material(diffuse, specular/emissive, roughness[, env_map]) — TODO confirm.
#wall_mat=Material((0.9,0.9,0.9),(0.07,0.07,0.07),0.3)
blue_wall_mat=Material((0,0,1),(0,0,0.7),0.3)
red_wall_mat=Material((0.85,0,0),(0.07,0,0),0.3)
mr_mat=MirrorMaterial((0.95,0.95,0.95))
gold_metal_mat=Material((0.85,0.85,0.85),(0.03,0.03,0.03),0.1)
# Glass materials: presumably GlassMaterial(ior, surface_color, tint) —
# TODO confirm against scene_helper.
bg_glass_mat=GlassMaterial(1.55,(1,1,1),(0.8,0.9,0.9))
yellow_glass_mat=GlassMaterial(1.55,(1,1,1),(0.9,0.9,0.6))
blue_glass_mat=GlassMaterial(1.55,(1,1,1),(0.7,0.7,0.9))
red_glass_mat=GlassMaterial(1.55,(1,1,1),(0.9,0.8,0.7))
#bg_solid_glass_mat=SolidGlassMaterial(1.55,(1,1,1),(0.95,0.95,0.95))
bg_solid_glass_mat=SolidGlassMaterial(1.55,(1,1,1),(0.8,0.9,0.9))
# Textured materials: first argument is (texture file, uv scale).
wall_mat=Material(("chessboard.jpg",(0.2,0.2)),(0.04,0.04,0.04),0.3)
# Environment light sphere material backed by an HDR sky map.
env_light_mat=Material((0.01,0.01,0.01),(0.03,0.03,0.03),0.8,'hdrsky_249.hdr')
#light_mat=Material((0.1,0.1,0.1),(0.1,0.1,0.1),0.3,"hdrsky_249.hdr")
wood_mat=Material(("wood.jpg",(0.1,0.1)),(0.04,0.04,0.04),0.2)
#light_mat=Material((1,1,1),(0.7,0.7,0.7),0.3,(100,100,100))
ball_mat=Material((0.85,0.85,0.85),(0.9,0.9,0.9),0.05)
#light.append(PointLightDecay((-15,13,18),(0.7*30,0.7*30,0.7*30)))
# 2D profile (radius, height pairs) of the glass, revolved by RotateBSpline.
#wineglass_seq=[[-0.459543, -0.0], [-0.459544, -0.0], [-0.459545, -0.0], [-0.426747, 0.351882], [-0.278898, 0.848656], [0.084005, 1.112097], [1.105511, 1.164785], [2.328629, 0.991667], [2.50336, 1.029301], [2.3456, 1.0888], [1.1628, 1.278], [0.0552, 1.2148], [-0.3812, 0.9156], [-0.622, 0.3804], [-0.9684, 0.144], [-1.48, 0.0968], [-2.1124, 0.1284], [-2.2028, 0.3172], [-2.2628, 0.9944], [-2.3232, 1.2148], [-2.3984, 1.1992], [-2.4588, 1.0576], [-2.4588, 0.7112], [-2.4588, -0.0], [-2.458801, -0.0], [-2.458802, -0.0]]
glass_seq=[[-0.8, -0.459543], [-0.8, -0.459544], [-0.8, -0.459545], [-0.871882, -0.426747], [-0.948656, -0.278898], [-1.112097, 0.084005], [-1.164785, 1.105511], [-0.991667, 2.328629], [-1.029301, 2.50336], [-1.0888, 2.3456], [-1.278, 1.1628], [-1.2148, 0.0552], [-0.9156, -0.3812], [-0.3804, -0.522], [-0.0, -0.6684]]
# Large emissive sphere acting as the environment light.
obj.append(Sphere((-13,2,9),25,env_light_mat))
#obj.append(Sphere((2,1,0),1,yellow_glass_mat))
# The glass itself: B-spline profile revolved around a vertical axis.
obj.append(RotateBSpline([3,0.6684,3],glass_seq,blue_glass_mat))
#obj.append(Sphere((5,2,8),2,bg_solid_glass_mat))
obj.append(Sphere((5,2,8),2,bg_solid_glass_mat))
# Mirror-finish Stanford bunny, scaled x10 and shifted onto the floor.
ParseObj("bunny_1k.obj",obj,lambda x:(x[0]*10,(x[1]-0.07)*10,x[2]*10-3),mr_mat)
#obj.append(Triangle((3,1,0),(3,5,0),(3,1,3),glass_mat))
# Wooden ground plane.
obj.append(Plane((0,1,0),0,wood_mat))
#obj.append(Plane((1,0,0),7,wall_mat))
# Camera: position, forward vector, up vector, resolution, focal parameter.
cam=PerspectiveCamera((-13,5,9),
                      (0.8968228648554929,-0.2638730619654009,-0.3550772914081536),
                      (0.2453429990417326,0.9645575115980382,-0.09713815385474979),w,h,1500)
setup={'obj':obj,'light':light,'cam':cam}
dump_setup(setup)
import requests
class Payload:
    """
    Represents a media payload to be published.

    The reason to have this as a class is that there might be verifications
    and checks to be performed on the payload to ensure it is valid. Those
    verifications live here; for now the payload is a simple text field
    (expected to be a URL). Media-type specific verifications can be added
    as this class takes on more responsibility.
    """

    def __init__(self, media, timeout=10):
        # `media` is expected to be a URL string. `timeout` (seconds) bounds
        # the verification request; requests has no default timeout, so the
        # original could hang indefinitely on an unresponsive host.
        self.payload = media
        self.timeout = timeout
        self.status = self.link_verifier()

    def link_verifier(self):
        """
        Verify the payload link by issuing a HEAD request.

        Returns the HTTP status code; callers can treat codes > 200 as
        errors. Redirects are followed so shortened/moved URLs still
        validate against their final target.
        """
        # HEAD avoids downloading the response body.
        return requests.head(self.payload, allow_redirects=True,
                             timeout=self.timeout).status_code

    def print_attributes(self):
        """Print the payload and its last verification status code."""
        print(f"{'payload:':15} {self.payload}")
        print(f"{'status code:':15} {self.status}")
|
import pandas as pd
import numpy as np
import h5py
import geopandas as gp
import os
import datetime
import dask
import dask.dataframe as dd
from tqdm import tqdm
def latlon_iter(latdf, londf, valdf, date):
    """Flatten gridded lat/lon/sst frames into one long-format frame.

    All three inputs share the same grid shape; the result has one row per
    grid cell with columns 'lat', 'lon', 'sst' plus a constant 'date'.
    """
    stacked = (
        pd.concat([latdf, londf, valdf], axis=1, keys=['lat', 'lon', 'sst'])
        .stack()
        .reset_index()
    )
    stacked = stacked.drop(columns=['level_0', 'level_1'])
    stacked['date'] = date
    return stacked
def latlon_iter_chlorpar(latdf, londf, valdf1, valdf2, date):
    """Flatten gridded lat/lon/chlor/par frames into one long-format frame."""
    frames = [latdf, londf, valdf1, valdf2]
    labels = ['lat', 'lon', 'chlor', 'par']
    flat = pd.concat(frames, axis=1, keys=labels).stack().reset_index()
    flat = flat.drop(columns=['level_0', 'level_1'])
    flat['date'] = date
    return flat
# @dask.delayed
def get_outdf(h5file):
    """Read one L2 granule and return a long-format DataFrame.

    For '*L2_LAC_OC*' files returns lat/lon/chlor/par rows with fill values
    removed; for '*L2.SST.NRT*' files returns lat/lon/sst rows (scaled by
    0.005) with the fill value removed. Returns None for any other filename
    pattern (previous implicit behavior, kept for compatibility).
    """
    # Context manager ensures the HDF5 handle is released; the original
    # never closed the file, leaking handles across many granules.
    with h5py.File(h5file, 'r') as ds:
        date = datetime.datetime.strptime(
            ds.attrs['time_coverage_start'].decode("utf-8"),
            "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%Y-%m-%d")
        if 'L2_LAC_OC' in h5file:
            # Ocean-colour product: chlorophyll + PAR.
            lat_df = pd.DataFrame(ds['navigation_data']['latitude'])
            lon_df = pd.DataFrame(ds['navigation_data']['longitude'])
            chlor_df = pd.DataFrame(ds['geophysical_data']['chlor_a'])
            # scale/offset presumably from the PAR product spec -- TODO confirm
            par_df = pd.DataFrame(ds['geophysical_data']['par']) * 0.002 + 65.5
            chlorpar_out = latlon_iter_chlorpar(lat_df, lon_df, chlor_df,
                                                par_df, date)
            # Drop fill values (-32767.0) and non-positive PAR.
            return chlorpar_out.loc[(chlorpar_out['chlor'] != -32767.0)
                                    & (chlorpar_out['par'] > 0)]
        elif 'L2.SST.NRT' in h5file:
            # SST product; 0.005 is the scale factor applied to raw counts.
            sst_df = pd.DataFrame(ds['geophysical_data']['sst']) * 0.005
            lat_df = pd.DataFrame(ds['navigation_data']['latitude'])
            long_df = pd.DataFrame(ds['navigation_data']['longitude'])
            sst_out = latlon_iter(lat_df, long_df, sst_df, date)
            # -163.835 is the scaled fill value (-32767 * 0.005).
            return sst_out.loc[sst_out['sst'] != -163.835]
    return None
# Collect file paths for the raw granules we know how to parse
# (ocean-colour 'L2_LAC_OC' and SST 'L2.SST.NRT' products only).
data_path = "data/michigan/requested_files/"
data_files = []
for filename in os.listdir(data_path):
    if any(x in filename for x in ['L2_LAC_OC', 'L2.SST.NRT']):
        data_files.append(data_path + filename)

# Root directory containing one sub-directory of shapefiles per location.
shp_path = "shapefiles/"

# Accumulators: one growing DataFrame per location for each product type.
location_list_chlorpar = {
    'erie':pd.DataFrame(),
    'michigan':pd.DataFrame(),
}
location_list_sst = {
    'erie':pd.DataFrame(),
    'michigan':pd.DataFrame(),
}
# @dask.delayed
def add_df(location, loc_file, out_df, filename):
    """Clip out_df to a location's shapefile and accumulate the result.

    Clipped rows are appended to the module-level accumulator matching the
    product type implied by `filename` (chlor/par vs. SST).
    """
    shp = gp.read_file(shp_path + location + '/' + loc_file).geometry[0]
    # Cheap bounding-box prefilter before the (expensive) polygon clip.
    minLon, minLat, maxLon, maxLat = shp.bounds
    out_df_filtered = out_df.loc[
        ((out_df['lat'] < maxLat) & (out_df['lat'] > minLat))
        & ((out_df['lon'] < maxLon) & (out_df['lon'] > minLon))]
    gdf = gp.GeoDataFrame(
        out_df_filtered,
        geometry=gp.points_from_xy(out_df_filtered.lon, out_df_filtered.lat))
    clipped = gp.clip(gdf, shp)
    date_df = pd.DataFrame(clipped.drop(columns='geometry'))
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent.
    if 'L2_LAC_OC' in filename:
        location_list_chlorpar[location] = pd.concat(
            [location_list_chlorpar[location], date_df])
    else:
        location_list_sst[location] = pd.concat(
            [location_list_sst[location], date_df])
# @dask.delayed
def to_csv(location_list_chlorpar, location_list_sst):
    """Merge each location's chlor/par and SST frames and write one CSV.

    Both accumulators hold plain pandas DataFrames, so pd.merge is used
    directly; the original routed through dask.dataframe.merge, which is
    unnecessary for in-memory frames and whose to_csv has different
    filename semantics for dask-backed results.
    """
    for location in location_list_chlorpar.keys():
        loc_df_chlorpar = location_list_chlorpar[location]
        loc_df_sst = location_list_sst[location]
        # Default inner join on the shared columns (lat/lon/date).
        merged_df = pd.merge(loc_df_chlorpar, loc_df_sst)
        merged_df.to_csv(location + '.csv', index=False)
def f(filenames):
    """Driver: parse every granule, clip to each location, then write CSVs."""
    for filename in tqdm(filenames):
    # for filename in filenames:
        out_df = get_outdf(filename)
        # print(out_df)
        # NOTE(review): assumes every directory under shp_path is a key of
        # the accumulator dicts -- confirm before adding new locations.
        for location in os.listdir(shp_path):
            for loc_file in os.listdir(shp_path + location):
                if loc_file.endswith('.shp'):
                    add_df(location, loc_file, out_df, filename)
    # Write merged outputs once all granules have been accumulated.
    to_csv(location_list_chlorpar, location_list_sst)
# Run the pipeline eagerly; the dask.delayed variant is left commented out.
# dask.compute(f(data_files))
f(data_files)
# print(location_list_chlorpar)
# print(location_list_sst)
|
import os
from config import db
from models import Product

# Seed objects inserted when the database is created.
PRODUCTS = [
    {"name": "Samsung Galaxy S11", "brand": "Samsung", "price": 3400.00, "stock": 150},
    {"name": "Samsung Galaxy M21s", "brand": "Samsung", "price": 1400.00, "stock": 1150},
    {"name": "Redmi Note 10 Pro", "brand": "Xiaomi", "price": 2100.00, "stock": 240}
]

# Remove the database file if one already exists in the project root,
# so the script always starts from a clean slate.
if os.path.exists("products.db"):
    os.remove("products.db")

# Create the database schema and insert the initial objects.
db.create_all()
for product in PRODUCTS:
    p = Product(
        name=product.get("name"),
        brand=product.get("brand"),
        price=product.get("price"),
        stock=product.get("stock")
    )
    db.session.add(p)
# NOTE(review): commit assumed to run once after the loop (single
# transaction) -- confirm against the original indentation.
db.session.commit()
|
import numpy as np
from tqdm import tqdm
from math import sqrt
def read_file(path='nn.txt'):
    """Parse a TSP instance file.

    The first line holds the number of cities; each following line holds
    ``<id> <x> <y>``. Returns ``(num_cities, graph)`` where ``graph`` maps
    the integer city id to its ``(x, y)`` coordinate tuple.
    """
    with open(path) as f:
        num_cities = int(f.readline().strip('\n'))
        graph = {}
        for line in f.readlines():
            fields = [float(tok) for tok in line.strip('\n').split(' ')]
            graph[int(fields[0])] = (fields[1], fields[2])
    return num_cities, graph
def euc_dist(x, y):
    """Return the Euclidean distance between two 2-D points x and y."""
    dx = x[0] - y[0]
    dy = x[1] - y[1]
    return sqrt(dx * dx + dy * dy)
# def com_dist_graph(num_cities, graph):
# distance_matrix = np.ones([num_cities, num_cities])
# for i in tqdm(range(num_cities)):
# for j in range(num_cities):
# distance_matrix[i][j] = euc_dist(graph[i + 1], graph[j + 1] )
# return distance_matrix
def comp_shortest_dist_from_one_city(source_city, destination_cites, graph):
    """Return ``[city, distance]`` for the destination nearest source_city.

    Ties on distance are broken by the smaller city id, matching the
    original sort order. Uses a single O(n) min() pass instead of building
    and fully sorting the candidate list (O(n log n)).

    Raises ValueError if ``destination_cites`` is empty (the original
    raised IndexError; callers never pass an empty set).
    """
    return min(
        ([city, euc_dist(graph[source_city], graph[city])]
         for city in destination_cites),
        key=lambda pair: (pair[1], pair[0]))
def find_next_city(start_city, graph, visited_cites, cities_set, tour_length):
    """Greedy step: move to the unvisited city nearest to start_city.

    Mutates ``visited_cites`` in place (the set is also returned for
    convenience) and returns the tour length extended by the hop taken.
    """
    # Candidates are all cities not yet visited.
    destination_cites = cities_set - visited_cites
    (next_city, s_dist) = \
        comp_shortest_dist_from_one_city(start_city, destination_cites, graph)
    visited_cites.add(next_city)
    tour_length += s_dist
    return next_city, visited_cites, tour_length
def nn_tsp(num_cities, graph):
    """Nearest-neighbour TSP heuristic starting (and ending) at city 1.

    Returns the total length of the greedy tour, including the final hop
    back to the start city.
    """
    # City 1 is the fixed start; it counts as visited from the outset.
    visited_cites = set([1])
    cities_set = set([i for i in range(1, num_cities + 1)])
    tour_length = 0
    next_city, visited_cites, tour_length = \
        find_next_city(1, graph, visited_cites, cities_set, tour_length)
    while len(visited_cites) != num_cities:
        # Progress log every 100 cities (start city excluded from count).
        if (len(visited_cites) -1) % 100 == 0:
            print('Have processed %d cities'%len(visited_cites))
        next_city, visited_cites, tour_length = \
            find_next_city(next_city, graph, visited_cites, cities_set, tour_length)
    # Close the tour by returning to the start city.
    last_hop = euc_dist(graph[next_city], graph[1])
    tour_length += last_hop
    return tour_length
def main():
    """Read the instance from the default nn.txt and print the tour length."""
    num_cities, graph = read_file()
    tour_length = nn_tsp(num_cities, graph)
    print(tour_length)


if __name__ == '__main__':
    main()
|
# Copyright (c) 2021 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""EIS Message Bus echo service Python example.
"""
import json
import argparse
import eis.msgbus as mb
# Argument parsing
ap = argparse.ArgumentParser()
ap.add_argument('config', help='JSON configuration')
ap.add_argument('-s', '--service-name', dest='service_name',
                default='echo_service', help='Service name')
args = ap.parse_args()

# Initialized to None so the finally block can safely test what exists.
msgbus = None
service = None

# Message bus configuration is plain JSON read from the given path.
with open(args.config, 'r') as f:
    config = json.load(f)

try:
    print('[INFO] Initializing message bus context')
    msgbus = mb.MsgbusContext(config)
    print(f'[INFO] Initializing service for topic \'{args.service_name}\'')
    service = msgbus.new_service(args.service_name)
    print('[INFO] Running...')
    while True:
        # Echo loop: answer every request with its own meta-data.
        request = service.recv()
        print(f'[INFO] Received request: {request.get_meta_data()}')
        service.response(request.get_meta_data())
except KeyboardInterrupt:
    print('[INFO] Quitting...')
finally:
    # NOTE(review): only the service handle is closed; presumably the
    # msgbus context needs no explicit close -- confirm against the EIS API.
    if service is not None:
        service.close()
|
# -*- coding: utf-8 -*-
#
# PySceneDetect: Python-Based Video Scene Detector
# ---------------------------------------------------------------
# [ Site: http://www.bcastell.com/projects/PySceneDetect/ ]
# [ Github: https://github.com/Breakthrough/PySceneDetect/ ]
# [ Documentation: http://pyscenedetect.readthedocs.org/ ]
#
# Copyright (C) 2014-2021 Brandon Castellano <http://www.bcastell.com>.
#
# PySceneDetect is licensed under the BSD 3-Clause License; see the included
# LICENSE file, or visit one of the following pages for details:
# - https://github.com/Breakthrough/PySceneDetect/
# - http://www.bcastell.com/projects/PySceneDetect/
#
# This software uses Numpy, OpenCV, click, tqdm, simpletable, and pytest.
# See the included LICENSE files or one of the above URLs for more information.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
""" ``scenedetect.platform`` Module
This file contains all platform/library/OS-specific compatibility fixes,
intended to improve the systems that are able to run PySceneDetect, and allow
for maintaining backwards compatibility with existing libraries going forwards.
Other helper functions related to the detection of the appropriate dependency
DLLs on Windows and getting uniform line-terminating csv reader/writer objects
are also included in this module.
With respect to the Python standard library itself and Python 2 versus 3,
this module adds compatibility wrappers for Python's Queue/queue (Python 2/3,
respectively) as scenedetect.platform.queue.
For OpenCV 2.x, the scenedetect.platform module also makes a copy of the
OpenCV VideoCapture property constants from the cv2.cv namespace directly
to the cv2 namespace. This ensures that the cv2 API is consistent
with those changes made to it in OpenCV 3.0 and above.
This module also includes an alias for the unicode/string types in Python 2/3
as STRING_TYPE intended to help with parsing string types from the CLI parser.
"""
#
# TODO(v0.6): Move this into scenedetect.cli.
#
# Standard Library Imports
from __future__ import print_function
import csv
import logging
import os
import os.path
import platform
import struct
import subprocess
import sys
# Third-Party Library Imports
import cv2
# pylint: disable=unused-import
# pylint: disable=no-member
##
## tqdm Library (scenedetect.platform.tqdm will be tqdm object or None)
##
try:
    from tqdm import tqdm
except ImportError:
    # tqdm is optional; callers must handle scenedetect.platform.tqdm being
    # None and skip progress-bar output.
    tqdm = None
# pylint: enable=unused-import

##
## click/Command-Line Interface String Type
##

# String type (used to allow FrameTimecode object to take both unicode and native
# string objects when being constructed via scenedetect.platform.STRING_TYPE).
# pylint: disable=invalid-name, undefined-variable
if sys.version_info[0] == 2:
    STRING_TYPE = unicode
else:
    STRING_TYPE = str
# pylint: enable=invalid-name, undefined-variable

# Compatibility fix for OpenCV v2.x (copies CAP_PROP_* properties from the
# cv2.cv namespace to the cv2 namespace, as the cv2.cv namespace was removed
# with the release of OpenCV 3.0).
if not 'CAP_PROP_FPS' in dir(cv2):
    cv2.CAP_PROP_FRAME_WIDTH = cv2.cv.CV_CAP_PROP_FRAME_WIDTH
    cv2.CAP_PROP_FRAME_HEIGHT = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
    cv2.CAP_PROP_FPS = cv2.cv.CV_CAP_PROP_FPS
    cv2.CAP_PROP_POS_MSEC = cv2.cv.CV_CAP_PROP_POS_MSEC
    cv2.CAP_PROP_POS_FRAMES = cv2.cv.CV_CAP_PROP_POS_FRAMES
    cv2.CAP_PROP_FRAME_COUNT = cv2.cv.CV_CAP_PROP_FRAME_COUNT
    cv2.CAP_PROP_FOURCC = cv2.cv.CV_CAP_PROP_FOURCC
    cv2.INTER_CUBIC = cv2.cv.INTER_CUBIC
def get_aspect_ratio(cap, epsilon=0.01):
    # type: (cv2.VideoCapture, float) -> float
    """ Compatibility helper for OpenCV < v3.4.1 to get the display aspect
    ratio of a video; on versions without the SAR properties it always
    returns 1.0.

    Arguments:
        cap: cv2.VideoCapture object. Must be opened and in valid state.
        epsilon: Used to compare numerator/denominator to zero.
    Returns:
        float: CAP_PROP_SAR_NUM / CAP_PROP_SAR_DEN, or 1.0 when the
        property is unavailable or either value is (near) zero, e.g. when
        the video was not opened correctly.
    """
    if 'CAP_PROP_SAR_NUM' not in dir(cv2):
        return 1.0
    numerator = cap.get(cv2.CAP_PROP_SAR_NUM)
    denominator = cap.get(cv2.CAP_PROP_SAR_DEN)
    # Zero numerator/denominator -> fall back to square pixels.
    if abs(numerator) < epsilon or abs(denominator) < epsilon:
        return 1.0
    return numerator / denominator
##
## OpenCV DLL Check Function (Windows Only)
##
def check_opencv_ffmpeg_dll():
    # type: () -> Tuple[bool, str]
    """ Check if OpenCV video I/O support is available on Windows by
    searching PATH for the matching opencv_ffmpeg*.dll.

    On non-Windows systems, or for OpenCV versions that do not follow the
    X.Y.Z numbering pattern, this always returns True -- so there may be
    false positives (True), but no false negatives (False). In those cases
    PySceneDetect reports the open failure and, on Windows, warns that the
    missing DLL may be the cause.

    Returns:
        (True, DLL_NAME) if the expected DLL was found on PATH (or the
        check does not apply), (False, DLL_NAME) otherwise. DLL_NAME is
        the blank string on non-Windows platforms.
    """
    # Short-circuit first on platform so cv2 version parsing only happens
    # where the DLL check is meaningful.
    if platform.system() != 'Windows':
        return True, ''
    if not (cv2.__version__[0].isdigit() and cv2.__version__.find('.') > 0):
        return True, ''
    suffix = '_64' if struct.calcsize("P") == 8 else ''
    dll_filename = 'opencv_ffmpeg{OPENCV_VERSION}{IS_64_BIT}.dll'.format(
        OPENCV_VERSION=cv2.__version__.replace('.', ''),
        IS_64_BIT=suffix)
    found = any(os.path.exists(os.path.join(dirname, dll_filename))
                for dirname in os.environ['PATH'].split(';'))
    return found, dll_filename
##
## OpenCV imwrite Supported Image Types & Quality/Compression Parameters
##
def get_cv2_imwrite_params():
    # type: () -> Dict[str, Union[int, None]]
    """ Get OpenCV imwrite Params: Returns a dict of supported image formats
    and their associated quality/compression parameter.

    Returns:
        (Dict[str, Union[int, None]]) Mapping of image extension ('jpg',
        'png', 'webp') to the matching cv2.IMWRITE_* constant, or None when
        the running OpenCV build does not define that constant.
    """
    def _lookup(param_name):
        # type: (str) -> Union[int, None]
        # OpenCV 2.x prefixed these constants with CV_; strip it for 3.x+.
        if param_name.startswith('CV_'):
            param_name = param_name[3:]
        return getattr(cv2, param_name, None)
    return {
        'jpg': _lookup('IMWRITE_JPEG_QUALITY'),
        'png': _lookup('IMWRITE_PNG_COMPRESSION'),
        'webp': _lookup('IMWRITE_WEBP_QUALITY')
    }
##
## Python csv Module Wrapper (for StatsManager, and CliContext/list-scenes command)
##
def get_csv_reader(file_handle):
    # type: (File) -> csv.reader
    """ Return a csv.reader over file_handle using '\\n' line termination,
    so behavior is uniform across platforms. """
    return csv.reader(file_handle, lineterminator='\n')
def get_csv_writer(file_handle):
    # type: (File) -> csv.writer
    """ Return a csv.writer over file_handle using '\\n' line termination,
    so behavior is uniform across platforms. """
    return csv.writer(file_handle, lineterminator='\n')
##
## File I/O
##
def get_and_create_path(file_path, output_directory=None):
    # type: (str, Optional[str]) -> str
    """ Resolve file_path against output_directory and create any missing
    parent directories along the way.

    If file_path is already an absolute path, output_directory is ignored.

    Arguments:
        file_path (str): File name to resolve; may be None, in which case
            None is returned and nothing is created.
        output_directory (Optional[str]): Base directory used when
            file_path is relative to the working directory.
    Returns:
        (str) Full path to output file suitable for writing.
    """
    if file_path is None:
        return None
    # Relative paths are rooted at output_directory when one is given.
    if output_directory is not None and not os.path.isabs(file_path):
        file_path = os.path.join(output_directory, file_path)
    # Create all parent directories. An already-existing tree raises
    # OSError, which is deliberately ignored (Python 2 compatible exist_ok).
    try:
        os.makedirs(os.path.split(os.path.abspath(file_path))[0])
    except OSError:
        pass
    return file_path
##
## Logging
##
def init_logger(log_level=logging.INFO, show_stdout=False, log_file=None):
    """ Initializes the Python logging module for PySceneDetect.

    Mainly used by the command line interface, but can also be used by other
    modules by calling init_logger(). The logger instance used is named
    'pyscenedetect'. All existing log handlers are removed every time this
    function is invoked.

    Arguments:
        log_level: Verbosity of log messages.
        show_stdout: If True, messages are also sent to stdout.
        log_file: File to also send messages to, in addition to stdout.

    Returns:
        The configured 'pyscenedetect' logging.Logger instance.
    """
    # Format of log messages depends on verbosity.
    format_str = '[PySceneDetect] %(message)s'
    if log_level == logging.DEBUG:
        format_str = '%(levelname)s: %(module)s.%(funcName)s(): %(message)s'
    # Get the named logger and remove any existing handlers.
    logger_instance = logging.getLogger('pyscenedetect')
    logger_instance.handlers = []
    logger_instance.setLevel(log_level)
    # Add stdout handler if required.
    if show_stdout:
        handler = logging.StreamHandler(stream=sys.stdout)
        handler.setLevel(log_level)
        handler.setFormatter(logging.Formatter(fmt=format_str))
        logger_instance.addHandler(handler)
    # Add file handler if required.
    if log_file:
        log_file = get_and_create_path(log_file)
        handler = logging.FileHandler(log_file)
        handler.setLevel(log_level)
        handler.setFormatter(logging.Formatter(fmt=format_str))
        logger_instance.addHandler(handler)
    return logger_instance

# Default logger to be used by library objects.
logger = init_logger()
##
## Running External Commands
##
class CommandTooLong(Exception):
    """ Raised when a command line argument list exceeds the length limit
    the Windows command prompt can handle. """
def invoke_command(args):
    # type: (List[str]) -> None
    """ Same as calling Python's subprocess.call() method, but explicitly
    raises a different exception when the command length is too long.

    See https://github.com/Breakthrough/PySceneDetect/issues/164 for details.

    Arguments:
        args (List[str]): List of strings to pass to subprocess.call().
    Returns:
        int: Return code of command.
    Raises:
        CommandTooLong when passed command list exceeds built in command line
        length limit on Windows.
    """
    try:
        return subprocess.call(args)
    except OSError as err:
        # Only Windows needs the length-limit translation below.
        if os.name != 'nt':
            raise
        message = str(err)
        # Error 206: The filename or extension is too long
        # Error 87: The parameter is incorrect
        if any(code in message for code in ('206', '87')):
            raise CommandTooLong()
        raise
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: postgresql_ext
short_description: Add or remove PostgreSQL extensions from a database.
description:
- Add or remove PostgreSQL extensions from a database.
version_added: "1.9"
options:
name:
description:
- name of the extension to add or remove
required: true
default: null
db:
description:
- name of the database to add or remove the extension to/from
required: true
default: null
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: localhost
port:
description:
- Database port to connect to.
required: false
default: 5432
state:
description:
- The database extension state
required: false
default: present
choices: [ "present", "absent" ]
notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed
on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using
this module.
requirements: [ psycopg2 ]
author: "Daniel Schep (@dschep)"
'''
EXAMPLES = '''
# Adds postgis to the database "acme"
- postgresql_ext:
name: postgis
db: acme
'''
# psycopg2 is an optional dependency; its absence is reported via
# fail_json() in main() instead of crashing at import time (Ansible
# module convention).
try:
    import psycopg2
    import psycopg2.extras
except ImportError:
    postgresqldb_found = False
else:
    postgresqldb_found = True
class NotSupportedError(Exception):
    """Raised when an operation is not supported by the target server."""
# ===========================================
# PostgreSQL module specific support methods.
#
def ext_exists(cursor, ext):
    """Return True if extension ``ext`` is installed in the database."""
    # Parameterized query: psycopg2 substitutes %(ext)s safely.
    cursor.execute("SELECT * FROM pg_extension WHERE extname=%(ext)s",
                   {'ext': ext})
    return cursor.rowcount == 1
def ext_delete(cursor, ext):
    """Drop extension ``ext`` if present; return True when a drop ran."""
    if not ext_exists(cursor, ext):
        return False
    # NOTE(review): the extension name is interpolated into the SQL as an
    # identifier; `ext` comes from the module arguments and must be trusted.
    cursor.execute('DROP EXTENSION "%s"' % ext)
    return True
def ext_create(cursor, ext):
    """Create extension ``ext`` if missing; return True when a create ran."""
    if ext_exists(cursor, ext):
        return False
    # NOTE(review): identifier interpolated directly into the SQL; `ext`
    # comes from the module arguments and must be trusted.
    cursor.execute('CREATE EXTENSION "%s"' % ext)
    return True
# ===========================================
# Module execution.
#
def main():
    """Ansible entry point: ensure the extension is present or absent."""
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default="postgres"),
            login_password=dict(default="", no_log=True),
            login_host=dict(default=""),
            port=dict(default="5432"),
            db=dict(required=True),
            ext=dict(required=True, aliases=['name']),
            state=dict(default="present", choices=["absent", "present"]),
        ),
        supports_check_mode = True
    )

    if not postgresqldb_found:
        module.fail_json(msg="the python psycopg2 module is required")

    db = module.params["db"]
    ext = module.params["ext"]
    # NOTE(review): `port` is read here but never used directly -- the port
    # reaches psycopg2.connect() through the kw mapping below.
    port = module.params["port"]
    state = module.params["state"]
    changed = False

    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the **kw
    # dictionary
    params_map = {
        "login_host":"host",
        "login_user":"user",
        "login_password":"password",
        "port":"port"
    }
    kw = dict( (params_map[k], v) for (k, v) in module.params.items()
               if k in params_map and v != '' )
    try:
        db_connection = psycopg2.connect(database=db, **kw)
        # Enable autocommit so we can create databases
        if psycopg2.__version__ >= '2.4.2':
            db_connection.autocommit = True
        else:
            # Older psycopg2 lacks the autocommit attribute.
            db_connection.set_isolation_level(psycopg2
                                              .extensions
                                              .ISOLATION_LEVEL_AUTOCOMMIT)
        cursor = db_connection.cursor(
            cursor_factory=psycopg2.extras.DictCursor)
    except Exception:
        e = get_exception()
        module.fail_json(msg="unable to connect to database: %s" % e)

    try:
        # In check mode, report what WOULD change without executing any DDL.
        if module.check_mode:
            if state == "present":
                changed = not ext_exists(cursor, ext)
            elif state == "absent":
                changed = ext_exists(cursor, ext)
        else:
            if state == "absent":
                changed = ext_delete(cursor, ext)
            elif state == "present":
                changed = ext_create(cursor, ext)
    except NotSupportedError:
        e = get_exception()
        module.fail_json(msg=str(e))
    except Exception:
        e = get_exception()
        module.fail_json(msg="Database query failed: %s" % e)

    module.exit_json(changed=changed, db=db, ext=ext)
# import module snippets
# (bottom-of-file star import is the historical Ansible module convention)
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception

if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""Flux Calculation class tests.
This script tests the operation of the Flux Calculation Class.
Created on Fri Apr 16 11:53:12 2021
@author: denis
"""
from PSF import Point_Spread_Function
from CHC import Concrete_Channel_1
import pytest
from astropy.table import Table
from photutils.datasets import make_gaussian_sources_image
import numpy as np
# Shared CCD operation-mode values consumed by the fixtures below.
dic = {'em_gain': 1, 'binn': 1, 't_exp': 1}
@pytest.fixture
def chc1():
    # Channel fixture: channel 1 at -70 C in photometric acquisition mode.
    return Concrete_Channel_1(ccd_temp=-70,
                              sparc4_acquisition_mode='phot')


@pytest.fixture
def psf(chc1):
    # PSF under test; the two positional 3s presumably are ccd_gain and
    # gaussian_std (matching the assertions below) -- TODO confirm order.
    return Point_Spread_Function(chc1, dic, 3, 3)
# ------------------------ Initialize the class --------------------------
def test_CHC(psf):
    """The PSF must hold a truthy channel characterization (CHC) object."""
    # Direct truthiness assert replaces the var=0/if/var=1 pattern.
    assert psf.CHC


def test_FC(psf):
    """The PSF must hold a truthy flux-calculation (FC) object."""
    assert psf.FC


def test_TSR(psf):
    """The PSF must hold a truthy TSR object."""
    assert psf.TSR


def test_ASR(psf):
    """The PSF must hold a truthy ASR object."""
    assert psf.ASR
def test_em_gain(psf):
    # Forwarded from the `dic` operation-mode values.
    assert psf.em_gain == 1


def test_bin(psf):
    assert psf.binn == 1


def test_t_exp(psf):
    assert psf.t_exp == 1


def test_ccd_gain(psf):
    # Passed positionally to Point_Spread_Function in the fixture.
    assert psf.ccd_gain == 3
# ----------------------- Calculate star PSF -----------------------------
def test_calc_star_flux(psf):
    # _calculate_star_flux stores its result on the instance; 100 is the
    # expected flux for this channel/mode configuration.
    psf._calculate_star_flux()
    assert psf.star_flux == 100
# ----------------------- Calculate star PSF -----------------------------
def test_calculate_star_PSF(psf):
    # Rebuild the expected Gaussian star image with the same parameters the
    # PSF class uses, then compare the two images' integrated flux.
    em_gain = dic['em_gain']
    binn = dic['binn']
    t_exp = dic['t_exp']
    ccd_gain = 3
    gaussian_std = 3
    star_flux = 100
    # Amplitude scales with exposure, EM gain and binning area, reduced by
    # the CCD gain -- presumably the ADU conversion; confirm in the class.
    gaussian_amplitude = star_flux * t_exp * em_gain * binn**2 / ccd_gain
    shape = (200, 200)
    table = Table()
    table['amplitude'] = [gaussian_amplitude]
    table['x_mean'] = [100]
    table['y_mean'] = [100]
    table['x_stddev'] = [gaussian_std/binn]
    table['y_stddev'] = [gaussian_std/binn]
    table['theta'] = np.radians(np.array([0]))
    star_image = make_gaussian_sources_image(shape, table)
    # Compare integrated flux rather than pixel-wise equality.
    assert np.sum(psf.create_star_PSF()) == np.sum(star_image)
|
import polymath as pm
from .template_utils import _get_single_node_indices
from polymath.srdfg.util import squeeze_shape
from numbers import Integral
import numpy as np
import functools
class tensor_transpose(pm.Template):
    """ONNX Transpose: copies pm.transpose(data, perm) element-wise into out."""
    def define_graph(self, data, out, perm=None):
        temp = pm.transpose(data, perm)
        # Element-wise copy so `out` is written through explicit indices.
        indices = _get_single_node_indices(temp)
        out[indices] = temp[indices]

    @property
    def inputs(self):
        return (self.args[0],)

    @property
    def outputs(self):
        return (self.args[1],)

    @property
    def perm(self):
        # Default matches numpy: reverse all axes when no perm was given.
        return self.kwargs["perm"] or tuple(reversed(range(len(self.args[0].shape))))
class tensor_flip(pm.Template):
    """ONNX-style Flip: copies pm.flip(data, axis) element-wise into out."""
    def define_graph(self, data, out, axis=None):
        temp = pm.flip(data, axis)
        indices = _get_single_node_indices(temp)
        out[indices] = temp[indices]

    @property
    def inputs(self):
        return (self.args[0],)

    @property
    def outputs(self):
        return (self.args[1],)
class tensor_reshape(pm.Template):
    """ONNX Reshape: copies pm.reshape(data, new_shape) element-wise into out."""
    def define_graph(self, data, out, new_shape):
        temp = pm.reshape(data, new_shape)
        indices = _get_single_node_indices(temp)
        out[indices] = temp[indices]

    @property
    def inputs(self):
        return (self.args[0],)

    @property
    def outputs(self):
        return (self.args[1],)
class tensor_pad(pm.Template):
    """ONNX Pad: applies pm.pad and copies the padded result into out."""
    def define_graph(self, data, out, pad_start, pad_end=None):
        assert isinstance(pad_start, (list, tuple)) and len(pad_start) >= 1
        # When given nested [(before, after), ...] pairs, split them into
        # separate start/end tuples; pad_end must not also be passed.
        if isinstance(pad_start[0], (list, tuple)):
            assert pad_end is None
            pad_end = tuple([pad_start[i][1] for i in range(len(pad_start))])
            pad_start = tuple([pad_start[i][0] for i in range(len(pad_start))])
        temp = pm.pad(data, pad_start, pad_end=pad_end)
        indices = _get_single_node_indices(temp)
        # Output shape depends on the padding, so set it from the padded node.
        out.set_shape(temp.shape)
        out[indices] = temp[indices]

    @property
    def inputs(self):
        return (self.args[0],)

    @property
    def outputs(self):
        return (self.args[1],)
class coarse_flatten(pm.Template):
    """ONNX Flatten: element-wise copy re-indexed through the output shape."""
    def define_graph(self, data, out, axis=1):
        # NOTE(review): `axis` is accepted but unused; flattening appears to
        # be implied entirely by out.shape -- confirm against the ONNX spec.
        o_indices = _get_single_node_indices(out, shape=out.shape)
        i_indices = _get_single_node_indices(data, shape=out.shape)
        out[o_indices] = data[i_indices]

    @property
    def inputs(self):
        return (self.args[0],)

    @property
    def outputs(self):
        return (self.args[1],)
class elem_gather(pm.Template):
    """ONNX Gather: delegates to pm.gather along `axis`.

    inputs/outputs properties are declared so this template exposes the
    same interface as its siblings (tensor_transpose etc.).
    """
    def define_graph(self, data, indices, output, axis=0):
        # TODO: Fix this to use manual implementation
        output.write(pm.gather(data, indices, axis=axis))

    @property
    def inputs(self):
        return (self.args[0], self.args[1])

    @property
    def outputs(self):
        return (self.args[2],)
class elem_expand(pm.Template):
    """ONNX Expand placeholder; the computation is not implemented yet."""
    def define_graph(self, data, new_shape, output, axis=0):
        # TODO: Fix this to use manual implementation
        # NOTE(review): the locals below are computed but never used and
        # `output` is never written -- this op is effectively a stub.
        in_dims = data.shape[0]
        new_dims = new_shape[0]
        update_shape_bool = in_dims < new_dims
        in_shape = in_dims * update_shape_bool + (1-update_shape_bool)

    @property
    def inputs(self):
        return (self.args[0], self.args[1])

    @property
    def outputs(self):
        return (self.args[2],)
# TODO: Need to fix this functionality to create a new node
def onnx_unsqueeze(x, *args, axes=None, shape=None, name=None, **kwargs):
    """Map ONNX Unsqueeze onto pm.unsqueeze (ONNX `axes` -> pm `axis`)."""
    out = pm.unsqueeze(x, axis=axes, name=name, shape=shape)
    return out
# TODO: Check this works after changes
def onnx_squeeze(x, *args, axes=None, shape=None, name=None, **kwargs):
    """Map ONNX Squeeze onto pm.squeeze (ONNX `axes` -> pm `axis`)."""
    out = pm.squeeze(x, axis=axes, name=name, shape=shape)
    return out
# TODO: Check this works after changes
def onnx_reshape(data, *args, shape=None, name=None, **kwargs):
    """ONNX Reshape shim: mutates the input node's private _shape and
    re-registers it under the output name instead of creating a new node."""
    data._shape = shape
    data.graph.nodes[name] = data
    return data
# TODO: Convert this to a template node
def onnx_resize(data, *args, shape=None, name=None, **kwargs):
    """ONNX Resize shim: only overwrites the node's shape in place; no
    resampling of the tensor data is performed."""
    data._shape = shape
    data.graph.nodes[name] = data
    return data
def onnx_identity(data, shape=None, name=None, **kwargs):
    """ONNX Identity: re-labels the input node with the output name."""
    data.set_name(name)
    return data
from argparse import ArgumentParser
import logging
from .exception import RfcDLArgumentException
# Module-level logger shared by the rfcdl package.
logger = logging.getLogger("rfcdl")
def parse_arguments(argv=None):
    """Parse the rfcdl command line.

    Parameters
    ----------
    argv : list of str, optional
        Argument vector to parse. Defaults to ``None``, which makes argparse
        fall back to ``sys.argv[1:]`` -- so the original no-argument call
        keeps working, while tests can inject arguments.

    Returns
    -------
    argparse.Namespace
        The parsed options.

    Raises
    ------
    RfcDLArgumentException
        If both --debug and --quiet were requested (mutually exclusive).
    """
    parser = ArgumentParser(prog="rfcdl", description="A tool for downloading RFCs in high-speed.")
    parser.add_argument("-v", "--debug", action="store_true", help="Print debug information.")
    parser.add_argument(
        "-q",
        "--quiet",
        action="store_true",
        help="Print errors and warnings only.",
    )
    parser.add_argument("-c", "--config-file", type=str, help="File to read configuration from.")
    parser.add_argument("-d", "--directory", type=str, help="Directory to store documents in.")
    parser.add_argument(
        "-n",
        "--samples",
        type=int,
        default=0,
        help="Only load this many random documents in total.",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=200,
        help="Only load this many documents at once.",
    )
    parser.add_argument(
        "--retries",
        type=int,
        default=10,
        help="How often a document is tried to be received on failure.",
    )
    parser.add_argument(
        "--delete-obsolete",
        action="store_true",
        help="Delete obsolete RFCs from the specified directory.",
    )
    args = parser.parse_args(argv)
    # argparse cannot express "these two flags conflict" declaratively here,
    # so validate after parsing.
    if args.debug and args.quiet:
        msg = "Cannot be quiet in debug mode."
        raise RfcDLArgumentException(msg)
    return args
|
#!/usr/bin/env python
import sys
from optparse import OptionParser
def _get_opt_parser():
msg_usage = '''%prog -o <origin_file> -l <latest_file>'''
opt_parser = OptionParser(msg_usage)
opt_parser.add_option(
"-o", "--origin_file", action="store", type="string", default=None,
dest="origin_file",
help='Path of the file which contains origin route results')
opt_parser.add_option(
"-l", "--latest_file", action="store", type="string", default=None,
dest="latest_file",
help='Path of the file which contains latest route results')
return opt_parser
def print_vni_route_prefix(route_prefix, prefix=""):
    """Print one route prefix line, indented with *prefix*.

    Converted from a Python 2 print statement to a print() call, which
    behaves identically under Python 2 (single parenthesized argument)
    and also works under Python 3.
    """
    print("%s%s" % (prefix, route_prefix))
def print_vni_route_nexthop(nexthop, prefix="", via_prefix=""):
    """Print one nexthop line: '<prefix> <via_prefix> via <nexthop>'.

    Converted from a Python 2 print statement to a 2/3-compatible print().
    """
    print("%s %s via %s" % (prefix, via_prefix, nexthop))
def print_vni_route_best_entry(nexthop, route_prefix, prefix="", via_prefix=""):
    """Print a prefix line followed by its best-path nexthop line."""
    label = "%s best" % via_prefix
    print_vni_route_prefix(route_prefix, prefix=prefix)
    print_vni_route_nexthop(nexthop, prefix, via_prefix=label)
def print_vni_route_backup_entry(nexthop, route_prefix,
                                 prefix="", via_prefix=""):
    """Print a prefix line followed by one backup-path nexthop line."""
    label = "%s backup" % via_prefix
    print_vni_route_prefix(route_prefix, prefix=prefix)
    print_vni_route_nexthop(nexthop, prefix, via_prefix=label)
def print_vni_route_entry(route_prefix, vni_route, prefix="", via_prefix=""):
    """Print a full route entry: best path then every backup path.

    BUG FIX: the previous version forgot to pass `route_prefix` to the
    entry helpers, shifting every argument by one (prefix became the
    route_prefix, via_prefix became the indent). It also printed the
    prefix line one extra time -- the helpers already print it.
    """
    print_vni_route_best_entry(vni_route["best"], route_prefix,
                               prefix=prefix, via_prefix=via_prefix)
    for v in vni_route["backup"]:
        print_vni_route_backup_entry(v, route_prefix,
                                     prefix=prefix, via_prefix=via_prefix)
def print_vni_route_map(vni_route_map, prefix="", via_prefix=""):
    """Print every route entry in *vni_route_map* (dict iteration order)."""
    for rp, route in vni_route_map.items():
        print_vni_route_entry(rp, route, prefix, via_prefix)
def print_format_content(route_map, file_path):
    """Write *route_map* to *file_path* as normalized, sorted text.

    Layout: one VNI per line, then each route prefix indented one space,
    then its best/backup nexthops. Returns 0.
    """
    with open(file_path, 'w') as out:
        for vni in sorted(route_map.keys()):
            out.write("%s\n" % vni)
            routes = route_map.get(vni)
            if not routes:
                continue
            for rp in sorted(routes.keys()):
                out.write(" %s\n" % rp)
                entry = routes[rp]
                if entry["best"]:
                    out.write(" best via %s\n" % entry["best"])
                backups = entry["backup"]
                if backups:
                    for nh in backups:
                        out.write(" backup via %s\n" % nh)
    return 0
def parse_section(section_lines, to_print=False, ingore_best_indicate=True):
    """Parse one VNI section into {route_prefix: {"best": ..., "backup": [...]}}.

    Lines come in pairs: a route line ("<idx> <indicator> <prefix> ...")
    followed by a nexthop line ("<nexthop> ..."). When
    `ingore_best_indicate` (sic -- name kept for interface compatibility)
    is False, a ">" in the indicator marks the best path; otherwise every
    nexthop is recorded as a backup.
    """
    vni_route_map = {}
    # Each route occupies exactly two lines. BUG FIX: use floor division --
    # under Python 3 plain `/` yields a float, which breaks range().
    route_num = len(section_lines) // 2
    for i in range(0, route_num):
        parts = section_lines[2 * i].split()
        indicate = parts[1]
        is_best = False
        if not ingore_best_indicate:
            if indicate.find(">") != -1:
                is_best = True
        route_prefix = parts[2]
        parts = section_lines[2 * i + 1].split()
        nexthop = parts[0]
        if route_prefix not in vni_route_map:
            vni_route_map[route_prefix] = {"best": None, "backup": []}
        if is_best:
            vni_route_map[route_prefix]["best"] = nexthop
        else:
            vni_route_map[route_prefix]["backup"].append(nexthop)
    # Keep backup nexthops in a deterministic (sorted) order for diffing.
    for route_prefix, vni_route in vni_route_map.items():
        vni_route["backup"] = sorted(vni_route["backup"])
    if to_print:
        print_vni_route_map(vni_route_map)
    return vni_route_map
def parse_file(file_path):
    """Parse a route dump file into {vni: {route_prefix: {...}}}.

    Sections are delimited by "Route distinguisher:" header lines; banner
    lines containing "routes" or "Network" are skipped. Each section body
    is handed to parse_section().
    """
    route_map = {}
    cur_vni = None
    section_lines = []
    with open(file_path) as fp:
        for line in fp:
            line = line.strip()
            if line and line != '\n':
                index = line.find("Route distinguisher:")
                if index != -1:
                    vni = str(line[index + len(
                        "Route distinguisher:"):]).strip()
                    # Flush the previous section before starting a new one.
                    if cur_vni:
                        vni_routes = parse_section(section_lines)
                        route_map[cur_vni] = vni_routes
                    # reset
                    cur_vni = vni
                    section_lines = []
                else:
                    if not cur_vni:
                        continue
                    if line.find("routes") != -1:
                        continue
                    if line.find("Network") != -1:
                        continue
                    section_lines.append(line)
    # Flush the final section. BUG FIX: only when a VNI header was actually
    # seen -- previously a file with no "Route distinguisher:" line produced
    # a bogus route_map[None] entry.
    if cur_vni:
        vni_routes = parse_section(section_lines)
        route_map[cur_vni] = vni_routes
    return route_map
def compare_vni_route_backup(origin_vni_route_backup,
                             latest_vni_route_backup,
                             route_prefix,
                             prefix=""):
    """Print backup nexthops that were added or removed for one route."""
    # Backups that appear only in the latest snapshot were added.
    for nh in latest_vni_route_backup:
        if nh in origin_vni_route_backup:
            continue
        print_vni_route_backup_entry(nh,
                                     route_prefix,
                                     prefix=prefix,
                                     via_prefix="add latest")
    # Backups that appear only in the origin snapshot were deleted.
    for nh in origin_vni_route_backup:
        if nh in latest_vni_route_backup:
            continue
        print_vni_route_backup_entry(nh,
                                     route_prefix,
                                     prefix=prefix,
                                     via_prefix="delete origin")
    return 0
def compare_vni_route(origin_vni_route_map, latest_vni_route_map, prefix=""):
    """Print the added / updated / deleted routes for one VNI.

    Returns 0. Output goes through the print_vni_route_* helpers.
    """
    prefix = "%s " % prefix
    for route_prefix, latest_vni_route in latest_vni_route_map.items():
        if route_prefix not in origin_vni_route_map:
            # added
            print_vni_route_entry(route_prefix, latest_vni_route,
                                  prefix=prefix,
                                  via_prefix="add latest")
        else:
            # updated
            origin_vni_route = origin_vni_route_map[route_prefix]
            if latest_vni_route["best"] != origin_vni_route["best"]:
                print_vni_route_best_entry(origin_vni_route["best"],
                                           route_prefix=route_prefix,
                                           prefix=prefix,
                                           via_prefix="origin")
                print_vni_route_best_entry(latest_vni_route["best"],
                                           route_prefix=route_prefix,
                                           prefix=prefix,
                                           via_prefix="latest")
            compare_vni_route_backup(origin_vni_route["backup"],
                                     latest_vni_route["backup"],
                                     route_prefix=route_prefix,
                                     prefix=prefix)
    for route_prefix, origin_vni_route in origin_vni_route_map.items():
        if route_prefix not in latest_vni_route_map:
            # deleted route
            # BUG FIX: the deleted entry's contents come from the origin map;
            # `latest_vni_route` was a stale loop variable here (and unbound
            # when the latest map is empty).
            print_vni_route_entry(route_prefix, origin_vni_route,
                                  prefix=prefix,
                                  via_prefix="delete origin")
    return 0
def compare_route(origin_file, latest_file):
    """Diff two route dump files and print the changes.

    Also writes a normalized "<file>.bak" copy of each parsed file so the
    raw inputs can be compared by hand. Returns 0.
    """
    origin_route_map = parse_file(origin_file)
    print_format_content(origin_route_map, "%s.bak" % origin_file)
    latest_route_map = parse_file(latest_file)
    print_format_content(latest_route_map, "%s.bak" % latest_file)
    # compare
    prefix = " "
    for vni, latest_vni_route_map in latest_route_map.items():
        # print() is 2/3-compatible (was a Python 2 print statement).
        print(vni)
        if vni not in origin_route_map:
            # added vni
            print_vni_route_map(latest_vni_route_map,
                                prefix=prefix,
                                via_prefix="add latest")
        else:
            # may updated vni
            origin_vni_route_map = origin_route_map[vni]
            compare_vni_route(origin_vni_route_map,
                              latest_vni_route_map,
                              prefix=prefix)
    # deleted
    for vni, origin_vni_route_map in origin_route_map.items():
        if vni not in latest_route_map:
            # deleted vni
            print(vni)
            # BUG FIX: print the origin (deleted) routes; the old code used
            # `latest_vni_route_map`, a stale loop variable from the first
            # loop (unbound when the latest map is empty).
            print_vni_route_map(origin_vni_route_map,
                                prefix=prefix,
                                via_prefix="delete origin")
    return 0
def main(args):
    """Entry point: validate required options and run the comparison.

    Returns 0 on success, -1 when a required option is missing.
    """
    parser = _get_opt_parser()
    (options, _) = parser.parse_args(args)
    # optparse has no `required` support, so validate by hand.
    # (print statements converted to 2/3-compatible print() calls.)
    if not options.origin_file:
        print("please specify origin_file parameter")
        return -1
    if not options.latest_file:
        print("please specify latest_file parameter")
        return -1
    return compare_route(options.origin_file, options.latest_file)
if __name__ == '__main__':
    # Propagate main()'s status code to the shell; it was silently discarded.
    sys.exit(main(sys.argv[1:]))
|
from BooleanEncoding import *
# Classic combinators from combinatory logic. Rewritten as plain functions:
# PEP 8 (E731) discourages assigning lambdas to names, and the original
# trailing semicolons were redundant. Behavior is unchanged.

def identity(a):
    """I combinator: return the argument unchanged."""
    return a

def kestral(x):
    """K combinator (kestrel): x -> y -> x, the constant function."""
    return lambda y: x

def kite(x):
    """KI combinator (kite): x -> y -> y."""
    return lambda y: y

def mockingbird(f):
    """M combinator: self-application, f(f)."""
    return f(f)

def cardinal(f):
    """C combinator: flip the two curried arguments of f."""
    return lambda a: lambda b: f(b)(a)

# function composition B-Combinator
def bluebird(f):
    """B combinator: composition, f(g(x))."""
    return lambda g: lambda x: f(g(x))

# T-Combinator
def thrush(a):
    """T combinator: a -> f -> f(a), apply-to."""
    return lambda f: f(a)

def vireo(x):
    """V combinator: hold x and y, awaiting a consumer f."""
    return lambda y: lambda f: f(x)(y)

def blackbird(f):
    """B1 combinator: compose f after a two-argument curried g."""
    return lambda g: lambda a: lambda b: f(g(a)(b))

#print(bluebird(NOT)(NOT)(FALSE));
|
#!/usr/bin/env python
# setup.py for Pycolor
# NOTE(review): distutils is deprecated (removed in Python 3.12+); consider
# `from setuptools import setup` -- the call below is signature-compatible.
from distutils.core import setup
# Package metadata for the Pycolor ANSI-color module (single pure-Python module).
setup(name='Pycolor',
      version='1.2.1',
      description='Ansi color for python',
      author='Will Drach',
      license='Beerware',
      author_email='will.drach@live.com',
      url='http://www.drach.co/pycolor',
      py_modules=['pycolor'],
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 3',
          'License :: Public Domain',
          'Natural Language :: English',
          'Operating System :: Unix',
          'Intended Audience :: Developers',
          'Topic :: Software Development :: Libraries :: Python Modules'
      ],
      keywords='ansi color terminal colors',
      )
|
#!/usr/bin/env python
#
# Copyright 2017 Import.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from importio2 import ExtractorAPI
from pprint import pprint
# Given the contents in the file urls.txt
# Extractor to operate on.
extractor_id = '7d9122f6-f293-49e4-8035-1961dac6f049'
# Build the list of URLs to attach to the extractor. The nine repeated
# append() calls were collapsed into a comprehension producing the exact
# same list (www1 .. www9).
url_list = ["http://www%d.example.com" % i for i in range(1, 10)]
api = ExtractorAPI()
# Upload a list of URLs to an extractor
api.put_url_list(extractor_id, url_list)
# Download the list of URLs back from the extractor and display it
# (comment fixed: this reads from, not writes to, the extractor).
url_list = api.get_url_list(extractor_id)
pprint(url_list)
|
"""
Django settings for PcapDB interface project.
The basic model of the PCAPdb interface is as follows:
- The search head/s are the only hosts that serve the HTML based interface.
- The search head also provides an API to all functionality served by the HTML interface.
- Authentication and permissions are governed by the search head.
- All interactions with capture nodes happens via celery tasks initiated via the search head.
- Permissions to search or modify the capture nodes are managed on a per-site basis. Sites are
really just django.contrib.auth.models.Group objects.
- For each site group (which gives search permissions) there is a site admin group for host
management permissions.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from configparser import ConfigParser
import datetime
from distutils.spawn import find_executable
from path import Path
import pytz
import socket
import sys
# The root directory of the project. The 'core' directory in the repo.
PROJECT_ROOT = Path(__file__).abspath().dirname().dirname()
# The root directory of the site. This is where our virtual environment, logs,
# and everything else related to the running of the system will live.
# It will also include the project root.
SITE_ROOT = PROJECT_ROOT.dirname()
# The path to where our virtual environment python should be located.
SITE_PYTHON = SITE_ROOT/'bin'/'python'
# System user/group that own the capture processes and files.
CAPTURE_USER = 'capture'
CAPTURE_GROUP = 'capture'
# Ensure our project libraries are in the python path.
sys.path.append(SITE_ROOT)
sys.path.append(PROJECT_ROOT/'apps')
sys.path.append(PROJECT_ROOT/'libs')
# These should be pulled from a system config file
config = ConfigParser()
config.read(SITE_ROOT/'etc'/'pcapdb.cfg')
# Role flags: a host may be a search head, a capture node, or both.
IS_SEARCH_HEAD = config.getboolean('pcapdb', 'search_head', fallback=False)
IS_CAPTURE_NODE = config.getboolean('pcapdb', 'capture_node', fallback=False)
# The node name for this instance is just the host's fqdn.
NODE_NAME = socket.getfqdn()
UI_HOST = config.get('pcapdb', 'search_head_ui_host', fallback=NODE_NAME)
# If this is the search head, we don't need to be told who the search head is explicitly.
if IS_SEARCH_HEAD and not config.has_option('pcapdb', 'search_head_host'):
    config.set('pcapdb', 'search_head_host', NODE_NAME)
if IS_SEARCH_HEAD:
    SEARCH_HEAD_HOST = config.get('pcapdb', 'search_head_host', fallback=NODE_NAME)
else:
    # Capture-only nodes must be told where the search head is (no fallback;
    # a missing option raises here by design).
    SEARCH_HEAD_HOST = config.get('pcapdb', 'search_head_host')
# Group that defines admin access to pcapdb.
ADMIN_GROUP = config.get('pcapdb', 'admin_group', fallback='pcapdb_admin')
# Get the default timezone, falling back to America/Denver.
DEFAULT_TZ = config.get('pcapdb', 'default_timezone', fallback='America/Denver')
if DEFAULT_TZ not in pytz.common_timezones:
    # If the timezone is bad, fallback to UTC
    DEFAULT_TZ = 'UTC'
# Get SMTP settings for sending users email
SMTP_USER = config.get('smtp', 'user', fallback=None)
SMTP_PASSWORD = config.get('smtp', 'password', fallback='')
SMTP_HOST = config.get('smtp', 'host', fallback='localhost')
# NOTE(review): config.get returns a string; port 25 fallback is an int --
# consumers should tolerate both, or this should use getint. Confirm.
SMTP_PORT = config.get('smtp', 'port', fallback=25)
SMTP_FROM = config.get('smtp', 'from', fallback='pcapdb@' + socket.getfqdn())
# Do everything in UTC. Trust the browser to convert timestamps to local time.
TIME_ZONE = 'UTC'
# A node must have at least one role; fail fast at import time otherwise.
if not (IS_CAPTURE_NODE or IS_SEARCH_HEAD):
    raise RuntimeError("Must set each node to be a search head, capture node, or both.")
# Paths to commonly used executables
SUDO_PATH = find_executable('sudo')
CAPTURE_CMD = SITE_ROOT/'bin'/'capture'
MERGECAP_PATH = find_executable('mergecap')
# Login-page banner, both overridable from the [pcapdb] config section.
SPLASH_TITLE = config.get('pcapdb', 'splash_title', fallback='System Usage Warning')
SPLASH_TEXT = config.get('pcapdb', 'splash_text',
                         fallback='This is a system owned by people who probably consider it '
                                  'illegal, bad, or at least rude if you used it without '
                                  'authorization. If you have authorization, then you probably '
                                  'signed away your right to privacy in regards to using this '
                                  'system. NOTE: This message is entirely configurable.')
# How big should our FCAP files be?
# This should not be toyed with lightly; A change here requires rebuilding all of the capture
# disks to have slots of this size.
# The default size, 4 GiB, was chosen because that is the limit of 32 bit addressing.
# The system can handle larger or smaller, and will dynamically choose 32 or 64 bit addressing
# for the index files depending on the size of the file they're indexing. Since the addressing is
# dynamic per index file, larger FCAP's shouldn't come with that big of a size penalty. The FLOW
# index will be 4 bytes larger per record, but the sub-indexes won't have to use 64 bit
# addressing unless the FLOW index is larger than 4 GiB.
FCAP_SIZE = 4*(1024**3)
# The MTU to set on all interfaces. Depending on the capture system used, this may not actually
# have any effect. We set it to 9000 because that's the largest MTU most interfaces will accept.
# Note that the MTU for actual capture is set separately, at compile time, about twice this. We
# have seen packets far larger than this, and IPv6 can support jumbo frames of up to 2^32 bytes.
# The reason for this discrepancy that dynamic packet reassembly at the interface can result
# in huge packets.
# Note that I've seen interfaces that support an MTU up to 9710.
MTU = 9000
# Protocols to support directly filtering in the interface. The search system supports
# every transport protocol, but by default we only allow filtering by TCP or UDP.
# Protocol 0 is used as a wildcard.
SEARCH_TRANSPORT_PROTOS = {
    0: 'all',
    6: 'tcp',
    17: 'udp'
}
# The label that the device that will host the PCAPdb indexes is expected to have. If you build
# the device with PCAPdb, it will be given this label. When the system starts, PCAPdb will expect
# to find a device with this label, and will mount it if necessary.
INDEX_DEV_LABEL = 'pcapdb_index'
# Filesystem labels are length-limited; fail fast on a misconfigured label.
if len(INDEX_DEV_LABEL) > 12:
    # BUG FIX: .format() was never called, so the error message contained a
    # literal "{}" instead of the offending label.
    raise ValueError("Invalid device label {}.".format(INDEX_DEV_LABEL))
# The base directory for all our capture mount points.
CAPTURE_PATH = SITE_ROOT/'capture'
# The mount point for our index device
INDEX_PATH = CAPTURE_PATH/'index'
# The fraction of index disk reserved as 'slack'; indexes are only deleted
# to make room for this slack.
INDEX_DISK_RESERVED = 0.2
# How long should our index and capture slot names be?
# NOTE: These should either be passed as a parameter to the capture system (it's currently
# hard-coded), or loaded directly from the capture system libraries.
INDEX_NAME_LEN = 20
SLOT_NAME_LEN = 9
# How often to tell the server about progress, in seconds
CELERY_TASK_STATE_UPDATE_PERIOD = 1.5
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
if IS_SEARCH_HEAD:
    ALLOWED_HOSTS.append(NODE_NAME)
    # NOTE(review): the config value is appended as a single string; a
    # comma-separated list of hosts would need splitting first -- confirm
    # the expected config format.
    if config.has_option('pcapdb', 'allowed_hosts'):
        ALLOWED_HOSTS.append(config.get('pcapdb', 'allowed_hosts'))
# Caching is effectively disabled (dummy backend).
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
}
LOGIN_URL = '/auth/unauthorized'
# Application definition
INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'djcelery',
    'libs.custom_tags',
    'rest_framework',
    # The celery tasks, at least, will need to be understood by the search head.
    'apps.capture_node_api',
    'apps.core',
    'apps.task_api',
    'apps.search_head_gui',
    'apps.search_head_api',
    'apps.stats_api',
    'apps.login_gui',
    'apps.login_api'
]
# These should only be installed if this is a search head.
# This is super dumb, but necessary thanks to PyCharm and the fact that it hacks INSTALLED_APPS
# out of this file rather than actually run it.
SEARCH_HEAD_ONLY_APPS = ['django.contrib.admin',
                         'django.contrib.sessions',
                         'django.contrib.messages',
                         'django.contrib.staticfiles',
                         'libs.custom_tags',
                         'apps.login_api',
                         'apps.login_gui',
                         'apps.task_api',
                         'apps.search_head_gui']
# On capture-only nodes, strip the search-head-only apps from the full list.
if not IS_SEARCH_HEAD:
    for app in SEARCH_HEAD_ONLY_APPS:
        INSTALLED_APPS.remove(app)
# Django 1.8-era middleware setting (newer Django uses MIDDLEWARE instead).
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    #'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'settings.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [PROJECT_ROOT/'static'/'templates'],
        # 'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.static',
            ],
            # NOTE(review): the eggs.Loader was removed in later Django
            # versions -- confirm the pinned Django release still ships it.
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
                'django.template.loaders.eggs.Loader',
            ]
        },
    },
]
WSGI_APPLICATION = 'settings.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {'default': {
    'ENGINE': 'django.db.backends.postgresql_psycopg2',
    'NAME': config.get('pcapdb', 'db_name', fallback='pcapdb'),
    'USER': config.get('pcapdb', 'db_user'),
    'PASSWORD': config.get('pcapdb', 'db_pass')
    }
}
# Use peer auth unless this isn't the search head
if not IS_SEARCH_HEAD:
    # Capture nodes connect to the search head's database over the network.
    DATABASES['default']['HOST'] = SEARCH_HEAD_HOST
if IS_CAPTURE_NODE:
    # Use peer authentication
    DATABASES['capture_node'] = {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': config.get('pcapdb', 'capnode_db_name', fallback='capture_node'),
    }
# Routers decide which models live in which database.
DATABASE_ROUTERS = ['apps.core.routers.BaseRouter',
                    'apps.capture_node_api.CaptureNodeRouter']
# DRF: everything requires authentication; session, basic, and JWT accepted.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
    ),
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = SITE_ROOT/'static'
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_DIRS = []
# How many things we can combine in a single command when searching.
MAX_SEARCH_BATCH = 500
# Django 1.8 Security settings
# SECURE_HSTS_SECONDS = 600
SECURE_CONTENT_TYPE_NOSNIFF = True
# SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_SSL_REDIRECT = False # Force SSL
SECURE_BROWSER_XSS_FILTER = True # Tell browsers to enable XSS tools
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
# If this is set to True, client-side JavaScript will not be able to access
# the CSRF cookie; it should be set to True in production.
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_NAME = 'pcapdbcsrftoken'
CSRF_FAILURE_VIEW = 'apps.search_head_api.views.csrf_failure'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_COOKIE_AGE = 60 * 60 * 8 # 8 hours
SESSION_COOKIE_NAME = "pcapdb"
# All application logging funnels into this file (see LOGGING below).
PRIMARY_LOG_FILE = SITE_ROOT/'log'/'django.log'
# Log to both stdout and a rotating file; root logger captures everything
# at DEBUG, django itself only at ERROR.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'standard': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
    },
    'handlers': {
        'logfile': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': PRIMARY_LOG_FILE,
            'maxBytes': 1024 * 1024 * 10, # 10MB log file limit
            'backupCount': 10,
            'formatter': 'standard',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'stream': sys.stdout
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console', 'logfile'],
            'propagate': True,
            'level': 'ERROR',
        },
        'django.db.backends': {
            'handlers': ['console', 'logfile'],
            'propagate': False,
            'level': 'INFO',
        },
        '': {
            'handlers': ['console', 'logfile'],
            'level': 'DEBUG',
        },
    }
}
# Acceptable password alphabet
PASSWORD_RE = r'^[a-zA-Z0-9+=!@#$%^&*().,><;:/?\][}{` ~_-]{8,100}$'
# lib zxcvbn password strength, on a scale of 0-4
PASSWORD_STRENGTH_MIN = 3
AUTHENTICATION_BACKENDS = []
if IS_SEARCH_HEAD:
    # We only need user based authentication for the search head.
    AUTHENTICATION_BACKENDS.append('django.contrib.auth.backends.ModelBackend')
# LDAP flags default to off; flipped when the config provides the sections.
LDAP_AUTH_ENABLED = False
LDAP_GROUPS_ENABLED = False
# LDAP Authentication Configuration
# LDAP auth is enabled by the presence of an [ldap_auth] config section.
if config.has_section('ldap_auth'):
    from django_auth_ldap.config import *
    AUTHENTICATION_BACKENDS.append('django_auth_ldap.backend.LDAPBackend')
    LDAP_AUTH_ENABLED = True
    # LDAP configuration is notoriously hard to debug. Make sure to log everything.
    import logging
    ldap_logger = logging.getLogger('django_auth_ldap')
    # BUG FIX: the handler/level were attached to an undefined name `logger`,
    # which raised NameError at import time whenever [ldap_auth] was set.
    ldap_logger.addHandler(logging.StreamHandler())
    ldap_logger.setLevel(logging.DEBUG)
    # Baseline configuration.
    AUTH_LDAP_SERVER_URI = config.get('ldap_auth', 'server')
    # Default user group to put new users in.
    LDAP_REQUIRED_GROUP = config.get('ldap_auth', 'default_group', fallback='pcapdb_user')
    AUTH_LDAP_BIND_DN = config.get('ldap_auth', 'auth_bind_dn', fallback='')
    AUTH_LDAP_BIND_PASSWORD = config.get('ldap_auth', 'auth_bind_password', fallback='')
    _USER_SEARCH_BASE = config.get('ldap_auth', 'user_search_base')
    _USER_ATTR = config.get('ldap_auth', 'user_attr', fallback='uid')
    # NOTE(review): `ldap` presumably comes from the wildcard import of
    # django_auth_ldap.config above -- confirm it re-exports the ldap module.
    AUTH_LDAP_USER_SEARCH = LDAPSearch(_USER_SEARCH_BASE,
                                       ldap.SCOPE_SUBTREE,
                                       "({}=%(user)s)".format(_USER_ATTR))
    # Map configured LDAP attributes onto the Django user fields.
    AUTH_LDAP_USER_ATTR_MAP = {}
    for attr in 'username', 'first_name', 'last_name', 'email':
        key_name = '{}_attr'.format(attr)
        if config.has_option('ldap_auth', key_name):
            AUTH_LDAP_USER_ATTR_MAP[attr] = config['ldap_auth'][key_name]
    import logging
    log = logging.getLogger(__name__)
if 'ldap_groups' in config.sections():
LDAP_GROUPS_ENABLED = False
        # Mirror LDAP group membership into Django groups on login.
        AUTH_LDAP_MIRROR_GROUPS = True
        # Use LDAP group membership to calculate group permissions.
        AUTH_LDAP_FIND_GROUP_PERMS = True
        _GROUP_NAME_ATTR = config.get('ldap_groups', 'group_name_attr', fallback='cn')
        _GROUP_SEARCH_BASE = config.get('ldap_groups', 'group_search_base')
        _GROUP_TYPE = config.get('ldap_groups', 'group_type')
        if _GROUP_TYPE == 'posix':
            AUTH_LDAP_GROUP_TYPE = PosixGroupType(_GROUP_NAME_ATTR)
            _GROUP_OBJECT_CLASS = 'posixGroup'
        elif _GROUP_TYPE == 'memberdn':
            AUTH_LDAP_GROUP_TYPE = MemberDNGroupType(['owner', 'member'], _GROUP_NAME_ATTR)
            # NOTE(review): 'posixGroup' as the object class for memberdn
            # groups looks like a copy-paste from the posix branch
            # ('groupOfNames' is typical) -- confirm against the schema.
            _GROUP_OBJECT_CLASS = 'posixGroup'
        else:
            raise ValueError("Invalid LDAP group type in config: {}. Choices: {}"
                             .format(_GROUP_TYPE, ', '.join(['posix', 'memberdn'])))
        _GROUP_NAME_PREFIX = config.get('ldap_groups', 'group_name_prefix', fallback=None)
        # Restrict the group search to a name prefix when one is configured.
        if _GROUP_NAME_PREFIX is not None:
            _group_filter = '(&(objectClass={})({}={}*))'.format(_GROUP_OBJECT_CLASS,
                                                                 _GROUP_NAME_ATTR,
                                                                 _GROUP_NAME_PREFIX)
        else:
            _group_filter = '(objectClass={})'.format(_GROUP_OBJECT_CLASS)
        # Set up the basic group parameters.
        AUTH_LDAP_GROUP_SEARCH = LDAPSearch(_GROUP_SEARCH_BASE,
                                            ldap.SCOPE_SUBTREE,
                                            _group_filter)
        # log.error('require group: {}'.format(AUTH_LDAP_REQUIRE_GROUP))
        log.error('group_filter: {}'.format(_group_filter))
        # Cache group memberships for an hour to minimize LDAP traffic
        AUTH_LDAP_CACHE_GROUPS = True
        AUTH_LDAP_GROUP_CACHE_TIMEOUT = 3600
# Celery Configuration variables.
# The AMQP broker always lives on the search head.
BROKER_URL = 'amqp://{user}:{password}@{host}:{port}//'.format(
    user=config.get('celery', 'amqp_user', fallback='pcapdb'),
    password=config.get('celery', 'amqp_password'),
    host=SEARCH_HEAD_HOST,
    port=config.get('celery', 'amqp_port', fallback='5672'),
)
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
from kombu import Queue, Exchange
# Each role consumes from its own queues; capture nodes get a queue named
# after their FQDN so tasks can be routed to a specific node.
CELERY_QUEUES = []
if IS_SEARCH_HEAD:
    CELERY_QUEUES.append(Queue('celery'))
    CELERY_QUEUES.append(Queue('search_head', exchange=Exchange('search_head')))
if IS_CAPTURE_NODE:
    CELERY_QUEUES.append(Queue(NODE_NAME, exchange=Exchange('capture_node')))
CELERY_ROUTES = ['capture_node_api.routers.capture_node_router']
CELERY_EVENT_SERIALIZER = 'json'
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERYBEAT_SCHEDULE = {}
CELERYBEAT_SCHEDULE_FILENAME = SITE_ROOT/'celerybeat-schedule'
CELERYD_PREFETCH_MULTIPLIER = 1
if IS_CAPTURE_NODE:
    # Send interface stats to the search head periodically.
    # NOTE(review): the original comment said "every five minutes" but the
    # schedule below is 60 seconds -- confirm which is intended.
    CELERYBEAT_SCHEDULE['gather_if_stats'] = {
        'options': {'queue': NODE_NAME},
        'task': 'apps.capture_node_api.tasks.stats.update_stats',
        'schedule': datetime.timedelta(seconds=60)
    }
    # Prune old indexes every 15 minutes to maintain the reserved slack.
    CELERYBEAT_SCHEDULE['clean_indexes'] = {
        'options': {'queue': NODE_NAME},
        'task': 'apps.capture_node_api.tasks.maint.clean_indexes',
        'schedule': datetime.timedelta(minutes=15)
    }
|
import pygame as pg
import random
import load_images
import variables
from VisualManagement import *
from variables import *
# Initialize pygame and the main window.
pg.init()
pg.display.set_caption('Backgammon Project 2')
screen_size = (900, 790) # a tuple of size (width, height)
screen = pg.display.set_mode(screen_size)
# import board images:
white_pawn_outside, black_pawn_outside, white_pawn, black_pawn, background_image = load_images.board(pg)
# import animation images:
inactive_adversary_dice_button, active_adversary_dice_button, inactive_player_dice_button, \
active_player_dice_button, white_wins, black_wins, dest_light_bottom, dest_light_upper, \
house_lights_green, blank_player_dice, blank_adversary_dice, white_highlight, black_highlight \
= load_images.animations(pg)
# create the dice lists
player_dice_list, adversary_dice_list = load_images.dices()
# LIST CONTAINS EACH TIME: [destination_stack, piece_name]
white_light_pawns = []
black_light_pawns = []
# LIST OF ALL STACKS THE PIECE CAN GO TO
white_possible_dest = []
black_possible_dest = []
# PIECE NAMES THAT ARE HOME
white_home = []
black_home = []
# ROLL AND SAVE DICE VALUES
def player_dice_values():
    """Roll two dice for the player, refresh both dice images, and record
    the rolled values in the shared `variables` module."""
    first = random.randint(1, 6)
    second = random.randint(1, 6)
    player_dice_1.my_dice = pg.image.load(player_dice_list[first - 1])
    player_dice_2.my_dice = pg.image.load(player_dice_list[second - 1])
    variables.player_dice_values = (first, second)
def adversary_dice_values():
    """Roll two dice for the adversary, refresh both dice images, and
    record the rolled values in the shared `variables` module."""
    first = random.randint(1, 6)
    second = random.randint(1, 6)
    adversary_dice_1.my_dice = pg.image.load(adversary_dice_list[first - 1])
    adversary_dice_2.my_dice = pg.image.load(adversary_dice_list[second - 1])
    variables.adversary_dice_values = (first, second)
# KEY HIGHLIGHTING
def light_white_keys(stack_list):
    """Swap the image of each listed white pawn to its highlighted variant.

    Entries are [destination_stack, piece]; index 1 is the piece.
    """
    for entry in stack_list:
        entry[1].image = white_highlight
def light_black_keys(stack_list):
    """Swap the image of each listed black pawn to its highlighted variant.

    Entries are [destination_stack, piece]; index 1 is the piece.
    """
    for entry in stack_list:
        entry[1].image = black_highlight
# UPDATE DICE IMAGE
class adversary_dice:
    """Holds the adversary's currently displayed die face image."""
    def __init__(self, pic):
        # `pic` is a path to an image file loadable by pygame.
        self.my_dice = pg.image.load(pic)
class player_dice:
    """Holds the player's currently displayed die face image."""
    def __init__(self, pic):
        # `pic` is a path to an image file loadable by pygame.
        self.my_dice = pg.image.load(pic)
# - - - create the middle stack
my_middle_stack = Pieces.ColumnStacks(0, None)
temp_middle = []
temp_x = 426
temp_y = 340
# Six pawn positions stacked vertically, 56 px apart.
for i in range(0, 6):
    temp_middle.append((temp_x, temp_y + (i * 56)))
my_middle_stack.positions = temp_middle
# - - -
# Stacks for pawns that have been borne off the board.
white_pawn_outside_stack = Pieces.StackMotions(0, "white")
black_pawn_outside_stack = Pieces.StackMotions(0, "black")
# INITIALISE THE PLAYER AND ADVERSARY DICES TO BLANK/EMPTY
player_dice_1 = player_dice(blank_player_dice)
player_dice_2 = player_dice(blank_player_dice)
adversary_dice_1 = adversary_dice(blank_adversary_dice)
adversary_dice_2 = adversary_dice(blank_adversary_dice)
VM = VisualManagement()
all_stacks = VM.init_stacks() # generate the pieces and put them in a stack
# set pieces that are in home to Home
# NOTE(review): stacks <= 6 are treated as white's home board and >= 19 as
# black's -- confirm against the board layout in VisualManagement.
for k in all_stacks:
    val = all_stacks[k]
    if val.loc <= 6:
        for j in val.pawns:
            if j.id == "white":
                white_home.append(j)
    elif val.loc >= 19:
        for j in val.pawns:
            if j.id == "black":
                black_home.append(j)
# move piece from current stack to another
def move(current_stack, destination_stack): # TODO RECHECK param type
    """Move the top pawn of *current_stack* onto *destination_stack*.

    Also keeps the active side's highlight list (white_light_pawns or
    black_light_pawns, chosen by the module-global `turn`) in sync: the
    moved pawn's old entry is dropped and a new entry for its destination
    is appended.
    """
    # pop from the current stack
    deleted_piece = current_stack.remove_pawn()
    if turn == "player":
        emplacements = white_light_pawns
    else:
        emplacements = black_light_pawns
    # if the deleted piece name is in the highlighted list, delete it from the list.
    # BUG FIX: the old `del i` inside the loop only unbound the loop variable
    # and never removed anything. Slice assignment rebuilds the contents in
    # place, so the module-level list object (aliased by the globals) is the
    # one that shrinks.
    emplacements[:] = [e for e in emplacements if e[1] != deleted_piece]
    destination_stack.add_pawn(deleted_piece) # add the piece to the destination stack
    emplacements.append([destination_stack, deleted_piece]) # add the piece as a new destination possible
# then push in desired stack
def check_end_turn():
    """Hand the turn over when the current side has finished.

    A side is done when both of its dice have been played, or when its dice
    were rolled but no destination is reachable.  Ending a turn resets the
    dice flags and images, clears the cached highlight/destination lists,
    and flips ``turn`` to the other side.
    """
    global player_dice1_moved, player_dice2_moved, white_possible_dest, player_dice_rolled, white_light_pawns, \
        white_light_triggered, turn, black_light_triggered, adversary_dice1_moved, adversary_dice2_moved, \
        black_possible_dest, adversary_dice_rolled, black_light_pawns
    # FIX: the original condition additionally tested
    # `(moved1 == moved2 == False) or moved1 or moved2`, which is always true
    # for boolean flags (either both are False, or at least one is True), so
    # only "dice rolled and no destinations left" matters.
    if (player_dice1_moved and player_dice2_moved) or \
            (player_dice_rolled and not white_possible_dest):
        player_dice1_moved = False
        player_dice2_moved = False
        white_light_triggered = False
        player_dice_rolled = False
        player_dice_1.my_dice = pg.image.load(blank_player_dice)
        player_dice_2.my_dice = pg.image.load(blank_player_dice)
        # un-highlight every pawn that was selectable this turn
        for entry in white_light_pawns:
            entry[1].image = white_pawn
        white_light_pawns = []
        white_possible_dest = []
        turn = "adversary"
    if (adversary_dice1_moved and adversary_dice2_moved) or \
            (adversary_dice_rolled and not black_possible_dest):
        adversary_dice_rolled = False
        adversary_dice1_moved = False
        adversary_dice2_moved = False
        black_light_triggered = False
        adversary_dice_1.my_dice = pg.image.load(blank_adversary_dice)
        adversary_dice_2.my_dice = pg.image.load(blank_adversary_dice)
        for entry in black_light_pawns:
            entry[1].image = black_pawn
        black_light_pawns = []
        black_possible_dest = []
        turn = "player"
counter = 0
# MAIN LOOP
# Each frame: resolve the opening roll-off, normalize pawn images, re-apply
# highlights, end finished turns, handle input events, then redraw everything.
while running:
    mouse = pg.mouse.get_pos()
    click = pg.mouse.get_pressed()
    # first, determine which color starts, happens only at the start of the game/once
    if not turn_rolling:
        if counter < 40: # make a rolling animation by rolling 40 times and then take the last dice score
            turn_adv = random.randint(1, 6)
            adversary_dice_2.my_dice = pg.image.load(adversary_dice_list[turn_adv - 1])
            turn_pla = random.randint(1, 6)
            player_dice_1.my_dice = pg.image.load(player_dice_list[turn_pla - 1])
            counter += 1
        else:
            pg.time.delay(1500) # let the user see the score of who starts
            if turn_adv != turn_pla:
                if turn_adv > turn_pla:
                    turn = "adversary"
                elif turn_adv < turn_pla:
                    turn = "player"
                player_dice_1.my_dice = pg.image.load(blank_player_dice)
                adversary_dice_2.my_dice = pg.image.load(blank_adversary_dice)
                turn_rolling = True
                counter = 0
            else: # if dices are the same, start the rolling animation again...
                counter = 0
                turn_rolling = False
    # convert all white pawns (might be lighted or not) to normal white pawns
    if turn == "player":
        for k in all_stacks:
            for j in all_stacks[k].pawns:
                if j.id == "white":
                    j.image = white_pawn
    # convert all black pawns (might be lighted or not) to normal black pawns image
    if turn == "adversary":
        for k in all_stacks:
            for j in all_stacks[k].pawns:
                if j.id == "black":
                    j.image = black_pawn
    # if the turn is not completed, turn/keep the lights on
    if white_light_triggered:
        light_white_keys(white_light_pawns)
    # if the turn is not completed, turn/keep the lights on
    if black_light_triggered:
        light_black_keys(black_light_pawns)
    # when the PLAYER/ADVERSARY finishes his turn or no other possible moves
    check_end_turn()
    screen.fill((0, 0, 0)) # add a black layer behind the background image
    screen.blit(background_image, (0, 0)) # background image on top of the filled black screen
    for event in pg.event.get():
        if event.type == pg.QUIT:
            running = False
        if event.type == pg.KEYDOWN:
            if event.key == pg.K_RIGHT:
                pass
        # releasing the left button over the dice button rolls and lights eligible pawns
        if event.type == pg.MOUSEBUTTONUP and turn == "player" and 840 <= mouse[0] <= 885 and 330 <= mouse[1] <= 450:
            if event.button == 1:
                light_white_keys(white_light_pawns)
                player_dice_rolled = True
                white_light_triggered = True
        if event.type == pg.MOUSEBUTTONUP and turn == "adversary" and 3 <= mouse[0] <= 48 and 310 <= mouse[1] <= 430:
            if event.button == 1:
                light_black_keys(black_light_pawns)
                adversary_dice_rolled = True
                black_light_triggered = True
        # turn of the player
        if turn == "player":
            for i in white_light_pawns:
                dice_player = variables.player_dice_values
                if event.type == pg.KEYDOWN and (event.key == pg.K_UP or event.key == pg.K_DOWN) and player_dice_rolled:
                    # d1/d2 are the candidate destination stack indices for dice 1/2.
                    # NOTE(review): a white pawn on the bar re-enters via the
                    # mirrored index (25 - ...) -- assumes stack indices run
                    # 1..24; confirm against Pieces.
                    if len(my_middle_stack.pawns) > 0 and my_middle_stack.pawns[-1].id == "white":
                        d1, d2 = 25 - (-(i[0].loc - dice_player[0])), 25 - (-(i[0].loc - dice_player[1]))
                    else:
                        d1, d2 = i[0].loc - dice_player[0], i[0].loc - dice_player[1]
                    if click[0] == 1 and i[1].coordinates[0] <= mouse[0] <= i[1].coordinates[0] + 56 and \
                            i[1].coordinates[1] <= mouse[1] <= i[1].coordinates[1] + 56:
                        if event.key == pg.K_UP:
                            if len(white_home) <= 15:
                                if d1 > 0 and not player_dice1_moved:
                                    if all_stacks[d1] in white_possible_dest:
                                        # a lone black pawn on the target gets hit and sent to the bar
                                        if len(all_stacks[d1].pawns) == 1 and \
                                                (all_stacks[d1].pawns[0].id == "black"):
                                            move(all_stacks[d1], my_middle_stack)
                                        move(i[0], all_stacks[d1])
                                        if all_stacks[d1].loc <= 6:
                                            if all_stacks[d1].pawns[-1] not in white_home:
                                                white_home.append(all_stacks[d1].pawns[-1])
                                        player_dice_1.my_dice = pg.image.load(blank_player_dice)
                                        player_dice1_moved = True
                                # bearing off: all 15 white pawns home and the move goes exactly off-board
                                if d1 == 0 and not player_dice1_moved and len(white_home) == 15:
                                    move(i[0], white_pawn_outside_stack)
                                    player_dice1_moved = True
                                    player_dice_1.my_dice = pg.image.load(blank_player_dice)
                        if event.key == pg.K_DOWN:
                            if len(white_home) <= 15:
                                if d2 > 0 and player_dice2_moved == False:
                                    if all_stacks[d2] in white_possible_dest:
                                        if len(all_stacks[d2].pawns) == 1 and \
                                                (all_stacks[d2].pawns[0].id == "black"):
                                            move(all_stacks[d2], my_middle_stack)
                                        move(i[0], all_stacks[d2])
                                        if all_stacks[d2].loc <= 6:
                                            if all_stacks[d2].pawns[-1] not in white_home:
                                                white_home.append(all_stacks[d2].pawns[-1])
                                        player_dice_2.my_dice = pg.image.load(blank_player_dice)
                                        player_dice2_moved = True
                                if d2 == 0 and not player_dice2_moved and len(white_home) == 15:
                                    move(i[0], white_pawn_outside_stack)
                                    player_dice_2.my_dice = pg.image.load(blank_player_dice)
                                    player_dice2_moved = True
        # turn of the adversary
        if turn == "adversary":
            for i in black_light_pawns:
                dice_adversary = variables.adversary_dice_values
                if event.type == pg.KEYDOWN and (event.key == pg.K_UP or event.key == pg.K_DOWN) and \
                        adversary_dice_rolled:
                    # black moves in the opposite direction, so destinations increase
                    if len(my_middle_stack.pawns) > 0 and my_middle_stack.pawns[-1].id == "black":
                        d1, d2 = -(i[0].loc - dice_adversary[0]), -(i[0].loc - dice_adversary[1])
                    else:
                        d1, d2 = i[0].loc + dice_adversary[0], i[0].loc + dice_adversary[1]
                    if click[0] == 1 and i[1].coordinates[0] <= mouse[0] <= i[1].coordinates[0] + 56 and \
                            i[1].coordinates[1] <= mouse[1] <= i[1].coordinates[1] + 56:
                        if event.key == pg.K_UP:
                            if len(black_home) <= 15:
                                if d1 < 25 and not adversary_dice1_moved:
                                    if all_stacks[d1] in black_possible_dest:
                                        if len(all_stacks[d1].pawns) == 1 and \
                                                (all_stacks[d1].pawns[0].id == "white"):
                                            move(all_stacks[d1], my_middle_stack)
                                        move(i[0], all_stacks[d1])
                                        if all_stacks[d1].loc >= 19:
                                            if all_stacks[d1].pawns[-1] not in black_home:
                                                black_home.append(all_stacks[d1].pawns[-1])
                                        adversary_dice_1.my_dice = pg.image.load(blank_adversary_dice)
                                        adversary_dice1_moved = True
                                if d1 == 25 and not adversary_dice1_moved and len(black_home) == 15:
                                    move(i[0], black_pawn_outside_stack)
                                    adversary_dice1_moved = True
                                    adversary_dice_1.my_dice = pg.image.load(blank_adversary_dice)
                        if event.key == pg.K_DOWN:
                            if len(black_home) <= 15:
                                if d2 < 25 and not adversary_dice2_moved:
                                    if all_stacks[d2] in black_possible_dest:
                                        if len(all_stacks[d2].pawns) == 1 and \
                                                (all_stacks[d2].pawns[0].id == "white"):
                                            move(all_stacks[d2], my_middle_stack)
                                        move(i[0], all_stacks[d2])
                                        if all_stacks[d2].loc >= 19:
                                            if all_stacks[d2].pawns[-1] not in black_home:
                                                black_home.append(all_stacks[d2].pawns[-1])
                                        adversary_dice_2.my_dice = pg.image.load(blank_adversary_dice)
                                        adversary_dice2_moved = True
                                if d2 == 25 and not adversary_dice2_moved and len(black_home) == 15:
                                    move(i[0], black_pawn_outside_stack)
                                    adversary_dice_2.my_dice = pg.image.load(blank_adversary_dice)
                                    adversary_dice2_moved = True
    # update the screen
    VM.screen_update(screen)
    # turn of the player
    if turn == "player":
        # 1: roll dice
        if not player_dice_rolled:
            if 840 <= mouse[0] <= 885 and 330 <= mouse[1] <= 450:
                screen.blit(active_player_dice_button, (840, 330))
                if click[0] == 1:
                    player_dice_values()
            else:
                screen.blit(inactive_player_dice_button, (840, 330))
        # 2: light pawns that are eligible to move
        light_pawns = []
        # any white pawn on the bar must be re-entered before anything else
        if len(my_middle_stack.pawns) > 0 and my_middle_stack.pawns[-1].id == "white":
            for i in my_middle_stack.pawns:
                if i.id == "white":
                    light_pawns.append([my_middle_stack, i])
        else:
            for k in all_stacks:
                val = all_stacks[k]
                if len(val.pawns) > 0:
                    light_piece = val.pawns[-1]
                    if light_piece.id == "white":
                        light_pawns.append([val, light_piece])
        white_light_pawns = light_pawns
        # 3: show the possible destinations when clicked on a pawn that's eligible to move
        if player_dice_rolled and len(white_light_pawns) > 0:
            temp_destination = []
            dice_player = variables.player_dice_values
            if len(white_home) <= 15:
                for i in white_light_pawns:
                    if len(my_middle_stack.pawns) > 0 and my_middle_stack.pawns[-1].id == "white":
                        d1, d2 = 25 - (-(i[0].loc - dice_player[0])), 25 - (-(i[0].loc - dice_player[1]))
                    else:
                        d1, d2 = i[0].loc - dice_player[0], i[0].loc - dice_player[1]
                    if d1 > 0 and not player_dice1_moved:
                        if all_stacks[d1].check_if_receiving_light("white") == "on":
                            temp_destination.append(all_stacks[d1])
                    if d1 == 0 and player_dice1_moved == False:
                        if white_pawn_outside_stack.checking_receiving_light("white", white_home, black_home) == "on":
                            temp_destination.append(white_pawn_outside_stack)
                    if d2 > 0 and player_dice2_moved == False:
                        if all_stacks[d2].check_if_receiving_light("white") == "on":
                            temp_destination.append(all_stacks[d2])
                    if d2 == 0 and player_dice2_moved == False:
                        if white_pawn_outside_stack.checking_receiving_light("white", white_home, black_home) == "on":
                            temp_destination.append(white_pawn_outside_stack)
                white_possible_dest = temp_destination
                for i in white_light_pawns:
                    if click[0] == 1 and i[1].coordinates[0] <= mouse[0] <= i[1].coordinates[0] + 56 and \
                            i[1].coordinates[1] <= mouse[1] <= i[1].coordinates[1] + 56:
                        if len(my_middle_stack.pawns) > 0 and my_middle_stack.pawns[-1].id == "white":
                            d1, d2 = 25 - (-(i[0].loc - dice_player[0])), 25 - (-(i[0].loc - dice_player[1]))
                        else:
                            d1, d2 = i[0].loc - dice_player[0], i[0].loc - dice_player[1]
                        if len(white_home) <= 15:
                            if d1 > 0 and not player_dice1_moved:
                                all_stacks[d1].receiving_light("white", screen)
                            if d2 > 0 and not player_dice2_moved:
                                all_stacks[d2].receiving_light("white", screen)
                            if d1 == 0 and not player_dice1_moved:
                                white_pawn_outside_stack.receiving_light("white", screen, white_home, black_home)
                            if d2 == 0 and not player_dice2_moved:
                                white_pawn_outside_stack.receiving_light("white", screen, white_home, black_home)
    # turn of the adversary
    if turn == "adversary":
        # 1: roll dice
        if not adversary_dice_rolled:
            if 3 <= mouse[0] <= 48 and 310 <= mouse[1] <= 430:
                screen.blit(active_adversary_dice_button, (3, 310))
                if click[0] == 1:
                    adversary_dice_values()
            else:
                screen.blit(inactive_adversary_dice_button, (3, 310))
        # 2: light pawns that are eligible to move
        light_pawns = []
        if len(my_middle_stack.pawns) > 0 and my_middle_stack.pawns[-1].id == "black":
            for i in my_middle_stack.pawns:
                if i.id == "black":
                    light_pawns.append([my_middle_stack, i])
        else:
            for k in all_stacks:
                val = all_stacks[k]
                if len(val.pawns) > 0:
                    light_piece = val.pawns[-1]
                    if light_piece.id == "black":
                        light_pawns.append([val, light_piece])
        black_light_pawns = light_pawns
        # 3: show the possible destinations when clicked on a pawn that's eligible to move
        if adversary_dice_rolled and len(black_light_pawns) > 0:
            temp_destination = []
            dice_adversary = variables.adversary_dice_values
            if len(black_home) <= 15:
                for i in black_light_pawns:
                    if len(my_middle_stack.pawns) > 0 and my_middle_stack.pawns[-1].id == "black":
                        d1, d2 = -(i[0].loc - dice_adversary[0]), -(i[0].loc - dice_adversary[1])
                    else:
                        d1, d2 = i[0].loc + dice_adversary[0], i[0].loc + dice_adversary[1]
                    if d1 < 25 and not adversary_dice1_moved:
                        if all_stacks[d1].check_if_receiving_light("black") == "on":
                            temp_destination.append(all_stacks[d1])
                    if d1 == 25 and not adversary_dice1_moved:
                        if black_pawn_outside_stack.checking_receiving_light("black", white_home, black_home) == "on":
                            temp_destination.append(black_pawn_outside_stack)
                    if d2 < 25 and not adversary_dice2_moved:
                        if all_stacks[d2].check_if_receiving_light("black") == "on":
                            temp_destination.append(all_stacks[d2])
                    if d2 == 25 and not adversary_dice2_moved:
                        if black_pawn_outside_stack.checking_receiving_light("black", white_home, black_home) == "on":
                            temp_destination.append(black_pawn_outside_stack)
                black_possible_dest = temp_destination
                for i in black_light_pawns:
                    if click[0] == 1 and i[1].coordinates[0] <= mouse[0] <= i[1].coordinates[0] + 56 and \
                            i[1].coordinates[1] <= mouse[1] <= i[1].coordinates[1] + 56:
                        if len(my_middle_stack.pawns) > 0 and my_middle_stack.pawns[-1].id == "black":
                            d1, d2 = -(i[0].loc - dice_adversary[0]), -(i[0].loc - dice_adversary[1])
                        else:
                            d1, d2 = i[0].loc + dice_adversary[0], i[0].loc + dice_adversary[1]
                        if len(black_home) <= 15:
                            if d1 < 25 and not adversary_dice1_moved:
                                all_stacks[d1].receiving_light("black", screen)
                            if d2 < 25 and not adversary_dice2_moved:
                                all_stacks[d2].receiving_light("black", screen)
                            if d1 == 25 and not adversary_dice1_moved:
                                black_pawn_outside_stack.receiving_light("black", screen, white_home, black_home)
                            if d2 == 25 and not adversary_dice2_moved:
                                black_pawn_outside_stack.receiving_light("black", screen, white_home, black_home)
    # draw the current dice faces
    screen.blit(player_dice_1.my_dice, (2, 540))
    screen.blit(player_dice_2.my_dice, (2, 610))
    screen.blit(adversary_dice_1.my_dice, (2, 100))
    screen.blit(adversary_dice_2.my_dice, (2, 175))
    # a side wins once all 15 of its pawns have been borne off
    if len(white_pawn_outside_stack.elements) == 15:
        winner_declared = True
        screen.blit(white_wins, (0, 0))
    elif len(black_pawn_outside_stack.elements) == 15:
        winner_declared = True
        screen.blit(black_wins, (0, 0))
    if winner_declared and 0 <= mouse[0] <= 900 and 0 <= mouse[1] <= 790 and click[0] == 1:
        pass
        # pg.display.quit() # TODO finetune the end menu
        # sys.exit('end of the game')
    pg.display.update()
|
# TODO: un-subclass dict in favor of something more explicit, once all regular
# dict-like access has been factored out into methods
class LineManager(dict):
    """
    Tracks all release lines/families plus the config needed to interpret them.
    """

    def __init__(self, app):
        """
        Create an empty line manager.

        :param app: Core Sphinx app object; primarily used as a config source.
        """
        super(LineManager, self).__init__()
        self.app = app

    @property
    def config(self):
        """
        Shorthand for the Sphinx config object.
        """
        return self.app.config

    def add_family(self, major_number):
        """
        Register a fresh release line for ``major_number``.

        Sets up the mandatory "unreleased" bucket(s) for that line and does
        any other required bookkeeping.
        """
        # 0.x prehistory (when configured) collapses bugfixes and features
        # into one 'unreleased' bucket; stable lines keep them separate.
        if major_number == 0 and self.config.releases_unstable_prehistory:
            keys = ["unreleased"]
        else:
            keys = ["unreleased_bugfix", "unreleased_feature"]
        # Each bucket starts out empty.
        self[major_number] = {key: [] for key in keys}

    @property
    def unstable_prehistory(self):
        """
        Whether 'unstable prehistory' semantics are currently in effect.

        True only when configured on AND no stable release exists yet.
        """
        configured = self.config.releases_unstable_prehistory
        return configured and not self.has_stable_releases

    @property
    def stable_families(self):
        """
        All major family numbers other than 0 (the prehistoric line).
        """
        return [number for number in self if number != 0]

    @property
    def has_stable_releases(self):
        """
        Whether any stable (post-0.x) release appears to exist.
        """
        nonzeroes = self.stable_families
        # Only 0.x families present: definitely still prehistory.
        if not nonzeroes:
            return False
        # More than one stable family implies at least one finished N.0.0.
        if len(nonzeroes) > 1:
            return True
        # Exactly one stable family: stable iff it holds any bucket besides
        # the "unreleased" ones (i.e. a real released version).
        return any(
            bucket for bucket in self[nonzeroes[0]]
            if not bucket.startswith("unreleased")
        )
|
from sys import stdout, stderr
from scell import Selector
from pytest import fixture
import scell.core
@fixture(autouse=True)
def mock_select(monkeypatch):
    """Stub out scell.core._select so tests never block on a real select().

    The stub echoes its inputs back; a call that would block forever
    (no timeout and nothing to wait on) raises RuntimeError instead.
    """
    def select(rlist, wlist, xlist, timeout=None):
        if not (timeout or rlist or wlist):
            raise RuntimeError
        return rlist, wlist, xlist
    monkeypatch.setattr(scell.core, '_select', select)
@fixture
def handles(request):
    # Two always-available real file objects to register with the selector.
    return [stdout, stderr]
@fixture
def selector(request, handles):
    """A Selector with every handle registered for read+write."""
    registry = Selector()
    for handle in handles:
        registry.register(handle, mode='rw', callback=lambda: 1)
    return registry
|
import inspect
import ast
from types import FunctionType
import hashlib
from typing import List
import astunparse
from pyminifier import minification
class VerfunException(Exception):
    """Raised for invalid verfun inputs (e.g. an empty function list)."""
    pass
def version_hash_for_function(fn: FunctionType) -> str:
    """Return a stable MD5 hex digest of ``fn``'s normalized source.

    The function name is replaced by a fixed placeholder and comments,
    docstrings and blank lines are stripped, so the digest changes only
    when the executable body changes.
    """
    tree = ast.parse(inspect.getsource(fn)).body[0]
    tree.name = "replaced_functionname"
    source = astunparse.unparse(tree)
    source = minification.remove_comments_and_docstrings(source)
    source = minification.remove_blank_lines(source)
    return hashlib.md5(source.encode('utf-8')).hexdigest()
def version_hash_for_function_list(functions: List[FunctionType]) -> str:
    """Combine per-function version hashes into a single digest.

    :raises VerfunException: when ``functions`` is None or empty.
    """
    if functions is None or len(functions) == 0:
        raise VerfunException("Supplied function list must have at least one function")
    hashes = [version_hash_for_function(fn) for fn in functions]
    if len(hashes) == 1:
        return hashes[0]
    return hashlib.md5("".join(hashes).encode('utf-8')).hexdigest()
|
from pyrogram import Client
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery
from pyrogram.errors import MessageNotModified
from functions.functions import speed_test, disk_space
# Inline keyboards reused by the bot's handlers.
# NOTE(review): presumably attached to start/help replies -- confirm in the
# message handlers, which are not in this file.
start_and_help = InlineKeyboardMarkup([[InlineKeyboardButton(text='Creator 🦾', url='https://t.me/MosheWin')],
                                      [
                                          InlineKeyboardButton(text='Source Code 🗃',
                                                               url='https://github.com/moshe-coh/Terminal-Bot')
                                      ]])
# Refresh buttons: callback_data routes back into bt() below.
refresh = InlineKeyboardMarkup([[InlineKeyboardButton(text='Refresh 💫', callback_data='refresh')]])
refresh_space = InlineKeyboardMarkup([[InlineKeyboardButton(text='Refresh 💫', callback_data='space')]])
@Client.on_callback_query()
async def bt(_, cb: CallbackQuery):
    """Handle the inline 'Refresh' buttons.

    'refresh' re-runs the speed test, 'space' re-runs the disk/RAM report;
    either way the originating message is edited in place.
    """
    if cb.data == "refresh":
        await cb.message.edit_text('Checking Again... ⏳ ')
        stats = speed_test()
        down = stats[0]
        up = stats[1]
        ping = stats[2]
        text = f"**📥 Download Speed:** {down}\n\n**📤 Upload Speed:** {up}\n\n**🩸 ping: ** {ping}"
        try:
            await cb.message.edit_text(text, reply_markup=refresh)
        except MessageNotModified:
            # Identical result to last time; nothing to update.
            pass
    elif cb.data == "space":
        await cb.message.edit_text('Checking Again... ⏳')
        usage = disk_space()
        total = usage[0]
        used = usage[1]
        free = usage[2]
        total_ram = usage[3]
        free_ram = usage[4]
        text = f"**💾 Total Storage:** {total}\n\n**💽 Storage Used:** {used}\n\n**💿 Free Storage:** {free}\n\n" \
               f"**Total Ram: {total_ram}**\n\n**Free Ram: {free_ram}**"
        try:
            await cb.message.edit_text(text, reply_markup=refresh_space)
        except MessageNotModified:
            pass
|
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""
test_sanity_bundle_augmentation.py
Unittest for bundle augmentation.
"""
from __future__ import absolute_import
import sys
import unittest
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.augmentation import ietf_aug_base_1
from ydk.models.augmentation import ietf_aug_base_2
from test_utils import assert_with_error
from test_utils import ParametrizedTestCase
from test_utils import get_device_info
class SanityYang(unittest.TestCase):
    """CRUD sanity tests for the YANG bundle-augmentation models."""

    @classmethod
    def setUpClass(cls):
        # Connection parameters (hostname, username, ...) are injected onto
        # the class by ParametrizedTestCase.parametrize (see __main__).
        cls.ncc = NetconfServiceProvider(
            cls.hostname,
            cls.username,
            cls.password,
            cls.port,
            cls.protocol,
            cls.on_demand,
            cls.common_cache,
            cls.timeout)
        cls.crud = CRUDService()

    def setUp(self):
        # Start each test from a clean device configuration.
        self.crud.delete(self.ncc, ietf_aug_base_1.Cpython())
        self.crud.delete(self.ncc, ietf_aug_base_2.Cpython())

    def tearDown(self):
        self.crud.delete(self.ncc, ietf_aug_base_1.Cpython())
        self.crud.delete(self.ncc, ietf_aug_base_2.Cpython())

    def test_aug_base_1(self):
        """Create a fully augmented base-1 config and read it back."""
        cpython = ietf_aug_base_1.Cpython()
        cpython.doc.ydktest_aug_1.aug_one = 'aug one'
        cpython.doc.ydktest_aug_2.aug_two = 'aug two'
        cpython.doc.ydktest_aug_4.aug_four = 'aug four'
        cpython.lib.ydktest_aug_1.ydktest_aug_nested_1.aug_one = 'aug one'
        cpython.lib.ydktest_aug_2.ydktest_aug_nested_2.aug_two = 'aug two'
        cpython.lib.ydktest_aug_4.ydktest_aug_nested_4.aug_four = 'aug four'
        cpython.doc.disutils.four_aug_list.enabled = True
        item1 = cpython.doc.disutils.four_aug_list.Ldata()
        item2 = cpython.doc.disutils.four_aug_list.Ldata()
        item1.name, item1.number = 'one', 1
        # BUG FIX: this previously assigned to item1.number instead of
        # item2.number, leaving item2 half-initialized and overwriting item1.
        item2.name, item2.number = 'two', 2
        # NOTE(review): item1/item2 are never appended to four_aug_list here;
        # confirm whether Ldata() auto-registers with its parent list.
        self.crud.create(self.ncc, cpython)
        cpython_read = self.crud.read(self.ncc, ietf_aug_base_1.Cpython())
        self.assertEqual(cpython, cpython_read)

    def test_aug_base_2(self):
        """Create a minimal base-2 config and read it back."""
        cpython = ietf_aug_base_2.Cpython()
        cpython.tools.aug_four = 'aug four'
        self.crud.create(self.ncc, cpython)
        cpython_read = self.crud.read(self.ncc, ietf_aug_base_2.Cpython())
        self.assertEqual(cpython, cpython_read)
if __name__ == '__main__':
    # Device/connection settings come from the test harness command line.
    device, non_demand, common_cache, timeout = get_device_info()
    suite = unittest.TestSuite()
    suite.addTest(ParametrizedTestCase.parametrize(
        SanityYang,
        device=device,
        non_demand=non_demand,
        common_cache=common_cache,
        timeout=timeout))
    # Exit non-zero when any test fails so CI can detect the failure.
    ret = not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
    sys.exit(ret)
|
# -*- coding: utf-8 -*-
#
# test_radkdict.py
# cjktools
#
import unittest
from six.moves import StringIO
from cjktools.resources.radkdict import RadkDict
def suite():
    """Build the test suite for this module."""
    # BUG FIX: the original wrote `((makeSuite(...)))`, which is NOT a tuple
    # (no trailing comma) -- it only worked because TestSuite accepts any
    # iterable of tests and a TestSuite is itself iterable.  The trailing
    # comma makes the intended one-element tuple explicit.
    test_suite = unittest.TestSuite((
        unittest.makeSuite(RadkdictTestCase),
    ))
    return test_suite
# Minimal radkfile-format excerpt: each `$ <radical> <stroke count> [image]`
# header is followed by the kanji containing that radical (here always 偏).
SAMPLE = \
"""
$ 一 1
偏
$ | 1
偏
$ 化 2 js01
偏
$ 冂 2
偏
$ 尸 3
偏
$ 戸 4
偏
$ 冊 5
偏
""" # nopep8
class RadkdictTestCase(unittest.TestCase):
    def test_fetch_radicals(self):
        """Parsing SAMPLE maps the kanji to the full set of its radicals."""
        kanji = u'偏'
        radk_dict = RadkDict(StringIO(SAMPLE))
        found = set(radk_dict[kanji])
        expected = set([u'一', u'|', u'化', u'冂', u'尸', u'戸', u'冊'])
        self.assertEqual(found, expected)

    def test_get_cached(self):
        """get_cached() hands back one shared RadkDict instance."""
        first = RadkDict.get_cached()
        self.assertIsInstance(first, RadkDict)
        second = RadkDict.get_cached()
        self.assertIs(first, second)
if __name__ == "__main__":
    # Run this module's suite directly (outside of a test runner).
    unittest.TextTestRunner(verbosity=1).run(suite())
|
"""
Takes a single argument -- a temperature in degrees Celsius -- and returns
the temperature in kelvins.  The lowest possible temperature in kelvins
is 0, so any computed value below that is clamped to 0.

Arguments:
    t {int or float} -- temperature in degrees Celsius

Returns:
    int or float -- temperature in kelvins
"""


def kelvins(c):
    # Clamp at absolute zero: nothing is colder than 0 K.
    return max(c + 273.15, 0)
"""
verktyg_server.tests.test_ssl
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright:
(c) 2015 by Ben Mather.
:license:
BSD, see LICENSE for more details.
"""
import unittest
class SSLTestCase(unittest.TestCase):
    """Placeholder suite; SSL-specific tests are yet to be written."""
    pass
|
class Test:
    # Class attribute shared by all instances until shadowed per-instance.
    x = 10
if __name__ == '__main__':
    # BUG FIX: the asserts compared ints against the placeholder string
    # 'update value' and always failed; filled in the correct expectations.
    a = Test()
    a.x = 1  # shadows the class attribute on instance `a` only
    b = Test()
    # `a` reads its own instance attribute; `b` falls through to the class.
    assert a.x == 1
    assert b.x == 10
    Test.x = 5
    c = Test()
    # Rebinding the class attribute affects every instance without a shadow.
    assert a.x == 1
    assert b.x == 5
    assert c.x == 5
|
from lektor.types import Type
from lektor.environment import PRIMARY_ALT
from lektor.i18n import get_i18n_block
class FakeType(Type):
    """Base for presentation-only field types that carry no data.

    ``value_from_raw`` always yields None, and the admin JSON payload is
    tagged with ``is_fake_type`` so the frontend can tell them apart.
    """
    def value_from_raw(self, raw):
        # Presentation widgets never produce a field value.
        return None
    def to_json(self, pad, record=None, alt=PRIMARY_ALT):
        rv = Type.to_json(self, pad, record, alt)
        rv['is_fake_type'] = True
        return rv
class LineType(FakeType):
    # Presentation-only field rendered by the 'f-line' admin widget.
    widget = 'f-line'
class SpacingType(FakeType):
    # Presentation-only field rendered by the 'f-spacing' admin widget.
    widget = 'f-spacing'
class InfoType(FakeType):
    # Presentation-only field rendered by the 'f-info' admin widget.
    widget = 'f-info'
class HeadingType(FakeType):
    # Presentation-only field rendered by the 'f-heading' admin widget.
    widget = 'f-heading'
    def to_json(self, pad, record=None, alt=PRIMARY_ALT):
        # Expose the localized heading text block alongside the base payload.
        rv = FakeType.to_json(self, pad, record, alt)
        rv['heading_i18n'] = get_i18n_block(self.options, 'heading')
        return rv
|
# -*- coding: utf-8 -*-
"""Syslog sender provider"""
from datetime import datetime
from builtins import input
from .base_sender import BaseSender
class SyslogSender(BaseSender):
    """Generate a lot of events from/for Syslog"""
    def __init__(self, engine, template, **kwargs):
        # engine/template are forwarded to BaseSender unchanged; `tag` (if
        # given in kwargs) becomes the syslog tag on every emitted event.
        BaseSender.__init__(self, engine, template, **kwargs)
        self.tag = kwargs.get('tag', 'test.keep.free')
    def run(self):
        """Run function for cli or call function"""
        # Loops forever: render the template, emit each rendered line
        # (subject to the configured probability), then either wait for
        # Enter (interactive mode) or sleep via self.wait().
        while True:
            lines = self.process(date_generator=self.date_generator).split('\n')
            for line in lines:
                if self.probability():
                    if not self.simulation:
                        # NOTE(review): assumes self.engine.send accepts
                        # tag/msg keywords -- confirm against the engine API.
                        self.engine.send(tag=self.tag, msg=str(line))
                    now = datetime.utcnow().ctime()
                    print('{0} => {1}'.format(now, str(line)))
                else:
                    # Line dropped by the probability gate; log the skip.
                    now = datetime.utcnow().ctime()
                    print('{0} => Skipped by prob.'.format(now))
            if self.interactive:
                input("» Press Enter for next iteration «")
            else:
                self.wait()
|
# Demonstration of list copying: append a *copy* (slice) of `teste`, then
# mutate the original -- the earlier copy stored in `galera` is unaffected.
teste = list()
teste.append('Pedro')
teste.append(18)
galera = list()
galera.append(teste[:])
teste[0] = 'Gustavo'
teste[1] = 40
galera.append(teste[:])
print(galera)
# Nested lists: indexing whole rows and individual cells.
maisteste = [['Pedro', 18], ['Joao', 16], ['Jaqueline', 41]]
print(maisteste[0])
print(maisteste[1][1])
for p in maisteste:
    print(f'{p[0]} tem {p[1]} anos de idade')
# Read three name/age pairs; append a copy of `dado` each round, then clear
# it so the next round starts empty (the stored copy keeps the old values).
teste1 = []
dado = []
for c in range(0, 3):
    dado.append(str(input('Nome: ')))
    dado.append(int(input('Idade: ')))
    teste1.append(dado[:])
    dado.clear()
print(teste1)
from torch.distributions import Categorical
import torch
def sample_action(logits: torch.Tensor, return_entropy_log_prob: bool = False):
    """Sample action indices from a categorical distribution over ``logits``.

    :param logits: unnormalized log-probabilities, shape ``(..., num_actions)``.
        (FIX: annotation previously said ``torch.tensor``, which is the
        factory function -- the type is ``torch.Tensor``.)
    :param return_entropy_log_prob: when True, additionally return the
        log-probability of each sampled action and the distribution entropy.
    :returns: sampled indices, or ``(indices, log_prob, entropy)``.
    """
    prob_dist = Categorical(logits=logits)
    y_preds = prob_dist.sample()
    if return_entropy_log_prob:
        return y_preds, prob_dist.log_prob(y_preds), prob_dist.entropy()
    return y_preds
from scipy.stats import truncnorm
import matplotlib.pyplot as plt
import numpy as np
import math
# import seaborn as snc
# Figure with three side-by-side panels (1 row x 3 columns).
fig = plt.figure()
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
# Total number of requests in the experiment -- TODO confirm this matches
# the lengths of the mec* sample lists below.
no_of_req = 2600
# Observed frequency of each outcome value (1-4); presumably the histogram
# of mec4 -- verify (values sum to 2600 == no_of_req).
freq4 = {1.0: 450, 2.0: 894, 3.0: 869, 4.0: 387}
mec4 = [3.0, 4.0, 3.0, 2.0, 4.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 4.0, 3.0, 2.0, 2.0, 1.0, 1.0, 4.0, 4.0, 3.0, 4.0, 2.0, 2.0, 3.0, 3.0, 1.0, 3.0, 3.0, 3.0, 1.0, 2.0, 3.0, 1.0, 3.0, 3.0, 3.0, 2.0, 1.0, 1.0, 3.0, 1.0, 2.0, 2.0, 4.0, 2.0, 3.0, 2.0, 4.0, 3.0, 2.0, 4.0, 4.0, 2.0, 3.0, 3.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 3.0, 1.0, 1.0, 1.0, 4.0, 2.0, 1.0, 4.0, 1.0, 2.0, 1.0, 2.0, 4.0, 4.0, 1.0, 4.0, 3.0, 1.0, 3.0, 4.0, 4.0, 2.0, 3.0, 3.0, 2.0, 3.0, 3.0, 3.0, 2.0, 3.0, 4.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0, 2.0, 2.0, 4.0, 3.0, 2.0, 2.0, 1.0, 3.0, 4.0, 1.0, 1.0, 2.0, 1.0, 3.0, 1.0, 3.0, 4.0, 3.0, 4.0, 3.0, 2.0, 3.0, 4.0, 3.0, 4.0, 4.0, 2.0, 4.0, 1.0, 4.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 2.0, 2.0, 1.0, 3.0, 2.0, 1.0, 3.0, 3.0, 3.0, 2.0, 4.0, 2.0, 1.0, 2.0, 3.0, 3.0, 2.0, 2.0, 2.0, 4.0, 3.0, 2.0, 3.0, 3.0, 1.0, 3.0, 4.0, 2.0, 3.0, 3.0, 2.0, 4.0, 1.0, 3.0, 4.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0, 4.0, 3.0, 2.0, 1.0, 2.0, 2.0, 3.0, 1.0, 3.0, 2.0, 4.0, 2.0, 3.0, 4.0, 4.0, 3.0, 3.0, 2.0, 3.0, 1.0, 2.0, 3.0, 2.0, 3.0, 1.0, 2.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 1.0, 4.0, 1.0, 2.0, 4.0, 1.0, 1.0, 2.0, 2.0, 4.0, 2.0, 3.0, 3.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 2.0, 1.0, 3.0, 4.0, 2.0, 2.0, 3.0, 1.0, 4.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 3.0, 1.0, 3.0, 2.0, 2.0, 3.0, 3.0, 1.0, 3.0, 3.0, 4.0, 4.0, 3.0, 3.0, 1.0, 3.0, 3.0, 2.0, 2.0, 3.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0, 3.0, 4.0, 3.0, 3.0, 3.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 2.0, 1.0, 1.0, 2.0, 2.0, 2.0, 4.0, 1.0, 3.0, 3.0, 2.0, 3.0, 3.0, 4.0, 2.0, 2.0, 3.0, 2.0, 2.0, 4.0, 3.0, 2.0, 3.0, 2.0, 3.0, 3.0, 2.0, 4.0, 3.0, 4.0, 1.0, 1.0, 2.0, 3.0, 4.0, 4.0, 1.0, 4.0, 4.0, 2.0, 3.0, 2.0, 2.0, 4.0, 1.0, 2.0, 3.0, 4.0, 3.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0, 1.0, 4.0, 3.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 2.0, 2.0, 3.0, 2.0, 3.0, 1.0, 3.0, 1.0, 4.0, 
2.0, 3.0, 2.0, 1.0, 2.0, 2.0, 3.0, 2.0, 1.0, 3.0, 1.0, 4.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 4.0, 3.0, 3.0, 1.0, 3.0, 4.0, 2.0, 3.0, 2.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2.0, 4.0, 2.0, 1.0, 2.0, 2.0, 3.0, 1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 3.0, 2.0, 4.0, 3.0, 1.0, 3.0, 2.0, 2.0, 3.0, 3.0, 2.0, 2.0, 3.0, 1.0, 2.0, 2.0, 1.0, 4.0, 1.0, 3.0, 2.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 3.0, 2.0, 1.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 3.0, 3.0, 2.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 3.0, 3.0, 4.0, 4.0, 2.0, 2.0, 1.0, 2.0, 1.0, 4.0, 2.0, 3.0, 3.0, 1.0, 3.0, 1.0, 4.0, 1.0, 2.0, 2.0, 3.0, 2.0, 4.0, 3.0, 4.0, 2.0, 2.0, 2.0, 2.0, 4.0, 4.0, 2.0, 3.0, 2.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 1.0, 2.0, 2.0, 2.0, 2.0, 4.0, 1.0, 4.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0, 4.0, 2.0, 4.0, 1.0, 2.0, 1.0, 3.0, 4.0, 1.0, 1.0, 2.0, 3.0, 3.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 4.0, 3.0, 2.0, 3.0, 2.0, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 2.0, 3.0, 2.0, 1.0, 3.0, 2.0, 3.0, 4.0, 4.0, 3.0, 1.0, 3.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 2.0, 3.0, 1.0, 4.0, 2.0, 3.0, 2.0, 3.0, 3.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 3.0, 2.0, 1.0, 2.0, 4.0, 3.0, 2.0, 2.0, 3.0, 3.0, 2.0, 2.0, 2.0, 3.0, 2.0, 1.0, 3.0, 3.0, 1.0, 2.0, 2.0, 1.0, 4.0, 1.0, 2.0, 3.0, 3.0, 1.0, 4.0, 2.0, 2.0, 1.0, 2.0, 3.0, 1.0, 3.0, 4.0, 2.0, 3.0, 2.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 4.0, 1.0, 4.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 2.0, 2.0, 2.0, 4.0, 1.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 2.0, 4.0, 3.0, 3.0, 4.0, 4.0, 2.0, 3.0, 2.0, 3.0, 3.0, 4.0, 1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 3.0, 1.0, 4.0, 3.0, 4.0, 1.0, 1.0, 4.0, 4.0, 3.0, 2.0, 1.0, 3.0, 4.0, 1.0, 2.0, 4.0, 1.0, 2.0, 2.0, 3.0, 2.0, 3.0, 3.0, 2.0, 1.0, 2.0, 3.0, 3.0, 2.0, 2.0, 2.0, 4.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 4.0, 2.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 
2.0, 2.0, 1.0, 3.0, 4.0, 3.0, 3.0, 3.0, 2.0, 3.0, 3.0, 2.0, 1.0, 3.0, 2.0, 3.0, 1.0, 2.0, 2.0, 2.0, 4.0, 3.0, 1.0, 1.0, 1.0, 3.0, 4.0, 4.0, 1.0, 3.0, 1.0, 2.0, 2.0, 2.0, 4.0, 2.0, 4.0, 1.0, 2.0, 4.0, 3.0, 1.0, 4.0, 3.0, 2.0, 1.0, 4.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 2.0, 4.0, 3.0, 3.0, 2.0, 1.0, 3.0, 3.0, 2.0, 3.0, 4.0, 3.0, 1.0, 3.0, 3.0, 1.0, 3.0, 4.0, 3.0, 4.0, 3.0, 2.0, 2.0, 3.0, 3.0, 3.0, 1.0, 2.0, 4.0, 2.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 3.0, 4.0, 3.0, 4.0, 4.0, 4.0, 3.0, 1.0, 3.0, 4.0, 4.0, 1.0, 2.0, 4.0, 3.0, 3.0, 2.0, 3.0, 4.0, 1.0, 1.0, 2.0, 1.0, 1.0, 3.0, 4.0, 4.0, 2.0, 3.0, 4.0, 2.0, 2.0, 3.0, 2.0, 4.0, 1.0, 2.0, 3.0, 1.0, 1.0, 2.0, 1.0, 3.0, 2.0, 1.0, 2.0, 2.0, 2.0, 4.0, 3.0, 3.0, 4.0, 3.0, 2.0, 3.0, 3.0, 1.0, 1.0, 2.0, 3.0, 4.0, 2.0, 2.0, 2.0, 2.0, 3.0, 2.0, 1.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 4.0, 1.0, 3.0, 2.0, 4.0, 3.0, 4.0, 4.0, 4.0, 3.0, 1.0, 2.0, 1.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 1.0, 3.0, 2.0, 2.0, 4.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 4.0, 2.0, 3.0, 1.0, 2.0, 4.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 2.0, 2.0, 1.0, 3.0, 1.0, 2.0, 1.0, 2.0, 3.0, 4.0, 2.0, 4.0, 2.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 4.0, 3.0, 4.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2.0, 2.0, 4.0, 2.0, 2.0, 2.0, 2.0, 2.0, 4.0, 2.0, 1.0, 2.0, 2.0, 1.0, 3.0, 4.0, 2.0, 1.0, 2.0, 2.0, 3.0, 4.0, 3.0, 3.0, 4.0, 3.0, 4.0, 3.0, 2.0, 2.0, 1.0, 2.0, 2.0, 4.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0, 2.0, 4.0, 3.0, 2.0, 2.0, 2.0, 3.0, 4.0, 2.0, 2.0, 4.0, 2.0, 2.0, 2.0, 3.0, 2.0, 2.0, 3.0, 3.0, 1.0, 4.0, 4.0, 3.0, 3.0, 4.0, 2.0, 4.0, 2.0, 2.0, 4.0, 2.0, 3.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 2.0, 2.0, 3.0, 1.0, 1.0, 4.0, 3.0, 3.0, 3.0, 2.0, 1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 4.0, 4.0, 1.0, 4.0, 4.0, 2.0, 1.0, 3.0, 2.0, 2.0, 1.0, 4.0, 2.0, 1.0, 4.0, 3.0, 2.0, 2.0, 1.0, 3.0, 1.0, 1.0, 4.0, 3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 2.0, 3.0, 3.0, 1.0, 1.0, 4.0, 3.0, 2.0, 3.0, 4.0, 3.0, 3.0, 2.0, 2.0, 1.0, 3.0, 3.0, 4.0, 2.0, 3.0, 
4.0, 2.0, 1.0, 2.0, 3.0, 1.0, 3.0, 3.0, 2.0, 4.0, 3.0, 3.0, 4.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 2.0, 2.0, 3.0, 3.0, 2.0, 3.0, 2.0, 1.0, 2.0, 2.0, 2.0, 3.0, 1.0, 3.0, 3.0, 3.0, 4.0, 4.0, 3.0, 1.0, 4.0, 3.0, 1.0, 4.0, 3.0, 3.0, 3.0, 2.0, 4.0, 2.0, 4.0, 3.0, 2.0, 4.0, 3.0, 1.0, 4.0, 4.0, 3.0, 3.0, 2.0, 2.0, 3.0, 1.0, 1.0, 2.0, 3.0, 4.0, 3.0, 2.0, 3.0, 3.0, 2.0, 3.0, 3.0, 3.0, 1.0, 1.0, 3.0, 3.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 1.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 2.0, 2.0, 3.0, 4.0, 2.0, 1.0, 4.0, 2.0, 1.0, 4.0, 3.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 4.0, 1.0, 3.0, 2.0, 4.0, 3.0, 3.0, 3.0, 3.0, 4.0, 1.0, 1.0, 3.0, 4.0, 3.0, 1.0, 2.0, 2.0, 2.0, 2.0, 3.0, 4.0, 1.0, 3.0, 2.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 1.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 4.0, 3.0, 2.0, 4.0, 3.0, 3.0, 1.0, 3.0, 2.0, 1.0, 4.0, 1.0, 2.0, 2.0, 3.0, 1.0, 4.0, 3.0, 2.0, 4.0, 4.0, 1.0, 1.0, 2.0, 2.0, 3.0, 1.0, 4.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 4.0, 4.0, 3.0, 3.0, 3.0, 1.0, 1.0, 4.0, 1.0, 3.0, 3.0, 3.0, 2.0, 4.0, 2.0, 2.0, 1.0, 3.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 3.0, 2.0, 3.0, 4.0, 3.0, 1.0, 4.0, 2.0, 2.0, 4.0, 2.0, 2.0, 1.0, 1.0, 3.0, 3.0, 2.0, 4.0, 2.0, 2.0, 3.0, 1.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0, 3.0, 4.0, 3.0, 3.0, 3.0, 4.0, 2.0, 4.0, 2.0, 1.0, 3.0, 4.0, 3.0, 4.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 1.0, 3.0, 3.0, 1.0, 2.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 4.0, 3.0, 2.0, 4.0, 3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 2.0, 1.0, 3.0, 2.0, 2.0, 4.0, 4.0, 3.0, 2.0, 2.0, 1.0, 3.0, 3.0, 2.0, 2.0, 4.0, 4.0, 3.0, 1.0, 4.0, 3.0, 1.0, 4.0, 2.0, 3.0, 4.0, 3.0, 3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 4.0, 2.0, 3.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 2.0, 2.0, 4.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 1.0, 4.0, 4.0, 3.0, 3.0, 1.0, 1.0, 1.0, 2.0, 1.0, 3.0, 4.0, 3.0, 2.0, 2.0, 2.0, 1.0, 3.0, 2.0, 2.0, 4.0, 3.0, 1.0, 2.0, 1.0, 1.0, 1.0, 3.0, 3.0, 1.0, 4.0, 2.0, 2.0, 1.0, 3.0, 3.0, 4.0, 3.0, 1.0, 2.0, 4.0, 1.0, 3.0, 1.0, 2.0, 3.0, 2.0, 1.0, 1.0, 2.0, 2.0, 4.0, 3.0, 2.0, 3.0, 2.0, 4.0, 3.0, 3.0, 2.0, 2.0, 2.0, 
2.0, 1.0, 3.0, 2.0, 4.0, 2.0, 2.0, 1.0, 3.0, 4.0, 4.0, 2.0, 3.0, 3.0, 2.0, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0, 4.0, 3.0, 3.0, 2.0, 2.0, 2.0, 4.0, 3.0, 2.0, 2.0, 4.0, 4.0, 2.0, 2.0, 2.0, 3.0, 3.0, 1.0, 4.0, 1.0, 3.0, 3.0, 3.0, 4.0, 4.0, 2.0, 2.0, 2.0, 3.0, 2.0, 3.0, 4.0, 1.0, 3.0, 2.0, 3.0, 4.0, 3.0, 3.0, 2.0, 3.0, 3.0, 1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 4.0, 2.0, 4.0, 3.0, 3.0, 3.0, 2.0, 2.0, 1.0, 4.0, 2.0, 3.0, 2.0, 4.0, 4.0, 3.0, 1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 4.0, 3.0, 1.0, 4.0, 2.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 2.0, 2.0, 1.0, 3.0, 3.0, 4.0, 2.0, 3.0, 3.0, 3.0, 2.0, 3.0, 2.0, 1.0, 1.0, 3.0, 2.0, 4.0, 3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 2.0, 2.0, 4.0, 1.0, 3.0, 1.0, 3.0, 2.0, 1.0, 3.0, 2.0, 4.0, 3.0, 1.0, 2.0, 2.0, 3.0, 1.0, 2.0, 4.0, 4.0, 2.0, 1.0, 4.0, 4.0, 3.0, 2.0, 4.0, 1.0, 3.0, 2.0, 4.0, 2.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 2.0, 2.0, 3.0, 3.0, 2.0, 3.0, 3.0, 2.0, 2.0, 1.0, 2.0, 3.0, 1.0, 3.0, 2.0, 3.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 4.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 3.0, 2.0, 1.0, 3.0, 3.0, 4.0, 4.0, 1.0, 3.0, 1.0, 4.0, 3.0, 2.0, 4.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 1.0, 3.0, 4.0, 4.0, 3.0, 3.0, 4.0, 3.0, 3.0, 2.0, 2.0, 2.0, 4.0, 2.0, 2.0, 2.0, 1.0, 2.0, 4.0, 2.0, 2.0, 3.0, 2.0, 3.0, 1.0, 3.0, 3.0, 2.0, 3.0, 1.0, 1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 4.0, 2.0, 1.0, 2.0, 2.0, 2.0, 3.0, 2.0, 3.0, 3.0, 1.0, 2.0, 2.0, 1.0, 3.0, 4.0, 3.0, 3.0, 1.0, 2.0, 2.0, 4.0, 1.0, 3.0, 4.0, 4.0, 3.0, 2.0, 4.0, 1.0, 2.0, 4.0, 4.0, 4.0, 2.0, 3.0, 3.0, 3.0, 2.0, 3.0, 3.0, 3.0, 4.0, 3.0, 3.0, 2.0, 3.0, 2.0, 1.0, 2.0, 4.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 2.0, 3.0, 3.0, 1.0, 3.0, 2.0, 3.0, 3.0, 3.0, 3.0, 2.0, 1.0, 2.0, 4.0, 1.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 2.0, 3.0, 1.0, 2.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0, 2.0, 4.0, 4.0, 4.0, 2.0, 3.0, 2.0, 2.0, 1.0, 1.0, 3.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 3.0, 2.0, 3.0, 2.0, 3.0, 4.0, 2.0, 1.0, 4.0, 3.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 2.0, 4.0, 4.0, 3.0, 4.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 1.0, 
2.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 1.0, 4.0, 2.0, 3.0, 3.0, 2.0, 1.0, 3.0, 2.0, 3.0, 3.0, 1.0, 3.0, 4.0, 2.0, 3.0, 4.0, 2.0, 1.0, 2.0, 3.0, 3.0, 1.0, 3.0, 3.0, 1.0, 2.0, 2.0, 1.0, 4.0, 3.0, 1.0, 2.0, 3.0, 4.0, 2.0, 2.0, 3.0, 1.0, 1.0, 2.0, 4.0, 2.0, 3.0, 2.0, 3.0, 1.0, 3.0, 3.0, 4.0, 2.0, 3.0, 3.0, 2.0, 3.0, 1.0, 3.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 1.0, 4.0, 2.0, 2.0, 1.0, 2.0, 3.0, 4.0, 4.0, 3.0, 3.0, 4.0, 4.0, 2.0, 3.0, 1.0, 2.0, 1.0, 2.0, 3.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0, 2.0, 1.0, 3.0, 2.0, 4.0, 2.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 4.0, 3.0, 2.0, 4.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 4.0, 2.0, 3.0, 3.0, 2.0, 2.0, 4.0, 4.0, 2.0, 1.0, 2.0, 3.0, 2.0, 2.0, 1.0, 3.0, 2.0, 1.0, 3.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 1.0, 2.0, 3.0, 3.0, 2.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 1.0, 3.0, 3.0, 3.0, 2.0, 4.0, 1.0, 3.0, 3.0, 4.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 1.0, 2.0, 3.0, 2.0, 3.0, 3.0, 1.0, 2.0, 3.0, 2.0, 4.0, 4.0, 3.0, 3.0, 3.0, 3.0, 2.0, 1.0, 2.0, 4.0, 1.0, 1.0, 4.0, 2.0, 3.0, 3.0, 4.0, 2.0, 2.0, 1.0, 3.0, 3.0, 2.0, 2.0, 3.0, 4.0, 2.0, 1.0, 4.0, 4.0, 3.0, 1.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 1.0, 4.0, 1.0, 2.0, 2.0, 2.0, 3.0, 1.0, 3.0, 3.0, 2.0, 3.0, 1.0, 3.0, 4.0, 4.0, 3.0, 1.0, 2.0, 3.0, 3.0, 1.0, 2.0, 1.0, 2.0, 1.0, 1.0, 3.0, 3.0, 3.0, 2.0, 1.0, 1.0, 3.0, 3.0, 2.0, 2.0, 2.0, 1.0, 3.0, 4.0, 1.0, 4.0, 4.0, 2.0, 4.0, 3.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 4.0, 2.0, 4.0, 2.0, 4.0, 2.0, 2.0, 2.0, 2.0, 1.0, 4.0, 3.0, 3.0, 3.0, 4.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 2.0, 2.0, 2.0, 1.0, 4.0, 2.0, 2.0, 2.0, 1.0, 3.0, 3.0, 1.0, 1.0, 2.0, 1.0, 2.0, 3.0, 1.0, 4.0, 2.0, 3.0, 2.0, 3.0, 3.0, 1.0, 4.0, 2.0, 3.0, 3.0, 3.0, 1.0, 3.0, 2.0, 1.0, 3.0, 3.0, 3.0, 4.0, 3.0, 2.0, 2.0, 3.0, 2.0, 1.0, 4.0, 4.0, 2.0, 2.0, 1.0, 4.0, 3.0, 1.0, 3.0, 4.0, 1.0, 2.0, 1.0, 3.0, 2.0, 1.0, 4.0, 2.0, 3.0, 3.0, 2.0, 
3.0, 1.0, 4.0, 3.0, 4.0, 4.0, 2.0, 2.0, 2.0, 3.0, 1.0, 4.0, 2.0, 4.0, 3.0, 4.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 3.0, 2.0, 2.0, 4.0, 3.0, 2.0, 1.0, 3.0, 2.0, 4.0, 3.0, 1.0, 1.0, 3.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 4.0, 2.0, 3.0, 2.0, 4.0, 1.0, 1.0, 2.0, 4.0, 3.0, 2.0, 3.0, 2.0, 2.0, 1.0, 4.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 4.0, 2.0, 4.0, 2.0, 3.0, 3.0, 2.0, 4.0, 2.0, 2.0, 2.0, 1.0, 2.0, 4.0, 2.0, 2.0, 2.0, 1.0, 2.0, 4.0, 2.0, 4.0, 3.0, 3.0, 2.0, 1.0, 2.0, 4.0, 4.0, 4.0, 2.0, 3.0, 3.0, 1.0, 2.0, 2.0, 1.0, 3.0, 1.0, 2.0, 2.0, 1.0, 3.0, 1.0, 2.0, 2.0, 1.0, 3.0, 3.0, 3.0, 1.0, 3.0, 2.0, 3.0, 3.0, 3.0, 4.0, 1.0, 1.0, 3.0, 1.0, 3.0, 2.0, 2.0, 1.0, 2.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 1.0, 3.0, 2.0, 2.0, 4.0, 3.0, 2.0, 4.0, 3.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 1.0, 4.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 2.0, 2.0, 1.0, 1.0, 3.0, 3.0, 4.0, 3.0, 3.0, 4.0, 2.0, 4.0, 4.0, 2.0, 3.0, 3.0, 3.0, 3.0, 2.0, 1.0, 2.0, 2.0, 4.0, 1.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 2.0, 3.0, 1.0, 3.0, 3.0, 4.0, 4.0, 1.0, 3.0, 3.0, 4.0]
# Observed frequency of each outcome value (1-7); presumably the histogram
# of mec7 below -- verify (values sum to 2600).
freq7 = {1.0: 145, 2.0: 370, 3.0: 410, 4.0: 457, 5.0: 518, 6.0: 461, 7.0: 239}
mec7 = [6.0, 6.0, 6.0, 4.0, 6.0, 3.0, 2.0, 6.0, 7.0, 5.0, 7.0, 5.0, 5.0, 3.0, 4.0, 4.0, 4.0, 3.0, 1.0, 4.0, 7.0, 3.0, 6.0, 7.0, 7.0, 4.0, 5.0, 3.0, 6.0, 5.0, 4.0, 4.0, 7.0, 3.0, 5.0, 4.0, 4.0, 6.0, 2.0, 6.0, 5.0, 5.0, 2.0, 4.0, 3.0, 3.0, 1.0, 1.0, 2.0, 3.0, 7.0, 3.0, 3.0, 6.0, 4.0, 6.0, 4.0, 5.0, 3.0, 6.0, 7.0, 4.0, 7.0, 2.0, 4.0, 3.0, 3.0, 3.0, 3.0, 5.0, 5.0, 7.0, 7.0, 5.0, 2.0, 5.0, 4.0, 4.0, 7.0, 4.0, 3.0, 3.0, 4.0, 3.0, 4.0, 1.0, 2.0, 1.0, 5.0, 3.0, 4.0, 6.0, 6.0, 4.0, 5.0, 4.0, 6.0, 5.0, 4.0, 6.0, 3.0, 6.0, 3.0, 6.0, 2.0, 6.0, 6.0, 5.0, 4.0, 6.0, 2.0, 3.0, 4.0, 5.0, 2.0, 7.0, 1.0, 7.0, 4.0, 6.0, 6.0, 1.0, 5.0, 6.0, 5.0, 6.0, 4.0, 6.0, 5.0, 4.0, 4.0, 2.0, 6.0, 4.0, 3.0, 3.0, 5.0, 6.0, 7.0, 5.0, 2.0, 6.0, 3.0, 6.0, 6.0, 6.0, 2.0, 1.0, 3.0, 4.0, 5.0, 6.0, 5.0, 3.0, 5.0, 7.0, 7.0, 2.0, 2.0, 4.0, 7.0, 4.0, 6.0, 4.0, 5.0, 4.0, 2.0, 6.0, 2.0, 6.0, 6.0, 4.0, 7.0, 5.0, 4.0, 5.0, 2.0, 4.0, 6.0, 4.0, 5.0, 4.0, 3.0, 6.0, 6.0, 5.0, 3.0, 2.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, 3.0, 6.0, 5.0, 5.0, 2.0, 5.0, 6.0, 5.0, 3.0, 5.0, 5.0, 6.0, 5.0, 6.0, 1.0, 6.0, 3.0, 2.0, 5.0, 2.0, 6.0, 2.0, 4.0, 2.0, 2.0, 4.0, 7.0, 5.0, 5.0, 6.0, 4.0, 6.0, 4.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 2.0, 3.0, 3.0, 4.0, 3.0, 6.0, 2.0, 5.0, 2.0, 5.0, 4.0, 5.0, 6.0, 1.0, 6.0, 5.0, 3.0, 5.0, 5.0, 2.0, 6.0, 2.0, 7.0, 4.0, 2.0, 3.0, 4.0, 5.0, 3.0, 5.0, 5.0, 3.0, 3.0, 5.0, 5.0, 4.0, 6.0, 1.0, 1.0, 2.0, 3.0, 3.0, 4.0, 2.0, 7.0, 4.0, 4.0, 1.0, 3.0, 7.0, 2.0, 6.0, 2.0, 2.0, 5.0, 7.0, 4.0, 4.0, 2.0, 1.0, 3.0, 5.0, 3.0, 6.0, 5.0, 1.0, 4.0, 4.0, 2.0, 3.0, 1.0, 6.0, 4.0, 2.0, 5.0, 3.0, 6.0, 6.0, 5.0, 7.0, 2.0, 3.0, 5.0, 5.0, 4.0, 5.0, 6.0, 2.0, 4.0, 5.0, 4.0, 6.0, 5.0, 4.0, 6.0, 5.0, 5.0, 6.0, 5.0, 4.0, 5.0, 2.0, 6.0, 4.0, 5.0, 7.0, 7.0, 7.0, 5.0, 3.0, 4.0, 6.0, 5.0, 3.0, 5.0, 3.0, 6.0, 4.0, 6.0, 4.0, 5.0, 7.0, 5.0, 3.0, 4.0, 5.0, 5.0, 2.0, 2.0, 5.0, 2.0, 1.0, 5.0, 4.0, 4.0, 6.0, 4.0, 1.0, 6.0, 2.0, 5.0, 4.0, 5.0, 7.0, 2.0, 7.0, 2.0, 5.0, 4.0, 4.0, 2.0, 5.0, 6.0, 4.0, 5.0, 5.0, 6.0, 6.0, 3.0, 5.0, 7.0, 6.0, 3.0, 5.0, 
5.0, 3.0, 3.0, 7.0, 5.0, 4.0, 6.0, 2.0, 6.0, 6.0, 3.0, 2.0, 2.0, 6.0, 4.0, 2.0, 2.0, 5.0, 6.0, 6.0, 2.0, 3.0, 6.0, 4.0, 2.0, 7.0, 7.0, 4.0, 4.0, 7.0, 4.0, 7.0, 5.0, 6.0, 2.0, 4.0, 3.0, 1.0, 2.0, 3.0, 6.0, 5.0, 7.0, 5.0, 3.0, 3.0, 5.0, 6.0, 3.0, 3.0, 2.0, 4.0, 2.0, 6.0, 3.0, 3.0, 7.0, 2.0, 7.0, 7.0, 3.0, 5.0, 6.0, 3.0, 2.0, 6.0, 4.0, 7.0, 6.0, 5.0, 2.0, 4.0, 7.0, 3.0, 6.0, 2.0, 1.0, 5.0, 2.0, 4.0, 7.0, 4.0, 3.0, 2.0, 3.0, 4.0, 4.0, 5.0, 4.0, 4.0, 6.0, 2.0, 5.0, 3.0, 7.0, 2.0, 1.0, 6.0, 5.0, 1.0, 3.0, 2.0, 3.0, 3.0, 4.0, 3.0, 6.0, 2.0, 1.0, 7.0, 3.0, 1.0, 2.0, 5.0, 6.0, 5.0, 2.0, 2.0, 5.0, 6.0, 7.0, 5.0, 6.0, 5.0, 5.0, 4.0, 5.0, 4.0, 4.0, 4.0, 3.0, 2.0, 4.0, 6.0, 5.0, 7.0, 6.0, 7.0, 7.0, 6.0, 6.0, 3.0, 3.0, 4.0, 4.0, 6.0, 3.0, 5.0, 5.0, 6.0, 3.0, 5.0, 6.0, 5.0, 4.0, 6.0, 6.0, 3.0, 4.0, 4.0, 2.0, 5.0, 6.0, 3.0, 4.0, 5.0, 7.0, 1.0, 6.0, 3.0, 3.0, 3.0, 6.0, 3.0, 4.0, 7.0, 6.0, 7.0, 1.0, 5.0, 5.0, 2.0, 5.0, 7.0, 3.0, 2.0, 6.0, 6.0, 3.0, 7.0, 3.0, 5.0, 4.0, 1.0, 5.0, 3.0, 3.0, 2.0, 6.0, 5.0, 2.0, 5.0, 3.0, 2.0, 6.0, 6.0, 5.0, 1.0, 2.0, 4.0, 7.0, 3.0, 4.0, 2.0, 3.0, 2.0, 5.0, 4.0, 6.0, 5.0, 3.0, 3.0, 5.0, 2.0, 7.0, 5.0, 5.0, 2.0, 2.0, 5.0, 6.0, 2.0, 5.0, 7.0, 6.0, 5.0, 5.0, 4.0, 7.0, 7.0, 6.0, 5.0, 4.0, 7.0, 1.0, 4.0, 6.0, 2.0, 3.0, 2.0, 6.0, 5.0, 4.0, 6.0, 6.0, 5.0, 7.0, 3.0, 3.0, 5.0, 3.0, 5.0, 2.0, 7.0, 4.0, 5.0, 2.0, 3.0, 4.0, 5.0, 3.0, 6.0, 2.0, 3.0, 6.0, 3.0, 5.0, 7.0, 5.0, 6.0, 5.0, 3.0, 4.0, 7.0, 3.0, 3.0, 2.0, 2.0, 7.0, 4.0, 5.0, 5.0, 5.0, 4.0, 5.0, 7.0, 6.0, 5.0, 3.0, 7.0, 5.0, 6.0, 5.0, 6.0, 7.0, 5.0, 4.0, 3.0, 2.0, 5.0, 5.0, 3.0, 4.0, 7.0, 3.0, 2.0, 6.0, 3.0, 3.0, 1.0, 6.0, 2.0, 2.0, 5.0, 4.0, 5.0, 1.0, 5.0, 5.0, 4.0, 1.0, 4.0, 2.0, 5.0, 5.0, 1.0, 4.0, 6.0, 5.0, 4.0, 5.0, 5.0, 6.0, 3.0, 6.0, 5.0, 5.0, 2.0, 4.0, 3.0, 5.0, 7.0, 2.0, 4.0, 4.0, 6.0, 3.0, 3.0, 5.0, 6.0, 4.0, 4.0, 6.0, 5.0, 5.0, 5.0, 1.0, 5.0, 3.0, 6.0, 3.0, 4.0, 4.0, 1.0, 4.0, 6.0, 4.0, 2.0, 6.0, 6.0, 5.0, 3.0, 4.0, 5.0, 5.0, 4.0, 5.0, 2.0, 4.0, 6.0, 4.0, 1.0, 4.0, 6.0, 1.0, 5.0, 6.0, 5.0, 3.0, 5.0, 
1.0, 4.0, 6.0, 6.0, 6.0, 4.0, 3.0, 3.0, 7.0, 4.0, 3.0, 4.0, 3.0, 3.0, 2.0, 6.0, 2.0, 5.0, 4.0, 6.0, 3.0, 3.0, 5.0, 3.0, 5.0, 4.0, 6.0, 3.0, 2.0, 5.0, 7.0, 3.0, 2.0, 3.0, 4.0, 3.0, 3.0, 5.0, 2.0, 5.0, 6.0, 7.0, 5.0, 3.0, 5.0, 6.0, 2.0, 7.0, 3.0, 1.0, 2.0, 5.0, 3.0, 5.0, 4.0, 4.0, 3.0, 4.0, 1.0, 3.0, 5.0, 3.0, 5.0, 1.0, 4.0, 2.0, 2.0, 3.0, 4.0, 5.0, 5.0, 6.0, 2.0, 5.0, 3.0, 5.0, 7.0, 5.0, 5.0, 5.0, 2.0, 1.0, 4.0, 3.0, 5.0, 4.0, 2.0, 6.0, 3.0, 6.0, 3.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 3.0, 7.0, 7.0, 4.0, 5.0, 2.0, 6.0, 3.0, 6.0, 5.0, 3.0, 3.0, 3.0, 7.0, 5.0, 3.0, 5.0, 6.0, 4.0, 6.0, 5.0, 1.0, 6.0, 4.0, 3.0, 5.0, 5.0, 2.0, 4.0, 5.0, 3.0, 3.0, 6.0, 7.0, 2.0, 7.0, 2.0, 2.0, 2.0, 5.0, 4.0, 1.0, 3.0, 4.0, 5.0, 4.0, 5.0, 3.0, 1.0, 6.0, 4.0, 6.0, 4.0, 5.0, 3.0, 4.0, 4.0, 2.0, 5.0, 2.0, 2.0, 4.0, 7.0, 1.0, 2.0, 5.0, 7.0, 3.0, 4.0, 6.0, 2.0, 7.0, 5.0, 6.0, 3.0, 2.0, 2.0, 5.0, 1.0, 6.0, 5.0, 5.0, 1.0, 3.0, 6.0, 1.0, 3.0, 4.0, 7.0, 1.0, 5.0, 2.0, 3.0, 5.0, 4.0, 6.0, 5.0, 5.0, 6.0, 6.0, 3.0, 4.0, 4.0, 5.0, 5.0, 3.0, 5.0, 4.0, 5.0, 2.0, 3.0, 6.0, 4.0, 4.0, 4.0, 2.0, 4.0, 6.0, 1.0, 3.0, 4.0, 3.0, 5.0, 2.0, 6.0, 6.0, 7.0, 5.0, 4.0, 4.0, 4.0, 6.0, 5.0, 4.0, 7.0, 7.0, 3.0, 4.0, 6.0, 6.0, 7.0, 5.0, 3.0, 1.0, 5.0, 6.0, 6.0, 6.0, 4.0, 7.0, 2.0, 3.0, 6.0, 5.0, 2.0, 2.0, 6.0, 6.0, 6.0, 3.0, 6.0, 4.0, 5.0, 7.0, 1.0, 3.0, 4.0, 6.0, 3.0, 5.0, 6.0, 4.0, 5.0, 7.0, 3.0, 7.0, 2.0, 6.0, 5.0, 7.0, 2.0, 4.0, 3.0, 1.0, 4.0, 7.0, 6.0, 4.0, 3.0, 5.0, 5.0, 2.0, 6.0, 5.0, 2.0, 4.0, 4.0, 2.0, 4.0, 4.0, 3.0, 2.0, 3.0, 3.0, 5.0, 7.0, 2.0, 7.0, 7.0, 7.0, 2.0, 3.0, 3.0, 4.0, 1.0, 4.0, 2.0, 1.0, 4.0, 6.0, 5.0, 4.0, 4.0, 7.0, 2.0, 3.0, 2.0, 2.0, 6.0, 6.0, 4.0, 4.0, 4.0, 6.0, 3.0, 3.0, 6.0, 4.0, 2.0, 6.0, 3.0, 3.0, 5.0, 1.0, 5.0, 4.0, 5.0, 6.0, 6.0, 5.0, 7.0, 3.0, 4.0, 6.0, 1.0, 4.0, 2.0, 3.0, 3.0, 5.0, 5.0, 5.0, 3.0, 5.0, 1.0, 2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 5.0, 6.0, 6.0, 7.0, 3.0, 5.0, 3.0, 5.0, 4.0, 3.0, 1.0, 2.0, 2.0, 5.0, 3.0, 5.0, 6.0, 2.0, 1.0, 5.0, 6.0, 5.0, 5.0, 6.0, 7.0, 5.0, 6.0, 4.0, 4.0, 5.0, 4.0, 
6.0, 4.0, 4.0, 2.0, 5.0, 5.0, 3.0, 6.0, 3.0, 5.0, 4.0, 3.0, 5.0, 3.0, 7.0, 5.0, 5.0, 6.0, 4.0, 2.0, 3.0, 7.0, 5.0, 5.0, 6.0, 5.0, 1.0, 4.0, 2.0, 2.0, 5.0, 4.0, 3.0, 3.0, 3.0, 6.0, 3.0, 6.0, 6.0, 5.0, 3.0, 6.0, 6.0, 3.0, 1.0, 3.0, 2.0, 4.0, 1.0, 2.0, 5.0, 7.0, 6.0, 5.0, 7.0, 1.0, 2.0, 5.0, 4.0, 5.0, 6.0, 1.0, 4.0, 2.0, 3.0, 3.0, 4.0, 6.0, 5.0, 4.0, 6.0, 6.0, 2.0, 6.0, 2.0, 6.0, 5.0, 2.0, 2.0, 2.0, 4.0, 2.0, 3.0, 6.0, 2.0, 5.0, 2.0, 2.0, 7.0, 3.0, 4.0, 6.0, 7.0, 5.0, 2.0, 7.0, 3.0, 4.0, 2.0, 7.0, 5.0, 6.0, 4.0, 6.0, 5.0, 6.0, 7.0, 4.0, 3.0, 6.0, 6.0, 6.0, 5.0, 3.0, 5.0, 6.0, 3.0, 6.0, 2.0, 6.0, 3.0, 5.0, 6.0, 5.0, 3.0, 4.0, 6.0, 5.0, 6.0, 5.0, 4.0, 2.0, 6.0, 3.0, 3.0, 2.0, 4.0, 4.0, 3.0, 2.0, 4.0, 5.0, 2.0, 3.0, 7.0, 3.0, 6.0, 2.0, 5.0, 5.0, 2.0, 6.0, 2.0, 6.0, 5.0, 5.0, 2.0, 2.0, 7.0, 2.0, 7.0, 6.0, 2.0, 2.0, 3.0, 5.0, 5.0, 4.0, 4.0, 2.0, 6.0, 7.0, 4.0, 4.0, 3.0, 3.0, 3.0, 7.0, 7.0, 2.0, 2.0, 7.0, 3.0, 2.0, 2.0, 4.0, 3.0, 4.0, 6.0, 1.0, 6.0, 5.0, 4.0, 3.0, 1.0, 5.0, 7.0, 6.0, 5.0, 5.0, 4.0, 6.0, 3.0, 5.0, 4.0, 5.0, 3.0, 6.0, 5.0, 4.0, 3.0, 4.0, 6.0, 7.0, 4.0, 2.0, 6.0, 5.0, 5.0, 4.0, 1.0, 7.0, 4.0, 5.0, 4.0, 4.0, 7.0, 5.0, 2.0, 7.0, 7.0, 1.0, 5.0, 5.0, 4.0, 7.0, 6.0, 4.0, 4.0, 2.0, 3.0, 4.0, 3.0, 5.0, 4.0, 6.0, 2.0, 4.0, 5.0, 4.0, 4.0, 6.0, 6.0, 7.0, 4.0, 7.0, 4.0, 4.0, 6.0, 1.0, 2.0, 2.0, 5.0, 6.0, 2.0, 3.0, 4.0, 6.0, 4.0, 6.0, 7.0, 5.0, 5.0, 2.0, 4.0, 6.0, 6.0, 5.0, 5.0, 3.0, 1.0, 3.0, 6.0, 5.0, 5.0, 7.0, 1.0, 2.0, 4.0, 6.0, 3.0, 3.0, 4.0, 4.0, 4.0, 7.0, 4.0, 7.0, 4.0, 6.0, 3.0, 3.0, 2.0, 1.0, 2.0, 3.0, 7.0, 5.0, 2.0, 4.0, 2.0, 5.0, 4.0, 4.0, 3.0, 6.0, 6.0, 6.0, 5.0, 5.0, 4.0, 3.0, 3.0, 4.0, 5.0, 3.0, 4.0, 4.0, 2.0, 1.0, 4.0, 3.0, 1.0, 6.0, 5.0, 2.0, 6.0, 4.0, 4.0, 5.0, 2.0, 4.0, 2.0, 5.0, 5.0, 6.0, 2.0, 7.0, 4.0, 5.0, 5.0, 5.0, 6.0, 3.0, 4.0, 5.0, 1.0, 5.0, 2.0, 3.0, 5.0, 4.0, 3.0, 2.0, 4.0, 6.0, 5.0, 4.0, 4.0, 6.0, 6.0, 7.0, 2.0, 1.0, 6.0, 4.0, 5.0, 6.0, 4.0, 6.0, 7.0, 5.0, 3.0, 2.0, 5.0, 4.0, 4.0, 4.0, 5.0, 5.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 5.0, 3.0, 4.0, 
5.0, 3.0, 5.0, 5.0, 2.0, 5.0, 4.0, 2.0, 1.0, 2.0, 4.0, 7.0, 4.0, 6.0, 6.0, 7.0, 5.0, 7.0, 5.0, 6.0, 3.0, 6.0, 5.0, 7.0, 5.0, 3.0, 1.0, 5.0, 5.0, 2.0, 6.0, 6.0, 6.0, 3.0, 5.0, 5.0, 6.0, 7.0, 3.0, 7.0, 5.0, 3.0, 2.0, 5.0, 5.0, 2.0, 6.0, 6.0, 6.0, 6.0, 5.0, 2.0, 2.0, 6.0, 5.0, 7.0, 2.0, 4.0, 6.0, 6.0, 3.0, 1.0, 3.0, 3.0, 3.0, 4.0, 7.0, 1.0, 4.0, 3.0, 4.0, 3.0, 3.0, 5.0, 5.0, 6.0, 2.0, 5.0, 2.0, 4.0, 7.0, 6.0, 7.0, 4.0, 2.0, 7.0, 2.0, 2.0, 2.0, 2.0, 6.0, 6.0, 7.0, 5.0, 2.0, 6.0, 5.0, 4.0, 1.0, 5.0, 3.0, 2.0, 6.0, 4.0, 7.0, 3.0, 2.0, 3.0, 6.0, 3.0, 3.0, 4.0, 4.0, 3.0, 1.0, 5.0, 3.0, 2.0, 7.0, 7.0, 3.0, 3.0, 6.0, 7.0, 6.0, 4.0, 5.0, 4.0, 4.0, 4.0, 5.0, 6.0, 3.0, 6.0, 5.0, 2.0, 1.0, 5.0, 6.0, 1.0, 3.0, 6.0, 1.0, 4.0, 3.0, 4.0, 5.0, 7.0, 5.0, 5.0, 1.0, 4.0, 5.0, 2.0, 5.0, 7.0, 6.0, 7.0, 3.0, 3.0, 6.0, 3.0, 6.0, 7.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 4.0, 6.0, 7.0, 3.0, 3.0, 5.0, 6.0, 7.0, 2.0, 3.0, 4.0, 2.0, 7.0, 1.0, 2.0, 6.0, 4.0, 3.0, 4.0, 4.0, 6.0, 6.0, 1.0, 5.0, 7.0, 2.0, 7.0, 3.0, 5.0, 5.0, 4.0, 7.0, 4.0, 7.0, 6.0, 5.0, 3.0, 2.0, 4.0, 5.0, 2.0, 2.0, 6.0, 4.0, 3.0, 5.0, 7.0, 4.0, 2.0, 2.0, 5.0, 3.0, 2.0, 2.0, 1.0, 4.0, 7.0, 4.0, 5.0, 6.0, 6.0, 3.0, 3.0, 5.0, 5.0, 4.0, 4.0, 5.0, 4.0, 7.0, 1.0, 2.0, 3.0, 6.0, 5.0, 2.0, 2.0, 2.0, 2.0, 5.0, 3.0, 3.0, 4.0, 5.0, 7.0, 6.0, 6.0, 5.0, 2.0, 5.0, 6.0, 6.0, 1.0, 5.0, 7.0, 7.0, 3.0, 3.0, 1.0, 4.0, 6.0, 3.0, 3.0, 6.0, 7.0, 4.0, 3.0, 2.0, 3.0, 1.0, 5.0, 2.0, 3.0, 1.0, 1.0, 5.0, 4.0, 6.0, 5.0, 4.0, 7.0, 2.0, 5.0, 6.0, 1.0, 5.0, 1.0, 1.0, 6.0, 2.0, 2.0, 5.0, 4.0, 3.0, 4.0, 6.0, 3.0, 1.0, 5.0, 7.0, 4.0, 6.0, 5.0, 5.0, 7.0, 5.0, 7.0, 4.0, 5.0, 5.0, 6.0, 5.0, 1.0, 5.0, 4.0, 7.0, 5.0, 7.0, 5.0, 2.0, 7.0, 7.0, 1.0, 6.0, 4.0, 7.0, 7.0, 4.0, 4.0, 4.0, 5.0, 5.0, 6.0, 7.0, 7.0, 6.0, 6.0, 6.0, 4.0, 2.0, 6.0, 5.0, 6.0, 3.0, 2.0, 2.0, 3.0, 5.0, 7.0, 2.0, 4.0, 7.0, 3.0, 4.0, 3.0, 4.0, 2.0, 4.0, 2.0, 6.0, 5.0, 2.0, 1.0, 4.0, 5.0, 2.0, 3.0, 7.0, 4.0, 5.0, 3.0, 4.0, 1.0, 4.0, 2.0, 3.0, 7.0, 2.0, 6.0, 2.0, 3.0, 4.0, 2.0, 5.0, 6.0, 2.0, 4.0, 2.0, 3.0, 5.0, 
4.0, 4.0, 3.0, 1.0, 6.0, 7.0, 5.0, 4.0, 2.0, 5.0, 2.0, 6.0, 4.0, 6.0, 2.0, 5.0, 6.0, 3.0, 4.0, 5.0, 6.0, 2.0, 4.0, 2.0, 2.0, 1.0, 4.0, 6.0, 6.0, 5.0, 5.0, 6.0, 6.0, 3.0, 5.0, 7.0, 4.0, 6.0, 6.0, 1.0, 5.0, 1.0, 2.0, 5.0, 4.0, 5.0, 2.0, 4.0, 2.0, 4.0, 7.0, 2.0, 3.0, 2.0, 4.0, 7.0, 3.0, 3.0, 5.0, 4.0, 6.0, 6.0, 1.0, 2.0, 5.0, 6.0, 6.0, 6.0, 7.0, 2.0, 4.0, 6.0, 2.0, 4.0, 6.0, 7.0, 2.0, 3.0, 6.0, 6.0, 5.0, 2.0, 1.0, 3.0, 6.0, 3.0, 1.0, 5.0, 3.0, 4.0, 3.0, 3.0, 7.0, 4.0, 5.0, 2.0, 2.0, 2.0, 2.0, 5.0, 5.0, 3.0, 5.0, 4.0, 5.0, 2.0, 6.0, 7.0, 4.0, 5.0, 5.0, 4.0, 6.0, 4.0, 4.0, 6.0, 4.0, 5.0, 7.0, 2.0, 4.0, 5.0, 6.0, 6.0, 2.0, 2.0, 6.0, 7.0, 4.0, 3.0, 6.0, 6.0, 3.0, 7.0, 4.0, 5.0, 3.0, 5.0, 4.0, 3.0, 6.0, 7.0, 3.0, 3.0, 1.0, 6.0, 6.0, 7.0, 7.0, 5.0, 4.0, 5.0, 6.0, 4.0, 4.0, 3.0, 5.0, 2.0, 1.0, 5.0, 3.0, 3.0, 4.0, 4.0, 1.0, 3.0, 4.0, 5.0, 5.0, 1.0, 3.0, 2.0, 6.0, 5.0, 3.0, 3.0, 5.0, 4.0, 6.0, 2.0, 5.0, 5.0, 3.0, 6.0, 6.0, 7.0, 6.0, 6.0, 5.0, 3.0, 2.0, 2.0, 5.0, 3.0, 2.0, 5.0, 1.0, 4.0, 3.0, 5.0, 3.0, 4.0, 3.0, 7.0, 4.0, 1.0, 7.0, 3.0, 4.0, 6.0, 6.0, 7.0, 5.0, 5.0, 1.0, 2.0, 7.0, 3.0, 3.0, 6.0, 2.0, 5.0, 7.0, 2.0, 6.0, 5.0, 6.0, 7.0, 5.0, 7.0, 6.0, 3.0, 5.0, 3.0, 2.0, 5.0, 5.0, 7.0, 3.0, 5.0, 5.0, 7.0, 4.0, 2.0, 6.0, 4.0, 6.0, 6.0, 5.0, 5.0, 3.0, 3.0, 4.0, 2.0, 3.0, 4.0, 4.0, 5.0, 5.0, 2.0, 2.0, 4.0, 7.0, 5.0, 4.0, 6.0, 4.0, 5.0, 5.0, 4.0, 6.0, 4.0, 5.0, 3.0, 3.0, 2.0, 7.0, 1.0, 3.0, 6.0, 6.0, 3.0, 4.0, 2.0, 2.0, 4.0, 1.0, 5.0, 5.0, 7.0, 6.0, 6.0, 3.0, 3.0, 4.0, 4.0, 2.0, 7.0, 6.0, 2.0, 3.0, 6.0, 5.0, 2.0, 2.0, 5.0, 6.0, 4.0, 6.0, 7.0, 3.0, 6.0, 6.0, 4.0, 4.0, 4.0, 6.0, 4.0, 6.0, 6.0, 5.0, 1.0, 4.0, 3.0, 6.0, 5.0, 5.0, 5.0, 3.0, 1.0, 5.0, 7.0, 6.0, 4.0, 4.0, 5.0, 2.0, 3.0, 7.0, 7.0, 6.0, 6.0, 2.0, 7.0, 4.0, 5.0, 7.0, 2.0, 3.0, 2.0, 6.0, 1.0, 6.0, 1.0, 2.0, 3.0, 6.0, 7.0, 5.0, 4.0, 7.0, 7.0, 3.0, 2.0, 6.0, 5.0, 5.0, 2.0, 2.0, 6.0, 2.0, 7.0, 5.0, 6.0, 2.0, 6.0, 4.0, 3.0, 4.0, 5.0, 6.0, 4.0, 3.0, 1.0, 3.0, 2.0, 4.0, 2.0, 2.0, 4.0, 7.0, 4.0, 3.0, 3.0, 6.0, 5.0, 2.0, 7.0, 1.0, 5.0, 
4.0, 3.0, 1.0, 2.0, 6.0, 7.0, 3.0, 6.0, 5.0, 3.0, 7.0, 7.0, 2.0, 2.0, 6.0, 2.0, 2.0, 3.0, 6.0, 6.0, 2.0, 6.0, 6.0, 6.0, 6.0, 6.0, 2.0, 6.0, 4.0, 6.0, 4.0, 7.0, 6.0, 2.0, 6.0, 2.0, 7.0, 4.0, 1.0, 6.0, 4.0, 5.0, 6.0, 7.0, 1.0, 6.0, 5.0, 6.0, 6.0, 5.0, 3.0, 4.0, 2.0, 3.0, 2.0, 6.0, 3.0, 4.0, 2.0, 6.0, 4.0, 5.0, 4.0, 3.0, 7.0, 6.0, 5.0, 6.0, 3.0, 1.0, 5.0, 4.0, 3.0, 6.0, 3.0, 6.0, 4.0, 2.0, 2.0, 3.0, 4.0, 6.0, 4.0, 2.0, 5.0, 2.0, 6.0, 2.0, 2.0, 3.0, 5.0, 2.0, 4.0, 2.0, 5.0, 2.0, 3.0, 7.0, 6.0, 4.0, 5.0, 4.0, 5.0, 1.0, 2.0, 7.0, 6.0, 4.0, 3.0, 4.0, 4.0, 4.0, 5.0, 6.0, 2.0, 6.0, 3.0, 2.0, 6.0, 6.0, 5.0, 5.0, 3.0, 6.0, 4.0, 4.0, 4.0, 3.0, 4.0, 4.0, 3.0, 5.0, 4.0, 6.0, 3.0, 2.0, 4.0, 6.0, 2.0, 3.0, 7.0, 1.0, 4.0, 5.0, 1.0, 6.0, 2.0, 4.0, 5.0, 1.0, 4.0, 7.0, 6.0, 6.0, 1.0, 2.0, 6.0, 3.0, 1.0, 4.0, 2.0, 6.0, 6.0, 4.0, 5.0, 4.0, 1.0, 6.0, 3.0, 4.0, 5.0, 2.0, 6.0, 2.0, 6.0, 5.0, 5.0, 5.0, 4.0, 5.0, 4.0, 5.0, 6.0, 5.0, 3.0, 3.0, 5.0, 5.0, 3.0, 3.0, 1.0, 7.0, 5.0, 4.0, 5.0, 7.0, 5.0, 1.0, 2.0, 2.0, 2.0, 6.0]
# Observed frequency of each outcome value (1-10); presumably the histogram
# of mec10 below -- verify (values sum to 2600).
freq10 = {1.0: 98, 2.0: 197, 3.0: 247, 4.0: 288, 5.0: 285, 6.0: 285, 7.0: 367, 8.0: 323, 9.0: 358, 10.0: 152}
mec10 = [9.0, 6.0, 6.0, 5.0, 3.0, 3.0, 9.0, 5.0, 5.0, 7.0, 2.0, 8.0, 8.0, 2.0, 9.0, 5.0, 2.0, 9.0, 3.0, 7.0, 9.0, 6.0, 8.0, 8.0, 10.0, 4.0, 2.0, 6.0, 6.0, 4.0, 2.0, 5.0, 8.0, 5.0, 9.0, 7.0, 10.0, 6.0, 8.0, 8.0, 6.0, 7.0, 4.0, 2.0, 7.0, 4.0, 7.0, 6.0, 5.0, 3.0, 3.0, 7.0, 8.0, 5.0, 4.0, 5.0, 5.0, 2.0, 8.0, 10.0, 8.0, 8.0, 5.0, 9.0, 4.0, 6.0, 9.0, 4.0, 5.0, 8.0, 6.0, 3.0, 5.0, 3.0, 9.0, 6.0, 4.0, 3.0, 9.0, 2.0, 1.0, 8.0, 8.0, 4.0, 10.0, 6.0, 5.0, 3.0, 9.0, 1.0, 1.0, 5.0, 4.0, 6.0, 7.0, 6.0, 9.0, 2.0, 5.0, 2.0, 9.0, 9.0, 6.0, 9.0, 6.0, 8.0, 8.0, 2.0, 2.0, 9.0, 5.0, 2.0, 7.0, 9.0, 3.0, 8.0, 6.0, 5.0, 1.0, 3.0, 10.0, 5.0, 10.0, 7.0, 7.0, 8.0, 9.0, 2.0, 5.0, 5.0, 3.0, 8.0, 9.0, 9.0, 8.0, 7.0, 9.0, 6.0, 8.0, 3.0, 4.0, 9.0, 7.0, 2.0, 1.0, 6.0, 8.0, 4.0, 5.0, 10.0, 5.0, 3.0, 7.0, 6.0, 8.0, 1.0, 8.0, 7.0, 10.0, 6.0, 6.0, 7.0, 6.0, 6.0, 6.0, 3.0, 8.0, 7.0, 8.0, 7.0, 2.0, 6.0, 2.0, 8.0, 7.0, 5.0, 10.0, 6.0, 3.0, 6.0, 7.0, 7.0, 4.0, 5.0, 3.0, 4.0, 1.0, 5.0, 7.0, 7.0, 7.0, 6.0, 8.0, 8.0, 6.0, 3.0, 9.0, 4.0, 5.0, 8.0, 8.0, 7.0, 7.0, 9.0, 7.0, 7.0, 10.0, 5.0, 1.0, 10.0, 6.0, 3.0, 1.0, 5.0, 4.0, 5.0, 5.0, 9.0, 8.0, 5.0, 2.0, 8.0, 8.0, 5.0, 9.0, 4.0, 3.0, 7.0, 5.0, 6.0, 9.0, 9.0, 9.0, 4.0, 8.0, 5.0, 5.0, 6.0, 3.0, 9.0, 4.0, 5.0, 5.0, 9.0, 7.0, 8.0, 8.0, 7.0, 9.0, 4.0, 5.0, 3.0, 3.0, 1.0, 4.0, 2.0, 6.0, 8.0, 1.0, 9.0, 10.0, 9.0, 7.0, 8.0, 1.0, 10.0, 6.0, 8.0, 1.0, 7.0, 9.0, 10.0, 3.0, 3.0, 5.0, 5.0, 5.0, 6.0, 9.0, 7.0, 6.0, 10.0, 9.0, 8.0, 10.0, 8.0, 8.0, 9.0, 3.0, 6.0, 7.0, 9.0, 6.0, 4.0, 8.0, 2.0, 8.0, 8.0, 6.0, 9.0, 2.0, 2.0, 4.0, 3.0, 6.0, 6.0, 2.0, 7.0, 4.0, 7.0, 7.0, 7.0, 7.0, 2.0, 9.0, 7.0, 9.0, 9.0, 5.0, 9.0, 5.0, 5.0, 3.0, 2.0, 10.0, 1.0, 8.0, 4.0, 8.0, 2.0, 8.0, 4.0, 4.0, 4.0, 7.0, 8.0, 7.0, 5.0, 7.0, 3.0, 7.0, 8.0, 10.0, 4.0, 9.0, 1.0, 7.0, 8.0, 8.0, 7.0, 6.0, 4.0, 5.0, 3.0, 7.0, 3.0, 5.0, 8.0, 8.0, 8.0, 7.0, 3.0, 4.0, 7.0, 6.0, 6.0, 5.0, 9.0, 6.0, 4.0, 3.0, 9.0, 5.0, 7.0, 8.0, 9.0, 6.0, 8.0, 3.0, 10.0, 1.0, 3.0, 7.0, 6.0, 6.0, 6.0, 7.0, 5.0, 9.0, 9.0, 7.0, 4.0, 9.0, 7.0, 
2.0, 7.0, 7.0, 3.0, 5.0, 7.0, 2.0, 9.0, 5.0, 5.0, 4.0, 9.0, 6.0, 3.0, 2.0, 4.0, 8.0, 5.0, 5.0, 2.0, 10.0, 8.0, 3.0, 8.0, 5.0, 1.0, 9.0, 6.0, 5.0, 6.0, 7.0, 3.0, 3.0, 3.0, 6.0, 4.0, 6.0, 10.0, 5.0, 7.0, 3.0, 6.0, 8.0, 10.0, 3.0, 8.0, 6.0, 5.0, 6.0, 2.0, 8.0, 4.0, 8.0, 10.0, 4.0, 1.0, 6.0, 6.0, 10.0, 8.0, 3.0, 3.0, 10.0, 8.0, 5.0, 4.0, 5.0, 8.0, 4.0, 1.0, 1.0, 8.0, 8.0, 7.0, 10.0, 8.0, 6.0, 5.0, 10.0, 7.0, 9.0, 10.0, 7.0, 6.0, 3.0, 7.0, 2.0, 6.0, 9.0, 4.0, 1.0, 5.0, 7.0, 5.0, 9.0, 10.0, 5.0, 9.0, 3.0, 8.0, 6.0, 4.0, 6.0, 5.0, 2.0, 9.0, 5.0, 5.0, 2.0, 9.0, 7.0, 10.0, 5.0, 5.0, 7.0, 9.0, 6.0, 9.0, 5.0, 8.0, 7.0, 8.0, 4.0, 5.0, 9.0, 1.0, 7.0, 7.0, 9.0, 6.0, 9.0, 6.0, 9.0, 1.0, 6.0, 3.0, 8.0, 4.0, 6.0, 7.0, 4.0, 6.0, 9.0, 8.0, 7.0, 6.0, 9.0, 8.0, 4.0, 7.0, 6.0, 7.0, 6.0, 9.0, 3.0, 7.0, 6.0, 8.0, 2.0, 1.0, 5.0, 9.0, 2.0, 4.0, 10.0, 6.0, 8.0, 10.0, 1.0, 7.0, 2.0, 10.0, 4.0, 4.0, 3.0, 9.0, 10.0, 9.0, 6.0, 4.0, 8.0, 4.0, 5.0, 10.0, 3.0, 1.0, 4.0, 7.0, 5.0, 6.0, 2.0, 5.0, 8.0, 7.0, 7.0, 8.0, 5.0, 3.0, 7.0, 8.0, 10.0, 5.0, 7.0, 3.0, 9.0, 5.0, 6.0, 10.0, 7.0, 7.0, 10.0, 1.0, 8.0, 3.0, 9.0, 8.0, 4.0, 10.0, 8.0, 10.0, 8.0, 6.0, 3.0, 9.0, 4.0, 2.0, 1.0, 8.0, 3.0, 7.0, 8.0, 3.0, 7.0, 10.0, 1.0, 9.0, 6.0, 2.0, 10.0, 3.0, 4.0, 10.0, 10.0, 7.0, 7.0, 3.0, 7.0, 6.0, 10.0, 10.0, 8.0, 6.0, 5.0, 6.0, 2.0, 9.0, 8.0, 3.0, 9.0, 8.0, 5.0, 6.0, 7.0, 9.0, 8.0, 2.0, 6.0, 9.0, 10.0, 7.0, 6.0, 3.0, 8.0, 10.0, 8.0, 2.0, 4.0, 4.0, 4.0, 5.0, 1.0, 8.0, 3.0, 5.0, 10.0, 10.0, 8.0, 3.0, 6.0, 9.0, 9.0, 2.0, 2.0, 8.0, 8.0, 5.0, 8.0, 8.0, 5.0, 5.0, 4.0, 4.0, 4.0, 8.0, 10.0, 8.0, 1.0, 7.0, 2.0, 3.0, 7.0, 7.0, 7.0, 8.0, 10.0, 6.0, 6.0, 7.0, 9.0, 8.0, 7.0, 10.0, 9.0, 5.0, 3.0, 9.0, 9.0, 8.0, 6.0, 9.0, 9.0, 4.0, 4.0, 6.0, 3.0, 2.0, 3.0, 4.0, 6.0, 4.0, 8.0, 6.0, 1.0, 4.0, 9.0, 3.0, 9.0, 8.0, 4.0, 7.0, 5.0, 9.0, 9.0, 4.0, 7.0, 3.0, 1.0, 9.0, 10.0, 3.0, 5.0, 1.0, 3.0, 3.0, 5.0, 2.0, 4.0, 4.0, 4.0, 7.0, 8.0, 8.0, 9.0, 3.0, 9.0, 1.0, 10.0, 8.0, 10.0, 8.0, 8.0, 1.0, 5.0, 3.0, 3.0, 1.0, 8.0, 4.0, 5.0, 9.0, 7.0, 2.0, 
3.0, 6.0, 5.0, 4.0, 2.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 6.0, 8.0, 10.0, 6.0, 7.0, 7.0, 2.0, 1.0, 5.0, 5.0, 10.0, 3.0, 6.0, 5.0, 8.0, 8.0, 6.0, 8.0, 10.0, 2.0, 7.0, 9.0, 7.0, 7.0, 4.0, 3.0, 7.0, 6.0, 1.0, 10.0, 4.0, 4.0, 8.0, 3.0, 8.0, 9.0, 3.0, 7.0, 9.0, 8.0, 5.0, 7.0, 4.0, 7.0, 2.0, 4.0, 5.0, 4.0, 6.0, 10.0, 2.0, 6.0, 8.0, 1.0, 5.0, 5.0, 7.0, 1.0, 4.0, 4.0, 4.0, 6.0, 7.0, 6.0, 8.0, 9.0, 8.0, 6.0, 6.0, 1.0, 5.0, 9.0, 9.0, 4.0, 8.0, 6.0, 4.0, 4.0, 7.0, 9.0, 4.0, 10.0, 6.0, 5.0, 7.0, 10.0, 4.0, 6.0, 7.0, 8.0, 4.0, 7.0, 5.0, 5.0, 2.0, 4.0, 7.0, 1.0, 7.0, 1.0, 9.0, 7.0, 9.0, 3.0, 4.0, 10.0, 7.0, 9.0, 2.0, 9.0, 5.0, 5.0, 3.0, 8.0, 2.0, 9.0, 8.0, 8.0, 8.0, 9.0, 3.0, 7.0, 9.0, 3.0, 9.0, 7.0, 2.0, 5.0, 3.0, 3.0, 8.0, 4.0, 9.0, 7.0, 4.0, 4.0, 9.0, 7.0, 2.0, 2.0, 4.0, 5.0, 8.0, 7.0, 2.0, 5.0, 5.0, 6.0, 9.0, 10.0, 6.0, 5.0, 6.0, 1.0, 8.0, 9.0, 10.0, 4.0, 9.0, 7.0, 5.0, 9.0, 7.0, 3.0, 2.0, 5.0, 5.0, 1.0, 7.0, 4.0, 4.0, 5.0, 7.0, 8.0, 7.0, 4.0, 3.0, 7.0, 8.0, 4.0, 8.0, 9.0, 9.0, 9.0, 6.0, 9.0, 6.0, 6.0, 8.0, 2.0, 9.0, 1.0, 7.0, 4.0, 6.0, 3.0, 4.0, 5.0, 8.0, 4.0, 6.0, 6.0, 6.0, 2.0, 4.0, 7.0, 3.0, 8.0, 3.0, 2.0, 10.0, 9.0, 10.0, 10.0, 3.0, 7.0, 1.0, 2.0, 1.0, 2.0, 4.0, 1.0, 6.0, 1.0, 1.0, 7.0, 10.0, 8.0, 8.0, 3.0, 6.0, 1.0, 4.0, 9.0, 4.0, 6.0, 1.0, 10.0, 8.0, 10.0, 9.0, 9.0, 5.0, 4.0, 5.0, 9.0, 7.0, 4.0, 8.0, 8.0, 7.0, 5.0, 8.0, 8.0, 9.0, 5.0, 1.0, 7.0, 10.0, 6.0, 2.0, 7.0, 8.0, 8.0, 3.0, 7.0, 7.0, 5.0, 9.0, 6.0, 8.0, 1.0, 7.0, 2.0, 9.0, 2.0, 8.0, 1.0, 8.0, 2.0, 7.0, 2.0, 4.0, 6.0, 9.0, 4.0, 3.0, 9.0, 5.0, 7.0, 9.0, 7.0, 6.0, 8.0, 7.0, 8.0, 9.0, 8.0, 4.0, 7.0, 5.0, 9.0, 4.0, 2.0, 8.0, 9.0, 6.0, 1.0, 5.0, 9.0, 7.0, 8.0, 8.0, 8.0, 8.0, 5.0, 6.0, 2.0, 7.0, 9.0, 9.0, 4.0, 5.0, 7.0, 7.0, 4.0, 5.0, 2.0, 3.0, 9.0, 2.0, 7.0, 2.0, 5.0, 4.0, 4.0, 5.0, 4.0, 7.0, 9.0, 6.0, 7.0, 6.0, 5.0, 6.0, 7.0, 7.0, 8.0, 4.0, 6.0, 3.0, 3.0, 6.0, 4.0, 6.0, 3.0, 5.0, 7.0, 8.0, 2.0, 3.0, 5.0, 9.0, 7.0, 7.0, 9.0, 7.0, 8.0, 5.0, 4.0, 4.0, 9.0, 7.0, 10.0, 8.0, 3.0, 2.0, 4.0, 5.0, 3.0, 6.0, 5.0, 9.0, 6.0, 3.0, 
9.0, 5.0, 4.0, 6.0, 3.0, 5.0, 7.0, 9.0, 2.0, 6.0, 6.0, 2.0, 7.0, 9.0, 2.0, 7.0, 10.0, 9.0, 7.0, 5.0, 8.0, 5.0, 9.0, 3.0, 9.0, 9.0, 5.0, 6.0, 7.0, 6.0, 6.0, 6.0, 5.0, 3.0, 3.0, 5.0, 5.0, 5.0, 9.0, 7.0, 5.0, 6.0, 8.0, 5.0, 2.0, 3.0, 6.0, 9.0, 6.0, 4.0, 4.0, 7.0, 8.0, 5.0, 8.0, 7.0, 9.0, 9.0, 2.0, 7.0, 7.0, 7.0, 3.0, 1.0, 4.0, 6.0, 4.0, 10.0, 8.0, 10.0, 6.0, 3.0, 6.0, 5.0, 2.0, 1.0, 8.0, 3.0, 5.0, 4.0, 4.0, 8.0, 3.0, 2.0, 5.0, 9.0, 8.0, 4.0, 4.0, 8.0, 9.0, 9.0, 5.0, 5.0, 7.0, 2.0, 4.0, 9.0, 7.0, 6.0, 3.0, 9.0, 8.0, 10.0, 5.0, 4.0, 1.0, 9.0, 7.0, 3.0, 6.0, 7.0, 5.0, 6.0, 9.0, 7.0, 5.0, 9.0, 6.0, 4.0, 7.0, 9.0, 9.0, 9.0, 8.0, 7.0, 6.0, 3.0, 7.0, 9.0, 5.0, 7.0, 7.0, 9.0, 8.0, 3.0, 10.0, 1.0, 9.0, 7.0, 5.0, 5.0, 6.0, 10.0, 9.0, 9.0, 8.0, 2.0, 6.0, 6.0, 3.0, 9.0, 7.0, 2.0, 6.0, 4.0, 10.0, 3.0, 2.0, 9.0, 8.0, 3.0, 5.0, 9.0, 6.0, 7.0, 8.0, 7.0, 3.0, 4.0, 5.0, 5.0, 8.0, 9.0, 7.0, 4.0, 9.0, 8.0, 9.0, 5.0, 3.0, 3.0, 6.0, 3.0, 3.0, 6.0, 2.0, 9.0, 6.0, 6.0, 9.0, 3.0, 7.0, 9.0, 4.0, 7.0, 9.0, 9.0, 3.0, 2.0, 2.0, 7.0, 6.0, 2.0, 6.0, 3.0, 7.0, 3.0, 2.0, 9.0, 9.0, 7.0, 7.0, 9.0, 7.0, 3.0, 7.0, 2.0, 3.0, 4.0, 9.0, 7.0, 6.0, 5.0, 7.0, 2.0, 8.0, 7.0, 5.0, 1.0, 3.0, 3.0, 3.0, 7.0, 4.0, 7.0, 9.0, 8.0, 7.0, 1.0, 2.0, 9.0, 3.0, 2.0, 3.0, 4.0, 9.0, 1.0, 3.0, 9.0, 5.0, 8.0, 7.0, 5.0, 2.0, 2.0, 5.0, 2.0, 9.0, 7.0, 3.0, 9.0, 9.0, 8.0, 1.0, 4.0, 6.0, 7.0, 7.0, 5.0, 3.0, 8.0, 6.0, 6.0, 7.0, 6.0, 8.0, 7.0, 9.0, 8.0, 8.0, 10.0, 6.0, 4.0, 10.0, 7.0, 4.0, 9.0, 8.0, 9.0, 9.0, 5.0, 7.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 3.0, 10.0, 3.0, 7.0, 5.0, 4.0, 9.0, 10.0, 2.0, 4.0, 7.0, 8.0, 4.0, 3.0, 8.0, 5.0, 8.0, 7.0, 3.0, 9.0, 9.0, 7.0, 6.0, 6.0, 4.0, 10.0, 8.0, 5.0, 1.0, 2.0, 1.0, 4.0, 5.0, 3.0, 7.0, 4.0, 3.0, 7.0, 9.0, 4.0, 6.0, 4.0, 5.0, 8.0, 8.0, 4.0, 9.0, 7.0, 6.0, 4.0, 9.0, 7.0, 7.0, 8.0, 8.0, 8.0, 7.0, 8.0, 2.0, 7.0, 10.0, 7.0, 7.0, 8.0, 4.0, 4.0, 10.0, 4.0, 9.0, 4.0, 8.0, 6.0, 3.0, 9.0, 5.0, 8.0, 4.0, 3.0, 6.0, 4.0, 10.0, 5.0, 5.0, 3.0, 4.0, 2.0, 4.0, 3.0, 6.0, 6.0, 2.0, 8.0, 6.0, 5.0, 5.0, 1.0, 5.0, 9.0, 
9.0, 4.0, 7.0, 1.0, 7.0, 4.0, 8.0, 5.0, 8.0, 10.0, 4.0, 8.0, 6.0, 9.0, 3.0, 4.0, 9.0, 7.0, 7.0, 7.0, 10.0, 8.0, 8.0, 3.0, 8.0, 4.0, 4.0, 3.0, 10.0, 6.0, 5.0, 9.0, 7.0, 6.0, 9.0, 2.0, 3.0, 6.0, 6.0, 8.0, 2.0, 5.0, 7.0, 3.0, 7.0, 4.0, 7.0, 4.0, 7.0, 8.0, 6.0, 5.0, 7.0, 9.0, 7.0, 6.0, 3.0, 3.0, 4.0, 10.0, 5.0, 6.0, 3.0, 10.0, 1.0, 2.0, 7.0, 5.0, 6.0, 8.0, 9.0, 9.0, 4.0, 4.0, 4.0, 4.0, 2.0, 4.0, 9.0, 6.0, 4.0, 2.0, 5.0, 6.0, 3.0, 9.0, 3.0, 5.0, 6.0, 10.0, 3.0, 10.0, 4.0, 3.0, 1.0, 2.0, 5.0, 7.0, 7.0, 7.0, 7.0, 5.0, 6.0, 2.0, 2.0, 9.0, 4.0, 2.0, 9.0, 4.0, 9.0, 2.0, 2.0, 3.0, 3.0, 3.0, 8.0, 5.0, 8.0, 9.0, 10.0, 7.0, 6.0, 2.0, 10.0, 8.0, 7.0, 7.0, 4.0, 3.0, 3.0, 7.0, 5.0, 8.0, 5.0, 8.0, 7.0, 9.0, 7.0, 4.0, 8.0, 8.0, 3.0, 10.0, 6.0, 6.0, 8.0, 3.0, 1.0, 5.0, 5.0, 10.0, 7.0, 4.0, 9.0, 4.0, 10.0, 10.0, 2.0, 8.0, 5.0, 6.0, 9.0, 3.0, 4.0, 8.0, 2.0, 9.0, 6.0, 8.0, 8.0, 3.0, 10.0, 8.0, 8.0, 8.0, 10.0, 5.0, 8.0, 8.0, 6.0, 4.0, 9.0, 4.0, 7.0, 4.0, 9.0, 9.0, 5.0, 5.0, 4.0, 9.0, 1.0, 7.0, 8.0, 7.0, 6.0, 4.0, 10.0, 9.0, 2.0, 4.0, 2.0, 2.0, 3.0, 4.0, 6.0, 2.0, 9.0, 8.0, 7.0, 1.0, 4.0, 2.0, 7.0, 9.0, 8.0, 9.0, 3.0, 9.0, 3.0, 10.0, 5.0, 6.0, 7.0, 8.0, 6.0, 7.0, 8.0, 9.0, 6.0, 9.0, 6.0, 6.0, 5.0, 3.0, 7.0, 9.0, 8.0, 9.0, 2.0, 8.0, 9.0, 9.0, 8.0, 6.0, 3.0, 4.0, 6.0, 3.0, 2.0, 4.0, 10.0, 8.0, 7.0, 2.0, 7.0, 4.0, 8.0, 2.0, 10.0, 5.0, 6.0, 5.0, 2.0, 7.0, 5.0, 4.0, 7.0, 10.0, 3.0, 3.0, 5.0, 9.0, 6.0, 10.0, 10.0, 9.0, 9.0, 3.0, 1.0, 5.0, 2.0, 5.0, 6.0, 9.0, 4.0, 4.0, 6.0, 7.0, 6.0, 5.0, 7.0, 8.0, 9.0, 7.0, 9.0, 8.0, 4.0, 3.0, 9.0, 5.0, 5.0, 8.0, 10.0, 3.0, 2.0, 9.0, 1.0, 4.0, 1.0, 2.0, 2.0, 9.0, 8.0, 6.0, 2.0, 5.0, 7.0, 5.0, 9.0, 2.0, 7.0, 2.0, 8.0, 9.0, 3.0, 2.0, 7.0, 7.0, 2.0, 2.0, 7.0, 6.0, 8.0, 2.0, 4.0, 10.0, 4.0, 7.0, 8.0, 3.0, 9.0, 3.0, 5.0, 2.0, 6.0, 7.0, 6.0, 4.0, 8.0, 6.0, 7.0, 3.0, 3.0, 7.0, 5.0, 2.0, 3.0, 6.0, 9.0, 9.0, 9.0, 9.0, 7.0, 2.0, 7.0, 7.0, 8.0, 7.0, 5.0, 7.0, 6.0, 5.0, 5.0, 9.0, 3.0, 9.0, 7.0, 5.0, 8.0, 5.0, 9.0, 5.0, 6.0, 2.0, 8.0, 1.0, 7.0, 2.0, 3.0, 8.0, 7.0, 8.0, 2.0, 
7.0, 8.0, 9.0, 9.0, 6.0, 4.0, 4.0, 7.0, 9.0, 9.0, 4.0, 5.0, 1.0, 9.0, 4.0, 3.0, 6.0, 3.0, 4.0, 1.0, 4.0, 8.0, 2.0, 2.0, 10.0, 4.0, 9.0, 8.0, 7.0, 7.0, 9.0, 7.0, 10.0, 8.0, 4.0, 7.0, 4.0, 7.0, 9.0, 2.0, 2.0, 5.0, 6.0, 10.0, 7.0, 10.0, 4.0, 6.0, 6.0, 5.0, 9.0, 8.0, 8.0, 7.0, 9.0, 8.0, 2.0, 6.0, 10.0, 9.0, 8.0, 9.0, 9.0, 5.0, 2.0, 3.0, 8.0, 8.0, 2.0, 10.0, 1.0, 7.0, 9.0, 6.0, 9.0, 5.0, 10.0, 2.0, 10.0, 7.0, 6.0, 7.0, 4.0, 4.0, 10.0, 5.0, 5.0, 7.0, 9.0, 7.0, 4.0, 10.0, 10.0, 5.0, 9.0, 10.0, 4.0, 7.0, 5.0, 5.0, 5.0, 1.0, 9.0, 4.0, 2.0, 4.0, 4.0, 2.0, 2.0, 7.0, 7.0, 3.0, 5.0, 9.0, 2.0, 10.0, 4.0, 5.0, 10.0, 8.0, 3.0, 7.0, 10.0, 9.0, 7.0, 6.0, 5.0, 7.0, 3.0, 8.0, 5.0, 6.0, 4.0, 4.0, 2.0, 4.0, 3.0, 9.0, 1.0, 8.0, 7.0, 7.0, 9.0, 8.0, 2.0, 4.0, 8.0, 7.0, 8.0, 10.0, 8.0, 7.0, 9.0, 9.0, 3.0, 4.0, 10.0, 5.0, 2.0, 5.0, 8.0, 9.0, 6.0, 10.0, 7.0, 3.0, 5.0, 9.0, 8.0, 6.0, 9.0, 9.0, 2.0, 4.0, 8.0, 8.0, 3.0, 9.0, 6.0, 6.0, 7.0, 2.0, 9.0, 2.0, 10.0, 7.0, 7.0, 7.0, 4.0, 9.0, 9.0, 3.0, 8.0, 3.0, 4.0, 1.0, 9.0, 3.0, 8.0, 9.0, 3.0, 3.0, 1.0, 9.0, 9.0, 4.0, 3.0, 7.0, 6.0, 9.0, 7.0, 6.0, 4.0, 9.0, 4.0, 5.0, 2.0, 3.0, 4.0, 9.0, 9.0, 7.0, 7.0, 4.0, 6.0, 8.0, 10.0, 7.0, 8.0, 7.0, 4.0, 2.0, 7.0, 8.0, 9.0, 8.0, 4.0, 8.0, 8.0, 5.0, 7.0, 6.0, 5.0, 4.0, 9.0, 7.0, 8.0, 4.0, 4.0, 6.0, 8.0, 7.0, 9.0, 5.0, 5.0, 7.0, 9.0, 10.0, 2.0, 1.0, 10.0, 2.0, 5.0, 9.0, 1.0, 4.0, 3.0, 9.0, 5.0, 9.0, 9.0, 1.0, 10.0, 9.0, 9.0, 10.0, 1.0, 4.0, 4.0, 3.0, 8.0, 3.0, 4.0, 9.0, 2.0, 6.0, 9.0, 4.0, 6.0, 7.0, 8.0, 4.0, 7.0, 9.0, 10.0, 3.0, 9.0, 2.0, 5.0, 2.0, 3.0, 4.0, 7.0, 5.0, 6.0, 9.0, 6.0, 9.0, 6.0, 8.0, 9.0, 8.0, 5.0, 2.0, 7.0, 3.0, 7.0, 10.0, 5.0, 2.0, 6.0, 9.0, 9.0, 4.0, 3.0, 5.0, 5.0, 6.0, 3.0, 9.0, 10.0, 6.0, 9.0, 3.0, 3.0, 3.0, 5.0, 5.0, 4.0, 7.0, 8.0, 3.0, 7.0, 6.0, 9.0, 6.0, 10.0, 5.0, 7.0, 9.0, 8.0, 5.0, 6.0, 8.0, 4.0, 6.0, 6.0, 9.0, 6.0, 4.0, 8.0, 5.0, 10.0, 2.0, 4.0, 4.0, 7.0, 6.0, 8.0, 6.0, 3.0, 7.0, 7.0, 7.0, 3.0, 4.0, 5.0, 9.0, 4.0, 6.0, 9.0, 1.0, 9.0, 6.0, 5.0, 5.0, 2.0, 2.0, 6.0, 3.0, 9.0, 6.0, 4.0, 8.0, 
9.0, 5.0, 3.0, 8.0, 4.0, 8.0, 1.0, 5.0, 9.0, 6.0, 2.0, 3.0, 6.0, 3.0, 9.0, 10.0, 4.0, 7.0, 5.0, 5.0, 8.0, 9.0, 6.0, 4.0, 4.0, 9.0, 9.0, 2.0, 7.0, 1.0, 9.0, 5.0, 9.0, 6.0, 2.0, 2.0, 4.0, 1.0, 8.0, 3.0, 9.0, 8.0, 2.0, 7.0, 5.0, 7.0, 2.0, 5.0, 6.0, 9.0, 7.0, 7.0, 5.0, 4.0, 3.0, 8.0, 6.0, 9.0, 10.0, 4.0, 1.0, 9.0, 3.0, 7.0, 7.0, 3.0, 6.0, 10.0, 9.0, 5.0, 9.0, 3.0, 4.0, 5.0, 4.0, 8.0, 7.0, 9.0, 8.0, 7.0, 5.0, 1.0, 8.0, 6.0, 2.0, 4.0, 8.0, 8.0, 5.0, 3.0, 3.0, 2.0, 7.0, 3.0, 3.0, 9.0, 5.0, 6.0, 9.0, 8.0, 2.0, 9.0, 5.0, 7.0, 7.0, 6.0, 7.0, 5.0, 7.0, 4.0, 9.0, 3.0, 7.0, 8.0, 6.0, 3.0, 6.0, 4.0, 7.0, 4.0, 3.0, 9.0, 10.0, 7.0, 10.0, 5.0, 8.0, 9.0, 2.0, 3.0, 8.0, 8.0, 9.0, 3.0, 9.0, 5.0, 4.0, 6.0, 7.0, 10.0, 5.0, 8.0, 8.0, 7.0, 1.0, 4.0, 4.0, 10.0, 7.0, 2.0, 4.0, 3.0, 4.0, 4.0, 8.0, 8.0, 3.0, 3.0, 6.0, 2.0, 9.0, 4.0, 6.0, 6.0, 1.0, 8.0, 5.0, 8.0, 5.0, 2.0, 4.0, 9.0, 6.0, 6.0, 3.0, 4.0, 3.0, 7.0, 7.0, 9.0, 5.0, 3.0, 5.0, 5.0, 10.0, 2.0, 4.0, 8.0, 8.0, 7.0, 8.0, 10.0, 7.0, 9.0, 8.0, 6.0, 8.0, 9.0, 4.0, 4.0, 3.0, 2.0, 4.0, 4.0, 4.0, 10.0, 7.0, 8.0, 9.0, 6.0, 4.0, 7.0, 2.0, 7.0, 7.0, 6.0, 9.0, 4.0, 7.0, 3.0, 9.0, 6.0, 7.0, 6.0, 3.0, 2.0, 6.0, 8.0, 7.0, 4.0, 5.0, 2.0]
def get_truncated_normal(mean=0, sd=1, low=0, upp=10):
    """Return a frozen truncated-normal distribution on [low, upp].

    scipy.stats.truncnorm takes its truncation bounds in standard-deviation
    units relative to the mean, so the raw [low, upp] interval is converted
    to standardized a/b bounds before freezing the distribution with
    loc=mean and scale=sd.
    """
    lower_bound = (low - mean) / sd
    upper_bound = (upp - mean) / sd
    return truncnorm(lower_bound, upper_bound, loc=mean, scale=sd)
# NOTE(review): this section relies on module-level state created earlier in
# the file: frequency dicts freq4/freq7/freq10 (requests per MEC count), the
# raw per-request samples mec4/mec7/mec10, matplotlib axes ax1/ax2/ax3, and
# np/plt — confirm against the code above this chunk.
# y = get_truncated_normal(mean=2,
# sd=3,
# low=1,
# upp=4).rvs(no_of_req)
#
# # print(y)
#
# y = [round(i) for i in y]
# freq = {i:y.count(i) for i in set(y)}
# --- Panel 1: request distribution for the 4-MEC experiment ---
a, b = list(freq4.keys()), list(freq4.values())
ax1.bar(a, b, label='4 MEC', color='g', alpha=0.3)
# Outline curve over the bars; zero-height endpoints at x=1 and x=4 close the
# line down to the axis on both sides.
ax1.plot([1]+a+[4], [0]+b+[0], 'g-.^', lw=2, alpha=0.6)
#ax1.legend()
ax1.set_ylabel('No of Requests', fontdict={'weight': 'medium', 'size': 14})
ax1.set_xlabel('No of MECs', fontdict={'weight': 'medium', 'size': 14})
# Integer ticks spanning the observed sample range.
ax1.set_xticks(np.arange(min(mec4), max(mec4) + 1,1))
ax1.set_title("Distribution for 4 MECs", fontdict={'weight': 'bold', 'size': 17})
#ax2.set_xticks([min(y) - 1, max(y) + 1])
#snc.kdeplot(y, label='sd=1')
# Dump the 4-MEC data — presumably so it can be pasted back as the hard-coded
# freq4/mec4 literals seen elsewhere in this file; verify intent with author.
print('freq4 =',freq4)
print(f'mec4 = {mec4}')
#print(y)
# z = get_truncated_normal(mean=5,
# sd=4,
# low=1,
# upp=7).rvs(no_of_req)
# #snc.kdeplot(z, label='sd=2')
# z = [round(i) for i in z]
# freq = {i:z.count(i) for i in set(z)}
# print('freq7 =',freq)
# print(f'mec7 = {z}')
# --- Panel 2: request distribution for the 7-MEC experiment ---
x, y = list(freq7.keys()), list(freq7.values())
ax2.bar(x, y, width=.7, color='r', alpha=0.3)
ax2.plot([1]+x+[7], [0]+y+[0], 'r-.o', lw=2, alpha=0.6)
#ax2.legend()
ax2.set_ylabel('No of Requests', fontdict={'weight': 'medium', 'size': 14})
ax2.set_xlabel('No of MECs', fontdict={'weight': 'medium', 'size': 14})
ax2.set_xticks(np.arange(min(mec7), max(mec7) + 1,1))
ax2.set_title("Distribution for 7 MECs", fontdict={'weight': 'bold', 'size': 17})
# w = get_truncated_normal(mean=10,
# sd=8,
# low=1,
# upp=10).rvs(no_of_req)
# #ax2.set_xticks([min(z) - 1, max(z) + 1])
# #snc.kdeplot(x, label='sd=3')
# w = [round(i) for i in w]
# freq = {i:w.count(i) for i in set(w)}
# print('freq10 =',freq)
# print(f'mec10 = {w}')
#ax3.hist(w, label='15 MEC', color='b', alpha=0.3)
# --- Panel 3: request distribution for the 10-MEC experiment ---
x, y = list(freq10.keys()), list(freq10.values())
ax3.bar(x, y, width=.5, color='b', alpha=0.3)
ax3.plot([1]+x+[10], [0]+y+[0], 'b-.s', lw=2, alpha=0.6)
#ax3.legend()
ax3.set_ylabel('No of Requests', fontdict={'weight': 'medium', 'size': 14})
ax3.set_xlabel('No of MECs', fontdict={'weight': 'medium', 'size': 14})
ax3.set_xticks(np.arange(min(mec10), max(mec10) + 1, 1))
ax3.set_title("Distribution for 10 MECs", fontdict={'weight': 'bold', 'size': 17})
# Render all three panels.
plt.show()
# New distribution
# freq4 = {1.0: 450, 2.0: 894, 3.0: 869, 4.0: 387}
# mec4 = [3.0, 4.0, 3.0, 2.0, 4.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 4.0, 3.0, 2.0, 2.0, 1.0, 1.0, 4.0, 4.0, 3.0, 4.0, 2.0, 2.0, 3.0, 3.0, 1.0, 3.0, 3.0, 3.0, 1.0, 2.0, 3.0, 1.0, 3.0, 3.0, 3.0, 2.0, 1.0, 1.0, 3.0, 1.0, 2.0, 2.0, 4.0, 2.0, 3.0, 2.0, 4.0, 3.0, 2.0, 4.0, 4.0, 2.0, 3.0, 3.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 3.0, 1.0, 1.0, 1.0, 4.0, 2.0, 1.0, 4.0, 1.0, 2.0, 1.0, 2.0, 4.0, 4.0, 1.0, 4.0, 3.0, 1.0, 3.0, 4.0, 4.0, 2.0, 3.0, 3.0, 2.0, 3.0, 3.0, 3.0, 2.0, 3.0, 4.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0, 2.0, 2.0, 4.0, 3.0, 2.0, 2.0, 1.0, 3.0, 4.0, 1.0, 1.0, 2.0, 1.0, 3.0, 1.0, 3.0, 4.0, 3.0, 4.0, 3.0, 2.0, 3.0, 4.0, 3.0, 4.0, 4.0, 2.0, 4.0, 1.0, 4.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 2.0, 2.0, 1.0, 3.0, 2.0, 1.0, 3.0, 3.0, 3.0, 2.0, 4.0, 2.0, 1.0, 2.0, 3.0, 3.0, 2.0, 2.0, 2.0, 4.0, 3.0, 2.0, 3.0, 3.0, 1.0, 3.0, 4.0, 2.0, 3.0, 3.0, 2.0, 4.0, 1.0, 3.0, 4.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0, 4.0, 3.0, 2.0, 1.0, 2.0, 2.0, 3.0, 1.0, 3.0, 2.0, 4.0, 2.0, 3.0, 4.0, 4.0, 3.0, 3.0, 2.0, 3.0, 1.0, 2.0, 3.0, 2.0, 3.0, 1.0, 2.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 1.0, 4.0, 1.0, 2.0, 4.0, 1.0, 1.0, 2.0, 2.0, 4.0, 2.0, 3.0, 3.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 2.0, 1.0, 3.0, 4.0, 2.0, 2.0, 3.0, 1.0, 4.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 3.0, 1.0, 3.0, 2.0, 2.0, 3.0, 3.0, 1.0, 3.0, 3.0, 4.0, 4.0, 3.0, 3.0, 1.0, 3.0, 3.0, 2.0, 2.0, 3.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0, 3.0, 4.0, 3.0, 3.0, 3.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 2.0, 1.0, 1.0, 2.0, 2.0, 2.0, 4.0, 1.0, 3.0, 3.0, 2.0, 3.0, 3.0, 4.0, 2.0, 2.0, 3.0, 2.0, 2.0, 4.0, 3.0, 2.0, 3.0, 2.0, 3.0, 3.0, 2.0, 4.0, 3.0, 4.0, 1.0, 1.0, 2.0, 3.0, 4.0, 4.0, 1.0, 4.0, 4.0, 2.0, 3.0, 2.0, 2.0, 4.0, 1.0, 2.0, 3.0, 4.0, 3.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0, 1.0, 4.0, 3.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 2.0, 2.0, 3.0, 2.0, 3.0, 1.0, 3.0, 1.0, 4.0, 
2.0, 3.0, 2.0, 1.0, 2.0, 2.0, 3.0, 2.0, 1.0, 3.0, 1.0, 4.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 4.0, 3.0, 3.0, 1.0, 3.0, 4.0, 2.0, 3.0, 2.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2.0, 4.0, 2.0, 1.0, 2.0, 2.0, 3.0, 1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 3.0, 2.0, 4.0, 3.0, 1.0, 3.0, 2.0, 2.0, 3.0, 3.0, 2.0, 2.0, 3.0, 1.0, 2.0, 2.0, 1.0, 4.0, 1.0, 3.0, 2.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 3.0, 2.0, 1.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 3.0, 3.0, 2.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 3.0, 3.0, 4.0, 4.0, 2.0, 2.0, 1.0, 2.0, 1.0, 4.0, 2.0, 3.0, 3.0, 1.0, 3.0, 1.0, 4.0, 1.0, 2.0, 2.0, 3.0, 2.0, 4.0, 3.0, 4.0, 2.0, 2.0, 2.0, 2.0, 4.0, 4.0, 2.0, 3.0, 2.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 1.0, 2.0, 2.0, 2.0, 2.0, 4.0, 1.0, 4.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0, 4.0, 2.0, 4.0, 1.0, 2.0, 1.0, 3.0, 4.0, 1.0, 1.0, 2.0, 3.0, 3.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 4.0, 3.0, 2.0, 3.0, 2.0, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 2.0, 3.0, 2.0, 1.0, 3.0, 2.0, 3.0, 4.0, 4.0, 3.0, 1.0, 3.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 2.0, 3.0, 1.0, 4.0, 2.0, 3.0, 2.0, 3.0, 3.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 3.0, 2.0, 1.0, 2.0, 4.0, 3.0, 2.0, 2.0, 3.0, 3.0, 2.0, 2.0, 2.0, 3.0, 2.0, 1.0, 3.0, 3.0, 1.0, 2.0, 2.0, 1.0, 4.0, 1.0, 2.0, 3.0, 3.0, 1.0, 4.0, 2.0, 2.0, 1.0, 2.0, 3.0, 1.0, 3.0, 4.0, 2.0, 3.0, 2.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 4.0, 1.0, 4.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 2.0, 2.0, 2.0, 4.0, 1.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 2.0, 4.0, 3.0, 3.0, 4.0, 4.0, 2.0, 3.0, 2.0, 3.0, 3.0, 4.0, 1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 3.0, 1.0, 4.0, 3.0, 4.0, 1.0, 1.0, 4.0, 4.0, 3.0, 2.0, 1.0, 3.0, 4.0, 1.0, 2.0, 4.0, 1.0, 2.0, 2.0, 3.0, 2.0, 3.0, 3.0, 2.0, 1.0, 2.0, 3.0, 3.0, 2.0, 2.0, 2.0, 4.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 4.0, 2.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 
2.0, 2.0, 1.0, 3.0, 4.0, 3.0, 3.0, 3.0, 2.0, 3.0, 3.0, 2.0, 1.0, 3.0, 2.0, 3.0, 1.0, 2.0, 2.0, 2.0, 4.0, 3.0, 1.0, 1.0, 1.0, 3.0, 4.0, 4.0, 1.0, 3.0, 1.0, 2.0, 2.0, 2.0, 4.0, 2.0, 4.0, 1.0, 2.0, 4.0, 3.0, 1.0, 4.0, 3.0, 2.0, 1.0, 4.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 2.0, 4.0, 3.0, 3.0, 2.0, 1.0, 3.0, 3.0, 2.0, 3.0, 4.0, 3.0, 1.0, 3.0, 3.0, 1.0, 3.0, 4.0, 3.0, 4.0, 3.0, 2.0, 2.0, 3.0, 3.0, 3.0, 1.0, 2.0, 4.0, 2.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 3.0, 4.0, 3.0, 4.0, 4.0, 4.0, 3.0, 1.0, 3.0, 4.0, 4.0, 1.0, 2.0, 4.0, 3.0, 3.0, 2.0, 3.0, 4.0, 1.0, 1.0, 2.0, 1.0, 1.0, 3.0, 4.0, 4.0, 2.0, 3.0, 4.0, 2.0, 2.0, 3.0, 2.0, 4.0, 1.0, 2.0, 3.0, 1.0, 1.0, 2.0, 1.0, 3.0, 2.0, 1.0, 2.0, 2.0, 2.0, 4.0, 3.0, 3.0, 4.0, 3.0, 2.0, 3.0, 3.0, 1.0, 1.0, 2.0, 3.0, 4.0, 2.0, 2.0, 2.0, 2.0, 3.0, 2.0, 1.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 4.0, 1.0, 3.0, 2.0, 4.0, 3.0, 4.0, 4.0, 4.0, 3.0, 1.0, 2.0, 1.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 1.0, 3.0, 2.0, 2.0, 4.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 4.0, 2.0, 3.0, 1.0, 2.0, 4.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 2.0, 2.0, 1.0, 3.0, 1.0, 2.0, 1.0, 2.0, 3.0, 4.0, 2.0, 4.0, 2.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 4.0, 3.0, 4.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2.0, 2.0, 4.0, 2.0, 2.0, 2.0, 2.0, 2.0, 4.0, 2.0, 1.0, 2.0, 2.0, 1.0, 3.0, 4.0, 2.0, 1.0, 2.0, 2.0, 3.0, 4.0, 3.0, 3.0, 4.0, 3.0, 4.0, 3.0, 2.0, 2.0, 1.0, 2.0, 2.0, 4.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0, 2.0, 4.0, 3.0, 2.0, 2.0, 2.0, 3.0, 4.0, 2.0, 2.0, 4.0, 2.0, 2.0, 2.0, 3.0, 2.0, 2.0, 3.0, 3.0, 1.0, 4.0, 4.0, 3.0, 3.0, 4.0, 2.0, 4.0, 2.0, 2.0, 4.0, 2.0, 3.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 2.0, 2.0, 3.0, 1.0, 1.0, 4.0, 3.0, 3.0, 3.0, 2.0, 1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 4.0, 4.0, 1.0, 4.0, 4.0, 2.0, 1.0, 3.0, 2.0, 2.0, 1.0, 4.0, 2.0, 1.0, 4.0, 3.0, 2.0, 2.0, 1.0, 3.0, 1.0, 1.0, 4.0, 3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 2.0, 3.0, 3.0, 1.0, 1.0, 4.0, 3.0, 2.0, 3.0, 4.0, 3.0, 3.0, 2.0, 2.0, 1.0, 3.0, 3.0, 4.0, 2.0, 3.0, 
4.0, 2.0, 1.0, 2.0, 3.0, 1.0, 3.0, 3.0, 2.0, 4.0, 3.0, 3.0, 4.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 2.0, 2.0, 3.0, 3.0, 2.0, 3.0, 2.0, 1.0, 2.0, 2.0, 2.0, 3.0, 1.0, 3.0, 3.0, 3.0, 4.0, 4.0, 3.0, 1.0, 4.0, 3.0, 1.0, 4.0, 3.0, 3.0, 3.0, 2.0, 4.0, 2.0, 4.0, 3.0, 2.0, 4.0, 3.0, 1.0, 4.0, 4.0, 3.0, 3.0, 2.0, 2.0, 3.0, 1.0, 1.0, 2.0, 3.0, 4.0, 3.0, 2.0, 3.0, 3.0, 2.0, 3.0, 3.0, 3.0, 1.0, 1.0, 3.0, 3.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 1.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 2.0, 2.0, 3.0, 4.0, 2.0, 1.0, 4.0, 2.0, 1.0, 4.0, 3.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 4.0, 1.0, 3.0, 2.0, 4.0, 3.0, 3.0, 3.0, 3.0, 4.0, 1.0, 1.0, 3.0, 4.0, 3.0, 1.0, 2.0, 2.0, 2.0, 2.0, 3.0, 4.0, 1.0, 3.0, 2.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 1.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 4.0, 3.0, 2.0, 4.0, 3.0, 3.0, 1.0, 3.0, 2.0, 1.0, 4.0, 1.0, 2.0, 2.0, 3.0, 1.0, 4.0, 3.0, 2.0, 4.0, 4.0, 1.0, 1.0, 2.0, 2.0, 3.0, 1.0, 4.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 4.0, 4.0, 3.0, 3.0, 3.0, 1.0, 1.0, 4.0, 1.0, 3.0, 3.0, 3.0, 2.0, 4.0, 2.0, 2.0, 1.0, 3.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 3.0, 2.0, 3.0, 4.0, 3.0, 1.0, 4.0, 2.0, 2.0, 4.0, 2.0, 2.0, 1.0, 1.0, 3.0, 3.0, 2.0, 4.0, 2.0, 2.0, 3.0, 1.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0, 3.0, 4.0, 3.0, 3.0, 3.0, 4.0, 2.0, 4.0, 2.0, 1.0, 3.0, 4.0, 3.0, 4.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 1.0, 3.0, 3.0, 1.0, 2.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 4.0, 3.0, 2.0, 4.0, 3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 2.0, 1.0, 3.0, 2.0, 2.0, 4.0, 4.0, 3.0, 2.0, 2.0, 1.0, 3.0, 3.0, 2.0, 2.0, 4.0, 4.0, 3.0, 1.0, 4.0, 3.0, 1.0, 4.0, 2.0, 3.0, 4.0, 3.0, 3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 4.0, 2.0, 3.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 2.0, 2.0, 4.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 1.0, 4.0, 4.0, 3.0, 3.0, 1.0, 1.0, 1.0, 2.0, 1.0, 3.0, 4.0, 3.0, 2.0, 2.0, 2.0, 1.0, 3.0, 2.0, 2.0, 4.0, 3.0, 1.0, 2.0, 1.0, 1.0, 1.0, 3.0, 3.0, 1.0, 4.0, 2.0, 2.0, 1.0, 3.0, 3.0, 4.0, 3.0, 1.0, 2.0, 4.0, 1.0, 3.0, 1.0, 2.0, 3.0, 2.0, 1.0, 1.0, 2.0, 2.0, 4.0, 3.0, 2.0, 3.0, 2.0, 4.0, 3.0, 3.0, 2.0, 2.0, 2.0, 
2.0, 1.0, 3.0, 2.0, 4.0, 2.0, 2.0, 1.0, 3.0, 4.0, 4.0, 2.0, 3.0, 3.0, 2.0, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0, 4.0, 3.0, 3.0, 2.0, 2.0, 2.0, 4.0, 3.0, 2.0, 2.0, 4.0, 4.0, 2.0, 2.0, 2.0, 3.0, 3.0, 1.0, 4.0, 1.0, 3.0, 3.0, 3.0, 4.0, 4.0, 2.0, 2.0, 2.0, 3.0, 2.0, 3.0, 4.0, 1.0, 3.0, 2.0, 3.0, 4.0, 3.0, 3.0, 2.0, 3.0, 3.0, 1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 4.0, 2.0, 4.0, 3.0, 3.0, 3.0, 2.0, 2.0, 1.0, 4.0, 2.0, 3.0, 2.0, 4.0, 4.0, 3.0, 1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 4.0, 3.0, 1.0, 4.0, 2.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 2.0, 2.0, 1.0, 3.0, 3.0, 4.0, 2.0, 3.0, 3.0, 3.0, 2.0, 3.0, 2.0, 1.0, 1.0, 3.0, 2.0, 4.0, 3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 2.0, 2.0, 4.0, 1.0, 3.0, 1.0, 3.0, 2.0, 1.0, 3.0, 2.0, 4.0, 3.0, 1.0, 2.0, 2.0, 3.0, 1.0, 2.0, 4.0, 4.0, 2.0, 1.0, 4.0, 4.0, 3.0, 2.0, 4.0, 1.0, 3.0, 2.0, 4.0, 2.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 2.0, 2.0, 3.0, 3.0, 2.0, 3.0, 3.0, 2.0, 2.0, 1.0, 2.0, 3.0, 1.0, 3.0, 2.0, 3.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 4.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 3.0, 2.0, 1.0, 3.0, 3.0, 4.0, 4.0, 1.0, 3.0, 1.0, 4.0, 3.0, 2.0, 4.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 1.0, 3.0, 4.0, 4.0, 3.0, 3.0, 4.0, 3.0, 3.0, 2.0, 2.0, 2.0, 4.0, 2.0, 2.0, 2.0, 1.0, 2.0, 4.0, 2.0, 2.0, 3.0, 2.0, 3.0, 1.0, 3.0, 3.0, 2.0, 3.0, 1.0, 1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 4.0, 2.0, 1.0, 2.0, 2.0, 2.0, 3.0, 2.0, 3.0, 3.0, 1.0, 2.0, 2.0, 1.0, 3.0, 4.0, 3.0, 3.0, 1.0, 2.0, 2.0, 4.0, 1.0, 3.0, 4.0, 4.0, 3.0, 2.0, 4.0, 1.0, 2.0, 4.0, 4.0, 4.0, 2.0, 3.0, 3.0, 3.0, 2.0, 3.0, 3.0, 3.0, 4.0, 3.0, 3.0, 2.0, 3.0, 2.0, 1.0, 2.0, 4.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 2.0, 3.0, 3.0, 1.0, 3.0, 2.0, 3.0, 3.0, 3.0, 3.0, 2.0, 1.0, 2.0, 4.0, 1.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 2.0, 3.0, 1.0, 2.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0, 2.0, 4.0, 4.0, 4.0, 2.0, 3.0, 2.0, 2.0, 1.0, 1.0, 3.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 3.0, 2.0, 3.0, 2.0, 3.0, 4.0, 2.0, 1.0, 4.0, 3.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 2.0, 4.0, 4.0, 3.0, 4.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0, 2.0, 3.0, 2.0, 2.0, 1.0, 
2.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 1.0, 4.0, 2.0, 3.0, 3.0, 2.0, 1.0, 3.0, 2.0, 3.0, 3.0, 1.0, 3.0, 4.0, 2.0, 3.0, 4.0, 2.0, 1.0, 2.0, 3.0, 3.0, 1.0, 3.0, 3.0, 1.0, 2.0, 2.0, 1.0, 4.0, 3.0, 1.0, 2.0, 3.0, 4.0, 2.0, 2.0, 3.0, 1.0, 1.0, 2.0, 4.0, 2.0, 3.0, 2.0, 3.0, 1.0, 3.0, 3.0, 4.0, 2.0, 3.0, 3.0, 2.0, 3.0, 1.0, 3.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 1.0, 4.0, 2.0, 2.0, 1.0, 2.0, 3.0, 4.0, 4.0, 3.0, 3.0, 4.0, 4.0, 2.0, 3.0, 1.0, 2.0, 1.0, 2.0, 3.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0, 2.0, 1.0, 3.0, 2.0, 4.0, 2.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 4.0, 3.0, 2.0, 4.0, 3.0, 3.0, 2.0, 3.0, 2.0, 3.0, 4.0, 2.0, 3.0, 3.0, 2.0, 2.0, 4.0, 4.0, 2.0, 1.0, 2.0, 3.0, 2.0, 2.0, 1.0, 3.0, 2.0, 1.0, 3.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 1.0, 2.0, 3.0, 3.0, 2.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 1.0, 3.0, 3.0, 3.0, 2.0, 4.0, 1.0, 3.0, 3.0, 4.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 1.0, 2.0, 3.0, 2.0, 3.0, 3.0, 1.0, 2.0, 3.0, 2.0, 4.0, 4.0, 3.0, 3.0, 3.0, 3.0, 2.0, 1.0, 2.0, 4.0, 1.0, 1.0, 4.0, 2.0, 3.0, 3.0, 4.0, 2.0, 2.0, 1.0, 3.0, 3.0, 2.0, 2.0, 3.0, 4.0, 2.0, 1.0, 4.0, 4.0, 3.0, 1.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 1.0, 4.0, 1.0, 2.0, 2.0, 2.0, 3.0, 1.0, 3.0, 3.0, 2.0, 3.0, 1.0, 3.0, 4.0, 4.0, 3.0, 1.0, 2.0, 3.0, 3.0, 1.0, 2.0, 1.0, 2.0, 1.0, 1.0, 3.0, 3.0, 3.0, 2.0, 1.0, 1.0, 3.0, 3.0, 2.0, 2.0, 2.0, 1.0, 3.0, 4.0, 1.0, 4.0, 4.0, 2.0, 4.0, 3.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 4.0, 2.0, 4.0, 2.0, 4.0, 2.0, 2.0, 2.0, 2.0, 1.0, 4.0, 3.0, 3.0, 3.0, 4.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 2.0, 2.0, 2.0, 1.0, 4.0, 2.0, 2.0, 2.0, 1.0, 3.0, 3.0, 1.0, 1.0, 2.0, 1.0, 2.0, 3.0, 1.0, 4.0, 2.0, 3.0, 2.0, 3.0, 3.0, 1.0, 4.0, 2.0, 3.0, 3.0, 3.0, 1.0, 3.0, 2.0, 1.0, 3.0, 3.0, 3.0, 4.0, 3.0, 2.0, 2.0, 3.0, 2.0, 1.0, 4.0, 4.0, 2.0, 2.0, 1.0, 4.0, 3.0, 1.0, 3.0, 4.0, 1.0, 2.0, 1.0, 3.0, 2.0, 1.0, 4.0, 2.0, 3.0, 3.0, 2.0, 
3.0, 1.0, 4.0, 3.0, 4.0, 4.0, 2.0, 2.0, 2.0, 3.0, 1.0, 4.0, 2.0, 4.0, 3.0, 4.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 3.0, 2.0, 2.0, 4.0, 3.0, 2.0, 1.0, 3.0, 2.0, 4.0, 3.0, 1.0, 1.0, 3.0, 3.0, 1.0, 3.0, 2.0, 2.0, 2.0, 4.0, 2.0, 3.0, 2.0, 4.0, 1.0, 1.0, 2.0, 4.0, 3.0, 2.0, 3.0, 2.0, 2.0, 1.0, 4.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 4.0, 2.0, 4.0, 2.0, 3.0, 3.0, 2.0, 4.0, 2.0, 2.0, 2.0, 1.0, 2.0, 4.0, 2.0, 2.0, 2.0, 1.0, 2.0, 4.0, 2.0, 4.0, 3.0, 3.0, 2.0, 1.0, 2.0, 4.0, 4.0, 4.0, 2.0, 3.0, 3.0, 1.0, 2.0, 2.0, 1.0, 3.0, 1.0, 2.0, 2.0, 1.0, 3.0, 1.0, 2.0, 2.0, 1.0, 3.0, 3.0, 3.0, 1.0, 3.0, 2.0, 3.0, 3.0, 3.0, 4.0, 1.0, 1.0, 3.0, 1.0, 3.0, 2.0, 2.0, 1.0, 2.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 1.0, 3.0, 2.0, 2.0, 4.0, 3.0, 2.0, 4.0, 3.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 1.0, 4.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 2.0, 2.0, 1.0, 1.0, 3.0, 3.0, 4.0, 3.0, 3.0, 4.0, 2.0, 4.0, 4.0, 2.0, 3.0, 3.0, 3.0, 3.0, 2.0, 1.0, 2.0, 2.0, 4.0, 1.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 2.0, 3.0, 1.0, 3.0, 3.0, 4.0, 4.0, 1.0, 3.0, 3.0, 4.0]
# freq7 = {1.0: 145, 2.0: 370, 3.0: 410, 4.0: 457, 5.0: 518, 6.0: 461, 7.0: 239}
# mec7 = [6.0, 6.0, 6.0, 4.0, 6.0, 3.0, 2.0, 6.0, 7.0, 5.0, 7.0, 5.0, 5.0, 3.0, 4.0, 4.0, 4.0, 3.0, 1.0, 4.0, 7.0, 3.0, 6.0, 7.0, 7.0, 4.0, 5.0, 3.0, 6.0, 5.0, 4.0, 4.0, 7.0, 3.0, 5.0, 4.0, 4.0, 6.0, 2.0, 6.0, 5.0, 5.0, 2.0, 4.0, 3.0, 3.0, 1.0, 1.0, 2.0, 3.0, 7.0, 3.0, 3.0, 6.0, 4.0, 6.0, 4.0, 5.0, 3.0, 6.0, 7.0, 4.0, 7.0, 2.0, 4.0, 3.0, 3.0, 3.0, 3.0, 5.0, 5.0, 7.0, 7.0, 5.0, 2.0, 5.0, 4.0, 4.0, 7.0, 4.0, 3.0, 3.0, 4.0, 3.0, 4.0, 1.0, 2.0, 1.0, 5.0, 3.0, 4.0, 6.0, 6.0, 4.0, 5.0, 4.0, 6.0, 5.0, 4.0, 6.0, 3.0, 6.0, 3.0, 6.0, 2.0, 6.0, 6.0, 5.0, 4.0, 6.0, 2.0, 3.0, 4.0, 5.0, 2.0, 7.0, 1.0, 7.0, 4.0, 6.0, 6.0, 1.0, 5.0, 6.0, 5.0, 6.0, 4.0, 6.0, 5.0, 4.0, 4.0, 2.0, 6.0, 4.0, 3.0, 3.0, 5.0, 6.0, 7.0, 5.0, 2.0, 6.0, 3.0, 6.0, 6.0, 6.0, 2.0, 1.0, 3.0, 4.0, 5.0, 6.0, 5.0, 3.0, 5.0, 7.0, 7.0, 2.0, 2.0, 4.0, 7.0, 4.0, 6.0, 4.0, 5.0, 4.0, 2.0, 6.0, 2.0, 6.0, 6.0, 4.0, 7.0, 5.0, 4.0, 5.0, 2.0, 4.0, 6.0, 4.0, 5.0, 4.0, 3.0, 6.0, 6.0, 5.0, 3.0, 2.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, 3.0, 6.0, 5.0, 5.0, 2.0, 5.0, 6.0, 5.0, 3.0, 5.0, 5.0, 6.0, 5.0, 6.0, 1.0, 6.0, 3.0, 2.0, 5.0, 2.0, 6.0, 2.0, 4.0, 2.0, 2.0, 4.0, 7.0, 5.0, 5.0, 6.0, 4.0, 6.0, 4.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 2.0, 3.0, 3.0, 4.0, 3.0, 6.0, 2.0, 5.0, 2.0, 5.0, 4.0, 5.0, 6.0, 1.0, 6.0, 5.0, 3.0, 5.0, 5.0, 2.0, 6.0, 2.0, 7.0, 4.0, 2.0, 3.0, 4.0, 5.0, 3.0, 5.0, 5.0, 3.0, 3.0, 5.0, 5.0, 4.0, 6.0, 1.0, 1.0, 2.0, 3.0, 3.0, 4.0, 2.0, 7.0, 4.0, 4.0, 1.0, 3.0, 7.0, 2.0, 6.0, 2.0, 2.0, 5.0, 7.0, 4.0, 4.0, 2.0, 1.0, 3.0, 5.0, 3.0, 6.0, 5.0, 1.0, 4.0, 4.0, 2.0, 3.0, 1.0, 6.0, 4.0, 2.0, 5.0, 3.0, 6.0, 6.0, 5.0, 7.0, 2.0, 3.0, 5.0, 5.0, 4.0, 5.0, 6.0, 2.0, 4.0, 5.0, 4.0, 6.0, 5.0, 4.0, 6.0, 5.0, 5.0, 6.0, 5.0, 4.0, 5.0, 2.0, 6.0, 4.0, 5.0, 7.0, 7.0, 7.0, 5.0, 3.0, 4.0, 6.0, 5.0, 3.0, 5.0, 3.0, 6.0, 4.0, 6.0, 4.0, 5.0, 7.0, 5.0, 3.0, 4.0, 5.0, 5.0, 2.0, 2.0, 5.0, 2.0, 1.0, 5.0, 4.0, 4.0, 6.0, 4.0, 1.0, 6.0, 2.0, 5.0, 4.0, 5.0, 7.0, 2.0, 7.0, 2.0, 5.0, 4.0, 4.0, 2.0, 5.0, 6.0, 4.0, 5.0, 5.0, 6.0, 6.0, 3.0, 5.0, 7.0, 6.0, 3.0, 5.0, 
5.0, 3.0, 3.0, 7.0, 5.0, 4.0, 6.0, 2.0, 6.0, 6.0, 3.0, 2.0, 2.0, 6.0, 4.0, 2.0, 2.0, 5.0, 6.0, 6.0, 2.0, 3.0, 6.0, 4.0, 2.0, 7.0, 7.0, 4.0, 4.0, 7.0, 4.0, 7.0, 5.0, 6.0, 2.0, 4.0, 3.0, 1.0, 2.0, 3.0, 6.0, 5.0, 7.0, 5.0, 3.0, 3.0, 5.0, 6.0, 3.0, 3.0, 2.0, 4.0, 2.0, 6.0, 3.0, 3.0, 7.0, 2.0, 7.0, 7.0, 3.0, 5.0, 6.0, 3.0, 2.0, 6.0, 4.0, 7.0, 6.0, 5.0, 2.0, 4.0, 7.0, 3.0, 6.0, 2.0, 1.0, 5.0, 2.0, 4.0, 7.0, 4.0, 3.0, 2.0, 3.0, 4.0, 4.0, 5.0, 4.0, 4.0, 6.0, 2.0, 5.0, 3.0, 7.0, 2.0, 1.0, 6.0, 5.0, 1.0, 3.0, 2.0, 3.0, 3.0, 4.0, 3.0, 6.0, 2.0, 1.0, 7.0, 3.0, 1.0, 2.0, 5.0, 6.0, 5.0, 2.0, 2.0, 5.0, 6.0, 7.0, 5.0, 6.0, 5.0, 5.0, 4.0, 5.0, 4.0, 4.0, 4.0, 3.0, 2.0, 4.0, 6.0, 5.0, 7.0, 6.0, 7.0, 7.0, 6.0, 6.0, 3.0, 3.0, 4.0, 4.0, 6.0, 3.0, 5.0, 5.0, 6.0, 3.0, 5.0, 6.0, 5.0, 4.0, 6.0, 6.0, 3.0, 4.0, 4.0, 2.0, 5.0, 6.0, 3.0, 4.0, 5.0, 7.0, 1.0, 6.0, 3.0, 3.0, 3.0, 6.0, 3.0, 4.0, 7.0, 6.0, 7.0, 1.0, 5.0, 5.0, 2.0, 5.0, 7.0, 3.0, 2.0, 6.0, 6.0, 3.0, 7.0, 3.0, 5.0, 4.0, 1.0, 5.0, 3.0, 3.0, 2.0, 6.0, 5.0, 2.0, 5.0, 3.0, 2.0, 6.0, 6.0, 5.0, 1.0, 2.0, 4.0, 7.0, 3.0, 4.0, 2.0, 3.0, 2.0, 5.0, 4.0, 6.0, 5.0, 3.0, 3.0, 5.0, 2.0, 7.0, 5.0, 5.0, 2.0, 2.0, 5.0, 6.0, 2.0, 5.0, 7.0, 6.0, 5.0, 5.0, 4.0, 7.0, 7.0, 6.0, 5.0, 4.0, 7.0, 1.0, 4.0, 6.0, 2.0, 3.0, 2.0, 6.0, 5.0, 4.0, 6.0, 6.0, 5.0, 7.0, 3.0, 3.0, 5.0, 3.0, 5.0, 2.0, 7.0, 4.0, 5.0, 2.0, 3.0, 4.0, 5.0, 3.0, 6.0, 2.0, 3.0, 6.0, 3.0, 5.0, 7.0, 5.0, 6.0, 5.0, 3.0, 4.0, 7.0, 3.0, 3.0, 2.0, 2.0, 7.0, 4.0, 5.0, 5.0, 5.0, 4.0, 5.0, 7.0, 6.0, 5.0, 3.0, 7.0, 5.0, 6.0, 5.0, 6.0, 7.0, 5.0, 4.0, 3.0, 2.0, 5.0, 5.0, 3.0, 4.0, 7.0, 3.0, 2.0, 6.0, 3.0, 3.0, 1.0, 6.0, 2.0, 2.0, 5.0, 4.0, 5.0, 1.0, 5.0, 5.0, 4.0, 1.0, 4.0, 2.0, 5.0, 5.0, 1.0, 4.0, 6.0, 5.0, 4.0, 5.0, 5.0, 6.0, 3.0, 6.0, 5.0, 5.0, 2.0, 4.0, 3.0, 5.0, 7.0, 2.0, 4.0, 4.0, 6.0, 3.0, 3.0, 5.0, 6.0, 4.0, 4.0, 6.0, 5.0, 5.0, 5.0, 1.0, 5.0, 3.0, 6.0, 3.0, 4.0, 4.0, 1.0, 4.0, 6.0, 4.0, 2.0, 6.0, 6.0, 5.0, 3.0, 4.0, 5.0, 5.0, 4.0, 5.0, 2.0, 4.0, 6.0, 4.0, 1.0, 4.0, 6.0, 1.0, 5.0, 6.0, 5.0, 3.0, 5.0, 
1.0, 4.0, 6.0, 6.0, 6.0, 4.0, 3.0, 3.0, 7.0, 4.0, 3.0, 4.0, 3.0, 3.0, 2.0, 6.0, 2.0, 5.0, 4.0, 6.0, 3.0, 3.0, 5.0, 3.0, 5.0, 4.0, 6.0, 3.0, 2.0, 5.0, 7.0, 3.0, 2.0, 3.0, 4.0, 3.0, 3.0, 5.0, 2.0, 5.0, 6.0, 7.0, 5.0, 3.0, 5.0, 6.0, 2.0, 7.0, 3.0, 1.0, 2.0, 5.0, 3.0, 5.0, 4.0, 4.0, 3.0, 4.0, 1.0, 3.0, 5.0, 3.0, 5.0, 1.0, 4.0, 2.0, 2.0, 3.0, 4.0, 5.0, 5.0, 6.0, 2.0, 5.0, 3.0, 5.0, 7.0, 5.0, 5.0, 5.0, 2.0, 1.0, 4.0, 3.0, 5.0, 4.0, 2.0, 6.0, 3.0, 6.0, 3.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 3.0, 7.0, 7.0, 4.0, 5.0, 2.0, 6.0, 3.0, 6.0, 5.0, 3.0, 3.0, 3.0, 7.0, 5.0, 3.0, 5.0, 6.0, 4.0, 6.0, 5.0, 1.0, 6.0, 4.0, 3.0, 5.0, 5.0, 2.0, 4.0, 5.0, 3.0, 3.0, 6.0, 7.0, 2.0, 7.0, 2.0, 2.0, 2.0, 5.0, 4.0, 1.0, 3.0, 4.0, 5.0, 4.0, 5.0, 3.0, 1.0, 6.0, 4.0, 6.0, 4.0, 5.0, 3.0, 4.0, 4.0, 2.0, 5.0, 2.0, 2.0, 4.0, 7.0, 1.0, 2.0, 5.0, 7.0, 3.0, 4.0, 6.0, 2.0, 7.0, 5.0, 6.0, 3.0, 2.0, 2.0, 5.0, 1.0, 6.0, 5.0, 5.0, 1.0, 3.0, 6.0, 1.0, 3.0, 4.0, 7.0, 1.0, 5.0, 2.0, 3.0, 5.0, 4.0, 6.0, 5.0, 5.0, 6.0, 6.0, 3.0, 4.0, 4.0, 5.0, 5.0, 3.0, 5.0, 4.0, 5.0, 2.0, 3.0, 6.0, 4.0, 4.0, 4.0, 2.0, 4.0, 6.0, 1.0, 3.0, 4.0, 3.0, 5.0, 2.0, 6.0, 6.0, 7.0, 5.0, 4.0, 4.0, 4.0, 6.0, 5.0, 4.0, 7.0, 7.0, 3.0, 4.0, 6.0, 6.0, 7.0, 5.0, 3.0, 1.0, 5.0, 6.0, 6.0, 6.0, 4.0, 7.0, 2.0, 3.0, 6.0, 5.0, 2.0, 2.0, 6.0, 6.0, 6.0, 3.0, 6.0, 4.0, 5.0, 7.0, 1.0, 3.0, 4.0, 6.0, 3.0, 5.0, 6.0, 4.0, 5.0, 7.0, 3.0, 7.0, 2.0, 6.0, 5.0, 7.0, 2.0, 4.0, 3.0, 1.0, 4.0, 7.0, 6.0, 4.0, 3.0, 5.0, 5.0, 2.0, 6.0, 5.0, 2.0, 4.0, 4.0, 2.0, 4.0, 4.0, 3.0, 2.0, 3.0, 3.0, 5.0, 7.0, 2.0, 7.0, 7.0, 7.0, 2.0, 3.0, 3.0, 4.0, 1.0, 4.0, 2.0, 1.0, 4.0, 6.0, 5.0, 4.0, 4.0, 7.0, 2.0, 3.0, 2.0, 2.0, 6.0, 6.0, 4.0, 4.0, 4.0, 6.0, 3.0, 3.0, 6.0, 4.0, 2.0, 6.0, 3.0, 3.0, 5.0, 1.0, 5.0, 4.0, 5.0, 6.0, 6.0, 5.0, 7.0, 3.0, 4.0, 6.0, 1.0, 4.0, 2.0, 3.0, 3.0, 5.0, 5.0, 5.0, 3.0, 5.0, 1.0, 2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 5.0, 6.0, 6.0, 7.0, 3.0, 5.0, 3.0, 5.0, 4.0, 3.0, 1.0, 2.0, 2.0, 5.0, 3.0, 5.0, 6.0, 2.0, 1.0, 5.0, 6.0, 5.0, 5.0, 6.0, 7.0, 5.0, 6.0, 4.0, 4.0, 5.0, 4.0, 
6.0, 4.0, 4.0, 2.0, 5.0, 5.0, 3.0, 6.0, 3.0, 5.0, 4.0, 3.0, 5.0, 3.0, 7.0, 5.0, 5.0, 6.0, 4.0, 2.0, 3.0, 7.0, 5.0, 5.0, 6.0, 5.0, 1.0, 4.0, 2.0, 2.0, 5.0, 4.0, 3.0, 3.0, 3.0, 6.0, 3.0, 6.0, 6.0, 5.0, 3.0, 6.0, 6.0, 3.0, 1.0, 3.0, 2.0, 4.0, 1.0, 2.0, 5.0, 7.0, 6.0, 5.0, 7.0, 1.0, 2.0, 5.0, 4.0, 5.0, 6.0, 1.0, 4.0, 2.0, 3.0, 3.0, 4.0, 6.0, 5.0, 4.0, 6.0, 6.0, 2.0, 6.0, 2.0, 6.0, 5.0, 2.0, 2.0, 2.0, 4.0, 2.0, 3.0, 6.0, 2.0, 5.0, 2.0, 2.0, 7.0, 3.0, 4.0, 6.0, 7.0, 5.0, 2.0, 7.0, 3.0, 4.0, 2.0, 7.0, 5.0, 6.0, 4.0, 6.0, 5.0, 6.0, 7.0, 4.0, 3.0, 6.0, 6.0, 6.0, 5.0, 3.0, 5.0, 6.0, 3.0, 6.0, 2.0, 6.0, 3.0, 5.0, 6.0, 5.0, 3.0, 4.0, 6.0, 5.0, 6.0, 5.0, 4.0, 2.0, 6.0, 3.0, 3.0, 2.0, 4.0, 4.0, 3.0, 2.0, 4.0, 5.0, 2.0, 3.0, 7.0, 3.0, 6.0, 2.0, 5.0, 5.0, 2.0, 6.0, 2.0, 6.0, 5.0, 5.0, 2.0, 2.0, 7.0, 2.0, 7.0, 6.0, 2.0, 2.0, 3.0, 5.0, 5.0, 4.0, 4.0, 2.0, 6.0, 7.0, 4.0, 4.0, 3.0, 3.0, 3.0, 7.0, 7.0, 2.0, 2.0, 7.0, 3.0, 2.0, 2.0, 4.0, 3.0, 4.0, 6.0, 1.0, 6.0, 5.0, 4.0, 3.0, 1.0, 5.0, 7.0, 6.0, 5.0, 5.0, 4.0, 6.0, 3.0, 5.0, 4.0, 5.0, 3.0, 6.0, 5.0, 4.0, 3.0, 4.0, 6.0, 7.0, 4.0, 2.0, 6.0, 5.0, 5.0, 4.0, 1.0, 7.0, 4.0, 5.0, 4.0, 4.0, 7.0, 5.0, 2.0, 7.0, 7.0, 1.0, 5.0, 5.0, 4.0, 7.0, 6.0, 4.0, 4.0, 2.0, 3.0, 4.0, 3.0, 5.0, 4.0, 6.0, 2.0, 4.0, 5.0, 4.0, 4.0, 6.0, 6.0, 7.0, 4.0, 7.0, 4.0, 4.0, 6.0, 1.0, 2.0, 2.0, 5.0, 6.0, 2.0, 3.0, 4.0, 6.0, 4.0, 6.0, 7.0, 5.0, 5.0, 2.0, 4.0, 6.0, 6.0, 5.0, 5.0, 3.0, 1.0, 3.0, 6.0, 5.0, 5.0, 7.0, 1.0, 2.0, 4.0, 6.0, 3.0, 3.0, 4.0, 4.0, 4.0, 7.0, 4.0, 7.0, 4.0, 6.0, 3.0, 3.0, 2.0, 1.0, 2.0, 3.0, 7.0, 5.0, 2.0, 4.0, 2.0, 5.0, 4.0, 4.0, 3.0, 6.0, 6.0, 6.0, 5.0, 5.0, 4.0, 3.0, 3.0, 4.0, 5.0, 3.0, 4.0, 4.0, 2.0, 1.0, 4.0, 3.0, 1.0, 6.0, 5.0, 2.0, 6.0, 4.0, 4.0, 5.0, 2.0, 4.0, 2.0, 5.0, 5.0, 6.0, 2.0, 7.0, 4.0, 5.0, 5.0, 5.0, 6.0, 3.0, 4.0, 5.0, 1.0, 5.0, 2.0, 3.0, 5.0, 4.0, 3.0, 2.0, 4.0, 6.0, 5.0, 4.0, 4.0, 6.0, 6.0, 7.0, 2.0, 1.0, 6.0, 4.0, 5.0, 6.0, 4.0, 6.0, 7.0, 5.0, 3.0, 2.0, 5.0, 4.0, 4.0, 4.0, 5.0, 5.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 5.0, 3.0, 4.0, 
5.0, 3.0, 5.0, 5.0, 2.0, 5.0, 4.0, 2.0, 1.0, 2.0, 4.0, 7.0, 4.0, 6.0, 6.0, 7.0, 5.0, 7.0, 5.0, 6.0, 3.0, 6.0, 5.0, 7.0, 5.0, 3.0, 1.0, 5.0, 5.0, 2.0, 6.0, 6.0, 6.0, 3.0, 5.0, 5.0, 6.0, 7.0, 3.0, 7.0, 5.0, 3.0, 2.0, 5.0, 5.0, 2.0, 6.0, 6.0, 6.0, 6.0, 5.0, 2.0, 2.0, 6.0, 5.0, 7.0, 2.0, 4.0, 6.0, 6.0, 3.0, 1.0, 3.0, 3.0, 3.0, 4.0, 7.0, 1.0, 4.0, 3.0, 4.0, 3.0, 3.0, 5.0, 5.0, 6.0, 2.0, 5.0, 2.0, 4.0, 7.0, 6.0, 7.0, 4.0, 2.0, 7.0, 2.0, 2.0, 2.0, 2.0, 6.0, 6.0, 7.0, 5.0, 2.0, 6.0, 5.0, 4.0, 1.0, 5.0, 3.0, 2.0, 6.0, 4.0, 7.0, 3.0, 2.0, 3.0, 6.0, 3.0, 3.0, 4.0, 4.0, 3.0, 1.0, 5.0, 3.0, 2.0, 7.0, 7.0, 3.0, 3.0, 6.0, 7.0, 6.0, 4.0, 5.0, 4.0, 4.0, 4.0, 5.0, 6.0, 3.0, 6.0, 5.0, 2.0, 1.0, 5.0, 6.0, 1.0, 3.0, 6.0, 1.0, 4.0, 3.0, 4.0, 5.0, 7.0, 5.0, 5.0, 1.0, 4.0, 5.0, 2.0, 5.0, 7.0, 6.0, 7.0, 3.0, 3.0, 6.0, 3.0, 6.0, 7.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 4.0, 6.0, 7.0, 3.0, 3.0, 5.0, 6.0, 7.0, 2.0, 3.0, 4.0, 2.0, 7.0, 1.0, 2.0, 6.0, 4.0, 3.0, 4.0, 4.0, 6.0, 6.0, 1.0, 5.0, 7.0, 2.0, 7.0, 3.0, 5.0, 5.0, 4.0, 7.0, 4.0, 7.0, 6.0, 5.0, 3.0, 2.0, 4.0, 5.0, 2.0, 2.0, 6.0, 4.0, 3.0, 5.0, 7.0, 4.0, 2.0, 2.0, 5.0, 3.0, 2.0, 2.0, 1.0, 4.0, 7.0, 4.0, 5.0, 6.0, 6.0, 3.0, 3.0, 5.0, 5.0, 4.0, 4.0, 5.0, 4.0, 7.0, 1.0, 2.0, 3.0, 6.0, 5.0, 2.0, 2.0, 2.0, 2.0, 5.0, 3.0, 3.0, 4.0, 5.0, 7.0, 6.0, 6.0, 5.0, 2.0, 5.0, 6.0, 6.0, 1.0, 5.0, 7.0, 7.0, 3.0, 3.0, 1.0, 4.0, 6.0, 3.0, 3.0, 6.0, 7.0, 4.0, 3.0, 2.0, 3.0, 1.0, 5.0, 2.0, 3.0, 1.0, 1.0, 5.0, 4.0, 6.0, 5.0, 4.0, 7.0, 2.0, 5.0, 6.0, 1.0, 5.0, 1.0, 1.0, 6.0, 2.0, 2.0, 5.0, 4.0, 3.0, 4.0, 6.0, 3.0, 1.0, 5.0, 7.0, 4.0, 6.0, 5.0, 5.0, 7.0, 5.0, 7.0, 4.0, 5.0, 5.0, 6.0, 5.0, 1.0, 5.0, 4.0, 7.0, 5.0, 7.0, 5.0, 2.0, 7.0, 7.0, 1.0, 6.0, 4.0, 7.0, 7.0, 4.0, 4.0, 4.0, 5.0, 5.0, 6.0, 7.0, 7.0, 6.0, 6.0, 6.0, 4.0, 2.0, 6.0, 5.0, 6.0, 3.0, 2.0, 2.0, 3.0, 5.0, 7.0, 2.0, 4.0, 7.0, 3.0, 4.0, 3.0, 4.0, 2.0, 4.0, 2.0, 6.0, 5.0, 2.0, 1.0, 4.0, 5.0, 2.0, 3.0, 7.0, 4.0, 5.0, 3.0, 4.0, 1.0, 4.0, 2.0, 3.0, 7.0, 2.0, 6.0, 2.0, 3.0, 4.0, 2.0, 5.0, 6.0, 2.0, 4.0, 2.0, 3.0, 5.0, 
4.0, 4.0, 3.0, 1.0, 6.0, 7.0, 5.0, 4.0, 2.0, 5.0, 2.0, 6.0, 4.0, 6.0, 2.0, 5.0, 6.0, 3.0, 4.0, 5.0, 6.0, 2.0, 4.0, 2.0, 2.0, 1.0, 4.0, 6.0, 6.0, 5.0, 5.0, 6.0, 6.0, 3.0, 5.0, 7.0, 4.0, 6.0, 6.0, 1.0, 5.0, 1.0, 2.0, 5.0, 4.0, 5.0, 2.0, 4.0, 2.0, 4.0, 7.0, 2.0, 3.0, 2.0, 4.0, 7.0, 3.0, 3.0, 5.0, 4.0, 6.0, 6.0, 1.0, 2.0, 5.0, 6.0, 6.0, 6.0, 7.0, 2.0, 4.0, 6.0, 2.0, 4.0, 6.0, 7.0, 2.0, 3.0, 6.0, 6.0, 5.0, 2.0, 1.0, 3.0, 6.0, 3.0, 1.0, 5.0, 3.0, 4.0, 3.0, 3.0, 7.0, 4.0, 5.0, 2.0, 2.0, 2.0, 2.0, 5.0, 5.0, 3.0, 5.0, 4.0, 5.0, 2.0, 6.0, 7.0, 4.0, 5.0, 5.0, 4.0, 6.0, 4.0, 4.0, 6.0, 4.0, 5.0, 7.0, 2.0, 4.0, 5.0, 6.0, 6.0, 2.0, 2.0, 6.0, 7.0, 4.0, 3.0, 6.0, 6.0, 3.0, 7.0, 4.0, 5.0, 3.0, 5.0, 4.0, 3.0, 6.0, 7.0, 3.0, 3.0, 1.0, 6.0, 6.0, 7.0, 7.0, 5.0, 4.0, 5.0, 6.0, 4.0, 4.0, 3.0, 5.0, 2.0, 1.0, 5.0, 3.0, 3.0, 4.0, 4.0, 1.0, 3.0, 4.0, 5.0, 5.0, 1.0, 3.0, 2.0, 6.0, 5.0, 3.0, 3.0, 5.0, 4.0, 6.0, 2.0, 5.0, 5.0, 3.0, 6.0, 6.0, 7.0, 6.0, 6.0, 5.0, 3.0, 2.0, 2.0, 5.0, 3.0, 2.0, 5.0, 1.0, 4.0, 3.0, 5.0, 3.0, 4.0, 3.0, 7.0, 4.0, 1.0, 7.0, 3.0, 4.0, 6.0, 6.0, 7.0, 5.0, 5.0, 1.0, 2.0, 7.0, 3.0, 3.0, 6.0, 2.0, 5.0, 7.0, 2.0, 6.0, 5.0, 6.0, 7.0, 5.0, 7.0, 6.0, 3.0, 5.0, 3.0, 2.0, 5.0, 5.0, 7.0, 3.0, 5.0, 5.0, 7.0, 4.0, 2.0, 6.0, 4.0, 6.0, 6.0, 5.0, 5.0, 3.0, 3.0, 4.0, 2.0, 3.0, 4.0, 4.0, 5.0, 5.0, 2.0, 2.0, 4.0, 7.0, 5.0, 4.0, 6.0, 4.0, 5.0, 5.0, 4.0, 6.0, 4.0, 5.0, 3.0, 3.0, 2.0, 7.0, 1.0, 3.0, 6.0, 6.0, 3.0, 4.0, 2.0, 2.0, 4.0, 1.0, 5.0, 5.0, 7.0, 6.0, 6.0, 3.0, 3.0, 4.0, 4.0, 2.0, 7.0, 6.0, 2.0, 3.0, 6.0, 5.0, 2.0, 2.0, 5.0, 6.0, 4.0, 6.0, 7.0, 3.0, 6.0, 6.0, 4.0, 4.0, 4.0, 6.0, 4.0, 6.0, 6.0, 5.0, 1.0, 4.0, 3.0, 6.0, 5.0, 5.0, 5.0, 3.0, 1.0, 5.0, 7.0, 6.0, 4.0, 4.0, 5.0, 2.0, 3.0, 7.0, 7.0, 6.0, 6.0, 2.0, 7.0, 4.0, 5.0, 7.0, 2.0, 3.0, 2.0, 6.0, 1.0, 6.0, 1.0, 2.0, 3.0, 6.0, 7.0, 5.0, 4.0, 7.0, 7.0, 3.0, 2.0, 6.0, 5.0, 5.0, 2.0, 2.0, 6.0, 2.0, 7.0, 5.0, 6.0, 2.0, 6.0, 4.0, 3.0, 4.0, 5.0, 6.0, 4.0, 3.0, 1.0, 3.0, 2.0, 4.0, 2.0, 2.0, 4.0, 7.0, 4.0, 3.0, 3.0, 6.0, 5.0, 2.0, 7.0, 1.0, 5.0, 
4.0, 3.0, 1.0, 2.0, 6.0, 7.0, 3.0, 6.0, 5.0, 3.0, 7.0, 7.0, 2.0, 2.0, 6.0, 2.0, 2.0, 3.0, 6.0, 6.0, 2.0, 6.0, 6.0, 6.0, 6.0, 6.0, 2.0, 6.0, 4.0, 6.0, 4.0, 7.0, 6.0, 2.0, 6.0, 2.0, 7.0, 4.0, 1.0, 6.0, 4.0, 5.0, 6.0, 7.0, 1.0, 6.0, 5.0, 6.0, 6.0, 5.0, 3.0, 4.0, 2.0, 3.0, 2.0, 6.0, 3.0, 4.0, 2.0, 6.0, 4.0, 5.0, 4.0, 3.0, 7.0, 6.0, 5.0, 6.0, 3.0, 1.0, 5.0, 4.0, 3.0, 6.0, 3.0, 6.0, 4.0, 2.0, 2.0, 3.0, 4.0, 6.0, 4.0, 2.0, 5.0, 2.0, 6.0, 2.0, 2.0, 3.0, 5.0, 2.0, 4.0, 2.0, 5.0, 2.0, 3.0, 7.0, 6.0, 4.0, 5.0, 4.0, 5.0, 1.0, 2.0, 7.0, 6.0, 4.0, 3.0, 4.0, 4.0, 4.0, 5.0, 6.0, 2.0, 6.0, 3.0, 2.0, 6.0, 6.0, 5.0, 5.0, 3.0, 6.0, 4.0, 4.0, 4.0, 3.0, 4.0, 4.0, 3.0, 5.0, 4.0, 6.0, 3.0, 2.0, 4.0, 6.0, 2.0, 3.0, 7.0, 1.0, 4.0, 5.0, 1.0, 6.0, 2.0, 4.0, 5.0, 1.0, 4.0, 7.0, 6.0, 6.0, 1.0, 2.0, 6.0, 3.0, 1.0, 4.0, 2.0, 6.0, 6.0, 4.0, 5.0, 4.0, 1.0, 6.0, 3.0, 4.0, 5.0, 2.0, 6.0, 2.0, 6.0, 5.0, 5.0, 5.0, 4.0, 5.0, 4.0, 5.0, 6.0, 5.0, 3.0, 3.0, 5.0, 5.0, 3.0, 3.0, 1.0, 7.0, 5.0, 4.0, 5.0, 7.0, 5.0, 1.0, 2.0, 2.0, 2.0, 6.0]
# freq10 = {1.0: 98, 2.0: 197, 3.0: 247, 4.0: 288, 5.0: 285, 6.0: 285, 7.0: 367, 8.0: 323, 9.0: 358, 10.0: 152}
# mec10 = [9.0, 6.0, 6.0, 5.0, 3.0, 3.0, 9.0, 5.0, 5.0, 7.0, 2.0, 8.0, 8.0, 2.0, 9.0, 5.0, 2.0, 9.0, 3.0, 7.0, 9.0, 6.0, 8.0, 8.0, 10.0, 4.0, 2.0, 6.0, 6.0, 4.0, 2.0, 5.0, 8.0, 5.0, 9.0, 7.0, 10.0, 6.0, 8.0, 8.0, 6.0, 7.0, 4.0, 2.0, 7.0, 4.0, 7.0, 6.0, 5.0, 3.0, 3.0, 7.0, 8.0, 5.0, 4.0, 5.0, 5.0, 2.0, 8.0, 10.0, 8.0, 8.0, 5.0, 9.0, 4.0, 6.0, 9.0, 4.0, 5.0, 8.0, 6.0, 3.0, 5.0, 3.0, 9.0, 6.0, 4.0, 3.0, 9.0, 2.0, 1.0, 8.0, 8.0, 4.0, 10.0, 6.0, 5.0, 3.0, 9.0, 1.0, 1.0, 5.0, 4.0, 6.0, 7.0, 6.0, 9.0, 2.0, 5.0, 2.0, 9.0, 9.0, 6.0, 9.0, 6.0, 8.0, 8.0, 2.0, 2.0, 9.0, 5.0, 2.0, 7.0, 9.0, 3.0, 8.0, 6.0, 5.0, 1.0, 3.0, 10.0, 5.0, 10.0, 7.0, 7.0, 8.0, 9.0, 2.0, 5.0, 5.0, 3.0, 8.0, 9.0, 9.0, 8.0, 7.0, 9.0, 6.0, 8.0, 3.0, 4.0, 9.0, 7.0, 2.0, 1.0, 6.0, 8.0, 4.0, 5.0, 10.0, 5.0, 3.0, 7.0, 6.0, 8.0, 1.0, 8.0, 7.0, 10.0, 6.0, 6.0, 7.0, 6.0, 6.0, 6.0, 3.0, 8.0, 7.0, 8.0, 7.0, 2.0, 6.0, 2.0, 8.0, 7.0, 5.0, 10.0, 6.0, 3.0, 6.0, 7.0, 7.0, 4.0, 5.0, 3.0, 4.0, 1.0, 5.0, 7.0, 7.0, 7.0, 6.0, 8.0, 8.0, 6.0, 3.0, 9.0, 4.0, 5.0, 8.0, 8.0, 7.0, 7.0, 9.0, 7.0, 7.0, 10.0, 5.0, 1.0, 10.0, 6.0, 3.0, 1.0, 5.0, 4.0, 5.0, 5.0, 9.0, 8.0, 5.0, 2.0, 8.0, 8.0, 5.0, 9.0, 4.0, 3.0, 7.0, 5.0, 6.0, 9.0, 9.0, 9.0, 4.0, 8.0, 5.0, 5.0, 6.0, 3.0, 9.0, 4.0, 5.0, 5.0, 9.0, 7.0, 8.0, 8.0, 7.0, 9.0, 4.0, 5.0, 3.0, 3.0, 1.0, 4.0, 2.0, 6.0, 8.0, 1.0, 9.0, 10.0, 9.0, 7.0, 8.0, 1.0, 10.0, 6.0, 8.0, 1.0, 7.0, 9.0, 10.0, 3.0, 3.0, 5.0, 5.0, 5.0, 6.0, 9.0, 7.0, 6.0, 10.0, 9.0, 8.0, 10.0, 8.0, 8.0, 9.0, 3.0, 6.0, 7.0, 9.0, 6.0, 4.0, 8.0, 2.0, 8.0, 8.0, 6.0, 9.0, 2.0, 2.0, 4.0, 3.0, 6.0, 6.0, 2.0, 7.0, 4.0, 7.0, 7.0, 7.0, 7.0, 2.0, 9.0, 7.0, 9.0, 9.0, 5.0, 9.0, 5.0, 5.0, 3.0, 2.0, 10.0, 1.0, 8.0, 4.0, 8.0, 2.0, 8.0, 4.0, 4.0, 4.0, 7.0, 8.0, 7.0, 5.0, 7.0, 3.0, 7.0, 8.0, 10.0, 4.0, 9.0, 1.0, 7.0, 8.0, 8.0, 7.0, 6.0, 4.0, 5.0, 3.0, 7.0, 3.0, 5.0, 8.0, 8.0, 8.0, 7.0, 3.0, 4.0, 7.0, 6.0, 6.0, 5.0, 9.0, 6.0, 4.0, 3.0, 9.0, 5.0, 7.0, 8.0, 9.0, 6.0, 8.0, 3.0, 10.0, 1.0, 3.0, 7.0, 6.0, 6.0, 6.0, 7.0, 5.0, 9.0, 9.0, 7.0, 4.0, 9.0, 7.0, 
2.0, 7.0, 7.0, 3.0, 5.0, 7.0, 2.0, 9.0, 5.0, 5.0, 4.0, 9.0, 6.0, 3.0, 2.0, 4.0, 8.0, 5.0, 5.0, 2.0, 10.0, 8.0, 3.0, 8.0, 5.0, 1.0, 9.0, 6.0, 5.0, 6.0, 7.0, 3.0, 3.0, 3.0, 6.0, 4.0, 6.0, 10.0, 5.0, 7.0, 3.0, 6.0, 8.0, 10.0, 3.0, 8.0, 6.0, 5.0, 6.0, 2.0, 8.0, 4.0, 8.0, 10.0, 4.0, 1.0, 6.0, 6.0, 10.0, 8.0, 3.0, 3.0, 10.0, 8.0, 5.0, 4.0, 5.0, 8.0, 4.0, 1.0, 1.0, 8.0, 8.0, 7.0, 10.0, 8.0, 6.0, 5.0, 10.0, 7.0, 9.0, 10.0, 7.0, 6.0, 3.0, 7.0, 2.0, 6.0, 9.0, 4.0, 1.0, 5.0, 7.0, 5.0, 9.0, 10.0, 5.0, 9.0, 3.0, 8.0, 6.0, 4.0, 6.0, 5.0, 2.0, 9.0, 5.0, 5.0, 2.0, 9.0, 7.0, 10.0, 5.0, 5.0, 7.0, 9.0, 6.0, 9.0, 5.0, 8.0, 7.0, 8.0, 4.0, 5.0, 9.0, 1.0, 7.0, 7.0, 9.0, 6.0, 9.0, 6.0, 9.0, 1.0, 6.0, 3.0, 8.0, 4.0, 6.0, 7.0, 4.0, 6.0, 9.0, 8.0, 7.0, 6.0, 9.0, 8.0, 4.0, 7.0, 6.0, 7.0, 6.0, 9.0, 3.0, 7.0, 6.0, 8.0, 2.0, 1.0, 5.0, 9.0, 2.0, 4.0, 10.0, 6.0, 8.0, 10.0, 1.0, 7.0, 2.0, 10.0, 4.0, 4.0, 3.0, 9.0, 10.0, 9.0, 6.0, 4.0, 8.0, 4.0, 5.0, 10.0, 3.0, 1.0, 4.0, 7.0, 5.0, 6.0, 2.0, 5.0, 8.0, 7.0, 7.0, 8.0, 5.0, 3.0, 7.0, 8.0, 10.0, 5.0, 7.0, 3.0, 9.0, 5.0, 6.0, 10.0, 7.0, 7.0, 10.0, 1.0, 8.0, 3.0, 9.0, 8.0, 4.0, 10.0, 8.0, 10.0, 8.0, 6.0, 3.0, 9.0, 4.0, 2.0, 1.0, 8.0, 3.0, 7.0, 8.0, 3.0, 7.0, 10.0, 1.0, 9.0, 6.0, 2.0, 10.0, 3.0, 4.0, 10.0, 10.0, 7.0, 7.0, 3.0, 7.0, 6.0, 10.0, 10.0, 8.0, 6.0, 5.0, 6.0, 2.0, 9.0, 8.0, 3.0, 9.0, 8.0, 5.0, 6.0, 7.0, 9.0, 8.0, 2.0, 6.0, 9.0, 10.0, 7.0, 6.0, 3.0, 8.0, 10.0, 8.0, 2.0, 4.0, 4.0, 4.0, 5.0, 1.0, 8.0, 3.0, 5.0, 10.0, 10.0, 8.0, 3.0, 6.0, 9.0, 9.0, 2.0, 2.0, 8.0, 8.0, 5.0, 8.0, 8.0, 5.0, 5.0, 4.0, 4.0, 4.0, 8.0, 10.0, 8.0, 1.0, 7.0, 2.0, 3.0, 7.0, 7.0, 7.0, 8.0, 10.0, 6.0, 6.0, 7.0, 9.0, 8.0, 7.0, 10.0, 9.0, 5.0, 3.0, 9.0, 9.0, 8.0, 6.0, 9.0, 9.0, 4.0, 4.0, 6.0, 3.0, 2.0, 3.0, 4.0, 6.0, 4.0, 8.0, 6.0, 1.0, 4.0, 9.0, 3.0, 9.0, 8.0, 4.0, 7.0, 5.0, 9.0, 9.0, 4.0, 7.0, 3.0, 1.0, 9.0, 10.0, 3.0, 5.0, 1.0, 3.0, 3.0, 5.0, 2.0, 4.0, 4.0, 4.0, 7.0, 8.0, 8.0, 9.0, 3.0, 9.0, 1.0, 10.0, 8.0, 10.0, 8.0, 8.0, 1.0, 5.0, 3.0, 3.0, 1.0, 8.0, 4.0, 5.0, 9.0, 7.0, 2.0, 
3.0, 6.0, 5.0, 4.0, 2.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 6.0, 8.0, 10.0, 6.0, 7.0, 7.0, 2.0, 1.0, 5.0, 5.0, 10.0, 3.0, 6.0, 5.0, 8.0, 8.0, 6.0, 8.0, 10.0, 2.0, 7.0, 9.0, 7.0, 7.0, 4.0, 3.0, 7.0, 6.0, 1.0, 10.0, 4.0, 4.0, 8.0, 3.0, 8.0, 9.0, 3.0, 7.0, 9.0, 8.0, 5.0, 7.0, 4.0, 7.0, 2.0, 4.0, 5.0, 4.0, 6.0, 10.0, 2.0, 6.0, 8.0, 1.0, 5.0, 5.0, 7.0, 1.0, 4.0, 4.0, 4.0, 6.0, 7.0, 6.0, 8.0, 9.0, 8.0, 6.0, 6.0, 1.0, 5.0, 9.0, 9.0, 4.0, 8.0, 6.0, 4.0, 4.0, 7.0, 9.0, 4.0, 10.0, 6.0, 5.0, 7.0, 10.0, 4.0, 6.0, 7.0, 8.0, 4.0, 7.0, 5.0, 5.0, 2.0, 4.0, 7.0, 1.0, 7.0, 1.0, 9.0, 7.0, 9.0, 3.0, 4.0, 10.0, 7.0, 9.0, 2.0, 9.0, 5.0, 5.0, 3.0, 8.0, 2.0, 9.0, 8.0, 8.0, 8.0, 9.0, 3.0, 7.0, 9.0, 3.0, 9.0, 7.0, 2.0, 5.0, 3.0, 3.0, 8.0, 4.0, 9.0, 7.0, 4.0, 4.0, 9.0, 7.0, 2.0, 2.0, 4.0, 5.0, 8.0, 7.0, 2.0, 5.0, 5.0, 6.0, 9.0, 10.0, 6.0, 5.0, 6.0, 1.0, 8.0, 9.0, 10.0, 4.0, 9.0, 7.0, 5.0, 9.0, 7.0, 3.0, 2.0, 5.0, 5.0, 1.0, 7.0, 4.0, 4.0, 5.0, 7.0, 8.0, 7.0, 4.0, 3.0, 7.0, 8.0, 4.0, 8.0, 9.0, 9.0, 9.0, 6.0, 9.0, 6.0, 6.0, 8.0, 2.0, 9.0, 1.0, 7.0, 4.0, 6.0, 3.0, 4.0, 5.0, 8.0, 4.0, 6.0, 6.0, 6.0, 2.0, 4.0, 7.0, 3.0, 8.0, 3.0, 2.0, 10.0, 9.0, 10.0, 10.0, 3.0, 7.0, 1.0, 2.0, 1.0, 2.0, 4.0, 1.0, 6.0, 1.0, 1.0, 7.0, 10.0, 8.0, 8.0, 3.0, 6.0, 1.0, 4.0, 9.0, 4.0, 6.0, 1.0, 10.0, 8.0, 10.0, 9.0, 9.0, 5.0, 4.0, 5.0, 9.0, 7.0, 4.0, 8.0, 8.0, 7.0, 5.0, 8.0, 8.0, 9.0, 5.0, 1.0, 7.0, 10.0, 6.0, 2.0, 7.0, 8.0, 8.0, 3.0, 7.0, 7.0, 5.0, 9.0, 6.0, 8.0, 1.0, 7.0, 2.0, 9.0, 2.0, 8.0, 1.0, 8.0, 2.0, 7.0, 2.0, 4.0, 6.0, 9.0, 4.0, 3.0, 9.0, 5.0, 7.0, 9.0, 7.0, 6.0, 8.0, 7.0, 8.0, 9.0, 8.0, 4.0, 7.0, 5.0, 9.0, 4.0, 2.0, 8.0, 9.0, 6.0, 1.0, 5.0, 9.0, 7.0, 8.0, 8.0, 8.0, 8.0, 5.0, 6.0, 2.0, 7.0, 9.0, 9.0, 4.0, 5.0, 7.0, 7.0, 4.0, 5.0, 2.0, 3.0, 9.0, 2.0, 7.0, 2.0, 5.0, 4.0, 4.0, 5.0, 4.0, 7.0, 9.0, 6.0, 7.0, 6.0, 5.0, 6.0, 7.0, 7.0, 8.0, 4.0, 6.0, 3.0, 3.0, 6.0, 4.0, 6.0, 3.0, 5.0, 7.0, 8.0, 2.0, 3.0, 5.0, 9.0, 7.0, 7.0, 9.0, 7.0, 8.0, 5.0, 4.0, 4.0, 9.0, 7.0, 10.0, 8.0, 3.0, 2.0, 4.0, 5.0, 3.0, 6.0, 5.0, 9.0, 6.0, 3.0, 
9.0, 5.0, 4.0, 6.0, 3.0, 5.0, 7.0, 9.0, 2.0, 6.0, 6.0, 2.0, 7.0, 9.0, 2.0, 7.0, 10.0, 9.0, 7.0, 5.0, 8.0, 5.0, 9.0, 3.0, 9.0, 9.0, 5.0, 6.0, 7.0, 6.0, 6.0, 6.0, 5.0, 3.0, 3.0, 5.0, 5.0, 5.0, 9.0, 7.0, 5.0, 6.0, 8.0, 5.0, 2.0, 3.0, 6.0, 9.0, 6.0, 4.0, 4.0, 7.0, 8.0, 5.0, 8.0, 7.0, 9.0, 9.0, 2.0, 7.0, 7.0, 7.0, 3.0, 1.0, 4.0, 6.0, 4.0, 10.0, 8.0, 10.0, 6.0, 3.0, 6.0, 5.0, 2.0, 1.0, 8.0, 3.0, 5.0, 4.0, 4.0, 8.0, 3.0, 2.0, 5.0, 9.0, 8.0, 4.0, 4.0, 8.0, 9.0, 9.0, 5.0, 5.0, 7.0, 2.0, 4.0, 9.0, 7.0, 6.0, 3.0, 9.0, 8.0, 10.0, 5.0, 4.0, 1.0, 9.0, 7.0, 3.0, 6.0, 7.0, 5.0, 6.0, 9.0, 7.0, 5.0, 9.0, 6.0, 4.0, 7.0, 9.0, 9.0, 9.0, 8.0, 7.0, 6.0, 3.0, 7.0, 9.0, 5.0, 7.0, 7.0, 9.0, 8.0, 3.0, 10.0, 1.0, 9.0, 7.0, 5.0, 5.0, 6.0, 10.0, 9.0, 9.0, 8.0, 2.0, 6.0, 6.0, 3.0, 9.0, 7.0, 2.0, 6.0, 4.0, 10.0, 3.0, 2.0, 9.0, 8.0, 3.0, 5.0, 9.0, 6.0, 7.0, 8.0, 7.0, 3.0, 4.0, 5.0, 5.0, 8.0, 9.0, 7.0, 4.0, 9.0, 8.0, 9.0, 5.0, 3.0, 3.0, 6.0, 3.0, 3.0, 6.0, 2.0, 9.0, 6.0, 6.0, 9.0, 3.0, 7.0, 9.0, 4.0, 7.0, 9.0, 9.0, 3.0, 2.0, 2.0, 7.0, 6.0, 2.0, 6.0, 3.0, 7.0, 3.0, 2.0, 9.0, 9.0, 7.0, 7.0, 9.0, 7.0, 3.0, 7.0, 2.0, 3.0, 4.0, 9.0, 7.0, 6.0, 5.0, 7.0, 2.0, 8.0, 7.0, 5.0, 1.0, 3.0, 3.0, 3.0, 7.0, 4.0, 7.0, 9.0, 8.0, 7.0, 1.0, 2.0, 9.0, 3.0, 2.0, 3.0, 4.0, 9.0, 1.0, 3.0, 9.0, 5.0, 8.0, 7.0, 5.0, 2.0, 2.0, 5.0, 2.0, 9.0, 7.0, 3.0, 9.0, 9.0, 8.0, 1.0, 4.0, 6.0, 7.0, 7.0, 5.0, 3.0, 8.0, 6.0, 6.0, 7.0, 6.0, 8.0, 7.0, 9.0, 8.0, 8.0, 10.0, 6.0, 4.0, 10.0, 7.0, 4.0, 9.0, 8.0, 9.0, 9.0, 5.0, 7.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 3.0, 10.0, 3.0, 7.0, 5.0, 4.0, 9.0, 10.0, 2.0, 4.0, 7.0, 8.0, 4.0, 3.0, 8.0, 5.0, 8.0, 7.0, 3.0, 9.0, 9.0, 7.0, 6.0, 6.0, 4.0, 10.0, 8.0, 5.0, 1.0, 2.0, 1.0, 4.0, 5.0, 3.0, 7.0, 4.0, 3.0, 7.0, 9.0, 4.0, 6.0, 4.0, 5.0, 8.0, 8.0, 4.0, 9.0, 7.0, 6.0, 4.0, 9.0, 7.0, 7.0, 8.0, 8.0, 8.0, 7.0, 8.0, 2.0, 7.0, 10.0, 7.0, 7.0, 8.0, 4.0, 4.0, 10.0, 4.0, 9.0, 4.0, 8.0, 6.0, 3.0, 9.0, 5.0, 8.0, 4.0, 3.0, 6.0, 4.0, 10.0, 5.0, 5.0, 3.0, 4.0, 2.0, 4.0, 3.0, 6.0, 6.0, 2.0, 8.0, 6.0, 5.0, 5.0, 1.0, 5.0, 9.0, 
9.0, 4.0, 7.0, 1.0, 7.0, 4.0, 8.0, 5.0, 8.0, 10.0, 4.0, 8.0, 6.0, 9.0, 3.0, 4.0, 9.0, 7.0, 7.0, 7.0, 10.0, 8.0, 8.0, 3.0, 8.0, 4.0, 4.0, 3.0, 10.0, 6.0, 5.0, 9.0, 7.0, 6.0, 9.0, 2.0, 3.0, 6.0, 6.0, 8.0, 2.0, 5.0, 7.0, 3.0, 7.0, 4.0, 7.0, 4.0, 7.0, 8.0, 6.0, 5.0, 7.0, 9.0, 7.0, 6.0, 3.0, 3.0, 4.0, 10.0, 5.0, 6.0, 3.0, 10.0, 1.0, 2.0, 7.0, 5.0, 6.0, 8.0, 9.0, 9.0, 4.0, 4.0, 4.0, 4.0, 2.0, 4.0, 9.0, 6.0, 4.0, 2.0, 5.0, 6.0, 3.0, 9.0, 3.0, 5.0, 6.0, 10.0, 3.0, 10.0, 4.0, 3.0, 1.0, 2.0, 5.0, 7.0, 7.0, 7.0, 7.0, 5.0, 6.0, 2.0, 2.0, 9.0, 4.0, 2.0, 9.0, 4.0, 9.0, 2.0, 2.0, 3.0, 3.0, 3.0, 8.0, 5.0, 8.0, 9.0, 10.0, 7.0, 6.0, 2.0, 10.0, 8.0, 7.0, 7.0, 4.0, 3.0, 3.0, 7.0, 5.0, 8.0, 5.0, 8.0, 7.0, 9.0, 7.0, 4.0, 8.0, 8.0, 3.0, 10.0, 6.0, 6.0, 8.0, 3.0, 1.0, 5.0, 5.0, 10.0, 7.0, 4.0, 9.0, 4.0, 10.0, 10.0, 2.0, 8.0, 5.0, 6.0, 9.0, 3.0, 4.0, 8.0, 2.0, 9.0, 6.0, 8.0, 8.0, 3.0, 10.0, 8.0, 8.0, 8.0, 10.0, 5.0, 8.0, 8.0, 6.0, 4.0, 9.0, 4.0, 7.0, 4.0, 9.0, 9.0, 5.0, 5.0, 4.0, 9.0, 1.0, 7.0, 8.0, 7.0, 6.0, 4.0, 10.0, 9.0, 2.0, 4.0, 2.0, 2.0, 3.0, 4.0, 6.0, 2.0, 9.0, 8.0, 7.0, 1.0, 4.0, 2.0, 7.0, 9.0, 8.0, 9.0, 3.0, 9.0, 3.0, 10.0, 5.0, 6.0, 7.0, 8.0, 6.0, 7.0, 8.0, 9.0, 6.0, 9.0, 6.0, 6.0, 5.0, 3.0, 7.0, 9.0, 8.0, 9.0, 2.0, 8.0, 9.0, 9.0, 8.0, 6.0, 3.0, 4.0, 6.0, 3.0, 2.0, 4.0, 10.0, 8.0, 7.0, 2.0, 7.0, 4.0, 8.0, 2.0, 10.0, 5.0, 6.0, 5.0, 2.0, 7.0, 5.0, 4.0, 7.0, 10.0, 3.0, 3.0, 5.0, 9.0, 6.0, 10.0, 10.0, 9.0, 9.0, 3.0, 1.0, 5.0, 2.0, 5.0, 6.0, 9.0, 4.0, 4.0, 6.0, 7.0, 6.0, 5.0, 7.0, 8.0, 9.0, 7.0, 9.0, 8.0, 4.0, 3.0, 9.0, 5.0, 5.0, 8.0, 10.0, 3.0, 2.0, 9.0, 1.0, 4.0, 1.0, 2.0, 2.0, 9.0, 8.0, 6.0, 2.0, 5.0, 7.0, 5.0, 9.0, 2.0, 7.0, 2.0, 8.0, 9.0, 3.0, 2.0, 7.0, 7.0, 2.0, 2.0, 7.0, 6.0, 8.0, 2.0, 4.0, 10.0, 4.0, 7.0, 8.0, 3.0, 9.0, 3.0, 5.0, 2.0, 6.0, 7.0, 6.0, 4.0, 8.0, 6.0, 7.0, 3.0, 3.0, 7.0, 5.0, 2.0, 3.0, 6.0, 9.0, 9.0, 9.0, 9.0, 7.0, 2.0, 7.0, 7.0, 8.0, 7.0, 5.0, 7.0, 6.0, 5.0, 5.0, 9.0, 3.0, 9.0, 7.0, 5.0, 8.0, 5.0, 9.0, 5.0, 6.0, 2.0, 8.0, 1.0, 7.0, 2.0, 3.0, 8.0, 7.0, 8.0, 2.0, 
7.0, 8.0, 9.0, 9.0, 6.0, 4.0, 4.0, 7.0, 9.0, 9.0, 4.0, 5.0, 1.0, 9.0, 4.0, 3.0, 6.0, 3.0, 4.0, 1.0, 4.0, 8.0, 2.0, 2.0, 10.0, 4.0, 9.0, 8.0, 7.0, 7.0, 9.0, 7.0, 10.0, 8.0, 4.0, 7.0, 4.0, 7.0, 9.0, 2.0, 2.0, 5.0, 6.0, 10.0, 7.0, 10.0, 4.0, 6.0, 6.0, 5.0, 9.0, 8.0, 8.0, 7.0, 9.0, 8.0, 2.0, 6.0, 10.0, 9.0, 8.0, 9.0, 9.0, 5.0, 2.0, 3.0, 8.0, 8.0, 2.0, 10.0, 1.0, 7.0, 9.0, 6.0, 9.0, 5.0, 10.0, 2.0, 10.0, 7.0, 6.0, 7.0, 4.0, 4.0, 10.0, 5.0, 5.0, 7.0, 9.0, 7.0, 4.0, 10.0, 10.0, 5.0, 9.0, 10.0, 4.0, 7.0, 5.0, 5.0, 5.0, 1.0, 9.0, 4.0, 2.0, 4.0, 4.0, 2.0, 2.0, 7.0, 7.0, 3.0, 5.0, 9.0, 2.0, 10.0, 4.0, 5.0, 10.0, 8.0, 3.0, 7.0, 10.0, 9.0, 7.0, 6.0, 5.0, 7.0, 3.0, 8.0, 5.0, 6.0, 4.0, 4.0, 2.0, 4.0, 3.0, 9.0, 1.0, 8.0, 7.0, 7.0, 9.0, 8.0, 2.0, 4.0, 8.0, 7.0, 8.0, 10.0, 8.0, 7.0, 9.0, 9.0, 3.0, 4.0, 10.0, 5.0, 2.0, 5.0, 8.0, 9.0, 6.0, 10.0, 7.0, 3.0, 5.0, 9.0, 8.0, 6.0, 9.0, 9.0, 2.0, 4.0, 8.0, 8.0, 3.0, 9.0, 6.0, 6.0, 7.0, 2.0, 9.0, 2.0, 10.0, 7.0, 7.0, 7.0, 4.0, 9.0, 9.0, 3.0, 8.0, 3.0, 4.0, 1.0, 9.0, 3.0, 8.0, 9.0, 3.0, 3.0, 1.0, 9.0, 9.0, 4.0, 3.0, 7.0, 6.0, 9.0, 7.0, 6.0, 4.0, 9.0, 4.0, 5.0, 2.0, 3.0, 4.0, 9.0, 9.0, 7.0, 7.0, 4.0, 6.0, 8.0, 10.0, 7.0, 8.0, 7.0, 4.0, 2.0, 7.0, 8.0, 9.0, 8.0, 4.0, 8.0, 8.0, 5.0, 7.0, 6.0, 5.0, 4.0, 9.0, 7.0, 8.0, 4.0, 4.0, 6.0, 8.0, 7.0, 9.0, 5.0, 5.0, 7.0, 9.0, 10.0, 2.0, 1.0, 10.0, 2.0, 5.0, 9.0, 1.0, 4.0, 3.0, 9.0, 5.0, 9.0, 9.0, 1.0, 10.0, 9.0, 9.0, 10.0, 1.0, 4.0, 4.0, 3.0, 8.0, 3.0, 4.0, 9.0, 2.0, 6.0, 9.0, 4.0, 6.0, 7.0, 8.0, 4.0, 7.0, 9.0, 10.0, 3.0, 9.0, 2.0, 5.0, 2.0, 3.0, 4.0, 7.0, 5.0, 6.0, 9.0, 6.0, 9.0, 6.0, 8.0, 9.0, 8.0, 5.0, 2.0, 7.0, 3.0, 7.0, 10.0, 5.0, 2.0, 6.0, 9.0, 9.0, 4.0, 3.0, 5.0, 5.0, 6.0, 3.0, 9.0, 10.0, 6.0, 9.0, 3.0, 3.0, 3.0, 5.0, 5.0, 4.0, 7.0, 8.0, 3.0, 7.0, 6.0, 9.0, 6.0, 10.0, 5.0, 7.0, 9.0, 8.0, 5.0, 6.0, 8.0, 4.0, 6.0, 6.0, 9.0, 6.0, 4.0, 8.0, 5.0, 10.0, 2.0, 4.0, 4.0, 7.0, 6.0, 8.0, 6.0, 3.0, 7.0, 7.0, 7.0, 3.0, 4.0, 5.0, 9.0, 4.0, 6.0, 9.0, 1.0, 9.0, 6.0, 5.0, 5.0, 2.0, 2.0, 6.0, 3.0, 9.0, 6.0, 4.0, 8.0, 
9.0, 5.0, 3.0, 8.0, 4.0, 8.0, 1.0, 5.0, 9.0, 6.0, 2.0, 3.0, 6.0, 3.0, 9.0, 10.0, 4.0, 7.0, 5.0, 5.0, 8.0, 9.0, 6.0, 4.0, 4.0, 9.0, 9.0, 2.0, 7.0, 1.0, 9.0, 5.0, 9.0, 6.0, 2.0, 2.0, 4.0, 1.0, 8.0, 3.0, 9.0, 8.0, 2.0, 7.0, 5.0, 7.0, 2.0, 5.0, 6.0, 9.0, 7.0, 7.0, 5.0, 4.0, 3.0, 8.0, 6.0, 9.0, 10.0, 4.0, 1.0, 9.0, 3.0, 7.0, 7.0, 3.0, 6.0, 10.0, 9.0, 5.0, 9.0, 3.0, 4.0, 5.0, 4.0, 8.0, 7.0, 9.0, 8.0, 7.0, 5.0, 1.0, 8.0, 6.0, 2.0, 4.0, 8.0, 8.0, 5.0, 3.0, 3.0, 2.0, 7.0, 3.0, 3.0, 9.0, 5.0, 6.0, 9.0, 8.0, 2.0, 9.0, 5.0, 7.0, 7.0, 6.0, 7.0, 5.0, 7.0, 4.0, 9.0, 3.0, 7.0, 8.0, 6.0, 3.0, 6.0, 4.0, 7.0, 4.0, 3.0, 9.0, 10.0, 7.0, 10.0, 5.0, 8.0, 9.0, 2.0, 3.0, 8.0, 8.0, 9.0, 3.0, 9.0, 5.0, 4.0, 6.0, 7.0, 10.0, 5.0, 8.0, 8.0, 7.0, 1.0, 4.0, 4.0, 10.0, 7.0, 2.0, 4.0, 3.0, 4.0, 4.0, 8.0, 8.0, 3.0, 3.0, 6.0, 2.0, 9.0, 4.0, 6.0, 6.0, 1.0, 8.0, 5.0, 8.0, 5.0, 2.0, 4.0, 9.0, 6.0, 6.0, 3.0, 4.0, 3.0, 7.0, 7.0, 9.0, 5.0, 3.0, 5.0, 5.0, 10.0, 2.0, 4.0, 8.0, 8.0, 7.0, 8.0, 10.0, 7.0, 9.0, 8.0, 6.0, 8.0, 9.0, 4.0, 4.0, 3.0, 2.0, 4.0, 4.0, 4.0, 10.0, 7.0, 8.0, 9.0, 6.0, 4.0, 7.0, 2.0, 7.0, 7.0, 6.0, 9.0, 4.0, 7.0, 3.0, 9.0, 6.0, 7.0, 6.0, 3.0, 2.0, 6.0, 8.0, 7.0, 4.0, 5.0, 2.0]
|
import xlrd
import os
import data
class NaturePredict(object):
    """Parser for the Nature PREDICT drug-repurposing supplementary datasets.

    http://www.nature.com/msb/journal/v7/n1/full/msb201126.html
    """

    def __init__(self, directory=None):
        # Default to the project's bundled copy of the supplementary files.
        if not directory:
            directory = data.source_data_dir('nature-predict')
        self.directory = directory
        # Table S1: (drug name, OMIM disease name) indication pairs.
        self.indications_path = os.path.join(directory, 'msb201126-s1.xls')
        # Table S4: OMIM to UMLS disease-concept mappings.
        self.disease_mappings_path = os.path.join(directory, 'msb201126-s4.xls')

    def read_indications(self):
        """
        Read indications excel file and return an omim disease name to
        drugbank name dictionary.
        """
        # NOTE(review): despite the docstring, this method returns None; it
        # stores drug_to_diseases / disease_to_drugs on self instead.
        wb = xlrd.open_workbook(self.indications_path)
        sheet = wb.sheet_by_name(u'Drug indications')
        drug_to_diseases, disease_to_drugs = dict(), dict()
        # Seven drug names had the first letter of their second word incorrectly capitalized.
        # Two drug names appear to have omitted their second word.
        # One drug contained the brand instead of generic name.
        drug_conversion_dict = {'Arsenic Trioxide': 'Arsenic trioxide',
                                'Meclofenamic Acid': 'Meclofenamic acid',
                                'Ipratropium': 'Ipratropium bromide',
                                'Salicyclic Acid': 'Salicyclic acid',
                                'Adenosine Monophosphate': 'Adenosine monophosphate',
                                'Ethacrynic Acid': 'Ethacrynic acid',
                                'Divalproex Sodium': 'Valproic Acid', # Valporic acid is the generic
                                'Methyl Aminolevulinate': 'Methyl aminolevulinate',
                                'Fondaparinux Sodium': 'Fondaparinux sodium',
                                'Beclomethasone': 'Beclometasone dipropionate'}
        # Row 0 is the header; every data row is one (drug, disease) pair.
        for row_num in range(1, sheet.nrows):
            drug, disease = sheet.row_values(row_num)
            if drug in drug_conversion_dict:
                drug = drug_conversion_dict[drug]
            drug_to_diseases.setdefault(drug, set()).add(disease)
            disease_to_drugs.setdefault(disease, set()).add(drug)
        self.drug_to_diseases = drug_to_diseases
        self.disease_to_drugs = disease_to_drugs

    def read_disease_mappings(self):
        """Read the OMIM-to-UMLS mapping sheet into self.disease_mappings.

        Each entry is a dict with keys omim_id, omim_name, concept_id,
        concept_name; omim_id is normalized to a plain integer string.
        """
        wb = xlrd.open_workbook(self.disease_mappings_path)
        sheet = wb.sheet_by_name(u'OMIM to UMLS mapping')
        column_names = ['omim_id', 'omim_name', 'concept_id', 'concept_name']
        rows = list()
        for row_num in range(1, sheet.nrows):
            row = sheet.row_values(row_num)
            row_dict = dict(zip(column_names, row))
            # One disease name is truncated in the source spreadsheet; restore it.
            if row_dict['omim_name'] == "Neuropathy, Hereditary Sensory And Autonomic, Type I, With Cough And":
                row_dict['omim_name'] = "Neuropathy, Hereditary Sensory And Autonomic, Type I, With Cough And Gastroesophageal Reflux"
            # Excel stores the id as a float; convert e.g. 114480.0 -> '114480'.
            row_dict['omim_id'] = str(int(row_dict['omim_id']))
            rows.append(row_dict)
        self.disease_mappings = rows

    def omim_drugbank_mapper(self):
        """Return rows linking each OMIM disease to its indicated DrugBank drugs.

        Each row is a dict with omim_id, omim_name, drugbank_id, drugbank_name.
        Drugs whose names cannot be resolved in DrugBank are silently dropped.
        """
        self.read_disease_mappings()
        self.read_indications()
        data.Data().drugbank.read(True, False, False)
        name_to_drugbank = data.Data().drugbank.get_name_to_drug()
        rows = list()
        omim_id_to_name = {disease_mapping['omim_id']: disease_mapping['omim_name']
                           for disease_mapping in self.disease_mappings}
        for omim_id, omim_name in omim_id_to_name.items():
            drugs = self.disease_to_drugs[omim_name]
            # Keep only drugs that resolve to a DrugBank record.
            drugbanks = filter(lambda x: x, (name_to_drugbank.get(drug) for drug in drugs))
            #print omim_name, len(drugs), len(drugbanks)
            for drugbank in drugbanks:
                row = {'omim_id': omim_id, 'omim_name': omim_name,
                       'drugbank_id': drugbank['drugbank_id'], 'drugbank_name': drugbank['name']}
                rows.append(row)
        return rows

    def read(self):
        """Build and return a UMLS concept_id -> set-of-indicated-drugs mapping."""
        print 'Reading Nature PREDICT indications:'
        self.read_indications()
        self.read_disease_mappings()
        concept_id_to_indicated_drugs = dict()
        # Diseases sharing a UMLS concept pool their indicated drugs.
        for disease_mapping in self.disease_mappings:
            concept_id = disease_mapping['concept_id']
            omim_name = disease_mapping['omim_name']
            drugs = self.disease_to_drugs[omim_name]
            concept_id_to_indicated_drugs.setdefault(concept_id, set()).update(drugs)
        self.concept_id_to_indicated_drugs = concept_id_to_indicated_drugs
        print len(concept_id_to_indicated_drugs), 'diseases with indicated drugs.'
        print sum(map(len, concept_id_to_indicated_drugs.values())), 'total indications.'
        return concept_id_to_indicated_drugs
if __name__ == '__main__':
    # Ad-hoc driver: build and pretty-print the OMIM -> DrugBank mapping rows.
    np = NaturePredict()
    #np.read_disease_mappings()
    rows = np.omim_drugbank_mapper()
    import pprint
    #pprint.pprint(np.disease_mappings)
    pprint.pprint(rows)
    #np.read()
|
# build graph from cleaned (only contains actor id and receiver id of each tx)
# & normalized venmo dataset
import csv
import networkx as nx
import os
import metis
# Build a directed multigraph with one edge per transaction (actor -> receiver),
# partition it into shards with METIS, and dump the shard assignment.
dg = nx.MultiDiGraph()
# FIX: use context managers so the files are closed even if reading/writing fails
# (the original open()/close() pairs leaked the handles on any exception).
with open("venmo_hybrid_algo_metis_part.csv", "r") as fp:  # , encoding='utf-8')
    csv_file = csv.reader(fp)
    for row in csv_file:
        # row[0] = actor id, row[1] = receiver id of one transaction
        dg.add_edge(row[0], row[1])
print(nx.info(dg))
(edgecuts, parts) = metis.part_graph(dg, 3)  # num of shards
with open("clustered_venmo_metis.txt", "w") as fp:
    print(parts, file=fp)
|
from .base import *
from .mixins import *
from .decorators import *
@Singleton
class CommandService(AjaxMixin):
    """
    This service exists to register the available commands and provide a way of
    routing to the correct command class based on the command name received
    by the front end code.
    """
    # the name of the field at which command handlers should specify their callable name.
    command_name_field = 'command_name'
    # handlers: registry of all CommandHandlerBase subclasses, keyed by command name
    handlers = CommandHandlerBase.registry

    # used to retrieve a set of all commands and their required parameters
    def get_all_definitions(self):
        return [command.to_definition() for command in self.handlers.values()]

    # used to retrieve a set of commands based on a particular user's permissions
    def get_available_definitions(self, request):
        return [command.to_definition() for command in self.handlers.values()
                if command.validate_auth(request) and command.validate_permissions(request)]

    # method to check if a handler exists for the command
    def has_handler(self, command_name):
        return command_name in self.handlers

    # returns the appropriate handler
    def get_handler(self, command_name):
        return self.handlers[command_name]

    # handles the dispatching and execution of a command
    def dispatch(self, request):
        """Validate the incoming request step by step, then execute its command.

        Returns whatever the handler's handle() returns, or an AjaxMixin error
        response at the first validation step that fails.
        """
        # merge uploaded files and POST fields into one mutable mapping
        command_data = request.FILES.copy()
        command_data.update(request.POST.copy())
        # make sure they actually specified a command in the request
        if not 'command' in command_data:
            return self.error("No command parameter was received.")
        # retrieving the name of the command
        # NOTE(review): `json` is not imported explicitly in this file's visible
        # imports — presumably re-exported by a wildcard import above; verify.
        command_name = json.loads(command_data.pop('command')[0])
        # make sure a valid handler strategy exists.
        if not self.has_handler(command_name):
            return self.error("No command handler exists for the requested command")
        # retrieving the class for the command handler
        handler_class = self.get_handler(command_name)
        # First, check if the user needs to be authenticated
        if not handler_class.validate_auth(request):
            return self.error("You must be an authenticated user to perform the requested command.", status=401)
        # Next, check will be for the necessary permissions
        if not handler_class.validate_permissions(request):
            return self.error("Your user does not have the correct permissions for the requested command.", status=403)
        # Next, check if required request parameters exist for the command
        valid, message = handler_class.validate_param_existence(command_data)
        if not valid: return self.error(message)
        # Lastly, try to build an object with the right data types and attribute names
        valid, result = handler_class.validate_param_types(command_data)
        if not valid: return self.error(result)
        # creating an object with an attribute for each of the command params since type validation was okay
        data = type(command_name, (object,), result)()
        '''
        Once we get here, everything that can be known outside of the specific business logic
        for their request has been validated. It is still possible for the command to not
        succeed, but that part must be handled by the command handler itself and cannot be
        reasonably determined via the static context.
        '''
        # nothing more can be done off of the static class definition, so go ahead and instantiate
        handler = handler_class(request)
        # performing any normalization prior to running custom validators
        normalized_data, valid, errors = handler.perform_data_normalization(data)
        # NOTE(review): `self.errors` (plural) here and below, while every
        # earlier branch uses `self.error` — confirm AjaxMixin defines both,
        # otherwise this is a typo that raises AttributeError on this path.
        if not valid: return self.errors(errors)
        # performing any last validation based on custom validation methods defined on the handler
        valid, result = handler.perform_custom_validation(normalized_data)
        if not valid: return self.errors(result)
        # pass responsibility off to the actual handle method
        return handler.handle(normalized_data)
class Solution:
    def numberOfSteps(self, num: int) -> int:
        """Count the steps to reduce ``num`` to zero (halve if even, else subtract 1).

        Each '1' bit costs a subtraction, and every bit except the highest
        costs one halving, hence popcount + bit-length - 1.
        """
        bits = format(num, "b")
        set_bits = bits.count("1")
        return set_bits + len(bits) - 1
import re
from org.zaproxy.zap.extension.script import ScriptVars
''' find posible Server Side Template Injection using Hunt Methodology'''
def scan(ps, msg, src):
    """Passive-scan hook: flag parameter names the HUNT methodology
    associates with Server Side Template Injection.

    Runs under ZAP's Jython (Python 2) scripting engine; `ps` is the
    passive-scan plugin object and `msg` the HTTP message under review.
    """
    # Test the request and/or response here
    if ScriptVars.getGlobalVar("hunt_pssti") is None:
        # First run: seed the global string used to de-duplicate alerts.
        ScriptVars.setGlobalVar("hunt_pssti","init")
    # History type <= 2 — NOTE(review): presumably limits scanning to
    # proxied/manual traffic; confirm against ZAP's HistoryReference constants.
    if (msg and msg.getHistoryRef().getHistoryType()<=2):
        # Change to a test which detects the vulnerability
        # raiseAlert(risk, int reliability, String name, String description, String uri,
        # String param, String attack, String otherInfo, String solution, String evidence,
        # int cweId, int wascId, HttpMessage msg)
        # risk: 0: info, 1: low, 2: medium, 3: high
        # reliability: 0: falsePositive, 1: suspicious, 2: warning
        # Parameter-name substrings HUNT associates with SSTI.
        words = ['template','preview','id','view','activity','name','content','redirect']
        result = []
        uri = msg.getRequestHeader().getURI().toString()
        params = msg.getParamNames()
        params = [element.lower() for element in params]
        # Scheme + host + path (query string stripped) — combined with the
        # parameter list it forms the de-duplication key.
        base_uri = re.search('https?:\/\/([^/]+)(\/[^?#=]*)',uri)
        if base_uri:
            base_uri = str( base_uri.group() )
            regex = base_uri + str(params)
            globalvar = ScriptVars.getGlobalVar("hunt_pssti")
            # Only alert once per (base URI, parameter set) combination.
            if regex not in globalvar:
                ScriptVars.setGlobalVar("hunt_pssti","" + globalvar + ' , ' + regex)
                for x in words:
                    y = re.compile(".*"+x)
                    # Python 2 idiom: filter() returns a list, so len() works.
                    if len(filter(y.match, params))>0:
                        result.append(x)
                if result:
                    ps.raiseAlert(1, 1, 'Possible SSTI', 'HUNT located the ' + ','.join(result) + ' parameter inside of your application traffic. The ' + ','.join(result) + ' parameter is most often susceptible to Server Side Template Injection. HUNT recommends further manual analysis of the parameter in question.',
                        msg.getRequestHeader().getURI().toString(),
                        ','.join(result), '', msg.getRequestHeader().toString()+'\n'+msg.getRequestBody().toString(), '', '', 0, 0, msg);
|
# DaVinci job configuration for 2012 simulated (Monte Carlo) data.
from Configurables import DaVinci
DaVinci().DataType = '2012'      # 2012 data-taking conditions
DaVinci().Simulation = True      # input is MC, not real data
DaVinci().TupleFile = 'mc.root'  # output ROOT file for ntuples

from Configurables import LHCbApp
# Conditions/geometry database tags for this sample.
# NOTE(review): tags must match the MC production's — verify in bookkeeping.
LHCbApp().CondDBtag = "sim-20130522-1-vc-md100"
LHCbApp().DDDBtag = "dddb-20130829-1"
|
import numpy as np
class Conv1DT:
def __call__(self, x, weight, bias, stride, padding):
x = x.T
w = weight.transpose(2, 0, 1)
kernel_len, _, outchan = w.shape
temp = np.dot(x, w) # [inlen, kernel, outchan]
temp = temp.reshape(-1, outchan)
temp = np.pad(temp, ((kernel_len//2, kernel_len//2), (0, 0)))
temp = temp.reshape(-1, 2, kernel_len//2, outchan)
output = temp.sum(1).reshape(-1, outchan) + bias[None]
return output[padding:-padding].T
|
import sys
import oisin
# Corpus to balladize: default file, overridable by the first CLI argument.
filename = sys.argv[1] if len(sys.argv) > 1 else "input/alices.txt"

# Generate the ballad: iambic tetrameter, aabbccdd rhyme scheme,
# third-order model, progressing 50 units per step.
oisin.balladize(
    oisin.load(filename),
    meter=oisin.iambic(4, 'aabbccdd'),
    step=50,
    order=3)
|
# -*- coding: utf-8 -*-
# Copyright 2017 Leo Moll and Dominik Schlösser
#
# -- Imports ------------------------------------------------
import xbmcgui
import xbmcplugin
from resources.lib.film import Film
from resources.lib.settings import Settings
# -- Classes ------------------------------------------------
class FilmUI( Film ):
    """Presents Film objects as playable entries in a Kodi directory listing."""

    def __init__( self, plugin, sortmethods = None ):
        # plugin supplies the addon handle, localized strings and URL builder
        self.plugin = plugin
        self.handle = plugin.addon_handle
        self.settings = Settings()
        # sort methods offered for the listing; default: title/date/duration/size
        self.sortmethods = sortmethods if sortmethods is not None else [ xbmcplugin.SORT_METHOD_TITLE, xbmcplugin.SORT_METHOD_DATE, xbmcplugin.SORT_METHOD_DURATION, xbmcplugin.SORT_METHOD_SIZE ]
        # whether titles include the show name / channel name (set in Begin)
        self.showshows = False
        self.showchannels = False

    def Begin( self, showshows, showchannels ):
        # Prepare a new directory listing; call before the first Add().
        self.showshows = showshows
        self.showchannels = showchannels
        # xbmcplugin.setContent( self.handle, 'tvshows' )
        for method in self.sortmethods:
            xbmcplugin.addSortMethod( self.handle, method )

    def Add( self, alttitle = None, totalItems = None ):
        # Add the current film (attributes inherited from Film) to the listing.
        # alttitle overrides the composed title; totalItems is forwarded to
        # Kodi as a listing-size hint.
        # get the best url: HD when present and preferred, else default, else SD
        videourl = self.url_video_hd if ( self.url_video_hd != "" and self.settings.preferhd ) else self.url_video if self.url_video != "" else self.url_video_sd
        videohds = " (HD)" if ( self.url_video_hd != "" and self.settings.preferhd ) else ""
        # exit if no url supplied
        if videourl == "":
            return
        if alttitle is not None:
            resultingtitle = alttitle
        else:
            if self.showshows:
                resultingtitle = self.show + ': ' + self.title
            else:
                resultingtitle = self.title
        if self.showchannels:
            resultingtitle += ' [' + self.channel + ']'
        infoLabels = {
            'title' : resultingtitle + videohds,
            'sorttitle' : resultingtitle.lower(),
            'tvshowtitle' : self.show,
            'plot' : self.description
        }
        # size is converted to bytes here — presumably stored in MB; confirm in Film
        if self.size > 0:
            infoLabels['size'] = self.size * 1024 * 1024
        if self.seconds > 0:
            infoLabels['duration'] = self.seconds
        if self.aired is not None:
            airedstring = '%s' % self.aired
            # a year of 1970 is skipped — presumably an epoch placeholder for "unknown"
            if airedstring[:4] != '1970':
                # convert ISO yyyy-mm-dd into Kodi's dd-mm-yyyy 'date' label
                infoLabels['date'] = airedstring[8:10] + '-' + airedstring[5:7] + '-' + airedstring[:4]
                infoLabels['aired'] = airedstring
                infoLabels['dateadded'] = airedstring
        # channel logo shipped with the addon doubles as thumb and icon
        icon = 'special://home/addons/' + self.plugin.addon_id + '/resources/icons/' + self.channel.lower() + '-m.png'
        li = xbmcgui.ListItem( resultingtitle )
        li.setInfo( type = 'video', infoLabels = infoLabels )
        li.setProperty( 'IsPlayable', 'true' )
        li.setArt( {
            'thumb': icon,
            'icon': icon
        } )
        # create context menu
        contextmenu = []
        if self.size > 0:
            # Download video
            contextmenu.append( (
                self.plugin.language( 30921 ),
                'RunPlugin({})'.format( self.plugin.build_url( { 'mode': "download", 'id': self.id, 'quality': 1 } ) )
            ) )
        if self.url_video_hd:
            # Download HD video (quality 2) — original comment incorrectly said SD
            contextmenu.append( (
                self.plugin.language( 30923 ),
                'RunPlugin({})'.format( self.plugin.build_url( { 'mode': "download", 'id': self.id, 'quality': 2 } ) )
            ) )
        if self.url_video_sd:
            # Download SD video
            contextmenu.append( (
                self.plugin.language( 30922 ),
                'RunPlugin({})'.format( self.plugin.build_url( { 'mode': "download", 'id': self.id, 'quality': 0 } ) )
            ) )
        # Add to queue
        # TODO: Enable later
        # contextmenu.append( (
        #     self.plugin.language( 30924 ),
        #     'RunPlugin({})'.format( self.plugin.build_url( { 'mode': "enqueue", 'id': self.id } ) )
        # ) )
        li.addContextMenuItems( contextmenu )
        if totalItems is not None:
            xbmcplugin.addDirectoryItem(
                handle = self.handle,
                url = videourl,
                listitem = li,
                isFolder = False,
                totalItems = totalItems
            )
        else:
            xbmcplugin.addDirectoryItem(
                handle = self.handle,
                url = videourl,
                listitem = li,
                isFolder = False
            )

    def End( self ):
        # Close the listing; no disk caching so refreshed data always shows.
        xbmcplugin.endOfDirectory( self.handle, cacheToDisc = False )
|
import logging
log = logging.getLogger(__name__)
from uasyncio import Loop as loop, sleep_ms
from board import act_led
class Blinker:
    """Blinks the activity LED; the blink period can be updated over MQTT."""

    def __init__(self, mqclient, topic, period):
        self.mqclient = mqclient
        self.topic = topic    # MQTT topic carrying new period values
        self.period = period  # blink period in milliseconds

    async def blinker(self):
        """Toggle the LED forever: half the current period on, half off."""
        while True:
            act_led(1)
            await sleep_ms(self.period // 2)
            act_led(0)
            await sleep_ms(self.period // 2)

    def set_period(self, millisecs):
        """Set the blink period in milliseconds.

        FIX: this method was named `period`, which is shadowed by the
        instance attribute assigned in __init__ — `self.period` was an int,
        so the method could never be called. Renamed to `set_period`
        (the old name was unreachable, so no caller can break).
        """
        self.period = millisecs

    def on_msg(self, topic, msg, retained, qos, dup):
        """MQTT callback: accept a new period (50..10000 ms) on our topic."""
        topic = str(topic, "utf-8")
        log.info("on_msg: %s (len=%d ret=%d qos=%d dup=%d)", topic, len(msg), retained, qos, dup)
        if topic == self.topic:
            try:
                p = int(msg)
                if p < 50 or p > 10000:
                    raise ValueError("period must be in 50..10000")
                self.period = p
            except Exception as e:
                # Bad payloads are logged and ignored; the blinker keeps running.
                log.exc(e, "Invalid incoming message")

    async def hook_it_up(self, mqtt):
        """Register the message callback and subscribe to our topic."""
        log.info("hook_it_up called")
        mqtt.on_msg(self.on_msg)
        await mqtt.client.subscribe(self.topic, qos=1)
        log.info("Subscribed to %s", self.topic)
# start is called by the module launcher loop in main.py; it is passed a handle onto the MQTT
# dispatcher and to the "blinky" config dict in board_config.py
def start(mqtt, config):
    """Create a Blinker from config and wire it into the MQTT dispatcher."""
    blink_period = config.get("period", 1000)  # milliseconds; defaults to 1000
    log.info("start called, period=%d", blink_period)
    blink = Blinker(mqtt.client, config["topic"], blink_period)
    loop.create_task(blink.blinker())
    mqtt.on_init(blink.hook_it_up(mqtt))
|
if __name__ == "__main__":
    # Exercise selector: each branch demonstrates a different OOP concept
    # (1: destructors, 2: object references vs. names, 3: inheritance).
    main = int(input("Que ejercicio deseas realizar(1,2 o 3):"))
    if main == 1:
        ciudad = str(input("En que ciudad quieres que se produzca la tragedia(NewYork o LosAngeles)?: "))
        edificios = []
        persona = []
        if ciudad == "NewYork":
            from Clases.eldia2 import NuevaYork
            total = NuevaYork(ciudad, edificios, persona)
            print(total.Edificios())
            print(total.Persona())
            print("Ha habido fuertes consecuencias...:")
            # Deleting the name triggers the instance's destructor (__del__).
            del total
            # print(total.Edificios())  # would now raise NameError: `total` was destroyed above
        elif ciudad == "LosAngeles":
            from Clases.eldia2 import LosAngeles
            total1 = LosAngeles(ciudad,edificios,persona)
            print(total1.Edificios())
            print(total1.Persona())
            print("Ha habido fuertes consecuencias...:")
            del total1
            # print(total1.Edificios())  # would now raise NameError: `total1` was destroyed above
        else:
            print("Esa ciudad no esta disponible")
            pass
    elif main == 2:
        from Clases.inmortal import *
        yin = Yin()
        yang = Yang()
        # yin keeps a reference to yang, so deleting the `yang` name below
        # does not destroy the object itself.
        yin.yang = yang
        print(yang)
        print(yang is yin.yang)
        del(yang)
        print("?")
    elif main ==3:
        from Clases.herencia import *
        # pared_* presumably come from the star import of Clases.herencia — TODO confirm
        casa = Casa([pared_norte, pared_oeste, pared_sur, pared_cortina])
        print(casa.superficie_acristalada())
import requests
import json
from requests.auth import HTTPBasicAuth
class Elastic(object):
    """
    Minimal Elasticsearch client for the `gpn_01` index.

    Wraps the cluster's REST API with `requests`, using HTTP basic auth and a
    private root certificate for TLS verification.
    """

    def __init__(self):
        self.ELASTIC_SEARCH_URL = "https://c-c9qc3vfnqlo9av21d79a.rw.mdb.yandexcloud.net:9200/"
        # NOTE(review): credentials are placeholders; supply real ones via
        # configuration, not source control.
        self.LOGIN = "***"
        self.PASSWORD = "***"
        self.INDEX_NAME = "gpn_01"
        # Single source of truth for the TLS root certificate path.
        # BUG FIX: create_index() previously used "./root.crt" while every
        # other method used "./lib/ut/root.crt".
        self.CERT_PATH = "./lib/ut/root.crt"
        self.headers = {"Content-Type": "application/json"}

    def get_results(self, query, size=100):
        """Run a query_string search and return the decoded JSON response.

        Args:
            query: Lucene query-string expression.
            size: maximum number of hits to return (default 100).
        """
        params = {
            "query": {
                "query_string": {
                    "query": query
                }
            }
        }
        resp = requests.get(url=self.ELASTIC_SEARCH_URL + "_search",
                            params={"size": size},  # let requests build/escape the query string
                            data=json.dumps(params),
                            verify=self.CERT_PATH,
                            auth=HTTPBasicAuth(self.LOGIN, self.PASSWORD),
                            headers=self.headers)
        return resp.json()

    def insert_document(self, doc):
        """Index a single document into INDEX_NAME; prints the server reply."""
        resp = requests.post(url=self.ELASTIC_SEARCH_URL + self.INDEX_NAME + "/_doc",
                             data=json.dumps(doc), verify=self.CERT_PATH,
                             auth=HTTPBasicAuth(self.LOGIN, self.PASSWORD),
                             headers=self.headers)
        print(resp.text)

    def delete_index(self):
        """Drop the whole index (irreversible); prints the response object."""
        resp = requests.delete(url=self.ELASTIC_SEARCH_URL + self.INDEX_NAME,
                               verify=self.CERT_PATH,
                               auth=HTTPBasicAuth(self.LOGIN, self.PASSWORD),
                               headers=self.headers)
        print(resp)

    def create_index(self):
        """Create INDEX_NAME with a fixed single-shard text/date mapping."""
        index = {
            "settings": {
                "number_of_shards": 1
            },
            "mappings": {
                "properties": {
                    "name": {"type": "text"},
                    "address": {"type": "text"},
                    "emails": {"type": "text"},
                    "phones": {"type": "text"},
                    "url": {"type": "text"},
                    "description": {"type": "text"},
                    "additional_info": {"type": "text"},
                    "categories": {"type": "text"},
                    "update_date_time": {"type": "date"}
                }
            }
        }
        resp = requests.put(url=self.ELASTIC_SEARCH_URL + self.INDEX_NAME,
                            verify=self.CERT_PATH, data=json.dumps(index),
                            auth=HTTPBasicAuth(self.LOGIN, self.PASSWORD),
                            headers=self.headers)
        print(resp)
# delete_index()
# create_index()
# if __name__ == '__main__':
# elastic = Elastic()
# print(elastic.get_results('Газ'))
# insert_document(doc)
|
import os
import os.path
import sys

# Repository root is two directories above this file.
restools_root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Make the vendored sub-projects importable.
for _subproject in ('thequickmath', 'pycomsdk', 'reducedmodels'):
    sys.path.append(os.path.join(restools_root_path, _subproject))
|
"""
Tools to automate browsing (requires Firefox)
"""
try:
from urllib.parse import quote_plus # Python 3
except ImportError:
from urllib import quote_plus # Python 2
import os
import traceback
from sys import platform as _platform
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from orion.classes.api import Api
from orion import settings, log
GOOGLE_URL = 'https://www.google.com/search?gs_ivs=1&q='

# Keyboard modifier: CMD on macOS, CTRL everywhere else.
OS_KEY = Keys.COMMAND if _platform == "darwin" else Keys.CONTROL

# Browser keyboard shortcuts built from the platform modifier.
NW_TAB = OS_KEY + 'n'        # new tab
CL_TAB = OS_KEY + 'w'        # close tab
SW_TAB = OS_KEY + Keys.TAB   # switch tab
class VoiceBrowseApi(Api):
    """Voice-driven browser automation built on Selenium.

    Prefers Chrome (via settings.CHROME_DRIVER) and falls back to Firefox.
    The webdriver is created lazily on the first open() call.
    """

    def __init__(self):
        self.key = 'voice_browse_api'
        self.driver = None  # lazily created by open()

    def open(self, url=None, new_tab=False):
        """Start the browser if necessary and optionally navigate to *url*."""
        if not self.driver:
            try:
                if not os.path.isfile(settings.CHROME_DRIVER):
                    # BUG FIX: raise with a diagnostic message instead of a
                    # bare `raise Exception`.
                    raise Exception('chromedriver not found: %s' % settings.CHROME_DRIVER)
                self.driver = webdriver.Chrome(settings.CHROME_DRIVER)
            except Exception:
                # Chrome unavailable — log the traceback and fall back to
                # Firefox (geckodriver expected on PATH).
                print(traceback.format_exc())
                self.driver = webdriver.Firefox()
        else:
            if new_tab:
                log.info('Opening new tab...')
                self.driver.find_element_by_tag_name('body').send_keys(NW_TAB)
        if url:
            if not url[0:4] == 'http':
                # crude normalization: strip spaces, force https scheme
                url = 'https://' + url.replace(' ', '')
            self.driver.get(url)

    def close(self):
        """Quit the browser entirely."""
        if self.driver:
            self.driver.quit()
            self.driver = None

    def close_tab(self):
        """Close the current tab; drop the driver if the browser went away."""
        if self.driver:
            self.driver.find_element_by_tag_name('body').send_keys(CL_TAB)
            try:
                # BUG FIX: `current_url` is a property, not a method. The old
                # code called it, so the resulting TypeError was swallowed by a
                # bare `except:` and self.driver was reset even while the
                # browser was still open.
                self.driver.current_url
            except Exception:
                self.driver = None
                log.debug('Browser was closed.')

    def switch_tab(self):
        if self.driver:
            self.driver.find_element_by_tag_name('body').send_keys(SW_TAB)

    def maximize(self):
        if self.driver:
            self.driver.maximize_window()

    def search(self, q):
        """Answer a query with a Google search in the current tab."""
        log.info('Answering with Google...')
        self.open(GOOGLE_URL + quote_plus(q), new_tab=False)

    def clear(self):
        # NOTE(review): switch_to_active_element() is the legacy selenium API;
        # kept for compatibility with the selenium version this file targets.
        if self.driver:
            self.driver.switch_to_active_element().clear()

    def type(self, text):
        if self.driver:
            self.driver.switch_to_active_element().send_keys(text + Keys.RETURN)

    def click(self):
        if self.driver:
            self.driver.switch_to_active_element().click()
|
from ..utils import Object
class UpdateBasicGroupFullInfo(Object):
    """
    Some data from basicGroupFullInfo has been changed

    Attributes:
        ID (:obj:`str`): ``UpdateBasicGroupFullInfo``

    Args:
        basic_group_id (:obj:`int`):
            Identifier of a basic group
        basic_group_full_info (:class:`telegram.api.types.basicGroupFullInfo`):
            New full information about the group

    Returns:
        Update

    Raises:
        :class:`telegram.Error`
    """
    # TDLib type tag for this update
    ID = "updateBasicGroupFullInfo"

    def __init__(self, basic_group_id, basic_group_full_info, **kwargs):
        self.basic_group_id = basic_group_id  # int
        self.basic_group_full_info = basic_group_full_info  # BasicGroupFullInfo

    @staticmethod
    def read(q: dict, *args) -> "UpdateBasicGroupFullInfo":
        # Deserialize the nested full-info object, then assemble the update.
        group_id = q.get('basic_group_id')
        full_info = Object.read(q.get('basic_group_full_info'))
        return UpdateBasicGroupFullInfo(group_id, full_info)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created: 2018
Author: A. P. Naik
Description: Code to fit galactic disc component for SPARC galaxies, and store
parameters in text file
"""
import spam
from scipy.optimize import curve_fit
import numpy as np
from scipy.constants import G
from scipy.special import i0, i1, k0, k1
from scipy.constants import parsec as pc
Msun = 1.989e+30  # solar mass [kg]
kpc = 1e+3*pc     # kiloparsec [m] (pc from scipy.constants, in metres)
def v_disc_anal(R, sigma_0, R_d):
    """
    Analytic expression for rotation speed of exponential disc, from Binney and
    Tremaine.

    sigma_0 in kg/m^2, R_d and R in m. Returns v in km/s
    """
    # dimensionless radius used by the Bessel-function combination
    y = R / (2.0 * R_d)
    prefactor = 4.0 * np.pi * G * sigma_0 * R_d
    bessel_term = i0(y) * k0(y) - i1(y) * k1(y)
    # abs() guards against tiny negative values from floating-point round-off;
    # 1e-3 converts m/s -> km/s
    return 1e-3 * np.sqrt(np.abs(prefactor * y ** 2 * bessel_term))
# text file to store disc parameters; the context manager guarantees the file
# is closed (and buffered rows flushed) even if a fit raises.
with open("SPARCData/stellar_disc_parameters.txt", 'w') as fitfile:
    # loop over galaxies
    for name in spam.data.names_full:
        galaxy = spam.data.SPARCGalaxy(name)
        R_d = galaxy.disc_scale  # metres
        sigma_0 = Msun*galaxy.disc_SB  # kg/m^2

        # fit; constrain parameters to within an order of magnitude of the
        # photometric estimates
        bounds = ((0.1*sigma_0, 0.1*R_d), (10*sigma_0, 10*R_d))
        popt, pcov = curve_fit(v_disc_anal, galaxy.R*kpc, galaxy.v_disc,
                               p0=(0.5*sigma_0, R_d), bounds=bounds)
        # one tab-separated line per galaxy: name, sigma_0, R_d
        fitfile.write(galaxy.name+'\t'+str(popt[0])+'\t'+str(popt[1])+'\n')
|
from .base_backend import BaseBackend
class MlpBackend(BaseBackend):
    """MLP forecasting backend holding a multi-step and a one-step-96 model."""

    def __init__(self, inpmulti, hidmulti, outmulti, learning_rate, inp96, hid96, out96, path, buffsize, mean, std, statspath):
        from neupre.misc.builders import build_model_mlp
        super(MlpBackend, self).__init__(int(buffsize))
        # Two independent networks: one for multi-step forecasts, one for
        # one-step-ahead forecasts on 96-value profiles.
        self.model_multistep = build_model_mlp(inpmulti, hidmulti, outmulti)
        self.model_onestep96 = build_model_mlp(inp96, hid96, out96)
        self.initialize(False, path, mean, std, statspath)

    def train(self):
        """Fit both models on the buffered training data; return the fit logs."""
        histories = []
        for model, features, targets in (
                (self.model_multistep, self.X_train_multistep, self.y_train_multistep),
                (self.model_onestep96, self.X_train_onestep96, self.y_train_onestep96)):
            histories.append(model.fit(features, targets, batch_size=10, nb_epoch=2,
                                       validation_split=0.1, verbose=1))
        return histories

    def predict(self, X_test_multistep, X_test_onestep96):
        """Return [multi-step predictions, one-step-96 predictions]."""
        return [self.model_multistep.predict(X_test_multistep),
                self.model_onestep96.predict(X_test_onestep96)]
|
from .handler import Handler
class NameHandler(Handler):
    """Handler registered under the "name" key; echoes whatever it receives."""

    def handle(self, *args):
        # Tag the echoed arguments so the output source is identifiable.
        tag = "NH"
        print(tag + str(args))

    def name(self):
        # Key under which this handler is looked up.
        return "name"
|
# Demo driver: run Lucas-Kanade optical-flow interpolation on both sample
# datasets. Runs at import time; N presumably controls the number of
# interpolated frames - TODO confirm against lucas_kanade package.
from lucas_kanade.corridor import corridor_interpolation
from lucas_kanade.sphere import sphere_interpolation

# Corridor dataset interpolation
corridor_interpolation(N=5)

# Sphere dataset interpolation
sphere_interpolation(N=5)
|
"""
Tests for the `csvvalidator` module.
"""
import logging
import math
from csvvalidator import CSVValidator, VALUE_CHECK_FAILED, MESSAGES,\
HEADER_CHECK_FAILED, RECORD_LENGTH_CHECK_FAILED, enumeration, match_pattern,\
search_pattern, number_range_inclusive, number_range_exclusive,\
VALUE_PREDICATE_FALSE, RECORD_PREDICATE_FALSE, UNIQUE_CHECK_FAILED,\
ASSERT_CHECK_FAILED, UNEXPECTED_EXCEPTION, write_problems, datetime_string,\
RECORD_CHECK_FAILED, datetime_range_inclusive, datetime_range_exclusive,\
RecordError
# logging setup
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
# NOTE(review): only the handler's level is set to DEBUG; the logger's own
# level is left at its default (WARNING on a fresh root hierarchy), so
# DEBUG/INFO records may still be filtered before reaching the handler -
# confirm this is intended.
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s - %(funcName)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# convenience aliases used by the tests below
debug, info, warning, error = logger.debug, logger.info, logger.warning, logger.error
def test_value_checks():
    """Some very simple tests of value checks."""

    # a simple validator to be tested
    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)
    validator.add_value_check('foo', int)
    validator.add_value_check('bar', float)

    # some test data
    data = (
        ('foo', 'bar'),  # row 1 - header row
        ('12', '3.4'),   # row 2 - valid
        ('1.2', '3.4'),  # row 3 - foo invalid
        ('abc', '3.4'),  # row 4 - foo invalid
        ('12', 'abc'),   # row 5 - bar invalid
        ('', '3.4'),     # row 6 - foo invalid (empty)
        ('12', ''),      # row 7 - bar invalid (empty)
        ('abc', 'def')   # row 8 - both invalid
    )

    # run the validator on the test data
    problems = validator.validate(data)
    assert len(problems) == 7

    # N.B., expect row and column indices start from 1
    assert len([p for p in problems if p['row'] == 2]) == 0  # row 2 should be valid

    # expected problems per row, in report order:
    # (column index, field name, bad value, offending record)
    expected = {
        3: [(1, 'foo', '1.2', ('1.2', '3.4'))],
        4: [(1, 'foo', 'abc', ('abc', '3.4'))],
        5: [(2, 'bar', 'abc', ('12', 'abc'))],
        6: [(1, 'foo', '', ('', '3.4'))],
        7: [(2, 'bar', '', ('12', ''))],
        8: [(1, 'foo', 'abc', ('abc', 'def')),
            (2, 'bar', 'def', ('abc', 'def'))]
    }
    for row, expectations in expected.items():
        row_problems = [p for p in problems if p['row'] == row]
        assert len(row_problems) == len(expectations)
        for p, (column, field, value, record) in zip(row_problems, expectations):
            assert p['column'] == column  # report column index
            assert p['field'] == field    # report field name
            assert p['code'] == VALUE_CHECK_FAILED  # default problem code for value checks
            assert p['message'] == MESSAGES[VALUE_CHECK_FAILED]  # default message
            assert p['value'] == value    # report bad value
            assert p['record'] == record  # report record
def test_header_check():
    """Test the header checks work."""

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)
    validator.add_header_check()  # use default code and message
    validator.add_header_check(code='X1', message='custom message')  # provide custom code and message

    # header says 'baz' where 'bar' is expected
    data = (
        ('foo', 'baz'),
        ('123', '456')
    )

    problems = validator.validate(data)
    # one problem is reported per registered header check
    assert len(problems) == 2

    p0 = problems[0]
    assert p0['code'] == HEADER_CHECK_FAILED
    assert p0['message'] == MESSAGES[HEADER_CHECK_FAILED]
    assert p0['record'] == ('foo', 'baz')
    assert p0['missing'] == set(['bar'])
    assert p0['unexpected'] == set(['baz'])
    assert p0['row'] == 1

    p1 = problems[1]
    assert p1['code'] == 'X1'
    assert p1['message'] == 'custom message'
    assert p1['missing'] == set(['bar'])
    assert p1['unexpected'] == set(['baz'])
    assert p1['record'] == ('foo', 'baz')
    assert p1['row'] == 1
def test_ignore_lines():
    """Test instructions to ignore lines works."""

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)
    validator.add_header_check()
    validator.add_value_check('foo', int)
    validator.add_value_check('bar', float)

    # first two lines are preamble to be skipped; the header is on line 3
    data = (
        ('ignore', 'me', 'please'),
        ('ignore', 'me', 'too', 'please'),
        ('foo', 'baz'),
        ('1.2', 'abc')
    )

    problems = validator.validate(data, ignore_lines=2)
    assert len(problems) == 3

    # problem rows are reported relative to the whole input, including the
    # ignored lines
    header_problems = [p for p in problems if p['code'] == HEADER_CHECK_FAILED]
    assert len(header_problems) == 1
    assert header_problems[0]['row'] == 3

    value_problems = [p for p in problems if p['code'] == VALUE_CHECK_FAILED]
    assert len(value_problems) == 2
    for p in value_problems:
        assert p['row'] == 4
def test_record_length_checks():
    """Test the record length checks."""

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)
    validator.add_record_length_check()  # test default code and message
    validator.add_record_length_check('X2', 'custom message')

    data = (
        ('foo', 'bar'),
        ('12', '3.4'),
        ('12',),  # be careful with syntax for singleton tuples
        ('12', '3.4', 'spong')
    )

    problems = validator.validate(data)
    # each of the two bad rows is reported once per registered length check
    assert len(problems) == 4, len(problems)

    # find problems reported under default code
    default_problems = [p for p in problems if p['code'] == RECORD_LENGTH_CHECK_FAILED]
    assert len(default_problems) == 2
    d0 = default_problems[0]
    assert d0['message'] == MESSAGES[RECORD_LENGTH_CHECK_FAILED]
    assert d0['row'] == 3
    assert d0['record'] == ('12',)
    assert d0['length'] == 1
    d1 = default_problems[1]
    assert d1['message'] == MESSAGES[RECORD_LENGTH_CHECK_FAILED]
    assert d1['row'] == 4
    assert d1['record'] == ('12', '3.4', 'spong')
    assert d1['length'] == 3

    # find problems reported under custom code
    custom_problems = [p for p in problems if p['code'] == 'X2']
    assert len(custom_problems) == 2
    c0 = custom_problems[0]
    assert c0['message'] == 'custom message'
    assert c0['row'] == 3
    assert c0['record'] == ('12',)
    assert c0['length'] == 1
    c1 = custom_problems[1]
    assert c1['message'] == 'custom message'
    assert c1['row'] == 4
    assert c1['record'] == ('12', '3.4', 'spong')
    assert c1['length'] == 3
def test_value_checks_with_missing_values():
    """
    Establish expected behaviour for value checks where there are missing values
    in the records.
    """

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)
    validator.add_value_check('bar', float)

    data = (
        ('foo', 'bar'),
        ('12',)  # this is missing value for bar, what happens to value check?
    )

    problems = validator.validate(data)

    # missing values are ignored - use record length checks to find these
    assert len(problems) == 0
def test_value_check_enumeration():
    """Test value checks with the enumeration() function."""

    field_names = ('foo', 'bar', 'baz')
    validator = CSVValidator(field_names)
    # define an enumeration directly with arguments
    validator.add_value_check('bar', enumeration('M', 'F'))
    # define an enumeration by passing in a list or tuple
    flavours = ('chocolate', 'vanilla', 'strawberry')
    validator.add_value_check('baz', enumeration(flavours))

    data = (
        ('foo', 'bar', 'baz'),
        ('1', 'M', 'chocolate'),
        ('2', 'F', 'maple pecan'),
        ('3', 'X', 'strawberry')
    )

    problems = validator.validate(data)
    assert len(problems) == 2

    # problems are reported in row order: bad flavour first, then bad sex code
    p0 = problems[0]
    assert p0['code'] == VALUE_CHECK_FAILED
    assert p0['row'] == 3
    assert p0['column'] == 3
    assert p0['field'] == 'baz'
    assert p0['value'] == 'maple pecan'
    assert p0['record'] == ('2', 'F', 'maple pecan')

    p1 = problems[1]
    assert p1['code'] == VALUE_CHECK_FAILED
    assert p1['row'] == 4
    assert p1['column'] == 2
    assert p1['field'] == 'bar'
    assert p1['value'] == 'X'
    assert p1['record'] == ('3', 'X', 'strawberry')
def test_value_check_match_pattern():
    """Test value checks with the match_pattern() function."""

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)
    # BUG FIX: use a raw string for the regex so `\d` is not interpreted as a
    # string escape sequence (a DeprecationWarning/SyntaxWarning on modern
    # Python).
    validator.add_value_check('bar', match_pattern(r'\d{4}-\d{2}-\d{2}'))

    data = (
        ('foo', 'bar'),
        ('1', '1999-01-01'),
        ('2', 'abcd-ef-gh'),
        ('3', 'a1999-01-01'),
        ('4', '1999-01-01a')  # this is valid - pattern attempts to match at beginning of line
    )

    problems = validator.validate(data)
    assert len(problems) == 2, len(problems)
    for p in problems:
        assert p['code'] == VALUE_CHECK_FAILED
    assert problems[0]['row'] == 3
    assert problems[1]['row'] == 4
def test_value_check_search_pattern():
    """Test value checks with the search_pattern() function."""

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)
    # BUG FIX: raw string for the regex, as for match_pattern above.
    validator.add_value_check('bar', search_pattern(r'\d{4}-\d{2}-\d{2}'))

    data = (
        ('foo', 'bar'),
        ('1', '1999-01-01'),
        ('2', 'abcd-ef-gh'),
        ('3', 'a1999-01-01'),  # this is valid - pattern attempts to match anywhere in line
        ('4', '1999-01-01a')   # this is valid - pattern attempts to match anywhere in line
    )

    problems = validator.validate(data)
    assert len(problems) == 1, len(problems)
    assert problems[0]['code'] == VALUE_CHECK_FAILED
    assert problems[0]['row'] == 3
def test_value_check_numeric_ranges():
    """Test value checks with numerical range functions."""

    field_names = ('foo', 'bar', 'baz', 'quux')
    validator = CSVValidator(field_names)
    # each field exercises one combination of inclusive/exclusive and int/float
    validator.add_value_check('foo', number_range_inclusive(2, 6, int))
    validator.add_value_check('bar', number_range_exclusive(2, 6, int))
    validator.add_value_check('baz', number_range_inclusive(2.0, 6.3, float))
    validator.add_value_check('quux', number_range_exclusive(2.0, 6.3, float))

    data = (
        ('foo', 'bar', 'baz', 'quux'),
        ('2', '3', '2.0', '2.1'),  # valid
        ('1', '3', '2.0', '2.1'),  # foo invalid
        ('2', '2', '2.0', '2.1'),  # bar invalid
        ('2', '3', '1.9', '2.1'),  # baz invalid
        ('2', '3', '2.0', '2.0')   # quux invalid
    )

    problems = validator.validate(data)
    assert len(problems) == 4, len(problems)
    for p in problems:
        assert p['code'] == VALUE_CHECK_FAILED
    assert problems[0]['row'] == 3 and problems[0]['field'] == 'foo'
    assert problems[1]['row'] == 4 and problems[1]['field'] == 'bar'
    assert problems[2]['row'] == 5 and problems[2]['field'] == 'baz'
    assert problems[3]['row'] == 6 and problems[3]['field'] == 'quux'
def test_value_checks_datetime():
    """Test value checks with datetimes."""

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)
    validator.add_value_check('bar', datetime_string('%Y-%m-%d'))

    data = (
        ('foo', 'bar'),
        ('A', '1999-09-09'),   # valid
        ('B', '1999-13-09'),   # invalid month
        ('C', '1999-09-32'),   # invalid day
        ('D', '1999-09-09ss')  # invalid string
    )

    problems = validator.validate(data)
    assert len(problems) == 3, problems
    for p in problems:
        assert p['code'] == VALUE_CHECK_FAILED
    assert problems[0]['row'] == 3 and problems[0]['field'] == 'bar'
    assert problems[1]['row'] == 4 and problems[1]['field'] == 'bar'
    assert problems[2]['row'] == 5 and problems[2]['field'] == 'bar'
def test_value_checks_datetime_range():
    """Test value checks with datetime ranges."""

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)
    # both range checks are registered on the same field, so a value can fail
    # once (exclusive only) or twice (both)
    validator.add_value_check('bar', datetime_range_inclusive('1999-09-09',
                                                              '2009-09-09',
                                                              '%Y-%m-%d'))
    validator.add_value_check('bar', datetime_range_exclusive('1999-09-09',
                                                              '2009-09-09',
                                                              '%Y-%m-%d'))

    data = (
        ('foo', 'bar'),
        ('A', '1999-09-10'),  # valid
        ('B', '1999-09-09'),  # invalid (exclusive)
        ('C', '2009-09-09'),  # invalid (exclusive)
        ('D', '1999-09-08'),  # invalid (both)
        ('E', '2009-09-10')   # invalid (both)
    )

    problems = validator.validate(data)
    assert len(problems) == 6, len(problems)
    assert len([p for p in problems if p['row'] == 3]) == 1
    assert len([p for p in problems if p['row'] == 4]) == 1
    assert len([p for p in problems if p['row'] == 5]) == 2
    assert len([p for p in problems if p['row'] == 6]) == 2
def test_value_predicates():
    """Test the use of value predicates."""

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)
    # predicates return False (rather than raising) to signal a problem
    foo_predicate = lambda v: math.pow(float(v), 2) < 64
    validator.add_value_predicate('foo', foo_predicate)
    bar_predicate = lambda v: math.sqrt(float(v)) > 8
    validator.add_value_predicate('bar', bar_predicate, 'X3', 'custom message')

    data = (
        ('foo', 'bar'),
        ('4', '81'),  # valid
        ('9', '81'),  # foo invalid
        ('4', '49')   # bar invalid
    )

    problems = validator.validate(data)
    assert len(problems) == 2, len(problems)

    p0 = problems[0]
    assert p0['code'] == VALUE_PREDICATE_FALSE
    assert p0['message'] == MESSAGES[VALUE_PREDICATE_FALSE]
    assert p0['row'] == 3
    assert p0['column'] == 1
    assert p0['field'] == 'foo'
    assert p0['value'] == '9'
    assert p0['record'] == ('9', '81')

    p1 = problems[1]
    assert p1['code'] == 'X3'
    assert p1['message'] == 'custom message'
    assert p1['row'] == 4
    assert p1['column'] == 2
    assert p1['field'] == 'bar'
    assert p1['value'] == '49'
    assert p1['record'] == ('4', '49')
def test_record_checks():
    """Test the use of record checks."""

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)

    # record checks signal a problem by raising RecordError
    def foo_gt_bar(r):
        foo = int(r['foo'])
        bar = int(r['bar'])
        if foo < bar:
            raise RecordError
    validator.add_record_check(foo_gt_bar)  # use default code and message

    def foo_gt_2bar(r):
        foo = int(r['foo'])
        bar = int(r['bar'])
        if foo < 2 * bar:
            raise RecordError('X4', 'custom message')
    validator.add_record_check(foo_gt_2bar)

    data = (
        ('foo', 'bar'),
        ('7', '3'),  # valid
        ('5', '3'),  # invalid - not foo_gt_2bar
        ('1', '3')   # invalid - both predicates false
    )

    problems = validator.validate(data)
    n = len(problems)
    assert n == 3, n

    row3_problems = [p for p in problems if p['row'] == 3]
    assert len(row3_problems) == 1
    p = row3_problems[0]
    assert p['code'] == 'X4'
    assert p['message'] == 'custom message'
    assert p['record'] == ('5', '3')

    row4_problems = [p for p in problems if p['row'] == 4]
    assert len(row4_problems) == 2

    row4_problems_default = [p for p in row4_problems if p['code'] == RECORD_CHECK_FAILED]
    assert len(row4_problems_default) == 1
    p = row4_problems_default[0]
    assert p['message'] == MESSAGES[RECORD_CHECK_FAILED]
    assert p['record'] == ('1', '3')

    row4_problems_custom = [p for p in row4_problems if p['code'] == 'X4']
    assert len(row4_problems_custom) == 1
    p = row4_problems_custom[0]
    assert p['message'] == 'custom message'
    assert p['record'] == ('1', '3')
def test_record_predicates():
    """Test the use of record predicates."""

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)

    # record predicates return a boolean rather than raising
    def foo_gt_bar(r):
        return int(r['foo']) > int(r['bar'])  # expect record will be a dictionary
    validator.add_record_predicate(foo_gt_bar)  # use default code and message

    def foo_gt_2bar(r):
        return int(r['foo']) > 2 * int(r['bar'])
    validator.add_record_predicate(foo_gt_2bar, 'X4', 'custom message')

    data = (
        ('foo', 'bar'),
        ('7', '3'),  # valid
        ('5', '3'),  # invalid - not foo_gt_2bar
        ('1', '3')   # invalid - both predicates false
    )

    problems = validator.validate(data)
    n = len(problems)
    assert n == 3, n

    row3_problems = [p for p in problems if p['row'] == 3]
    assert len(row3_problems) == 1
    p = row3_problems[0]
    assert p['code'] == 'X4'
    assert p['message'] == 'custom message'
    assert p['record'] == ('5', '3')

    row4_problems = [p for p in problems if p['row'] == 4]
    assert len(row4_problems) == 2

    row4_problems_default = [p for p in row4_problems if p['code'] == RECORD_PREDICATE_FALSE]
    assert len(row4_problems_default) == 1
    p = row4_problems_default[0]
    assert p['message'] == MESSAGES[RECORD_PREDICATE_FALSE]
    assert p['record'] == ('1', '3')

    row4_problems_custom = [p for p in row4_problems if p['code'] == 'X4']
    assert len(row4_problems_custom) == 1
    p = row4_problems_custom[0]
    assert p['message'] == 'custom message'
    assert p['record'] == ('1', '3')
def test_unique_checks():
    """Test the uniqueness checks."""

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)
    validator.add_unique_check('foo')

    # '1' appears twice in the foo column
    data = (
        ('foo', 'bar'),
        ('1', 'A'),
        ('2', 'B'),
        ('1', 'C')
    )

    problems = validator.validate(data)
    n = len(problems)
    assert n == 1, n

    # only the second occurrence of the duplicate is reported
    p = problems[0]
    assert p['code'] == UNIQUE_CHECK_FAILED
    assert p['message'] == MESSAGES[UNIQUE_CHECK_FAILED]
    assert p['row'] == 4
    assert p['key'] == 'foo'
    assert p['value'] == '1'
    assert p['record'] == ('1', 'C')
def test_unique_checks_with_variable_record_lengths():
    """Test the uniqueness checks still work when record lengths vary."""

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)
    validator.add_unique_check('bar')

    data = (
        ('foo', 'bar'),
        ('1', 'A'),
        # BUG FIX: ('2') is a plain string, not a tuple - the trailing comma
        # is required for a singleton tuple (as this file itself notes in
        # test_record_length_checks).
        ('2',),
        ('3', 'A')
    )

    problems = validator.validate(data)
    n = len(problems)
    assert n == 1, n

    p = problems[0]
    assert p['code'] == UNIQUE_CHECK_FAILED
    assert p['message'] == MESSAGES[UNIQUE_CHECK_FAILED]
    assert p['row'] == 4
    assert p['key'] == 'bar'
    assert p['value'] == 'A'
    assert p['record'] == ('3', 'A')
def test_compound_unique_checks():
    """Test the uniqueness checks on compound keys."""

    field_names = ('foo', 'bar')
    validator = CSVValidator(field_names)
    validator.add_unique_check(('foo', 'bar'), 'X5', 'custom message')

    # only the ('1', 'A') pair repeats; '1' and 'A' alone repeating is fine
    data = (
        ('foo', 'bar'),
        ('1', 'A'),
        ('2', 'B'),
        ('1', 'B'),
        ('2', 'A'),
        ('1', 'A')
    )

    problems = validator.validate(data)
    n = len(problems)
    assert n == 1, n

    p = problems[0]
    assert p['code'] == 'X5'
    assert p['message'] == 'custom message'
    assert p['row'] == 6
    assert p['key'] == ('foo', 'bar')
    assert p['value'] == ('1', 'A')
    assert p['record'] == ('1', 'A')
def test_compound_unique_checks_with_variable_record_lengths():
    """Test the uniqueness checks on compound keys when record lengths vary."""

    field_names = ('something', 'foo', 'bar')
    validator = CSVValidator(field_names)
    validator.add_unique_check(('foo', 'bar'), 'X5', 'custom message')

    data = (
        ('something', 'foo', 'bar'),
        ('Z', '1', 'A'),
        ('Z', '2', 'B'),
        # BUG FIX: ('Z') was a plain string, not a singleton tuple.
        ('Z',),
        ('Z', '2', 'A'),
        ('Z', '1', 'A')
    )

    problems = validator.validate(data)
    # BUG FIX: removed stray Python 2 debug statement `print problems`, which
    # is a syntax error under Python 3 and noise under Python 2.
    n = len(problems)
    assert n == 1, n

    p = problems[0]
    assert p['code'] == 'X5'
    assert p['message'] == 'custom message'
    assert p['row'] == 6
    assert p['key'] == ('foo', 'bar')
    assert p['value'] == ('1', 'A')
    assert p['record'] == ('Z', '1', 'A')
def test_assert_methods():
    """Test use of 'assert' methods."""

    # define a custom validator class; methods named assert_* are picked up
    # automatically and run against each record
    class MyValidator(CSVValidator):

        def __init__(self, threshold):
            field_names = ('foo', 'bar')
            super(MyValidator, self).__init__(field_names)
            self._threshold = threshold

        def assert_foo_plus_bar_gt_threshold(self, r):
            assert int(r['foo']) + int(r['bar']) > self._threshold  # use default error code and message

        def assert_foo_times_bar_gt_threshold(self, r):
            # a (code, message) tuple as the assert message customizes the report
            assert int(r['foo']) * int(r['bar']) > self._threshold, ('X6', 'custom message')

    validator = MyValidator(42)

    data = (
        ('foo', 'bar'),
        ('33', '10'),  # valid
        ('7', '8'),    # invalid (foo + bar less than threshold)
        ('3', '4'),    # invalid (both)
    )

    problems = validator.validate(data)
    n = len(problems)
    assert n == 3, n

    row3_problems = [p for p in problems if p['row'] == 3]
    assert len(row3_problems) == 1
    p = row3_problems[0]
    assert p['code'] == ASSERT_CHECK_FAILED
    assert p['message'] == MESSAGES[ASSERT_CHECK_FAILED]
    assert p['record'] == ('7', '8')

    row4_problems = [p for p in problems if p['row'] == 4]
    assert len(row4_problems) == 2

    row4_problems_custom = [p for p in row4_problems if p['code'] == 'X6']
    assert len(row4_problems_custom) == 1, row4_problems
    p = row4_problems_custom[0]
    assert p['message'] == 'custom message'
    assert p['record'] == ('3', '4')

    row4_problems_default = [p for p in row4_problems if p['code'] == ASSERT_CHECK_FAILED]
    assert len(row4_problems_default) == 1
    p = row4_problems_default[0]
    assert p['message'] == MESSAGES[ASSERT_CHECK_FAILED]
    assert p['record'] == ('3', '4')
def test_check_methods():
    """Test use of 'check' methods."""

    # define a custom validator class; methods named check_* are picked up
    # automatically and signal problems by raising RecordError
    class MyValidator(CSVValidator):

        def __init__(self, threshold):
            field_names = ('foo', 'bar')
            super(MyValidator, self).__init__(field_names)
            self._threshold = threshold

        def check_foo_plus_bar_gt_threshold(self, r):
            if int(r['foo']) + int(r['bar']) <= self._threshold:
                raise RecordError  # use default error code and message

        def check_foo_times_bar_gt_threshold(self, r):
            if int(r['foo']) * int(r['bar']) <= self._threshold:
                raise RecordError('X6', 'custom message')

    validator = MyValidator(42)

    data = (
        ('foo', 'bar'),
        ('33', '10'),  # valid
        ('7', '8'),    # invalid (foo + bar less than threshold)
        ('3', '4'),    # invalid (both)
    )

    problems = validator.validate(data)
    n = len(problems)
    assert n == 3, n

    row3_problems = [p for p in problems if p['row'] == 3]
    assert len(row3_problems) == 1
    p = row3_problems[0]
    assert p['code'] == RECORD_CHECK_FAILED
    assert p['message'] == MESSAGES[RECORD_CHECK_FAILED]
    assert p['record'] == ('7', '8')

    row4_problems = [p for p in problems if p['row'] == 4]
    assert len(row4_problems) == 2

    row4_problems_custom = [p for p in row4_problems if p['code'] == 'X6']
    assert len(row4_problems_custom) == 1
    p = row4_problems_custom[0]
    assert p['message'] == 'custom message'
    assert p['record'] == ('3', '4')

    row4_problems_default = [p for p in row4_problems if p['code'] == RECORD_CHECK_FAILED]
    assert len(row4_problems_default) == 1
    p = row4_problems_default[0]
    assert p['message'] == MESSAGES[RECORD_CHECK_FAILED]
    assert p['record'] == ('3', '4')
def test_each_and_finally_assert_methods():
    """Test 'each' and 'finally_assert' methods."""

    # define a custom validator class; each_* methods run per record to
    # accumulate state, finally_assert_* methods run once at the end
    class MyValidator(CSVValidator):

        def __init__(self, threshold):
            field_names = ('foo', 'bar')
            super(MyValidator, self).__init__(field_names)
            self._threshold = threshold
            self._bars = []
            self._count = 0

        def each_store_bar(self, r):
            n = float(r['bar'])
            self._bars.append(n)
            self._count += 1

        def finally_assert_mean_bar_gt_threshold(self):
            mean = sum(self._bars) / self._count
            assert mean > self._threshold, ('X7', 'custom message')

    data = [
        ['foo', 'bar'],
        ['A', '2'],
        ['B', '3'],
        ['C', '7']
    ]

    # mean bar is 4, below the threshold of 5 -> one problem
    validator = MyValidator(5.0)
    problems = validator.validate(data)
    assert len(problems) == 1
    p = problems[0]
    assert p['code'] == 'X7'
    assert p['message'] == 'custom message'

    # adding a high value lifts the mean above the threshold -> no problems
    data.append(['D', '10'])
    validator = MyValidator(5.0)
    problems = validator.validate(data)
    assert len(problems) == 0
def test_exception_handling():
    """Establish expectations for exception handling."""
    validator = CSVValidator(('foo', 'bar'))

    # one genuine failure ('ab' is not an int) ...
    validator.add_value_check('foo', int)

    # ... plus nine hooks that each raise instead of validating
    def bad_value_check(v):
        """I am a buggy value check."""
        raise Exception('something went wrong')

    validator.add_value_check('bar', bad_value_check)

    def bad_value_predicate(v):
        """I am a buggy value predicate."""
        raise Exception('something went wrong')

    validator.add_value_predicate('bar', bad_value_predicate)

    def bad_record_check(r):
        """I am a buggy record check."""
        raise Exception('something went wrong')

    validator.add_record_check(bad_record_check)

    def bad_record_predicate(r):
        """I am a buggy record predicate."""
        raise Exception('something went wrong')

    validator.add_record_predicate(bad_record_predicate)

    def bad_assert(r):
        """I am a buggy assert."""
        raise Exception('something went wrong')

    validator.assert_something_buggy = bad_assert

    def bad_check(r):
        """I am a buggy check."""
        raise Exception('something went wrong')

    validator.check_something_buggy = bad_check

    def bad_each(r):
        """I am a buggy each."""
        raise Exception('something went wrong')

    validator.each_something_buggy = bad_each

    def bad_finally_assert():
        """I am a buggy finally assert."""
        raise Exception('something went wrong')

    validator.finally_assert_something_buggy = bad_finally_assert

    def bad_skip(record):
        """I am a buggy skip."""
        raise Exception('something went wrong')

    validator.add_skip(bad_skip)

    rows = (
        ('foo', 'bar'),
        ('ab', '56')
    )

    # with exception reporting disabled, only the genuine check failure shows
    problems = validator.validate(rows, report_unexpected_exceptions=False)
    assert len(problems) == 1, len(problems)
    assert problems[0]['row'] == 2

    # by default, exceptions are reported as problems
    problems = validator.validate(rows)
    assert len(problems) == 10, len(problems)
    unexpected = [p for p in problems if p['code'] == UNEXPECTED_EXCEPTION]
    assert len(unexpected) == 9
    for problem in unexpected:
        exc = problem['exception']
        assert exc.args[0] == 'something went wrong', exc.args
def test_summarize():
    """Test use of summarize option."""
    validator = CSVValidator(('foo', 'bar'))

    def foo_exceeds_bar(rec):
        return int(rec['foo']) > int(rec['bar'])

    validator.add_record_predicate(foo_exceeds_bar)

    rows = (
        ('foo', 'bar'),
        ('7', '3'),  # valid
        ('1', '3')   # invalid
    )

    problems = validator.validate(rows, summarize=True)
    assert len(problems) == 1, len(problems)
    problem = problems[0]
    assert problem['code'] == RECORD_PREDICATE_FALSE
    # summarized problems carry only the code, nothing else
    for key in ('message', 'row', 'record'):
        assert key not in problem
def test_limit():
    """Test the use of the limit option."""
    validator = CSVValidator(('foo', 'bar'))

    def foo_exceeds_bar(rec):
        return int(rec['foo']) > int(rec['bar'])

    validator.add_record_predicate(foo_exceeds_bar)

    rows = (
        ('foo', 'bar'),
        ('7', '3'),  # valid
        ('1', '3'),  # invalid
        ('2', '3')   # invalid
    )

    # reporting stops once the limit is reached
    limited = validator.validate(rows, limit=1)
    assert len(limited) == 1, len(limited)

    # without a limit, both invalid rows are reported
    unlimited = validator.validate(rows)
    assert len(unlimited) == 2, len(unlimited)
def test_context():
    """Test passing in of context information."""
    validator = CSVValidator(('foo', 'bar'))

    def foo_exceeds_bar(rec):
        return int(rec['foo']) > int(rec['bar'])

    validator.add_record_predicate(foo_exceeds_bar)

    rows = (
        ('foo', 'bar'),
        ('7', '3'),  # valid
        ('1', '3')   # invalid
    )

    ctx = {'info': 'file X'}
    problems = validator.validate(rows, context=ctx)
    assert len(problems) == 1, len(problems)
    # the supplied context is attached to each reported problem
    assert problems[0]['context'] == ctx
def test_write_problems():
    """Test writing problems as restructured text."""

    # minimal in-memory sink; write_problems only needs a .write() method
    class MockFile(object):

        def __init__(self):
            self.content = ''

        def write(self, s):
            self.content += s

    file = MockFile()

    # two fully-detailed problems, each with per-problem context
    problems = [
        {
            'code': 'X1',
            'message': 'invalid foo',
            'row': 2,
            'field': 'foo',
            'context': {
                'info': 'interesting'
            }
        },
        {
            'code': 'X2',
            'message': 'invalid bar',
            'row': 3,
            'field': 'bar',
            'context': {
                'info': 'very interesting'
            }
        }
    ]

    # NOTE(review): blank lines inside this expected report may have been
    # lost in transit - confirm against write_problems' actual output
    expectation = """
=================
Validation Report
=================
Problems
========
X1 - invalid foo
----------------
:field: foo
:row: 2
:info: interesting
X2 - invalid bar
----------------
:field: bar
:row: 3
:info: very interesting
Summary
=======
Found 2 problems in total.
:X1: 1
:X2: 1
"""
    write_problems(problems, file)
    assert file.content == expectation, file.content
def test_write_problems_summarize():
    """Test writing a problem summary as restructured text."""

    # minimal in-memory sink; write_problems only needs a .write() method
    class MockFile(object):

        def __init__(self):
            self.content = ''

        def write(self, s):
            self.content += s

    file = MockFile()

    # three problems: one X1 and two X2, to exercise the per-code counts
    problems = [
        {
            'code': 'X1',
            'message': 'invalid foo',
            'row': 2,
            'field': 'foo',
            'context': {
                'info': 'interesting'
            }
        },
        {
            'code': 'X2',
            'message': 'invalid bar',
            'row': 3,
            'field': 'bar',
            'context': {
                'info': 'very interesting'
            }
        },
        {
            'code': 'X2',
            'message': 'invalid bar',
            'row': 4,
            'field': 'bar',
            'context': {
                'info': 'very very interesting'
            }
        }
    ]

    # summarize=True: only the Summary section is emitted
    # NOTE(review): blank lines inside this expected report may have been
    # lost in transit - confirm against write_problems' actual output
    expectation = """
=================
Validation Report
=================
Summary
=======
Found 3 problems in total.
:X1: 1
:X2: 2
"""
    write_problems(problems, file, summarize=True)
    assert file.content == expectation, file.content
def test_write_problems_with_limit():
    """Test writing problems with a limit as restructured text."""

    # minimal in-memory sink; write_problems only needs a .write() method
    class MockFile(object):

        def __init__(self):
            self.content = ''

        def write(self, s):
            self.content += s

    file = MockFile()

    problems = [
        {
            'code': 'X1',
            'message': 'invalid foo',
            'row': 2,
            'field': 'foo',
            'context': {
                'info': 'interesting'
            }
        },
        {
            'code': 'X2',
            'message': 'invalid bar',
            'row': 3,
            'field': 'bar',
            'context': {
                'info': 'very interesting'
            }
        }
    ]

    # limit=1: only the first problem is written; the summary says
    # "at least" because reporting was truncated
    # NOTE(review): blank lines inside this expected report may have been
    # lost in transit - confirm against write_problems' actual output
    expectation = """
=================
Validation Report
=================
Problems
========
X1 - invalid foo
----------------
:field: foo
:row: 2
:info: interesting
Summary
=======
Found at least 1 problem in total.
:X1: 1
"""
    write_problems(problems, file, limit=1)
    assert file.content == expectation, file.content
def test_skips():
    """Test skip functions."""
    validator = CSVValidator(('foo', 'bar'))
    validator.add_record_length_check()
    validator.add_value_check('foo', int)

    def skip_pragma(record):
        # rows beginning with '##' are treated as comments and ignored
        return record[0].startswith('##')

    validator.add_skip(skip_pragma)

    rows = (
        ('foo', 'bar'),
        ('1', 'X'),
        ('## this row', 'should be', 'skipped'),
        ('3', 'Y')
    )

    # the pragma row would fail both checks, but it is skipped entirely
    problems = validator.validate(rows)
    assert not problems, problems
def test_guard_conditions():
    """Test some guard conditions."""
    validator = CSVValidator(('foo', 'bar'))

    # a value check must be callable; anything else is rejected up front
    raised = False
    try:
        validator.add_value_check('foo', 'i am not callable')
    except AssertionError:
        raised = True
    assert raised, 'expected exception'
|
import argparse
import asyncio
import logging
import rasa.utils
from policy import RestaurantPolicy
from rasa.core import utils
from rasa.core.agent import Agent
from rasa.core.policies.memoization import MemoizationPolicy
from rasa.core.policies.mapping_policy import MappingPolicy
logger = logging.getLogger(__name__)
class RestaurantAPI(object):
    """Stand-in restaurant search backend returning a canned result."""

    def search(self, info):
        """Return the name of a matching restaurant (hard-coded stub)."""
        return "papi's pizza place"
async def train_dialogue(domain_file="domain.yml",
                         model_path="models/dialogue",
                         training_data_file="data/stories.md"):
    """Train the dialogue model from stories, persist it and return the Agent."""
    # policy ensemble: memoization + mapping plus the custom restaurant policy
    policies = [
        MemoizationPolicy(max_history=3),
        MappingPolicy(),
        RestaurantPolicy(batch_size=100, epochs=400, validation_split=0.2),
    ]
    agent = Agent(domain_file, policies=policies)

    # load_data is a coroutine, hence the await
    stories = await agent.load_data(training_data_file)
    agent.train(stories)
    agent.persist(model_path)
    return agent
def train_nlu():
    """Train the NLU model from data/nlu.md and return the persisted model dir."""
    # local imports keep the rasa.nlu dependency out of module import time
    from rasa.nlu.training_data import load_data
    from rasa.nlu import config
    from rasa.nlu.model import Trainer

    training_examples = load_data('data/nlu.md')
    trainer = Trainer(config.load("config.yml"))
    trainer.train(training_examples)
    return trainer.persist('models/nlu/', fixed_model_name="current")
if __name__ == '__main__':
    rasa.utils.configure_colored_logging(loglevel="INFO")

    # single positional argument selects the action to perform
    parser = argparse.ArgumentParser(
        description='starts the bot')
    parser.add_argument(
        'task',
        choices=["train-nlu", "train-dialogue", "run"],
        help="what the bot should do - e.g. run or train?")
    task = parser.parse_args().task

    loop = asyncio.get_event_loop()

    # decide what to do based on first parameter of the script
    if task == "train-nlu":
        train_nlu()
    elif task == "train-dialogue":
        # train_dialogue is a coroutine; drive it to completion here
        loop.run_until_complete(train_dialogue())
    # NOTE(review): "run" is accepted by argparse but has no branch here,
    # so it currently exits silently - confirm whether the handler was
    # lost or lives elsewhere
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.